commit     7f0ef0267e20d62d45d527911a993b1e998f4968
tree       de51abc7da5903f59d83e23937f22420164c9477
parent     862f0012549110d6f2586bf54b52ed4540cbff3a
parent     9307c29524502c21f0e8a6d96d850b2f5bc0bd9a
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 20:12:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 20:12:13 -0400
Merge branch 'akpm' (updates from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
- various misc bits
- I've been patchmonkeying ocfs2 for a while, as Joel and Mark have been
distracted. There has been quite a bit of activity.
- About half the MM queue
- Some backlight bits
- Various lib/ updates
- checkpatch updates
- zillions more little rtc patches
- ptrace
- signals
- exec
- procfs
- rapidio
- nbd
- aoe
- pps
- memstick
- tools/testing/selftests updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (445 commits)
tools/testing/selftests: don't assume the x bit is set on scripts
selftests: add .gitignore for kcmp
selftests: fix clean target in kcmp Makefile
selftests: add .gitignore for vm
selftests: add hugetlbfstest
self-test: fix make clean
selftests: exit 1 on failure
kernel/resource.c: remove the unneeded assignment in function __find_resource
aio: fix wrong comment in aio_complete()
drivers/w1/slaves/w1_ds2408.c: add magic sequence to disable P0 test mode
drivers/memstick/host/r592.c: convert to module_pci_driver
drivers/memstick/host/jmb38x_ms: convert to module_pci_driver
pps-gpio: add device-tree binding and support
drivers/pps/clients/pps-gpio.c: convert to module_platform_driver
drivers/pps/clients/pps-gpio.c: convert to devm_* helpers
drivers/parport/share.c: use kzalloc
Documentation/accounting/getdelays.c: avoid strncpy in accounting tool
aoe: update internal version number to v83
aoe: update copyright date
aoe: perform I/O completions in parallel
...
429 files changed, 6278 insertions, 4934 deletions
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle index e00b8f0dde52..7fe0546c504a 100644 --- a/Documentation/CodingStyle +++ b/Documentation/CodingStyle | |||
@@ -389,7 +389,8 @@ Albeit deprecated by some people, the equivalent of the goto statement is | |||
389 | used frequently by compilers in form of the unconditional jump instruction. | 389 | used frequently by compilers in form of the unconditional jump instruction. |
390 | 390 | ||
391 | The goto statement comes in handy when a function exits from multiple | 391 | The goto statement comes in handy when a function exits from multiple |
392 | locations and some common work such as cleanup has to be done. | 392 | locations and some common work such as cleanup has to be done. If there is no |
393 | cleanup needed then just return directly. | ||
393 | 394 | ||
394 | The rationale is: | 395 | The rationale is: |
395 | 396 | ||
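
For context on the CodingStyle hunk above: the centralized-exit pattern it refers to looks roughly like the sketch below. This is illustrative only; BUF_SIZE and register_thing() are made-up placeholders, not code from the patch.

    /* Kernel-style sketch of goto-based cleanup (would need <linux/slab.h> etc.). */
    int setup_thing(struct device *dev)
    {
        char *buf;
        int err;

        buf = kmalloc(BUF_SIZE, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;        /* no cleanup needed: just return directly */

        err = register_thing(dev, buf);    /* hypothetical helper */
        if (err)
            goto out_free;         /* common cleanup handled in one place */

        return 0;

    out_free:
        kfree(buf);
        return err;
    }
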
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 67e7ab41c0a6..09e884e5b9f5 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl | |||
@@ -1955,12 +1955,17 @@ machines due to caching. | |||
1955 | </sect1> | 1955 | </sect1> |
1956 | </chapter> | 1956 | </chapter> |
1957 | 1957 | ||
1958 | <chapter id="apiref"> | 1958 | <chapter id="apiref-mutex"> |
1959 | <title>Mutex API reference</title> | 1959 | <title>Mutex API reference</title> |
1960 | !Iinclude/linux/mutex.h | 1960 | !Iinclude/linux/mutex.h |
1961 | !Ekernel/mutex.c | 1961 | !Ekernel/mutex.c |
1962 | </chapter> | 1962 | </chapter> |
1963 | 1963 | ||
1964 | <chapter id="apiref-futex"> | ||
1965 | <title>Futex API reference</title> | ||
1966 | !Ikernel/futex.c | ||
1967 | </chapter> | ||
1968 | |||
1964 | <chapter id="references"> | 1969 | <chapter id="references"> |
1965 | <title>Further reading</title> | 1970 | <title>Further reading</title> |
1966 | 1971 | ||
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c index f8ebcde43b17..c6a06b71594d 100644 --- a/Documentation/accounting/getdelays.c +++ b/Documentation/accounting/getdelays.c | |||
@@ -272,7 +272,7 @@ int main(int argc, char *argv[]) | |||
272 | char *logfile = NULL; | 272 | char *logfile = NULL; |
273 | int loop = 0; | 273 | int loop = 0; |
274 | int containerset = 0; | 274 | int containerset = 0; |
275 | char containerpath[1024]; | 275 | char *containerpath = NULL; |
276 | int cfd = 0; | 276 | int cfd = 0; |
277 | int forking = 0; | 277 | int forking = 0; |
278 | sigset_t sigset; | 278 | sigset_t sigset; |
@@ -299,7 +299,7 @@ int main(int argc, char *argv[]) | |||
299 | break; | 299 | break; |
300 | case 'C': | 300 | case 'C': |
301 | containerset = 1; | 301 | containerset = 1; |
302 | strncpy(containerpath, optarg, strlen(optarg) + 1); | 302 | containerpath = optarg; |
303 | break; | 303 | break; |
304 | case 'w': | 304 | case 'w': |
305 | logfile = strdup(optarg); | 305 | logfile = strdup(optarg); |
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index ddf4f93967a9..327acec6f90b 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt | |||
@@ -834,10 +834,9 @@ Test: | |||
834 | 834 | ||
835 | 12. TODO | 835 | 12. TODO |
836 | 836 | ||
837 | 1. Add support for accounting huge pages (as a separate controller) | 837 | 1. Make per-cgroup scanner reclaim not-shared pages first |
838 | 2. Make per-cgroup scanner reclaim not-shared pages first | 838 | 2. Teach controller to account for shared-pages |
839 | 3. Teach controller to account for shared-pages | 839 | 3. Start reclamation in the background when the limit is |
840 | 4. Start reclamation in the background when the limit is | ||
841 | not yet hit but the usage is getting closer | 840 | not yet hit but the usage is getting closer |
842 | 841 | ||
843 | Summary | 842 | Summary |
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index ba046b8fa92f..7bf1be20d93a 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt | |||
@@ -222,5 +222,4 @@ drivers/dma/: location for offload engine drivers | |||
222 | include/linux/async_tx.h: core header file for the async_tx api | 222 | include/linux/async_tx.h: core header file for the async_tx api |
223 | crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code | 223 | crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code |
224 | crypto/async_tx/async_memcpy.c: copy offload | 224 | crypto/async_tx/async_memcpy.c: copy offload |
225 | crypto/async_tx/async_memset.c: memory fill offload | ||
226 | crypto/async_tx/async_xor.c: xor and xor zero sum offload | 225 | crypto/async_tx/async_xor.c: xor and xor zero sum offload |
diff --git a/Documentation/devices.txt b/Documentation/devices.txt index b9015912bca6..23721d3be3e6 100644 --- a/Documentation/devices.txt +++ b/Documentation/devices.txt | |||
@@ -100,8 +100,7 @@ Your cooperation is appreciated. | |||
100 | 10 = /dev/aio Asynchronous I/O notification interface | 100 | 10 = /dev/aio Asynchronous I/O notification interface |
101 | 11 = /dev/kmsg Writes to this come out as printk's, reads | 101 | 11 = /dev/kmsg Writes to this come out as printk's, reads |
102 | export the buffered printk records. | 102 | export the buffered printk records. |
103 | 12 = /dev/oldmem Used by crashdump kernels to access | 103 | 12 = /dev/oldmem OBSOLETE - replaced by /proc/vmcore |
104 | the memory of the kernel that crashed. | ||
105 | 104 | ||
106 | 1 block RAM disk | 105 | 1 block RAM disk |
107 | 0 = /dev/ram0 First RAM disk | 106 | 0 = /dev/ram0 First RAM disk |
diff --git a/Documentation/devicetree/bindings/pps/pps-gpio.txt b/Documentation/devicetree/bindings/pps/pps-gpio.txt new file mode 100644 index 000000000000..40bf9c3564a5 --- /dev/null +++ b/Documentation/devicetree/bindings/pps/pps-gpio.txt | |||
@@ -0,0 +1,20 @@ | |||
1 | Device-Tree Bindings for a PPS Signal on GPIO | ||
2 | |||
3 | These properties describe a PPS (pulse-per-second) signal connected to | ||
4 | a GPIO pin. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: should be "pps-gpio" | ||
8 | - gpios: one PPS GPIO in the format described by ../gpio/gpio.txt | ||
9 | |||
10 | Optional properties: | ||
11 | - assert-falling-edge: when present, assert is indicated by a falling edge | ||
12 | (instead of by a rising edge) | ||
13 | |||
14 | Example: | ||
15 | pps { | ||
16 | compatible = "pps-gpio"; | ||
17 | gpios = <&gpio2 6 0>; | ||
18 | |||
19 | assert-falling-edge; | ||
20 | }; | ||
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index fd8d0d594fc7..fcc22c982a25 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -473,7 +473,8 @@ This file is only present if the CONFIG_MMU kernel configuration option is | |||
473 | enabled. | 473 | enabled. |
474 | 474 | ||
475 | The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG | 475 | The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG |
476 | bits on both physical and virtual pages associated with a process. | 476 | bits on both physical and virtual pages associated with a process, and the |
477 | soft-dirty bit on pte (see Documentation/vm/soft-dirty.txt for details). | ||
477 | To clear the bits for all the pages associated with the process | 478 | To clear the bits for all the pages associated with the process |
478 | > echo 1 > /proc/PID/clear_refs | 479 | > echo 1 > /proc/PID/clear_refs |
479 | 480 | ||
@@ -482,6 +483,10 @@ To clear the bits for the anonymous pages associated with the process | |||
482 | 483 | ||
483 | To clear the bits for the file mapped pages associated with the process | 484 | To clear the bits for the file mapped pages associated with the process |
484 | > echo 3 > /proc/PID/clear_refs | 485 | > echo 3 > /proc/PID/clear_refs |
486 | |||
487 | To clear the soft-dirty bit | ||
488 | > echo 4 > /proc/PID/clear_refs | ||
489 | |||
485 | Any other value written to /proc/PID/clear_refs will have no effect. | 490 | Any other value written to /proc/PID/clear_refs will have no effect. |
486 | 491 | ||
487 | The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags | 492 | The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags |
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 1f0ba30ae47e..f93a88250a44 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt | |||
@@ -559,7 +559,6 @@ your filesystem. The following members are defined: | |||
559 | struct address_space_operations { | 559 | struct address_space_operations { |
560 | int (*writepage)(struct page *page, struct writeback_control *wbc); | 560 | int (*writepage)(struct page *page, struct writeback_control *wbc); |
561 | int (*readpage)(struct file *, struct page *); | 561 | int (*readpage)(struct file *, struct page *); |
562 | int (*sync_page)(struct page *); | ||
563 | int (*writepages)(struct address_space *, struct writeback_control *); | 562 | int (*writepages)(struct address_space *, struct writeback_control *); |
564 | int (*set_page_dirty)(struct page *page); | 563 | int (*set_page_dirty)(struct page *page); |
565 | int (*readpages)(struct file *filp, struct address_space *mapping, | 564 | int (*readpages)(struct file *filp, struct address_space *mapping, |
@@ -581,6 +580,9 @@ struct address_space_operations { | |||
581 | /* migrate the contents of a page to the specified target */ | 580 | /* migrate the contents of a page to the specified target */ |
582 | int (*migratepage) (struct page *, struct page *); | 581 | int (*migratepage) (struct page *, struct page *); |
583 | int (*launder_page) (struct page *); | 582 | int (*launder_page) (struct page *); |
583 | int (*is_partially_uptodate) (struct page *, read_descriptor_t *, | ||
584 | unsigned long); | ||
585 | void (*is_dirty_writeback) (struct page *, bool *, bool *); | ||
584 | int (*error_remove_page) (struct mapping *mapping, struct page *page); | 586 | int (*error_remove_page) (struct mapping *mapping, struct page *page); |
585 | int (*swap_activate)(struct file *); | 587 | int (*swap_activate)(struct file *); |
586 | int (*swap_deactivate)(struct file *); | 588 | int (*swap_deactivate)(struct file *); |
@@ -612,13 +614,6 @@ struct address_space_operations { | |||
612 | In this case, the page will be relocated, relocked and if | 614 | In this case, the page will be relocated, relocked and if |
613 | that all succeeds, ->readpage will be called again. | 615 | that all succeeds, ->readpage will be called again. |
614 | 616 | ||
615 | sync_page: called by the VM to notify the backing store to perform all | ||
616 | queued I/O operations for a page. I/O operations for other pages | ||
617 | associated with this address_space object may also be performed. | ||
618 | |||
619 | This function is optional and is called only for pages with | ||
620 | PG_Writeback set while waiting for the writeback to complete. | ||
621 | |||
622 | writepages: called by the VM to write out pages associated with the | 617 | writepages: called by the VM to write out pages associated with the |
623 | address_space object. If wbc->sync_mode is WBC_SYNC_ALL, then | 618 | address_space object. If wbc->sync_mode is WBC_SYNC_ALL, then |
624 | the writeback_control will specify a range of pages that must be | 619 | the writeback_control will specify a range of pages that must be |
@@ -747,6 +742,20 @@ struct address_space_operations { | |||
747 | prevent redirtying the page, it is kept locked during the whole | 742 | prevent redirtying the page, it is kept locked during the whole |
748 | operation. | 743 | operation. |
749 | 744 | ||
745 | is_partially_uptodate: Called by the VM when reading a file through the | ||
746 | pagecache when the underlying blocksize != pagesize. If the required | ||
747 | block is up to date then the read can complete without needing the IO | ||
748 | to bring the whole page up to date. | ||
749 | |||
750 | is_dirty_writeback: Called by the VM when attempting to reclaim a page. | ||
751 | The VM uses dirty and writeback information to determine if it needs | ||
752 | to stall to allow flushers a chance to complete some IO. Ordinarily | ||
753 | it can use PageDirty and PageWriteback but some filesystems have | ||
754 | more complex state (unstable pages in NFS prevent reclaim) or | ||
755 | do not set those flags due to locking problems (jbd). This callback | ||
756 | allows a filesystem to indicate to the VM if a page should be | ||
757 | treated as dirty or writeback for the purposes of stalling. | ||
758 | |||
750 | error_remove_page: normally set to generic_error_remove_page if truncation | 759 | error_remove_page: normally set to generic_error_remove_page if truncation |
751 | is ok for this address space. Used for memory failure handling. | 760 | is ok for this address space. Used for memory failure handling. |
752 | Setting this implies you deal with pages going away under you, | 761 | Setting this implies you deal with pages going away under you, |
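
As a rough illustration of the new ->is_dirty_writeback() hook documented above, a filesystem with no special bookkeeping could simply report the generic page flags. This is a hedged sketch ("examplefs" is a made-up name), not code from any in-tree filesystem:

    /* Report per-page dirty/writeback state to the VM during reclaim.
     * A filesystem with more complex state (e.g. NFS unstable pages)
     * would consult its own bookkeeping here instead. */
    static void examplefs_is_dirty_writeback(struct page *page,
                                             bool *dirty, bool *writeback)
    {
        *dirty = PageDirty(page);
        *writeback = PageWriteback(page);
    }
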
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index 9c7fd988e299..bec123e466ae 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt | |||
@@ -47,19 +47,12 @@ parameter. Optionally the size of the ELF header can also be passed | |||
47 | when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax. | 47 | when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax. |
48 | 48 | ||
49 | 49 | ||
50 | With the dump-capture kernel, you can access the memory image, or "old | 50 | With the dump-capture kernel, you can access the memory image through |
51 | memory," in two ways: | 51 | /proc/vmcore. This exports the dump as an ELF-format file that you can |
52 | 52 | write out using file copy commands such as cp or scp. Further, you can | |
53 | - Through a /dev/oldmem device interface. A capture utility can read the | 53 | use analysis tools such as the GNU Debugger (GDB) and the Crash tool to |
54 | device file and write out the memory in raw format. This is a raw dump | 54 | debug the dump file. This method ensures that the dump pages are correctly |
55 | of memory. Analysis and capture tools must be intelligent enough to | 55 | ordered. |
56 | determine where to look for the right information. | ||
57 | |||
58 | - Through /proc/vmcore. This exports the dump as an ELF-format file that | ||
59 | you can write out using file copy commands such as cp or scp. Further, | ||
60 | you can use analysis tools such as the GNU Debugger (GDB) and the Crash | ||
61 | tool to debug the dump file. This method ensures that the dump pages are | ||
62 | correctly ordered. | ||
63 | 56 | ||
64 | 57 | ||
65 | Setup and Installation | 58 | Setup and Installation |
@@ -423,18 +416,6 @@ the following command: | |||
423 | 416 | ||
424 | cp /proc/vmcore <dump-file> | 417 | cp /proc/vmcore <dump-file> |
425 | 418 | ||
426 | You can also access dumped memory as a /dev/oldmem device for a linear | ||
427 | and raw view. To create the device, use the following command: | ||
428 | |||
429 | mknod /dev/oldmem c 1 12 | ||
430 | |||
431 | Use the dd command with suitable options for count, bs, and skip to | ||
432 | access specific portions of the dump. | ||
433 | |||
434 | To see the entire memory, use the following command: | ||
435 | |||
436 | dd if=/dev/oldmem of=oldmem.001 | ||
437 | |||
438 | 419 | ||
439 | Analysis | 420 | Analysis |
440 | ======== | 421 | ======== |
diff --git a/Documentation/rapidio/rapidio.txt b/Documentation/rapidio/rapidio.txt index a9c16c979da2..717f5aa388b1 100644 --- a/Documentation/rapidio/rapidio.txt +++ b/Documentation/rapidio/rapidio.txt | |||
@@ -73,28 +73,44 @@ data structure. This structure includes lists of all devices and local master | |||
73 | ports that form the same network. It also contains a pointer to the default | 73 | ports that form the same network. It also contains a pointer to the default |
74 | master port that is used to communicate with devices within the network. | 74 | master port that is used to communicate with devices within the network. |
75 | 75 | ||
76 | 2.5 Device Drivers | ||
77 | |||
78 | RapidIO device-specific drivers follow Linux Kernel Driver Model and are | ||
79 | intended to support specific RapidIO devices attached to the RapidIO network. | ||
80 | |||
81 | 2.6 Subsystem Interfaces | ||
82 | |||
83 | RapidIO interconnect specification defines features that may be used to provide | ||
84 | one or more common service layers for all participating RapidIO devices. These | ||
85 | common services may act separately from device-specific drivers or be used by | ||
86 | device-specific drivers. Example of such service provider is the RIONET driver | ||
87 | which implements Ethernet-over-RapidIO interface. Because only one driver can be | ||
88 | registered for a device, all common RapidIO services have to be registered as | ||
89 | subsystem interfaces. This allows to have multiple common services attached to | ||
90 | the same device without blocking attachment of a device-specific driver. | ||
91 | |||
76 | 3. Subsystem Initialization | 92 | 3. Subsystem Initialization |
77 | --------------------------- | 93 | --------------------------- |
78 | 94 | ||
79 | In order to initialize the RapidIO subsystem, a platform must initialize and | 95 | In order to initialize the RapidIO subsystem, a platform must initialize and |
80 | register at least one master port within the RapidIO network. To register mport | 96 | register at least one master port within the RapidIO network. To register mport |
81 | within the subsystem controller driver initialization code calls function | 97 | within the subsystem controller driver's initialization code calls function |
82 | rio_register_mport() for each available master port. | 98 | rio_register_mport() for each available master port. |
83 | 99 | ||
84 | RapidIO subsystem uses subsys_initcall() or device_initcall() to perform | ||
85 | controller initialization (depending on controller device type). | ||
86 | |||
87 | After all active master ports are registered with a RapidIO subsystem, | 100 | After all active master ports are registered with a RapidIO subsystem, |
88 | an enumeration and/or discovery routine may be called automatically or | 101 | an enumeration and/or discovery routine may be called automatically or |
89 | by user-space command. | 102 | by user-space command. |
90 | 103 | ||
104 | RapidIO subsystem can be configured to be built as a statically linked or | ||
105 | modular component of the kernel (see details below). | ||
106 | |||
91 | 4. Enumeration and Discovery | 107 | 4. Enumeration and Discovery |
92 | ---------------------------- | 108 | ---------------------------- |
93 | 109 | ||
94 | 4.1 Overview | 110 | 4.1 Overview |
95 | ------------ | 111 | ------------ |
96 | 112 | ||
97 | RapidIO subsystem configuration options allow users to specify enumeration and | 113 | RapidIO subsystem configuration options allow users to build enumeration and |
98 | discovery methods as statically linked components or loadable modules. | 114 | discovery methods as statically linked components or loadable modules. |
99 | An enumeration/discovery method implementation and available input parameters | 115 | An enumeration/discovery method implementation and available input parameters |
100 | define how any given method can be attached to available RapidIO mports: | 116 | define how any given method can be attached to available RapidIO mports: |
@@ -115,8 +131,8 @@ several methods to initiate an enumeration and/or discovery process: | |||
115 | endpoint waits for enumeration to be completed. If the specified timeout | 131 | endpoint waits for enumeration to be completed. If the specified timeout |
116 | expires the discovery process is terminated without obtaining RapidIO network | 132 | expires the discovery process is terminated without obtaining RapidIO network |
117 | information. NOTE: a timed out discovery process may be restarted later using | 133 | information. NOTE: a timed out discovery process may be restarted later using |
118 | a user-space command as it is described later if the given endpoint was | 134 | a user-space command as it is described below (if the given endpoint was |
119 | enumerated successfully. | 135 | enumerated successfully). |
120 | 136 | ||
121 | (b) Statically linked enumeration and discovery process can be started by | 137 | (b) Statically linked enumeration and discovery process can be started by |
122 | a command from user space. This initiation method provides more flexibility | 138 | a command from user space. This initiation method provides more flexibility |
@@ -138,15 +154,42 @@ When a network scan process is started it calls an enumeration or discovery | |||
138 | routine depending on the configured role of a master port: host or agent. | 154 | routine depending on the configured role of a master port: host or agent. |
139 | 155 | ||
140 | Enumeration is performed by a master port if it is configured as a host port by | 156 | Enumeration is performed by a master port if it is configured as a host port by |
141 | assigning a host device ID greater than or equal to zero. A host device ID is | 157 | assigning a host destination ID greater than or equal to zero. The host |
142 | assigned to a master port through the kernel command line parameter "riohdid=", | 158 | destination ID can be assigned to a master port using various methods depending |
143 | or can be configured in a platform-specific manner. If the host device ID for | 159 | on RapidIO subsystem build configuration: |
144 | a specific master port is set to -1, the discovery process will be performed | 160 | |
145 | for it. | 161 | (a) For a statically linked RapidIO subsystem core use command line parameter |
162 | "rapidio.hdid=" with a list of destination ID assignments in order of mport | ||
163 | device registration. For example, in a system with two RapidIO controllers | ||
164 | the command line parameter "rapidio.hdid=-1,7" will result in assignment of | ||
165 | the host destination ID=7 to the second RapidIO controller, while the first | ||
166 | one will be assigned destination ID=-1. | ||
167 | |||
168 | (b) If the RapidIO subsystem core is built as a loadable module, in addition | ||
169 | to the method shown above, the host destination ID(s) can be specified using | ||
170 | traditional methods of passing module parameter "hdid=" during its loading: | ||
171 | - from command line: "modprobe rapidio hdid=-1,7", or | ||
172 | - from modprobe configuration file using configuration command "options", | ||
173 | like in this example: "options rapidio hdid=-1,7". An example of modprobe | ||
174 | configuration file is provided in the section below. | ||
175 | |||
176 | NOTES: | ||
177 | (i) if the "hdid=" parameter is omitted, all available mports will be assigned | ||
178 | destination ID = -1; | ||
179 | (ii) the "hdid=" parameter in systems with multiple mports can have | ||
180 | destination ID assignments omitted from the end of list (default = -1). | ||
181 | |||
182 | If the host device ID for a specific master port is set to -1, the discovery | ||
183 | process will be performed for it. | ||
146 | 184 | ||
147 | The enumeration and discovery routines use RapidIO maintenance transactions | 185 | The enumeration and discovery routines use RapidIO maintenance transactions |
148 | to access the configuration space of devices. | 186 | to access the configuration space of devices. |
149 | 187 | ||
188 | NOTE: If RapidIO switch-specific device drivers are built as loadable modules | ||
189 | they must be loaded before enumeration/discovery process starts. | ||
190 | This requirement is caused by the fact that enumeration/discovery methods invoke | ||
191 | vendor-specific callbacks at early stages. | ||
192 | |||
150 | 4.2 Automatic Start of Enumeration and Discovery | 193 | 4.2 Automatic Start of Enumeration and Discovery |
151 | ------------------------------------------------ | 194 | ------------------------------------------------ |
152 | 195 | ||
@@ -266,7 +309,36 @@ method's module initialization routine calls rio_register_scan() to attach | |||
266 | an enumerator to a specified mport device (or devices). The basic enumerator | 309 | an enumerator to a specified mport device (or devices). The basic enumerator |
267 | implementation demonstrates this process. | 310 | implementation demonstrates this process. |
268 | 311 | ||
269 | 5. References | 312 | 4.6 Using Loadable RapidIO Switch Drivers |
313 | ----------------------------------------- | ||
314 | |||
315 | In the case when RapidIO switch drivers are built as loadable modules a user | ||
316 | must ensure that they are loaded before the enumeration/discovery starts. | ||
317 | This process can be automated by specifying pre- or post- dependencies in the | ||
318 | RapidIO-specific modprobe configuration file as shown in the example below. | ||
319 | |||
320 | File /etc/modprobe.d/rapidio.conf: | ||
321 | ---------------------------------- | ||
322 | |||
323 | # Configure RapidIO subsystem modules | ||
324 | |||
325 | # Set enumerator host destination ID (overrides kernel command line option) | ||
326 | options rapidio hdid=-1,2 | ||
327 | |||
328 | # Load RapidIO switch drivers immediately after rapidio core module was loaded | ||
329 | softdep rapidio post: idt_gen2 idtcps tsi57x | ||
330 | |||
331 | # OR : | ||
332 | |||
333 | # Load RapidIO switch drivers just before rio-scan enumerator module is loaded | ||
334 | softdep rio-scan pre: idt_gen2 idtcps tsi57x | ||
335 | |||
336 | -------------------------- | ||
337 | |||
338 | NOTE: In the example above, one of the "softdep" commands must be removed or | ||
339 | commented out to keep the required module loading sequence. | ||
340 | |||
341 | A. References | ||
270 | ------------- | 342 | ------------- |
271 | 343 | ||
272 | [1] RapidIO Trade Association. RapidIO Interconnect Specifications. | 344 | [1] RapidIO Trade Association. RapidIO Interconnect Specifications. |
diff --git a/Documentation/rapidio/sysfs.txt b/Documentation/rapidio/sysfs.txt index 19878179da4c..271438c0617f 100644 --- a/Documentation/rapidio/sysfs.txt +++ b/Documentation/rapidio/sysfs.txt | |||
@@ -40,6 +40,7 @@ device_rev - returns the device revision level | |||
40 | (see 4.1 for switch specific details) | 40 | (see 4.1 for switch specific details) |
41 | lprev - returns name of previous device (switch) on the path to the device | 41 | lprev - returns name of previous device (switch) on the path to the device |
42 | that owns this attribute | 42 | that owns this attribute |
43 | modalias - returns the device modalias | ||
43 | 44 | ||
44 | In addition to the files listed above, each device has a binary attribute file | 45 | In addition to the files listed above, each device has a binary attribute file |
45 | that allows read/write access to the device configuration registers using | 46 | that allows read/write access to the device configuration registers using |
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt index 32aa4002de4a..596b60c08b74 100644 --- a/Documentation/rtc.txt +++ b/Documentation/rtc.txt | |||
@@ -153,9 +153,10 @@ since_epoch: The number of seconds since the epoch according to the RTC | |||
153 | time: RTC-provided time | 153 | time: RTC-provided time |
154 | wakealarm: The time at which the clock will generate a system wakeup | 154 | wakealarm: The time at which the clock will generate a system wakeup |
155 | event. This is a one shot wakeup event, so must be reset | 155 | event. This is a one shot wakeup event, so must be reset |
156 | after wake if a daily wakeup is required. Format is either | 156 | after wake if a daily wakeup is required. Format is seconds since |
157 | seconds since the epoch or, if there's a leading +, seconds | 157 | the epoch by default, or if there's a leading +, seconds in the |
158 | in the future. | 158 | future, or if there is a leading +=, seconds ahead of the current |
159 | alarm. | ||
159 | 160 | ||
160 | IOCTL INTERFACE | 161 | IOCTL INTERFACE |
161 | --------------- | 162 | --------------- |
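
As a concrete illustration of the wakealarm formats described above, a small userspace sketch that arms rtc0 to fire 60 seconds from now using the relative "+" form might look like this (the sysfs path is the usual one for the first RTC, but is an assumption here):

    #include <stdio.h>

    int main(void)
    {
        /* "+60" = 60 seconds in the future; a plain number would be
         * interpreted as seconds since the epoch, and "+=" as seconds
         * ahead of the currently programmed alarm. */
        FILE *f = fopen("/sys/class/rtc/rtc0/wakealarm", "w");

        if (!f)
            return 1;
        fprintf(f, "+60\n");
        fclose(f);
        return 0;
    }
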
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt index 7587493c67f1..fd7c3cfddd8e 100644 --- a/Documentation/vm/pagemap.txt +++ b/Documentation/vm/pagemap.txt | |||
@@ -15,7 +15,8 @@ There are three components to pagemap: | |||
15 | * Bits 0-54 page frame number (PFN) if present | 15 | * Bits 0-54 page frame number (PFN) if present |
16 | * Bits 0-4 swap type if swapped | 16 | * Bits 0-4 swap type if swapped |
17 | * Bits 5-54 swap offset if swapped | 17 | * Bits 5-54 swap offset if swapped |
18 | * Bits 55-60 page shift (page size = 1<<page shift) | 18 | * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) |
19 | * Bits 56-60 zero | ||
19 | * Bit 61 page is file-page or shared-anon | 20 | * Bit 61 page is file-page or shared-anon |
20 | * Bit 62 page swapped | 21 | * Bit 62 page swapped |
21 | * Bit 63 page present | 22 | * Bit 63 page present |
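
To make the pagemap bit layout above concrete, the following hedged helper decodes one 64-bit pagemap entry according to the bit positions listed in this hunk (the struct and field names are made up for illustration):

    #include <stdint.h>
    #include <stdbool.h>

    struct pagemap_entry {
        uint64_t pfn;        /* bits 0-54, meaningful only when present */
        bool soft_dirty;     /* bit 55: pte is soft-dirty */
        bool file_or_shared; /* bit 61: file-page or shared-anon */
        bool swapped;        /* bit 62 */
        bool present;        /* bit 63 */
    };

    static struct pagemap_entry decode_pagemap(uint64_t e)
    {
        struct pagemap_entry p = {
            .pfn            = e & ((1ULL << 55) - 1),
            .soft_dirty     = (e >> 55) & 1,
            .file_or_shared = (e >> 61) & 1,
            .swapped        = (e >> 62) & 1,
            .present        = (e >> 63) & 1,
        };
        return p;
    }
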
diff --git a/Documentation/vm/soft-dirty.txt b/Documentation/vm/soft-dirty.txt new file mode 100644 index 000000000000..9a12a5956bc0 --- /dev/null +++ b/Documentation/vm/soft-dirty.txt | |||
@@ -0,0 +1,36 @@ | |||
1 | SOFT-DIRTY PTEs | ||
2 | |||
3 | Soft-dirty is a bit on a PTE which helps to track which pages a task | ||
4 | writes to. In order to do this tracking one should | ||
5 | |||
6 | 1. Clear soft-dirty bits from the task's PTEs. | ||
7 | |||
8 | This is done by writing "4" into the /proc/PID/clear_refs file of the | ||
9 | task in question. | ||
10 | |||
11 | 2. Wait some time. | ||
12 | |||
13 | 3. Read soft-dirty bits from the PTEs. | ||
14 | |||
15 | This is done by reading from /proc/PID/pagemap. Bit 55 of the | ||
16 | 64-bit qword is the soft-dirty one. If set, the respective PTE was | ||
17 | written to since step 1. | ||
18 | |||
19 | |||
20 | Internally, to do this tracking, the writable bit is cleared from PTEs | ||
21 | when the soft-dirty bit is cleared. So, after this, when the task tries to | ||
22 | modify a page at some virtual address the #PF occurs and the kernel sets | ||
23 | the soft-dirty bit on the respective PTE. | ||
24 | |||
25 | Note that although the task's entire address space is marked read-only after | ||
26 | the soft-dirty bits are cleared, the page faults that occur after that are | ||
27 | handled quickly. This is because the pages are still mapped to physical | ||
28 | memory, so all the kernel has to do is detect this and set both the writable | ||
29 | and soft-dirty bits on the PTE. | ||
30 | |||
31 | |||
32 | This feature is actively used by the checkpoint-restore project. You | ||
33 | can find more details about it on http://criu.org | ||
34 | |||
35 | |||
36 | -- Pavel Emelyanov, Apr 9, 2013 | ||
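
The three-step workflow above can be exercised from userspace with something like the following sketch (illustrative only; error handling is minimal and the paths and bit position follow the documentation in this patch):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>

    /* Step 1: clear the soft-dirty bits of a task. */
    static int clear_soft_dirty(pid_t pid)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs("4", f);
        fclose(f);
        return 0;
    }

    /* Step 3: test whether the page containing 'addr' was written to
     * since the bits were cleared (bit 55 of the pagemap entry). */
    static int page_is_soft_dirty(pid_t pid, unsigned long addr)
    {
        char path[64];
        uint64_t entry;
        long psize = sysconf(_SC_PAGESIZE);
        int fd, ret = -1;

        snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        /* pagemap holds one 64-bit entry per virtual page. */
        if (pread(fd, &entry, sizeof(entry),
                  (off_t)(addr / psize) * sizeof(entry)) == sizeof(entry))
            ret = (entry >> 55) & 1;
        close(fd);
        return ret;
    }
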
diff --git a/MAINTAINERS b/MAINTAINERS index f4c2bc73fd24..3f7710151a75 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1617,6 +1617,7 @@ F: drivers/net/wireless/b43legacy/ | |||
1617 | 1617 | ||
1618 | BACKLIGHT CLASS/SUBSYSTEM | 1618 | BACKLIGHT CLASS/SUBSYSTEM |
1619 | M: Richard Purdie <rpurdie@rpsys.net> | 1619 | M: Richard Purdie <rpurdie@rpsys.net> |
1620 | M: Jingoo Han <jg1.han@samsung.com> | ||
1620 | S: Maintained | 1621 | S: Maintained |
1621 | F: drivers/video/backlight/ | 1622 | F: drivers/video/backlight/ |
1622 | F: include/linux/backlight.h | 1623 | F: include/linux/backlight.h |
@@ -5974,8 +5975,10 @@ M: Willem Riede <osst@riede.org> | |||
5974 | L: osst-users@lists.sourceforge.net | 5975 | L: osst-users@lists.sourceforge.net |
5975 | L: linux-scsi@vger.kernel.org | 5976 | L: linux-scsi@vger.kernel.org |
5976 | S: Maintained | 5977 | S: Maintained |
5977 | F: drivers/scsi/osst* | 5978 | F: Documentation/scsi/osst.txt |
5978 | F: drivers/scsi/st* | 5979 | F: drivers/scsi/osst.* |
5980 | F: drivers/scsi/osst_*.h | ||
5981 | F: drivers/scsi/st.h | ||
5979 | 5982 | ||
5980 | OPENCORES I2C BUS DRIVER | 5983 | OPENCORES I2C BUS DRIVER |
5981 | M: Peter Korsgaard <jacmet@sunsite.dk> | 5984 | M: Peter Korsgaard <jacmet@sunsite.dk> |
@@ -7133,7 +7136,8 @@ M: Kai Mäkisara <Kai.Makisara@kolumbus.fi> | |||
7133 | L: linux-scsi@vger.kernel.org | 7136 | L: linux-scsi@vger.kernel.org |
7134 | S: Maintained | 7137 | S: Maintained |
7135 | F: Documentation/scsi/st.txt | 7138 | F: Documentation/scsi/st.txt |
7136 | F: drivers/scsi/st* | 7139 | F: drivers/scsi/st.* |
7140 | F: drivers/scsi/st_*.h | ||
7137 | 7141 | ||
7138 | SCTP PROTOCOL | 7142 | SCTP PROTOCOL |
7139 | M: Vlad Yasevich <vyasevich@gmail.com> | 7143 | M: Vlad Yasevich <vyasevich@gmail.com> |
diff --git a/arch/Kconfig b/arch/Kconfig index a4429bcd609e..8d2ae24b9f4a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -365,6 +365,9 @@ config HAVE_IRQ_TIME_ACCOUNTING | |||
365 | config HAVE_ARCH_TRANSPARENT_HUGEPAGE | 365 | config HAVE_ARCH_TRANSPARENT_HUGEPAGE |
366 | bool | 366 | bool |
367 | 367 | ||
368 | config HAVE_ARCH_SOFT_DIRTY | ||
369 | bool | ||
370 | |||
368 | config HAVE_MOD_ARCH_SPECIFIC | 371 | config HAVE_MOD_ARCH_SPECIFIC |
369 | bool | 372 | bool |
370 | help | 373 | help |
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h index c5b5d6bac9ed..14ce27bccd24 100644 --- a/arch/alpha/include/asm/mmzone.h +++ b/arch/alpha/include/asm/mmzone.h | |||
@@ -71,8 +71,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | |||
71 | 71 | ||
72 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 72 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
73 | 73 | ||
74 | #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) | ||
75 | |||
76 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) | 74 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) |
77 | #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) | 75 | #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) |
78 | #define pte_pfn(pte) (pte_val(pte) >> 32) | 76 | #define pte_pfn(pte) (pte_val(pte) >> 32) |
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 1d4aabfcf9a1..837c0fa58317 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c | |||
@@ -238,8 +238,8 @@ nautilus_init_pci(void) | |||
238 | if (pci_mem < memtop) | 238 | if (pci_mem < memtop) |
239 | memtop = pci_mem; | 239 | memtop = pci_mem; |
240 | if (memtop > alpha_mv.min_mem_address) { | 240 | if (memtop > alpha_mv.min_mem_address) { |
241 | free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address), | 241 | free_reserved_area(__va(alpha_mv.min_mem_address), |
242 | (unsigned long)__va(memtop), 0, NULL); | 242 | __va(memtop), -1, NULL); |
243 | printk("nautilus_init_pci: %ldk freed\n", | 243 | printk("nautilus_init_pci: %ldk freed\n", |
244 | (memtop - alpha_mv.min_mem_address) >> 10); | 244 | (memtop - alpha_mv.min_mem_address) >> 10); |
245 | } | 245 | } |
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 0ba85ee4a466..a1bea91df56a 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c | |||
@@ -276,56 +276,25 @@ srm_paging_stop (void) | |||
276 | } | 276 | } |
277 | #endif | 277 | #endif |
278 | 278 | ||
279 | #ifndef CONFIG_DISCONTIGMEM | ||
280 | static void __init | ||
281 | printk_memory_info(void) | ||
282 | { | ||
283 | unsigned long codesize, reservedpages, datasize, initsize, tmp; | ||
284 | extern int page_is_ram(unsigned long) __init; | ||
285 | |||
286 | /* printk all informations */ | ||
287 | reservedpages = 0; | ||
288 | for (tmp = 0; tmp < max_low_pfn; tmp++) | ||
289 | /* | ||
290 | * Only count reserved RAM pages | ||
291 | */ | ||
292 | if (page_is_ram(tmp) && PageReserved(mem_map+tmp)) | ||
293 | reservedpages++; | ||
294 | |||
295 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
296 | datasize = (unsigned long) &_edata - (unsigned long) &_data; | ||
297 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
298 | |||
299 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n", | ||
300 | nr_free_pages() << (PAGE_SHIFT-10), | ||
301 | max_mapnr << (PAGE_SHIFT-10), | ||
302 | codesize >> 10, | ||
303 | reservedpages << (PAGE_SHIFT-10), | ||
304 | datasize >> 10, | ||
305 | initsize >> 10); | ||
306 | } | ||
307 | |||
308 | void __init | 279 | void __init |
309 | mem_init(void) | 280 | mem_init(void) |
310 | { | 281 | { |
311 | max_mapnr = num_physpages = max_low_pfn; | 282 | set_max_mapnr(max_low_pfn); |
312 | totalram_pages += free_all_bootmem(); | ||
313 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 283 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
314 | 284 | free_all_bootmem(); | |
315 | printk_memory_info(); | 285 | mem_init_print_info(NULL); |
316 | } | 286 | } |
317 | #endif /* CONFIG_DISCONTIGMEM */ | ||
318 | 287 | ||
319 | void | 288 | void |
320 | free_initmem(void) | 289 | free_initmem(void) |
321 | { | 290 | { |
322 | free_initmem_default(0); | 291 | free_initmem_default(-1); |
323 | } | 292 | } |
324 | 293 | ||
325 | #ifdef CONFIG_BLK_DEV_INITRD | 294 | #ifdef CONFIG_BLK_DEV_INITRD |
326 | void | 295 | void |
327 | free_initrd_mem(unsigned long start, unsigned long end) | 296 | free_initrd_mem(unsigned long start, unsigned long end) |
328 | { | 297 | { |
329 | free_reserved_area(start, end, 0, "initrd"); | 298 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
330 | } | 299 | } |
331 | #endif | 300 | #endif |
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 33885048fa36..d543d71c28b4 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c | |||
@@ -129,8 +129,6 @@ setup_memory_node(int nid, void *kernel_end) | |||
129 | if (node_max_pfn > max_low_pfn) | 129 | if (node_max_pfn > max_low_pfn) |
130 | max_pfn = max_low_pfn = node_max_pfn; | 130 | max_pfn = max_low_pfn = node_max_pfn; |
131 | 131 | ||
132 | num_physpages += node_max_pfn - node_min_pfn; | ||
133 | |||
134 | #if 0 /* we'll try this one again in a little while */ | 132 | #if 0 /* we'll try this one again in a little while */ |
135 | /* Cute trick to make sure our local node data is on local memory */ | 133 | /* Cute trick to make sure our local node data is on local memory */ |
136 | node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); | 134 | node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); |
@@ -321,41 +319,3 @@ void __init paging_init(void) | |||
321 | /* Initialize the kernel's ZERO_PGE. */ | 319 | /* Initialize the kernel's ZERO_PGE. */ |
322 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); | 320 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); |
323 | } | 321 | } |
324 | |||
325 | void __init mem_init(void) | ||
326 | { | ||
327 | unsigned long codesize, reservedpages, datasize, initsize, pfn; | ||
328 | extern int page_is_ram(unsigned long) __init; | ||
329 | unsigned long nid, i; | ||
330 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | ||
331 | |||
332 | reservedpages = 0; | ||
333 | for_each_online_node(nid) { | ||
334 | /* | ||
335 | * This will free up the bootmem, ie, slot 0 memory | ||
336 | */ | ||
337 | totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); | ||
338 | |||
339 | pfn = NODE_DATA(nid)->node_start_pfn; | ||
340 | for (i = 0; i < node_spanned_pages(nid); i++, pfn++) | ||
341 | if (page_is_ram(pfn) && | ||
342 | PageReserved(nid_page_nr(nid, i))) | ||
343 | reservedpages++; | ||
344 | } | ||
345 | |||
346 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
347 | datasize = (unsigned long) &_edata - (unsigned long) &_data; | ||
348 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
349 | |||
350 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, " | ||
351 | "%luk data, %luk init)\n", | ||
352 | nr_free_pages() << (PAGE_SHIFT-10), | ||
353 | num_physpages << (PAGE_SHIFT-10), | ||
354 | codesize >> 10, | ||
355 | reservedpages << (PAGE_SHIFT-10), | ||
356 | datasize >> 10, | ||
357 | initsize >> 10); | ||
358 | #if 0 | ||
359 | mem_stress(); | ||
360 | #endif | ||
361 | } | ||
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 4a177365b2c4..a08ce7185423 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c | |||
@@ -74,7 +74,7 @@ void __init setup_arch_memory(void) | |||
74 | /* Last usable page of low mem (no HIGHMEM yet for ARC port) */ | 74 | /* Last usable page of low mem (no HIGHMEM yet for ARC port) */ |
75 | max_low_pfn = max_pfn = PFN_DOWN(end_mem); | 75 | max_low_pfn = max_pfn = PFN_DOWN(end_mem); |
76 | 76 | ||
77 | max_mapnr = num_physpages = max_low_pfn - min_low_pfn; | 77 | max_mapnr = max_low_pfn - min_low_pfn; |
78 | 78 | ||
79 | /*------------- reserve kernel image -----------------------*/ | 79 | /*------------- reserve kernel image -----------------------*/ |
80 | memblock_reserve(CONFIG_LINUX_LINK_BASE, | 80 | memblock_reserve(CONFIG_LINUX_LINK_BASE, |
@@ -84,7 +84,7 @@ void __init setup_arch_memory(void) | |||
84 | 84 | ||
85 | /*-------------- node setup --------------------------------*/ | 85 | /*-------------- node setup --------------------------------*/ |
86 | memset(zones_size, 0, sizeof(zones_size)); | 86 | memset(zones_size, 0, sizeof(zones_size)); |
87 | zones_size[ZONE_NORMAL] = num_physpages; | 87 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * We can't use the helper free_area_init(zones[]) because it uses | 90 | * We can't use the helper free_area_init(zones[]) because it uses |
@@ -106,39 +106,9 @@ void __init setup_arch_memory(void) | |||
106 | */ | 106 | */ |
107 | void __init mem_init(void) | 107 | void __init mem_init(void) |
108 | { | 108 | { |
109 | int codesize, datasize, initsize, reserved_pages, free_pages; | ||
110 | int tmp; | ||
111 | |||
112 | high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz); | 109 | high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz); |
113 | 110 | free_all_bootmem(); | |
114 | totalram_pages = free_all_bootmem(); | 111 | mem_init_print_info(NULL); |
115 | |||
116 | /* count all reserved pages [kernel code/data/mem_map..] */ | ||
117 | reserved_pages = 0; | ||
118 | for (tmp = 0; tmp < max_mapnr; tmp++) | ||
119 | if (PageReserved(mem_map + tmp)) | ||
120 | reserved_pages++; | ||
121 | |||
122 | /* XXX: nr_free_pages() is equivalent */ | ||
123 | free_pages = max_mapnr - reserved_pages; | ||
124 | |||
125 | /* | ||
126 | * For the purpose of display below, split the "reserve mem" | ||
127 | * kernel code/data is already shown explicitly, | ||
128 | * Show any other reservations (mem_map[ ] et al) | ||
129 | */ | ||
130 | reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >> | ||
131 | PAGE_SHIFT); | ||
132 | |||
133 | codesize = _etext - _text; | ||
134 | datasize = _end - _etext; | ||
135 | initsize = __init_end - __init_begin; | ||
136 | |||
137 | pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n", | ||
138 | PAGES_TO_MB(free_pages), | ||
139 | TO_MB(arc_mem_sz), | ||
140 | TO_KB(codesize), TO_KB(datasize), TO_KB(initsize), | ||
141 | PAGES_TO_KB(reserved_pages)); | ||
142 | } | 112 | } |
143 | 113 | ||
144 | /* | 114 | /* |
@@ -146,13 +116,13 @@ void __init mem_init(void) | |||
146 | */ | 116 | */ |
147 | void __init_refok free_initmem(void) | 117 | void __init_refok free_initmem(void) |
148 | { | 118 | { |
149 | free_initmem_default(0); | 119 | free_initmem_default(-1); |
150 | } | 120 | } |
151 | 121 | ||
152 | #ifdef CONFIG_BLK_DEV_INITRD | 122 | #ifdef CONFIG_BLK_DEV_INITRD |
153 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 123 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
154 | { | 124 | { |
155 | free_reserved_area(start, end, 0, "initrd"); | 125 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
156 | } | 126 | } |
157 | #endif | 127 | #endif |
158 | 128 | ||
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index 7d1a27949c13..9866cd736dee 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi | |||
@@ -613,7 +613,7 @@ | |||
613 | }; | 613 | }; |
614 | 614 | ||
615 | rtc-iobg { | 615 | rtc-iobg { |
616 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; | 616 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; |
617 | #address-cells = <1>; | 617 | #address-cells = <1>; |
618 | #size-cells = <1>; | 618 | #size-cells = <1>; |
619 | reg = <0x80030000 0x10000>; | 619 | reg = <0x80030000 0x10000>; |
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 02edd8965f8a..05e9489cf95c 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi | |||
@@ -610,7 +610,7 @@ | |||
610 | }; | 610 | }; |
611 | 611 | ||
612 | rtc-iobg { | 612 | rtc-iobg { |
613 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; | 613 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; |
614 | #address-cells = <1>; | 614 | #address-cells = <1>; |
615 | #size-cells = <1>; | 615 | #size-cells = <1>; |
616 | reg = <0x80030000 0x10000>; | 616 | reg = <0x80030000 0x10000>; |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 584786f740f9..e750a938fd3c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -276,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
276 | /* | 276 | /* |
277 | * Conversion between a struct page and a physical address. | 277 | * Conversion between a struct page and a physical address. |
278 | * | 278 | * |
279 | * Note: when converting an unknown physical address to a | ||
280 | * struct page, the resulting pointer must be validated | ||
281 | * using VALID_PAGE(). It must return an invalid struct page | ||
282 | * for any physical address not corresponding to a system | ||
283 | * RAM address. | ||
284 | * | ||
285 | * page_to_pfn(page) convert a struct page * to a PFN number | 279 | * page_to_pfn(page) convert a struct page * to a PFN number |
286 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * | 280 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * |
287 | * | 281 | * |
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 3181f61ea63e..1c5bd7637b05 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c | |||
@@ -469,7 +469,6 @@ void __init iop13xx_platform_init(void) | |||
469 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 469 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
470 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 470 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
471 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 471 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
472 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
473 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 472 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
474 | break; | 473 | break; |
475 | case IOP13XX_INIT_ADMA_1: | 474 | case IOP13XX_INIT_ADMA_1: |
@@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void) | |||
479 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 478 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
480 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 479 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
481 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 480 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
482 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
483 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 481 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
484 | break; | 482 | break; |
485 | case IOP13XX_INIT_ADMA_2: | 483 | case IOP13XX_INIT_ADMA_2: |
@@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void) | |||
489 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 487 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
490 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 488 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
491 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 489 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
492 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
493 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 490 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
494 | dma_cap_set(DMA_PQ, plat_data->cap_mask); | 491 | dma_cap_set(DMA_PQ, plat_data->cap_mask); |
495 | dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); | 492 | dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2ffee02d1d5c..6833cbead6cc 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -583,9 +583,6 @@ static void __init free_highpages(void) | |||
583 | */ | 583 | */ |
584 | void __init mem_init(void) | 584 | void __init mem_init(void) |
585 | { | 585 | { |
586 | unsigned long reserved_pages, free_pages; | ||
587 | struct memblock_region *reg; | ||
588 | int i; | ||
589 | #ifdef CONFIG_HAVE_TCM | 586 | #ifdef CONFIG_HAVE_TCM |
590 | /* These pointers are filled in on TCM detection */ | 587 | /* These pointers are filled in on TCM detection */ |
591 | extern u32 dtcm_end; | 588 | extern u32 dtcm_end; |
@@ -596,57 +593,16 @@ void __init mem_init(void) | |||
596 | 593 | ||
597 | /* this will put all unused low memory onto the freelists */ | 594 | /* this will put all unused low memory onto the freelists */ |
598 | free_unused_memmap(&meminfo); | 595 | free_unused_memmap(&meminfo); |
599 | 596 | free_all_bootmem(); | |
600 | totalram_pages += free_all_bootmem(); | ||
601 | 597 | ||
602 | #ifdef CONFIG_SA1111 | 598 | #ifdef CONFIG_SA1111 |
603 | /* now that our DMA memory is actually so designated, we can free it */ | 599 | /* now that our DMA memory is actually so designated, we can free it */ |
604 | free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); | 600 | free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL); |
605 | #endif | 601 | #endif |
606 | 602 | ||
607 | free_highpages(); | 603 | free_highpages(); |
608 | 604 | ||
609 | reserved_pages = free_pages = 0; | 605 | mem_init_print_info(NULL); |
610 | |||
611 | for_each_bank(i, &meminfo) { | ||
612 | struct membank *bank = &meminfo.bank[i]; | ||
613 | unsigned int pfn1, pfn2; | ||
614 | struct page *page, *end; | ||
615 | |||
616 | pfn1 = bank_pfn_start(bank); | ||
617 | pfn2 = bank_pfn_end(bank); | ||
618 | |||
619 | page = pfn_to_page(pfn1); | ||
620 | end = pfn_to_page(pfn2 - 1) + 1; | ||
621 | |||
622 | do { | ||
623 | if (PageReserved(page)) | ||
624 | reserved_pages++; | ||
625 | else if (!page_count(page)) | ||
626 | free_pages++; | ||
627 | page++; | ||
628 | } while (page < end); | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * Since our memory may not be contiguous, calculate the | ||
633 | * real number of pages we have in this system | ||
634 | */ | ||
635 | printk(KERN_INFO "Memory:"); | ||
636 | num_physpages = 0; | ||
637 | for_each_memblock(memory, reg) { | ||
638 | unsigned long pages = memblock_region_memory_end_pfn(reg) - | ||
639 | memblock_region_memory_base_pfn(reg); | ||
640 | num_physpages += pages; | ||
641 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
642 | } | ||
643 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | ||
644 | |||
645 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", | ||
646 | nr_free_pages() << (PAGE_SHIFT-10), | ||
647 | free_pages << (PAGE_SHIFT-10), | ||
648 | reserved_pages << (PAGE_SHIFT-10), | ||
649 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
650 | 606 | ||
651 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | 607 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 |
652 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | 608 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 |
@@ -712,7 +668,7 @@ void __init mem_init(void) | |||
712 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | 668 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); |
713 | #endif | 669 | #endif |
714 | 670 | ||
715 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 671 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
716 | extern int sysctl_overcommit_memory; | 672 | extern int sysctl_overcommit_memory; |
717 | /* | 673 | /* |
718 | * On a machine this small we won't get | 674 | * On a machine this small we won't get |
@@ -729,12 +685,12 @@ void free_initmem(void) | |||
729 | extern char __tcm_start, __tcm_end; | 685 | extern char __tcm_start, __tcm_end; |
730 | 686 | ||
731 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | 687 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); |
732 | free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); | 688 | free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); |
733 | #endif | 689 | #endif |
734 | 690 | ||
735 | poison_init_mem(__init_begin, __init_end - __init_begin); | 691 | poison_init_mem(__init_begin, __init_end - __init_begin); |
736 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 692 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
737 | free_initmem_default(0); | 693 | free_initmem_default(-1); |
738 | } | 694 | } |
739 | 695 | ||
740 | #ifdef CONFIG_BLK_DEV_INITRD | 696 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -745,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
745 | { | 701 | { |
746 | if (!keep_initrd) { | 702 | if (!keep_initrd) { |
747 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | 703 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); |
748 | free_reserved_area(start, end, 0, "initrd"); | 704 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
749 | } | 705 | } |
750 | } | 706 | } |
751 | 707 | ||
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index 1ff6a37e893c..a4d1f8de3b5b 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c | |||
@@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void) | |||
192 | 192 | ||
193 | #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ | 193 | #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ |
194 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); | 194 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); |
195 | dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); | ||
196 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); | 195 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); |
197 | #else | 196 | #else |
198 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); | 197 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); |
199 | dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); | 198 | dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); |
200 | dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); | ||
201 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); | 199 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); |
202 | #endif | 200 | #endif |
203 | 201 | ||
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index c019b7aaf776..c66d163d7a2a 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
@@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low, | |||
666 | orion_xor0_shared_resources[3].start = irq_1; | 666 | orion_xor0_shared_resources[3].start = irq_1; |
667 | orion_xor0_shared_resources[3].end = irq_1; | 667 | orion_xor0_shared_resources[3].end = irq_1; |
668 | 668 | ||
669 | /* | ||
670 | * two engines can't do memset simultaneously, this limitation | ||
671 | * satisfied by removing memset support from one of the engines. | ||
672 | */ | ||
673 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); | 669 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); |
674 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); | 670 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); |
675 | 671 | ||
676 | dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask); | ||
677 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); | 672 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); |
678 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); | 673 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); |
679 | 674 | ||
@@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low, | |||
732 | orion_xor1_shared_resources[3].start = irq_1; | 727 | orion_xor1_shared_resources[3].start = irq_1; |
733 | orion_xor1_shared_resources[3].end = irq_1; | 728 | orion_xor1_shared_resources[3].end = irq_1; |
734 | 729 | ||
735 | /* | ||
736 | * two engines can't do memset simultaneously, this limitation | ||
737 | * satisfied by removing memset support from one of the engines. | ||
738 | */ | ||
739 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); | 730 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); |
740 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); | 731 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); |
741 | 732 | ||
742 | dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask); | ||
743 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); | 733 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); |
744 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); | 734 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); |
745 | 735 | ||
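
Both plat-iop and plat-orion stop advertising DMA_MEMSET because memset offload is being retired from the dmaengine capability set in this series. For context, a rough sketch of how a client requests one of these channels by capability; example_get_xor_chan() is an invented name, and only the dmaengine calls themselves are taken from the real API:

    #include <linux/dmaengine.h>

    static struct dma_chan *example_get_xor_chan(void)
    {
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_XOR, mask);		/* still advertised above */
    	dma_cap_set(DMA_MEMCPY, mask);
    	/* DMA_MEMSET is intentionally no longer requested */

    	return dma_request_channel(mask, NULL, NULL);
    }
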
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f497ca77925a..67e8d7ce3fe7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -197,14 +197,6 @@ void __init bootmem_init(void) | |||
197 | max_pfn = max_low_pfn = max; | 197 | max_pfn = max_low_pfn = max; |
198 | } | 198 | } |
199 | 199 | ||
200 | /* | ||
201 | * Poison init memory with an undefined instruction (0x0). | ||
202 | */ | ||
203 | static inline void poison_init_mem(void *s, size_t count) | ||
204 | { | ||
205 | memset(s, 0, count); | ||
206 | } | ||
207 | |||
208 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 200 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
209 | static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 201 | static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
210 | { | 202 | { |
@@ -280,59 +272,17 @@ static void __init free_unused_memmap(void) | |||
280 | */ | 272 | */ |
281 | void __init mem_init(void) | 273 | void __init mem_init(void) |
282 | { | 274 | { |
283 | unsigned long reserved_pages, free_pages; | ||
284 | struct memblock_region *reg; | ||
285 | |||
286 | arm64_swiotlb_init(); | 275 | arm64_swiotlb_init(); |
287 | 276 | ||
288 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; | 277 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; |
289 | 278 | ||
290 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 279 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
291 | /* this will put all unused low memory onto the freelists */ | ||
292 | free_unused_memmap(); | 280 | free_unused_memmap(); |
293 | #endif | 281 | #endif |
282 | /* this will put all unused low memory onto the freelists */ | ||
283 | free_all_bootmem(); | ||
294 | 284 | ||
295 | totalram_pages += free_all_bootmem(); | 285 | mem_init_print_info(NULL); |
296 | |||
297 | reserved_pages = free_pages = 0; | ||
298 | |||
299 | for_each_memblock(memory, reg) { | ||
300 | unsigned int pfn1, pfn2; | ||
301 | struct page *page, *end; | ||
302 | |||
303 | pfn1 = __phys_to_pfn(reg->base); | ||
304 | pfn2 = pfn1 + __phys_to_pfn(reg->size); | ||
305 | |||
306 | page = pfn_to_page(pfn1); | ||
307 | end = pfn_to_page(pfn2 - 1) + 1; | ||
308 | |||
309 | do { | ||
310 | if (PageReserved(page)) | ||
311 | reserved_pages++; | ||
312 | else if (!page_count(page)) | ||
313 | free_pages++; | ||
314 | page++; | ||
315 | } while (page < end); | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Since our memory may not be contiguous, calculate the real number | ||
320 | * of pages we have in this system. | ||
321 | */ | ||
322 | pr_info("Memory:"); | ||
323 | num_physpages = 0; | ||
324 | for_each_memblock(memory, reg) { | ||
325 | unsigned long pages = memblock_region_memory_end_pfn(reg) - | ||
326 | memblock_region_memory_base_pfn(reg); | ||
327 | num_physpages += pages; | ||
328 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
329 | } | ||
330 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | ||
331 | |||
332 | pr_notice("Memory: %luk/%luk available, %luk reserved\n", | ||
333 | nr_free_pages() << (PAGE_SHIFT-10), | ||
334 | free_pages << (PAGE_SHIFT-10), | ||
335 | reserved_pages << (PAGE_SHIFT-10)); | ||
336 | 286 | ||
337 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | 287 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 |
338 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | 288 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 |
@@ -374,7 +324,7 @@ void __init mem_init(void) | |||
374 | BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); | 324 | BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); |
375 | BUG_ON(TASK_SIZE_64 > MODULES_VADDR); | 325 | BUG_ON(TASK_SIZE_64 > MODULES_VADDR); |
376 | 326 | ||
377 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 327 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
378 | extern int sysctl_overcommit_memory; | 328 | extern int sysctl_overcommit_memory; |
379 | /* | 329 | /* |
380 | * On a machine this small we won't get anywhere without | 330 | * On a machine this small we won't get anywhere without |
@@ -386,7 +336,6 @@ void __init mem_init(void) | |||
386 | 336 | ||
387 | void free_initmem(void) | 337 | void free_initmem(void) |
388 | { | 338 | { |
389 | poison_init_mem(__init_begin, __init_end - __init_begin); | ||
390 | free_initmem_default(0); | 339 | free_initmem_default(0); |
391 | } | 340 | } |
392 | 341 | ||
@@ -396,10 +345,8 @@ static int keep_initrd; | |||
396 | 345 | ||
397 | void free_initrd_mem(unsigned long start, unsigned long end) | 346 | void free_initrd_mem(unsigned long start, unsigned long end) |
398 | { | 347 | { |
399 | if (!keep_initrd) { | 348 | if (!keep_initrd) |
400 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | 349 | free_reserved_area((void *)start, (void *)end, 0, "initrd"); |
401 | free_reserved_area(start, end, 0, "initrd"); | ||
402 | } | ||
403 | } | 350 | } |
404 | 351 | ||
405 | static int __init keepinitrd_setup(char *__unused) | 352 | static int __init keepinitrd_setup(char *__unused) |
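
The arm64 conversion shows the shape shared by most hunks in this series: open-coded page walking, num_physpages bookkeeping and the hand-rolled "Memory: ... available" printk are replaced by free_all_bootmem() plus mem_init_print_info(), and the remaining readers of num_physpages switch to get_num_physpages(). A reduced sketch of the new-style mem_init(), assuming the arch has already set up max_mapnr and high_memory elsewhere:

    #include <linux/bootmem.h>
    #include <linux/mm.h>

    void __init mem_init(void)
    {
    	/* hand every free bootmem page to the buddy allocator;
    	 * totalram_pages is now accounted inside the helper */
    	free_all_bootmem();

    	/* print the generic "Memory: ...K/...K available" banner;
    	 * the argument is an optional arch-specific suffix */
    	mem_init_print_info(NULL);
    }
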
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index b4247f478065..209ae5ad3495 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c | |||
@@ -555,7 +555,7 @@ void __init setup_arch (char **cmdline_p) | |||
555 | { | 555 | { |
556 | struct clk *cpu_clk; | 556 | struct clk *cpu_clk; |
557 | 557 | ||
558 | init_mm.start_code = (unsigned long)_text; | 558 | init_mm.start_code = (unsigned long)_stext; |
559 | init_mm.end_code = (unsigned long)_etext; | 559 | init_mm.end_code = (unsigned long)_etext; |
560 | init_mm.end_data = (unsigned long)_edata; | 560 | init_mm.end_data = (unsigned long)_edata; |
561 | init_mm.brk = (unsigned long)_end; | 561 | init_mm.brk = (unsigned long)_end; |
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 9cd2bd91d64a..a4589176bed5 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S | |||
@@ -23,7 +23,7 @@ SECTIONS | |||
23 | { | 23 | { |
24 | . = CONFIG_ENTRY_ADDRESS; | 24 | . = CONFIG_ENTRY_ADDRESS; |
25 | .init : AT(ADDR(.init) - LOAD_OFFSET) { | 25 | .init : AT(ADDR(.init) - LOAD_OFFSET) { |
26 | _stext = .; | 26 | _text = .; |
27 | __init_begin = .; | 27 | __init_begin = .; |
28 | _sinittext = .; | 28 | _sinittext = .; |
29 | *(.text.reset) | 29 | *(.text.reset) |
@@ -46,7 +46,7 @@ SECTIONS | |||
46 | 46 | ||
47 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 47 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
48 | _evba = .; | 48 | _evba = .; |
49 | _text = .; | 49 | _stext = .; |
50 | *(.ex.text) | 50 | *(.ex.text) |
51 | *(.irq.text) | 51 | *(.irq.text) |
52 | KPROBES_TEXT | 52 | KPROBES_TEXT |
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index e66e8406f992..def5391d927a 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c | |||
@@ -100,60 +100,26 @@ void __init paging_init(void) | |||
100 | 100 | ||
101 | void __init mem_init(void) | 101 | void __init mem_init(void) |
102 | { | 102 | { |
103 | int codesize, reservedpages, datasize, initsize; | 103 | pg_data_t *pgdat; |
104 | int nid, i; | ||
105 | 104 | ||
106 | reservedpages = 0; | ||
107 | high_memory = NULL; | 105 | high_memory = NULL; |
106 | for_each_online_pgdat(pgdat) | ||
107 | high_memory = max_t(void *, high_memory, | ||
108 | __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); | ||
108 | 109 | ||
109 | /* this will put all low memory onto the freelists */ | 110 | set_max_mapnr(MAP_NR(high_memory)); |
110 | for_each_online_node(nid) { | 111 | free_all_bootmem(); |
111 | pg_data_t *pgdat = NODE_DATA(nid); | 112 | mem_init_print_info(NULL); |
112 | unsigned long node_pages = 0; | ||
113 | void *node_high_memory; | ||
114 | |||
115 | num_physpages += pgdat->node_present_pages; | ||
116 | |||
117 | if (pgdat->node_spanned_pages != 0) | ||
118 | node_pages = free_all_bootmem_node(pgdat); | ||
119 | |||
120 | totalram_pages += node_pages; | ||
121 | |||
122 | for (i = 0; i < node_pages; i++) | ||
123 | if (PageReserved(pgdat->node_mem_map + i)) | ||
124 | reservedpages++; | ||
125 | |||
126 | node_high_memory = (void *)((pgdat->node_start_pfn | ||
127 | + pgdat->node_spanned_pages) | ||
128 | << PAGE_SHIFT); | ||
129 | if (node_high_memory > high_memory) | ||
130 | high_memory = node_high_memory; | ||
131 | } | ||
132 | |||
133 | max_mapnr = MAP_NR(high_memory); | ||
134 | |||
135 | codesize = (unsigned long)_etext - (unsigned long)_text; | ||
136 | datasize = (unsigned long)_edata - (unsigned long)_data; | ||
137 | initsize = (unsigned long)__init_end - (unsigned long)__init_begin; | ||
138 | |||
139 | printk ("Memory: %luk/%luk available (%dk kernel code, " | ||
140 | "%dk reserved, %dk data, %dk init)\n", | ||
141 | nr_free_pages() << (PAGE_SHIFT - 10), | ||
142 | totalram_pages << (PAGE_SHIFT - 10), | ||
143 | codesize >> 10, | ||
144 | reservedpages << (PAGE_SHIFT - 10), | ||
145 | datasize >> 10, | ||
146 | initsize >> 10); | ||
147 | } | 113 | } |
148 | 114 | ||
149 | void free_initmem(void) | 115 | void free_initmem(void) |
150 | { | 116 | { |
151 | free_initmem_default(0); | 117 | free_initmem_default(-1); |
152 | } | 118 | } |
153 | 119 | ||
154 | #ifdef CONFIG_BLK_DEV_INITRD | 120 | #ifdef CONFIG_BLK_DEV_INITRD |
155 | void free_initrd_mem(unsigned long start, unsigned long end) | 121 | void free_initrd_mem(unsigned long start, unsigned long end) |
156 | { | 122 | { |
157 | free_reserved_area(start, end, 0, "initrd"); | 123 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
158 | } | 124 | } |
159 | #endif | 125 | #endif |
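
avr32 now derives high_memory from the online node data instead of a private loop. pgdat_end_pfn() does the work there; as a sketch of its assumed definition (a one-line helper in include/linux/mmzone.h), it amounts to:

    #include <linux/mmzone.h>

    static inline unsigned long example_pgdat_end_pfn(pg_data_t *pgdat)
    {
    	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
    }
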
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c index 82d01a71207f..166842de3dc7 100644 --- a/arch/blackfin/mm/init.c +++ b/arch/blackfin/mm/init.c | |||
@@ -90,50 +90,24 @@ asmlinkage void __init init_pda(void) | |||
90 | 90 | ||
91 | void __init mem_init(void) | 91 | void __init mem_init(void) |
92 | { | 92 | { |
93 | unsigned int codek = 0, datak = 0, initk = 0; | 93 | char buf[64]; |
94 | unsigned int reservedpages = 0, freepages = 0; | ||
95 | unsigned long tmp; | ||
96 | unsigned long start_mem = memory_start; | ||
97 | unsigned long end_mem = memory_end; | ||
98 | 94 | ||
99 | end_mem &= PAGE_MASK; | 95 | high_memory = (void *)(memory_end & PAGE_MASK); |
100 | high_memory = (void *)end_mem; | 96 | max_mapnr = MAP_NR(high_memory); |
101 | 97 | printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", max_mapnr); | |
102 | start_mem = PAGE_ALIGN(start_mem); | ||
103 | max_mapnr = num_physpages = MAP_NR(high_memory); | ||
104 | printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); | ||
105 | 98 | ||
106 | /* This will put all low memory onto the freelists. */ | 99 | /* This will put all low memory onto the freelists. */ |
107 | totalram_pages = free_all_bootmem(); | 100 | free_all_bootmem(); |
108 | |||
109 | reservedpages = 0; | ||
110 | for (tmp = ARCH_PFN_OFFSET; tmp < max_mapnr; tmp++) | ||
111 | if (PageReserved(pfn_to_page(tmp))) | ||
112 | reservedpages++; | ||
113 | freepages = max_mapnr - ARCH_PFN_OFFSET - reservedpages; | ||
114 | |||
115 | /* do not count in kernel image between _rambase and _ramstart */ | ||
116 | reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT; | ||
117 | #if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) | ||
118 | reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT; | ||
119 | #endif | ||
120 | |||
121 | codek = (_etext - _stext) >> 10; | ||
122 | initk = (__init_end - __init_begin) >> 10; | ||
123 | datak = ((_ramstart - _rambase) >> 10) - codek - initk; | ||
124 | 101 | ||
125 | printk(KERN_INFO | 102 | snprintf(buf, sizeof(buf) - 1, "%uK DMA", DMA_UNCACHED_REGION >> 10); |
126 | "Memory available: %luk/%luk RAM, " | 103 | mem_init_print_info(buf); |
127 | "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", | ||
128 | (unsigned long) freepages << (PAGE_SHIFT-10), (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 10, | ||
129 | initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); | ||
130 | } | 104 | } |
131 | 105 | ||
132 | #ifdef CONFIG_BLK_DEV_INITRD | 106 | #ifdef CONFIG_BLK_DEV_INITRD |
133 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 107 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
134 | { | 108 | { |
135 | #ifndef CONFIG_MPU | 109 | #ifndef CONFIG_MPU |
136 | free_reserved_area(start, end, 0, "initrd"); | 110 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
137 | #endif | 111 | #endif |
138 | } | 112 | } |
139 | #endif | 113 | #endif |
@@ -141,7 +115,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
141 | void __init_refok free_initmem(void) | 115 | void __init_refok free_initmem(void) |
142 | { | 116 | { |
143 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU | 117 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU |
144 | free_initmem_default(0); | 118 | free_initmem_default(-1); |
145 | if (memory_start == (unsigned long)(&__init_end)) | 119 | if (memory_start == (unsigned long)(&__init_end)) |
146 | memory_start = (unsigned long)(&__init_begin); | 120 | memory_start = (unsigned long)(&__init_begin); |
147 | #endif | 121 | #endif |
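
The blackfin hunk is the one conversion here that uses mem_init_print_info()'s string argument: an arch-specific note (the size of the uncached DMA region) is appended to the generic banner instead of being printed in a separate hand-formatted line. A self-contained sketch of that pattern; the buffer size and the dma_region_bytes parameter are illustrative:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>

    static void __init example_print_banner(unsigned long dma_region_bytes)
    {
    	char buf[64];			/* illustrative size */

    	snprintf(buf, sizeof(buf) - 1, "%luK DMA", dma_region_bytes >> 10);
    	mem_init_print_info(buf);	/* appended to the available-memory line */
    }
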
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 1d81c4c129ec..279d80725128 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S | |||
@@ -54,16 +54,15 @@ SECTIONS | |||
54 | } | 54 | } |
55 | 55 | ||
56 | . = ALIGN(PAGE_SIZE); | 56 | . = ALIGN(PAGE_SIZE); |
57 | __init_begin = .; | ||
57 | .init : | 58 | .init : |
58 | { | 59 | { |
59 | _stext = .; | ||
60 | _sinittext = .; | 60 | _sinittext = .; |
61 | HEAD_TEXT | 61 | HEAD_TEXT |
62 | INIT_TEXT | 62 | INIT_TEXT |
63 | _einittext = .; | 63 | _einittext = .; |
64 | } | 64 | } |
65 | 65 | ||
66 | __init_begin = _stext; | ||
67 | INIT_DATA_SECTION(16) | 66 | INIT_DATA_SECTION(16) |
68 | 67 | ||
69 | PERCPU_SECTION(128) | 68 | PERCPU_SECTION(128) |
@@ -74,6 +73,7 @@ SECTIONS | |||
74 | .text : | 73 | .text : |
75 | { | 74 | { |
76 | _text = .; | 75 | _text = .; |
76 | _stext = .; | ||
77 | TEXT_TEXT | 77 | TEXT_TEXT |
78 | SCHED_TEXT | 78 | SCHED_TEXT |
79 | LOCK_TEXT | 79 | LOCK_TEXT |
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index a9fcd89b251b..63f5560d6eb2 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/initrd.h> | 18 | #include <linux/initrd.h> |
19 | 19 | ||
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
21 | #include <asm/uaccess.h> | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * ZERO_PAGE is a special page that is used for zero-initialized | 24 | * ZERO_PAGE is a special page that is used for zero-initialized |
@@ -57,31 +58,22 @@ void __init paging_init(void) | |||
57 | 58 | ||
58 | void __init mem_init(void) | 59 | void __init mem_init(void) |
59 | { | 60 | { |
60 | int codek, datak; | ||
61 | unsigned long tmp; | ||
62 | unsigned long len = memory_end - memory_start; | ||
63 | |||
64 | high_memory = (void *)(memory_end & PAGE_MASK); | 61 | high_memory = (void *)(memory_end & PAGE_MASK); |
65 | 62 | ||
66 | /* this will put all memory onto the freelists */ | 63 | /* this will put all memory onto the freelists */ |
67 | totalram_pages = free_all_bootmem(); | 64 | free_all_bootmem(); |
68 | |||
69 | codek = (_etext - _stext) >> 10; | ||
70 | datak = (_end - _sdata) >> 10; | ||
71 | 65 | ||
72 | tmp = nr_free_pages() << PAGE_SHIFT; | 66 | mem_init_print_info(NULL); |
73 | printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n", | ||
74 | tmp >> 10, len >> 10, codek, datak); | ||
75 | } | 67 | } |
76 | 68 | ||
77 | #ifdef CONFIG_BLK_DEV_INITRD | 69 | #ifdef CONFIG_BLK_DEV_INITRD |
78 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 70 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
79 | { | 71 | { |
80 | free_reserved_area(start, end, 0, "initrd"); | 72 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
81 | } | 73 | } |
82 | #endif | 74 | #endif |
83 | 75 | ||
84 | void __init free_initmem(void) | 76 | void __init free_initmem(void) |
85 | { | 77 | { |
86 | free_initmem_default(0); | 78 | free_initmem_default(-1); |
87 | } | 79 | } |
diff --git a/arch/cris/include/asm/page.h b/arch/cris/include/asm/page.h index be45ee366be9..dfc53f9b88ec 100644 --- a/arch/cris/include/asm/page.h +++ b/arch/cris/include/asm/page.h | |||
@@ -51,7 +51,6 @@ typedef struct page *pgtable_t; | |||
51 | */ | 51 | */ |
52 | 52 | ||
53 | #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) | 53 | #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) |
54 | #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) | ||
55 | #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) | 54 | #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) |
56 | 55 | ||
57 | /* convert a page (based on mem_map and forward) to a physical address | 56 | /* convert a page (based on mem_map and forward) to a physical address |
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index 9ac80946dada..c81af5bd9167 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c | |||
@@ -19,9 +19,6 @@ unsigned long empty_zero_page; | |||
19 | void __init | 19 | void __init |
20 | mem_init(void) | 20 | mem_init(void) |
21 | { | 21 | { |
22 | int codesize, reservedpages, datasize, initsize; | ||
23 | unsigned long tmp; | ||
24 | |||
25 | BUG_ON(!mem_map); | 22 | BUG_ON(!mem_map); |
26 | 23 | ||
27 | /* max/min_low_pfn was set by setup.c | 24 | /* max/min_low_pfn was set by setup.c |
@@ -29,35 +26,9 @@ mem_init(void) | |||
29 | * | 26 | * |
30 | * high_memory was also set in setup.c | 27 | * high_memory was also set in setup.c |
31 | */ | 28 | */ |
32 | 29 | max_mapnr = max_low_pfn - min_low_pfn; | |
33 | max_mapnr = num_physpages = max_low_pfn - min_low_pfn; | 30 | free_all_bootmem(); |
34 | 31 | mem_init_print_info(NULL); | |
35 | /* this will put all memory onto the freelists */ | ||
36 | totalram_pages = free_all_bootmem(); | ||
37 | |||
38 | reservedpages = 0; | ||
39 | for (tmp = 0; tmp < max_mapnr; tmp++) { | ||
40 | /* | ||
41 | * Only count reserved RAM pages | ||
42 | */ | ||
43 | if (PageReserved(mem_map + tmp)) | ||
44 | reservedpages++; | ||
45 | } | ||
46 | |||
47 | codesize = (unsigned long) &_etext - (unsigned long) &_stext; | ||
48 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
49 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
50 | |||
51 | printk(KERN_INFO | ||
52 | "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, " | ||
53 | "%dk init)\n" , | ||
54 | nr_free_pages() << (PAGE_SHIFT-10), | ||
55 | max_mapnr << (PAGE_SHIFT-10), | ||
56 | codesize >> 10, | ||
57 | reservedpages << (PAGE_SHIFT-10), | ||
58 | datasize >> 10, | ||
59 | initsize >> 10 | ||
60 | ); | ||
61 | } | 32 | } |
62 | 33 | ||
63 | /* free the pages occupied by initialization code */ | 34 | /* free the pages occupied by initialization code */ |
@@ -65,5 +36,5 @@ mem_init(void) | |||
65 | void | 36 | void |
66 | free_initmem(void) | 37 | free_initmem(void) |
67 | { | 38 | { |
68 | free_initmem_default(0); | 39 | free_initmem_default(-1); |
69 | } | 40 | } |
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c index a5136474c6fd..ae3a6706419b 100644 --- a/arch/frv/kernel/setup.c +++ b/arch/frv/kernel/setup.c | |||
@@ -735,7 +735,7 @@ static void __init parse_cmdline_early(char *cmdline) | |||
735 | /* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked | 735 | /* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked |
736 | * out from the SDRAM controller mask register | 736 | * out from the SDRAM controller mask register |
737 | */ | 737 | */ |
738 | if (!memcmp(cmdline, "mem=", 4)) { | 738 | if (!strncmp(cmdline, "mem=", 4)) { |
739 | unsigned long long mem_size; | 739 | unsigned long long mem_size; |
740 | 740 | ||
741 | mem_size = memparse(cmdline + 4, &cmdline); | 741 | mem_size = memparse(cmdline + 4, &cmdline); |
@@ -876,6 +876,7 @@ late_initcall(setup_arch_serial); | |||
876 | static void __init setup_linux_memory(void) | 876 | static void __init setup_linux_memory(void) |
877 | { | 877 | { |
878 | unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem; | 878 | unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem; |
879 | unsigned long physpages; | ||
879 | 880 | ||
880 | kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET; | 881 | kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET; |
881 | kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET; | 882 | kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET; |
@@ -893,19 +894,19 @@ static void __init setup_linux_memory(void) | |||
893 | ); | 894 | ); |
894 | 895 | ||
895 | /* pass the memory that the kernel can immediately use over to the bootmem allocator */ | 896 | /* pass the memory that the kernel can immediately use over to the bootmem allocator */ |
896 | max_mapnr = num_physpages = (memory_end - memory_start) >> PAGE_SHIFT; | 897 | max_mapnr = physpages = (memory_end - memory_start) >> PAGE_SHIFT; |
897 | low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT; | 898 | low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT; |
898 | high_mem = 0; | 899 | high_mem = 0; |
899 | 900 | ||
900 | if (num_physpages > low_top_pfn) { | 901 | if (physpages > low_top_pfn) { |
901 | #ifdef CONFIG_HIGHMEM | 902 | #ifdef CONFIG_HIGHMEM |
902 | high_mem = num_physpages - low_top_pfn; | 903 | high_mem = physpages - low_top_pfn; |
903 | #else | 904 | #else |
904 | max_mapnr = num_physpages = low_top_pfn; | 905 | max_mapnr = physpages = low_top_pfn; |
905 | #endif | 906 | #endif |
906 | } | 907 | } |
907 | else { | 908 | else { |
908 | low_top_pfn = num_physpages; | 909 | low_top_pfn = physpages; |
909 | } | 910 | } |
910 | 911 | ||
911 | min_low_pfn = memory_start >> PAGE_SHIFT; | 912 | min_low_pfn = memory_start >> PAGE_SHIFT; |
@@ -979,7 +980,7 @@ static void __init setup_uclinux_memory(void) | |||
979 | free_bootmem(memory_start, memory_end - memory_start); | 980 | free_bootmem(memory_start, memory_end - memory_start); |
980 | 981 | ||
981 | high_memory = (void *) (memory_end & PAGE_MASK); | 982 | high_memory = (void *) (memory_end & PAGE_MASK); |
982 | max_mapnr = num_physpages = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; | 983 | max_mapnr = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; |
983 | 984 | ||
984 | min_low_pfn = memory_start >> PAGE_SHIFT; | 985 | min_low_pfn = memory_start >> PAGE_SHIFT; |
985 | max_low_pfn = memory_end >> PAGE_SHIFT; | 986 | max_low_pfn = memory_end >> PAGE_SHIFT; |
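
Two small frv fixes ride along. The first replaces memcmp() with strncmp() when testing for the "mem=" prefix, so a command line shorter than four bytes cannot be read past its terminator; the value itself is parsed with memparse(), which understands K/M/G suffixes. A sketch of the pattern, with example_parse_mem() as an invented wrapper:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static void __init example_parse_mem(char *cmdline, unsigned long long *size)
    {
    	if (!strncmp(cmdline, "mem=", 4))
    		*size = memparse(cmdline + 4, &cmdline);	/* "32M", "1G", ... */
    }
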
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 4bff48c19d29..a6d105d61b26 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c | |||
@@ -523,7 +523,7 @@ void die_if_kernel(const char *str, ...) | |||
523 | return; | 523 | return; |
524 | 524 | ||
525 | va_start(va, str); | 525 | va_start(va, str); |
526 | vsprintf(buffer, str, va); | 526 | vsnprintf(buffer, sizeof(buffer), str, va); |
527 | va_end(va); | 527 | va_end(va); |
528 | 528 | ||
529 | console_verbose(); | 529 | console_verbose(); |
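
The second fix bounds the formatting in die_if_kernel(): vsprintf() can overrun the on-stack buffer for a long message, vsnprintf() cannot. The generic shape of a bounded varargs helper, with the buffer length passed in for illustration:

    #include <linux/kernel.h>
    #include <stdarg.h>

    static void example_format_msg(char *buffer, size_t len, const char *fmt, ...)
    {
    	va_list va;

    	va_start(va, fmt);
    	vsnprintf(buffer, len, fmt, va);	/* writes at most len bytes, NUL included */
    	va_end(va);
    }
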
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index dee354fa6b64..88a159743528 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c | |||
@@ -78,7 +78,7 @@ void __init paging_init(void) | |||
78 | memset((void *) empty_zero_page, 0, PAGE_SIZE); | 78 | memset((void *) empty_zero_page, 0, PAGE_SIZE); |
79 | 79 | ||
80 | #ifdef CONFIG_HIGHMEM | 80 | #ifdef CONFIG_HIGHMEM |
81 | if (num_physpages - num_mappedpages) { | 81 | if (get_num_physpages() - num_mappedpages) { |
82 | pgd_t *pge; | 82 | pgd_t *pge; |
83 | pud_t *pue; | 83 | pud_t *pue; |
84 | pmd_t *pme; | 84 | pmd_t *pme; |
@@ -96,7 +96,7 @@ void __init paging_init(void) | |||
96 | */ | 96 | */ |
97 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; | 97 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; |
98 | #ifdef CONFIG_HIGHMEM | 98 | #ifdef CONFIG_HIGHMEM |
99 | zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages; | 99 | zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages; |
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | free_area_init(zones_size); | 102 | free_area_init(zones_size); |
@@ -114,45 +114,24 @@ void __init paging_init(void) | |||
114 | */ | 114 | */ |
115 | void __init mem_init(void) | 115 | void __init mem_init(void) |
116 | { | 116 | { |
117 | unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT; | 117 | unsigned long code_size = _etext - _stext; |
118 | unsigned long tmp; | ||
119 | #ifdef CONFIG_MMU | ||
120 | unsigned long loop, pfn; | ||
121 | int datapages = 0; | ||
122 | #endif | ||
123 | int codek = 0, datak = 0; | ||
124 | 118 | ||
125 | /* this will put all low memory onto the freelists */ | 119 | /* this will put all low memory onto the freelists */ |
126 | totalram_pages = free_all_bootmem(); | 120 | free_all_bootmem(); |
127 | 121 | #if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM) | |
128 | #ifdef CONFIG_MMU | 122 | { |
129 | for (loop = 0 ; loop < npages ; loop++) | 123 | unsigned long pfn; |
130 | if (PageReserved(&mem_map[loop])) | 124 | |
131 | datapages++; | 125 | for (pfn = get_num_physpages() - 1; |
132 | 126 | pfn >= num_mappedpages; pfn--) | |
133 | #ifdef CONFIG_HIGHMEM | 127 | free_highmem_page(&mem_map[pfn]); |
134 | for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) | 128 | } |
135 | free_highmem_page(&mem_map[pfn]); | ||
136 | #endif | ||
137 | |||
138 | codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; | ||
139 | datak = datapages << (PAGE_SHIFT - 10); | ||
140 | |||
141 | #else | ||
142 | codek = (_etext - _stext) >> 10; | ||
143 | datak = 0; //(__bss_stop - _sdata) >> 10; | ||
144 | #endif | 129 | #endif |
145 | 130 | ||
146 | tmp = nr_free_pages() << PAGE_SHIFT; | 131 | mem_init_print_info(NULL); |
147 | printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n", | 132 | if (rom_length > 0 && rom_length >= code_size) |
148 | tmp >> 10, | 133 | printk("Memory available: %luKiB/%luKiB ROM\n", |
149 | npages << (PAGE_SHIFT - 10), | 134 | (rom_length - code_size) >> 10, rom_length >> 10); |
150 | (rom_length > 0) ? ((rom_length >> 10) - codek) : 0, | ||
151 | rom_length >> 10, | ||
152 | codek, | ||
153 | datak | ||
154 | ); | ||
155 | |||
156 | } /* end mem_init() */ | 135 | } /* end mem_init() */ |
157 | 136 | ||
158 | /*****************************************************************************/ | 137 | /*****************************************************************************/ |
@@ -162,7 +141,7 @@ void __init mem_init(void) | |||
162 | void free_initmem(void) | 141 | void free_initmem(void) |
163 | { | 142 | { |
164 | #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) | 143 | #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) |
165 | free_initmem_default(0); | 144 | free_initmem_default(-1); |
166 | #endif | 145 | #endif |
167 | } /* end free_initmem() */ | 146 | } /* end free_initmem() */ |
168 | 147 | ||
@@ -173,6 +152,6 @@ void free_initmem(void) | |||
173 | #ifdef CONFIG_BLK_DEV_INITRD | 152 | #ifdef CONFIG_BLK_DEV_INITRD |
174 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 153 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
175 | { | 154 | { |
176 | free_reserved_area(start, end, 0, "initrd"); | 155 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
177 | } /* end free_initrd_mem() */ | 156 | } /* end free_initrd_mem() */ |
178 | #endif | 157 | #endif |
diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c index 51ab6cbd030f..4a1e3dd43948 100644 --- a/arch/h8300/boot/compressed/misc.c +++ b/arch/h8300/boot/compressed/misc.c | |||
@@ -79,7 +79,6 @@ static void error(char *m); | |||
79 | 79 | ||
80 | int puts(const char *); | 80 | int puts(const char *); |
81 | 81 | ||
82 | extern int _text; /* Defined in vmlinux.lds.S */ | ||
83 | extern int _end; | 82 | extern int _end; |
84 | static unsigned long free_mem_ptr; | 83 | static unsigned long free_mem_ptr; |
85 | static unsigned long free_mem_end_ptr; | 84 | static unsigned long free_mem_end_ptr; |
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 03d356d96e5d..3253fed42ac1 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S | |||
@@ -132,10 +132,12 @@ SECTIONS | |||
132 | { | 132 | { |
133 | . = ALIGN(0x4) ; | 133 | . = ALIGN(0x4) ; |
134 | __sbss = . ; | 134 | __sbss = . ; |
135 | ___bss_start = . ; | ||
135 | *(.bss*) | 136 | *(.bss*) |
136 | . = ALIGN(0x4) ; | 137 | . = ALIGN(0x4) ; |
137 | *(COMMON) | 138 | *(COMMON) |
138 | . = ALIGN(0x4) ; | 139 | . = ALIGN(0x4) ; |
140 | ___bss_stop = . ; | ||
139 | __ebss = . ; | 141 | __ebss = . ; |
140 | __end = . ; | 142 | __end = . ; |
141 | __ramstart = .; | 143 | __ramstart = .; |
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index ff349d70a29b..6c1251e491af 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c | |||
@@ -121,47 +121,27 @@ void __init paging_init(void) | |||
121 | 121 | ||
122 | void __init mem_init(void) | 122 | void __init mem_init(void) |
123 | { | 123 | { |
124 | int codek = 0, datak = 0, initk = 0; | 124 | unsigned long codesize = _etext - _stext; |
125 | /* DAVIDM look at setup memory map generically with reserved area */ | ||
126 | unsigned long tmp; | ||
127 | extern unsigned long _ramend, _ramstart; | ||
128 | unsigned long len = &_ramend - &_ramstart; | ||
129 | unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ | ||
130 | unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */ | ||
131 | 125 | ||
132 | #ifdef DEBUG | 126 | pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end); |
133 | printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem); | ||
134 | #endif | ||
135 | 127 | ||
136 | end_mem &= PAGE_MASK; | 128 | high_memory = (void *) (memory_end & PAGE_MASK); |
137 | high_memory = (void *) end_mem; | 129 | max_mapnr = MAP_NR(high_memory); |
138 | |||
139 | start_mem = PAGE_ALIGN(start_mem); | ||
140 | max_mapnr = num_physpages = MAP_NR(high_memory); | ||
141 | 130 | ||
142 | /* this will put all low memory onto the freelists */ | 131 | /* this will put all low memory onto the freelists */ |
143 | totalram_pages = free_all_bootmem(); | 132 | free_all_bootmem(); |
144 | 133 | ||
145 | codek = (_etext - _stext) >> 10; | 134 | mem_init_print_info(NULL); |
146 | datak = (__bss_stop - _sdata) >> 10; | 135 | if (rom_length > 0 && rom_length > codesize) |
147 | initk = (__init_begin - __init_end) >> 10; | 136 | pr_info("Memory available: %luK/%luK ROM\n", |
148 | 137 | (rom_length - codesize) >> 10, rom_length >> 10); | |
149 | tmp = nr_free_pages() << PAGE_SHIFT; | ||
150 | printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", | ||
151 | tmp >> 10, | ||
152 | len >> 10, | ||
153 | (rom_length > 0) ? ((rom_length >> 10) - codek) : 0, | ||
154 | rom_length >> 10, | ||
155 | codek, | ||
156 | datak | ||
157 | ); | ||
158 | } | 138 | } |
159 | 139 | ||
160 | 140 | ||
161 | #ifdef CONFIG_BLK_DEV_INITRD | 141 | #ifdef CONFIG_BLK_DEV_INITRD |
162 | void free_initrd_mem(unsigned long start, unsigned long end) | 142 | void free_initrd_mem(unsigned long start, unsigned long end) |
163 | { | 143 | { |
164 | free_reserved_area(start, end, 0, "initrd"); | 144 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
165 | } | 145 | } |
166 | #endif | 146 | #endif |
167 | 147 | ||
@@ -169,7 +149,7 @@ void | |||
169 | free_initmem(void) | 149 | free_initmem(void) |
170 | { | 150 | { |
171 | #ifdef CONFIG_RAMKERNEL | 151 | #ifdef CONFIG_RAMKERNEL |
172 | free_initmem_default(0); | 152 | free_initmem_default(-1); |
173 | #endif | 153 | #endif |
174 | } | 154 | } |
175 | 155 | ||
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 2561d259a296..88977e42af0a 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c | |||
@@ -70,10 +70,8 @@ unsigned long long kmap_generation; | |||
70 | void __init mem_init(void) | 70 | void __init mem_init(void) |
71 | { | 71 | { |
72 | /* No idea where this is actually declared. Seems to evade LXR. */ | 72 | /* No idea where this is actually declared. Seems to evade LXR. */ |
73 | totalram_pages += free_all_bootmem(); | 73 | free_all_bootmem(); |
74 | num_physpages = bootmem_lastpg-ARCH_PFN_OFFSET; | 74 | mem_init_print_info(NULL); |
75 | |||
76 | printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages); | ||
77 | 75 | ||
78 | /* | 76 | /* |
79 | * To-Do: someone somewhere should wipe out the bootmem map | 77 | * To-Do: someone somewhere should wipe out the bootmem map |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index f034563aeae5..51bce594eb83 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
@@ -1116,11 +1116,6 @@ efi_memmap_init(u64 *s, u64 *e) | |||
1116 | if (!is_memory_available(md)) | 1116 | if (!is_memory_available(md)) |
1117 | continue; | 1117 | continue; |
1118 | 1118 | ||
1119 | #ifdef CONFIG_CRASH_DUMP | ||
1120 | /* saved_max_pfn should ignore max_addr= command line arg */ | ||
1121 | if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT)) | ||
1122 | saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT); | ||
1123 | #endif | ||
1124 | /* | 1119 | /* |
1125 | * Round ends inward to granule boundaries | 1120 | * Round ends inward to granule boundaries |
1126 | * Give trimmings to uncached allocator | 1121 | * Give trimmings to uncached allocator |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 142c3b785944..da5237d636d6 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -294,14 +294,6 @@ find_memory (void) | |||
294 | alloc_per_cpu_data(); | 294 | alloc_per_cpu_data(); |
295 | } | 295 | } |
296 | 296 | ||
297 | static int count_pages(u64 start, u64 end, void *arg) | ||
298 | { | ||
299 | unsigned long *count = arg; | ||
300 | |||
301 | *count += (end - start) >> PAGE_SHIFT; | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | /* | 297 | /* |
306 | * Set up the page tables. | 298 | * Set up the page tables. |
307 | */ | 299 | */ |
@@ -312,9 +304,6 @@ paging_init (void) | |||
312 | unsigned long max_dma; | 304 | unsigned long max_dma; |
313 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 305 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
314 | 306 | ||
315 | num_physpages = 0; | ||
316 | efi_memmap_walk(count_pages, &num_physpages); | ||
317 | |||
318 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 307 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
319 | #ifdef CONFIG_ZONE_DMA | 308 | #ifdef CONFIG_ZONE_DMA |
320 | max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 309 | max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 7253d83650bf..2de08f4d9930 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -37,7 +37,6 @@ struct early_node_data { | |||
37 | struct ia64_node_data *node_data; | 37 | struct ia64_node_data *node_data; |
38 | unsigned long pernode_addr; | 38 | unsigned long pernode_addr; |
39 | unsigned long pernode_size; | 39 | unsigned long pernode_size; |
40 | unsigned long num_physpages; | ||
41 | #ifdef CONFIG_ZONE_DMA | 40 | #ifdef CONFIG_ZONE_DMA |
42 | unsigned long num_dma_physpages; | 41 | unsigned long num_dma_physpages; |
43 | #endif | 42 | #endif |
@@ -732,7 +731,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n | |||
732 | { | 731 | { |
733 | unsigned long end = start + len; | 732 | unsigned long end = start + len; |
734 | 733 | ||
735 | mem_data[node].num_physpages += len >> PAGE_SHIFT; | ||
736 | #ifdef CONFIG_ZONE_DMA | 734 | #ifdef CONFIG_ZONE_DMA |
737 | if (start <= __pa(MAX_DMA_ADDRESS)) | 735 | if (start <= __pa(MAX_DMA_ADDRESS)) |
738 | mem_data[node].num_dma_physpages += | 736 | mem_data[node].num_dma_physpages += |
@@ -778,7 +776,6 @@ void __init paging_init(void) | |||
778 | #endif | 776 | #endif |
779 | 777 | ||
780 | for_each_online_node(node) { | 778 | for_each_online_node(node) { |
781 | num_physpages += mem_data[node].num_physpages; | ||
782 | pfn_offset = mem_data[node].min_pfn; | 779 | pfn_offset = mem_data[node].min_pfn; |
783 | 780 | ||
784 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 781 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index d1fe4b402601..b6f7f43424ec 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -154,9 +154,8 @@ ia64_init_addr_space (void) | |||
154 | void | 154 | void |
155 | free_initmem (void) | 155 | free_initmem (void) |
156 | { | 156 | { |
157 | free_reserved_area((unsigned long)ia64_imva(__init_begin), | 157 | free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end), |
158 | (unsigned long)ia64_imva(__init_end), | 158 | -1, "unused kernel"); |
159 | 0, "unused kernel"); | ||
160 | } | 159 | } |
161 | 160 | ||
162 | void __init | 161 | void __init |
@@ -546,19 +545,6 @@ int __init register_active_ranges(u64 start, u64 len, int nid) | |||
546 | return 0; | 545 | return 0; |
547 | } | 546 | } |
548 | 547 | ||
549 | static int __init | ||
550 | count_reserved_pages(u64 start, u64 end, void *arg) | ||
551 | { | ||
552 | unsigned long num_reserved = 0; | ||
553 | unsigned long *count = arg; | ||
554 | |||
555 | for (; start < end; start += PAGE_SIZE) | ||
556 | if (PageReserved(virt_to_page(start))) | ||
557 | ++num_reserved; | ||
558 | *count += num_reserved; | ||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | int | 548 | int |
563 | find_max_min_low_pfn (u64 start, u64 end, void *arg) | 549 | find_max_min_low_pfn (u64 start, u64 end, void *arg) |
564 | { | 550 | { |
@@ -597,8 +583,6 @@ __setup("nolwsys", nolwsys_setup); | |||
597 | void __init | 583 | void __init |
598 | mem_init (void) | 584 | mem_init (void) |
599 | { | 585 | { |
600 | long reserved_pages, codesize, datasize, initsize; | ||
601 | pg_data_t *pgdat; | ||
602 | int i; | 586 | int i; |
603 | 587 | ||
604 | BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); | 588 | BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); |
@@ -616,27 +600,12 @@ mem_init (void) | |||
616 | 600 | ||
617 | #ifdef CONFIG_FLATMEM | 601 | #ifdef CONFIG_FLATMEM |
618 | BUG_ON(!mem_map); | 602 | BUG_ON(!mem_map); |
619 | max_mapnr = max_low_pfn; | ||
620 | #endif | 603 | #endif |
621 | 604 | ||
605 | set_max_mapnr(max_low_pfn); | ||
622 | high_memory = __va(max_low_pfn * PAGE_SIZE); | 606 | high_memory = __va(max_low_pfn * PAGE_SIZE); |
623 | 607 | free_all_bootmem(); | |
624 | for_each_online_pgdat(pgdat) | 608 | mem_init_print_info(NULL); |
625 | if (pgdat->bdata->node_bootmem_map) | ||
626 | totalram_pages += free_all_bootmem_node(pgdat); | ||
627 | |||
628 | reserved_pages = 0; | ||
629 | efi_memmap_walk(count_reserved_pages, &reserved_pages); | ||
630 | |||
631 | codesize = (unsigned long) _etext - (unsigned long) _stext; | ||
632 | datasize = (unsigned long) _edata - (unsigned long) _etext; | ||
633 | initsize = (unsigned long) __init_end - (unsigned long) __init_begin; | ||
634 | |||
635 | printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, " | ||
636 | "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10), | ||
637 | num_physpages << (PAGE_SHIFT - 10), codesize >> 10, | ||
638 | reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); | ||
639 | |||
640 | 609 | ||
641 | /* | 610 | /* |
642 | * For fsyscall entrpoints with no light-weight handler, use the ordinary | 611 | * For fsyscall entrpoints with no light-weight handler, use the ordinary |
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c index 2c468e8b5853..27196303ce36 100644 --- a/arch/m32r/mm/discontig.c +++ b/arch/m32r/mm/discontig.c | |||
@@ -129,11 +129,10 @@ unsigned long __init setup_memory(void) | |||
129 | #define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn) | 129 | #define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn) |
130 | #define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) | 130 | #define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) |
131 | 131 | ||
132 | unsigned long __init zone_sizes_init(void) | 132 | void __init zone_sizes_init(void) |
133 | { | 133 | { |
134 | unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES]; | 134 | unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES]; |
135 | unsigned long low, start_pfn; | 135 | unsigned long low, start_pfn; |
136 | unsigned long holes = 0; | ||
137 | int nid, i; | 136 | int nid, i; |
138 | mem_prof_t *mp; | 137 | mem_prof_t *mp; |
139 | 138 | ||
@@ -147,7 +146,6 @@ unsigned long __init zone_sizes_init(void) | |||
147 | low = MAX_LOW_PFN(nid); | 146 | low = MAX_LOW_PFN(nid); |
148 | zones_size[ZONE_DMA] = low - start_pfn; | 147 | zones_size[ZONE_DMA] = low - start_pfn; |
149 | zholes_size[ZONE_DMA] = mp->holes; | 148 | zholes_size[ZONE_DMA] = mp->holes; |
150 | holes += zholes_size[ZONE_DMA]; | ||
151 | 149 | ||
152 | node_set_state(nid, N_NORMAL_MEMORY); | 150 | node_set_state(nid, N_NORMAL_MEMORY); |
153 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); | 151 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); |
@@ -161,6 +159,4 @@ unsigned long __init zone_sizes_init(void) | |||
161 | NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0; | 159 | NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0; |
162 | NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0; | 160 | NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0; |
163 | NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0; | 161 | NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0; |
164 | |||
165 | return holes; | ||
166 | } | 162 | } |
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index ab4cbce91a9b..0d4146f644dc 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c | |||
@@ -40,7 +40,6 @@ unsigned long mmu_context_cache_dat; | |||
40 | #else | 40 | #else |
41 | unsigned long mmu_context_cache_dat[NR_CPUS]; | 41 | unsigned long mmu_context_cache_dat[NR_CPUS]; |
42 | #endif | 42 | #endif |
43 | static unsigned long hole_pages; | ||
44 | 43 | ||
45 | /* | 44 | /* |
46 | * function prototype | 45 | * function prototype |
@@ -57,7 +56,7 @@ void free_initrd_mem(unsigned long, unsigned long); | |||
57 | #define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) | 56 | #define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) |
58 | 57 | ||
59 | #ifndef CONFIG_DISCONTIGMEM | 58 | #ifndef CONFIG_DISCONTIGMEM |
60 | unsigned long __init zone_sizes_init(void) | 59 | void __init zone_sizes_init(void) |
61 | { | 60 | { |
62 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 61 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
63 | unsigned long max_dma; | 62 | unsigned long max_dma; |
@@ -83,11 +82,9 @@ unsigned long __init zone_sizes_init(void) | |||
83 | #endif /* CONFIG_MMU */ | 82 | #endif /* CONFIG_MMU */ |
84 | 83 | ||
85 | free_area_init_node(0, zones_size, start_pfn, 0); | 84 | free_area_init_node(0, zones_size, start_pfn, 0); |
86 | |||
87 | return 0; | ||
88 | } | 85 | } |
89 | #else /* CONFIG_DISCONTIGMEM */ | 86 | #else /* CONFIG_DISCONTIGMEM */ |
90 | extern unsigned long zone_sizes_init(void); | 87 | extern void zone_sizes_init(void); |
91 | #endif /* CONFIG_DISCONTIGMEM */ | 88 | #endif /* CONFIG_DISCONTIGMEM */ |
92 | 89 | ||
93 | /*======================================================================* | 90 | /*======================================================================* |
@@ -105,24 +102,7 @@ void __init paging_init(void) | |||
105 | for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++) | 102 | for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++) |
106 | pgd_val(pg_dir[i]) = 0; | 103 | pgd_val(pg_dir[i]) = 0; |
107 | #endif /* CONFIG_MMU */ | 104 | #endif /* CONFIG_MMU */ |
108 | hole_pages = zone_sizes_init(); | 105 | zone_sizes_init(); |
109 | } | ||
110 | |||
111 | int __init reservedpages_count(void) | ||
112 | { | ||
113 | int reservedpages, nid, i; | ||
114 | |||
115 | reservedpages = 0; | ||
116 | for_each_online_node(nid) { | ||
117 | unsigned long flags; | ||
118 | pgdat_resize_lock(NODE_DATA(nid), &flags); | ||
119 | for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++) | ||
120 | if (PageReserved(nid_page_nr(nid, i))) | ||
121 | reservedpages++; | ||
122 | pgdat_resize_unlock(NODE_DATA(nid), &flags); | ||
123 | } | ||
124 | |||
125 | return reservedpages; | ||
126 | } | 106 | } |
127 | 107 | ||
128 | /*======================================================================* | 108 | /*======================================================================* |
@@ -131,48 +111,20 @@ int __init reservedpages_count(void) | |||
131 | *======================================================================*/ | 111 | *======================================================================*/ |
132 | void __init mem_init(void) | 112 | void __init mem_init(void) |
133 | { | 113 | { |
134 | int codesize, reservedpages, datasize, initsize; | ||
135 | int nid; | ||
136 | #ifndef CONFIG_MMU | 114 | #ifndef CONFIG_MMU |
137 | extern unsigned long memory_end; | 115 | extern unsigned long memory_end; |
138 | #endif | ||
139 | |||
140 | num_physpages = 0; | ||
141 | for_each_online_node(nid) | ||
142 | num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1; | ||
143 | |||
144 | num_physpages -= hole_pages; | ||
145 | 116 | ||
146 | #ifndef CONFIG_DISCONTIGMEM | ||
147 | max_mapnr = num_physpages; | ||
148 | #endif /* CONFIG_DISCONTIGMEM */ | ||
149 | |||
150 | #ifdef CONFIG_MMU | ||
151 | high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0))); | ||
152 | #else | ||
153 | high_memory = (void *)(memory_end & PAGE_MASK); | 117 | high_memory = (void *)(memory_end & PAGE_MASK); |
118 | #else | ||
119 | high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0))); | ||
154 | #endif /* CONFIG_MMU */ | 120 | #endif /* CONFIG_MMU */ |
155 | 121 | ||
156 | /* clear the zero-page */ | 122 | /* clear the zero-page */ |
157 | memset(empty_zero_page, 0, PAGE_SIZE); | 123 | memset(empty_zero_page, 0, PAGE_SIZE); |
158 | 124 | ||
159 | /* this will put all low memory onto the freelists */ | 125 | set_max_mapnr(get_num_physpages()); |
160 | for_each_online_node(nid) | 126 | free_all_bootmem(); |
161 | totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); | 127 | mem_init_print_info(NULL); |
162 | |||
163 | reservedpages = reservedpages_count() - hole_pages; | ||
164 | codesize = (unsigned long) &_etext - (unsigned long)&_text; | ||
165 | datasize = (unsigned long) &_edata - (unsigned long)&_etext; | ||
166 | initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin; | ||
167 | |||
168 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | ||
169 | "%dk reserved, %dk data, %dk init)\n", | ||
170 | nr_free_pages() << (PAGE_SHIFT-10), | ||
171 | num_physpages << (PAGE_SHIFT-10), | ||
172 | codesize >> 10, | ||
173 | reservedpages << (PAGE_SHIFT-10), | ||
174 | datasize >> 10, | ||
175 | initsize >> 10); | ||
176 | } | 128 | } |
177 | 129 | ||
178 | /*======================================================================* | 130 | /*======================================================================* |
@@ -181,7 +133,7 @@ void __init mem_init(void) | |||
181 | *======================================================================*/ | 133 | *======================================================================*/ |
182 | void free_initmem(void) | 134 | void free_initmem(void) |
183 | { | 135 | { |
184 | free_initmem_default(0); | 136 | free_initmem_default(-1); |
185 | } | 137 | } |
186 | 138 | ||
187 | #ifdef CONFIG_BLK_DEV_INITRD | 139 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -191,6 +143,6 @@ void free_initmem(void) | |||
191 | *======================================================================*/ | 143 | *======================================================================*/ |
192 | void free_initrd_mem(unsigned long start, unsigned long end) | 144 | void free_initrd_mem(unsigned long start, unsigned long end) |
193 | { | 145 | { |
194 | free_reserved_area(start, end, 0, "initrd"); | 146 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
195 | } | 147 | } |
196 | #endif | 148 | #endif |
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 1af2ca3411f6..6b4baa6e4d31 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c | |||
@@ -110,7 +110,7 @@ void __init paging_init(void) | |||
110 | void free_initmem(void) | 110 | void free_initmem(void) |
111 | { | 111 | { |
112 | #ifndef CONFIG_MMU_SUN3 | 112 | #ifndef CONFIG_MMU_SUN3 |
113 | free_initmem_default(0); | 113 | free_initmem_default(-1); |
114 | #endif /* CONFIG_MMU_SUN3 */ | 114 | #endif /* CONFIG_MMU_SUN3 */ |
115 | } | 115 | } |
116 | 116 | ||
@@ -146,38 +146,11 @@ void __init print_memmap(void) | |||
146 | MLK_ROUNDUP(__bss_start, __bss_stop)); | 146 | MLK_ROUNDUP(__bss_start, __bss_stop)); |
147 | } | 147 | } |
148 | 148 | ||
149 | void __init mem_init(void) | 149 | static inline void init_pointer_tables(void) |
150 | { | 150 | { |
151 | pg_data_t *pgdat; | 151 | #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) |
152 | int codepages = 0; | ||
153 | int datapages = 0; | ||
154 | int initpages = 0; | ||
155 | int i; | 152 | int i; |
156 | 153 | ||
157 | /* this will put all memory onto the freelists */ | ||
158 | totalram_pages = num_physpages = 0; | ||
159 | for_each_online_pgdat(pgdat) { | ||
160 | num_physpages += pgdat->node_present_pages; | ||
161 | |||
162 | totalram_pages += free_all_bootmem_node(pgdat); | ||
163 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
164 | struct page *page = pgdat->node_mem_map + i; | ||
165 | char *addr = page_to_virt(page); | ||
166 | |||
167 | if (!PageReserved(page)) | ||
168 | continue; | ||
169 | if (addr >= _text && | ||
170 | addr < _etext) | ||
171 | codepages++; | ||
172 | else if (addr >= __init_begin && | ||
173 | addr < __init_end) | ||
174 | initpages++; | ||
175 | else | ||
176 | datapages++; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) | ||
181 | /* insert pointer tables allocated so far into the tablelist */ | 154 | /* insert pointer tables allocated so far into the tablelist */ |
182 | init_pointer_table((unsigned long)kernel_pg_dir); | 155 | init_pointer_table((unsigned long)kernel_pg_dir); |
183 | for (i = 0; i < PTRS_PER_PGD; i++) { | 156 | for (i = 0; i < PTRS_PER_PGD; i++) { |
@@ -189,19 +162,20 @@ void __init mem_init(void) | |||
189 | if (zero_pgtable) | 162 | if (zero_pgtable) |
190 | init_pointer_table((unsigned long)zero_pgtable); | 163 | init_pointer_table((unsigned long)zero_pgtable); |
191 | #endif | 164 | #endif |
165 | } | ||
192 | 166 | ||
193 | pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", | 167 | void __init mem_init(void) |
194 | nr_free_pages() << (PAGE_SHIFT-10), | 168 | { |
195 | totalram_pages << (PAGE_SHIFT-10), | 169 | /* this will put all memory onto the freelists */ |
196 | codepages << (PAGE_SHIFT-10), | 170 | free_all_bootmem(); |
197 | datapages << (PAGE_SHIFT-10), | 171 | init_pointer_tables(); |
198 | initpages << (PAGE_SHIFT-10)); | 172 | mem_init_print_info(NULL); |
199 | print_memmap(); | 173 | print_memmap(); |
200 | } | 174 | } |
201 | 175 | ||
202 | #ifdef CONFIG_BLK_DEV_INITRD | 176 | #ifdef CONFIG_BLK_DEV_INITRD |
203 | void free_initrd_mem(unsigned long start, unsigned long end) | 177 | void free_initrd_mem(unsigned long start, unsigned long end) |
204 | { | 178 | { |
205 | free_reserved_area(start, end, 0, "initrd"); | 179 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
206 | } | 180 | } |
207 | #endif | 181 | #endif |
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index d05b8455c44c..28813f164730 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c | |||
@@ -376,34 +376,21 @@ void __init paging_init(unsigned long mem_end) | |||
376 | 376 | ||
377 | void __init mem_init(void) | 377 | void __init mem_init(void) |
378 | { | 378 | { |
379 | int nid; | ||
380 | |||
381 | #ifdef CONFIG_HIGHMEM | 379 | #ifdef CONFIG_HIGHMEM |
382 | unsigned long tmp; | 380 | unsigned long tmp; |
381 | |||
382 | /* | ||
383 | * Explicitly reset zone->managed_pages because highmem pages are | ||
384 | * freed before calling free_all_bootmem(); | ||
385 | */ | ||
386 | reset_all_zones_managed_pages(); | ||
383 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) | 387 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) |
384 | free_highmem_page(pfn_to_page(tmp)); | 388 | free_highmem_page(pfn_to_page(tmp)); |
385 | num_physpages += totalhigh_pages; | ||
386 | #endif /* CONFIG_HIGHMEM */ | 389 | #endif /* CONFIG_HIGHMEM */ |
387 | 390 | ||
388 | for_each_online_node(nid) { | 391 | free_all_bootmem(); |
389 | pg_data_t *pgdat = NODE_DATA(nid); | 392 | mem_init_print_info(NULL); |
390 | unsigned long node_pages = 0; | ||
391 | |||
392 | num_physpages += pgdat->node_present_pages; | ||
393 | |||
394 | if (pgdat->node_spanned_pages) | ||
395 | node_pages = free_all_bootmem_node(pgdat); | ||
396 | |||
397 | totalram_pages += node_pages; | ||
398 | } | ||
399 | |||
400 | pr_info("Memory: %luk/%luk available\n", | ||
401 | (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), | ||
402 | num_physpages << (PAGE_SHIFT - 10)); | ||
403 | |||
404 | show_mem(0); | 393 | show_mem(0); |
405 | |||
406 | return; | ||
407 | } | 394 | } |
408 | 395 | ||
409 | void free_initmem(void) | 396 | void free_initmem(void) |
@@ -414,7 +401,8 @@ void free_initmem(void) | |||
414 | #ifdef CONFIG_BLK_DEV_INITRD | 401 | #ifdef CONFIG_BLK_DEV_INITRD |
415 | void free_initrd_mem(unsigned long start, unsigned long end) | 402 | void free_initrd_mem(unsigned long start, unsigned long end) |
416 | { | 403 | { |
417 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); | 404 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
405 | "initrd"); | ||
418 | } | 406 | } |
419 | #endif | 407 | #endif |
420 | 408 | ||
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 85a5ae8e9bd0..fd850879854d 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h | |||
@@ -168,7 +168,6 @@ extern int page_is_ram(unsigned long pfn); | |||
168 | # else /* CONFIG_MMU */ | 168 | # else /* CONFIG_MMU */ |
169 | # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) | 169 | # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) |
170 | # define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) | 170 | # define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) |
171 | # define VALID_PAGE(page) ((page - mem_map) < max_mapnr) | ||
172 | # endif /* CONFIG_MMU */ | 171 | # endif /* CONFIG_MMU */ |
173 | 172 | ||
174 | # endif /* __ASSEMBLY__ */ | 173 | # endif /* __ASSEMBLY__ */ |
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index b38ae3acfeb4..74c7bcc1e82d 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -71,24 +71,17 @@ static void __init highmem_init(void) | |||
71 | kmap_prot = PAGE_KERNEL; | 71 | kmap_prot = PAGE_KERNEL; |
72 | } | 72 | } |
73 | 73 | ||
74 | static unsigned long highmem_setup(void) | 74 | static void highmem_setup(void) |
75 | { | 75 | { |
76 | unsigned long pfn; | 76 | unsigned long pfn; |
77 | unsigned long reservedpages = 0; | ||
78 | 77 | ||
79 | for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { | 78 | for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { |
80 | struct page *page = pfn_to_page(pfn); | 79 | struct page *page = pfn_to_page(pfn); |
81 | 80 | ||
82 | /* FIXME not sure about */ | 81 | /* FIXME not sure about */ |
83 | if (memblock_is_reserved(pfn << PAGE_SHIFT)) | 82 | if (!memblock_is_reserved(pfn << PAGE_SHIFT)) |
84 | continue; | 83 | free_highmem_page(page); |
85 | free_highmem_page(page); | ||
86 | reservedpages++; | ||
87 | } | 84 | } |
88 | pr_info("High memory: %luk\n", | ||
89 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
90 | |||
91 | return reservedpages; | ||
92 | } | 85 | } |
93 | #endif /* CONFIG_HIGHMEM */ | 86 | #endif /* CONFIG_HIGHMEM */ |
94 | 87 | ||
@@ -167,13 +160,12 @@ void __init setup_memory(void) | |||
167 | * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) | 160 | * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) |
168 | * max_low_pfn | 161 | * max_low_pfn |
169 | * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) | 162 | * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) |
170 | * num_physpages - number of all pages | ||
171 | */ | 163 | */ |
172 | 164 | ||
173 | /* memory start is from the kernel end (aligned) to higher addr */ | 165 | /* memory start is from the kernel end (aligned) to higher addr */ |
174 | min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ | 166 | min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ |
175 | /* RAM is assumed contiguous */ | 167 | /* RAM is assumed contiguous */ |
176 | num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; | 168 | max_mapnr = memory_size >> PAGE_SHIFT; |
177 | max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; | 169 | max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; |
178 | max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; | 170 | max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; |
179 | 171 | ||
@@ -235,57 +227,26 @@ void __init setup_memory(void) | |||
235 | #ifdef CONFIG_BLK_DEV_INITRD | 227 | #ifdef CONFIG_BLK_DEV_INITRD |
236 | void free_initrd_mem(unsigned long start, unsigned long end) | 228 | void free_initrd_mem(unsigned long start, unsigned long end) |
237 | { | 229 | { |
238 | free_reserved_area(start, end, 0, "initrd"); | 230 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
239 | } | 231 | } |
240 | #endif | 232 | #endif |
241 | 233 | ||
242 | void free_initmem(void) | 234 | void free_initmem(void) |
243 | { | 235 | { |
244 | free_initmem_default(0); | 236 | free_initmem_default(-1); |
245 | } | 237 | } |
246 | 238 | ||
247 | void __init mem_init(void) | 239 | void __init mem_init(void) |
248 | { | 240 | { |
249 | pg_data_t *pgdat; | ||
250 | unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; | ||
251 | |||
252 | high_memory = (void *)__va(memory_start + lowmem_size - 1); | 241 | high_memory = (void *)__va(memory_start + lowmem_size - 1); |
253 | 242 | ||
254 | /* this will put all memory onto the freelists */ | 243 | /* this will put all memory onto the freelists */ |
255 | totalram_pages += free_all_bootmem(); | 244 | free_all_bootmem(); |
256 | |||
257 | for_each_online_pgdat(pgdat) { | ||
258 | unsigned long i; | ||
259 | struct page *page; | ||
260 | |||
261 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
262 | if (!pfn_valid(pgdat->node_start_pfn + i)) | ||
263 | continue; | ||
264 | page = pgdat_page_nr(pgdat, i); | ||
265 | if (PageReserved(page)) | ||
266 | reservedpages++; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | #ifdef CONFIG_HIGHMEM | 245 | #ifdef CONFIG_HIGHMEM |
271 | reservedpages -= highmem_setup(); | 246 | highmem_setup(); |
272 | #endif | 247 | #endif |
273 | 248 | ||
274 | codesize = (unsigned long)&_sdata - (unsigned long)&_stext; | 249 | mem_init_print_info(NULL); |
275 | datasize = (unsigned long)&_edata - (unsigned long)&_sdata; | ||
276 | initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; | ||
277 | bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; | ||
278 | |||
279 | pr_info("Memory: %luk/%luk available (%luk kernel code, ", | ||
280 | nr_free_pages() << (PAGE_SHIFT-10), | ||
281 | num_physpages << (PAGE_SHIFT-10), | ||
282 | codesize >> 10); | ||
283 | pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n", | ||
284 | reservedpages << (PAGE_SHIFT-10), | ||
285 | datasize >> 10, | ||
286 | bsssize >> 10, | ||
287 | initsize >> 10); | ||
288 | |||
289 | #ifdef CONFIG_MMU | 250 | #ifdef CONFIG_MMU |
290 | pr_info("Kernel virtual memory layout:\n"); | 251 | pr_info("Kernel virtual memory layout:\n"); |
291 | pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); | 252 | pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); |
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index 3be9e7bb30ff..f291cf99b03a 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c | |||
@@ -4,16 +4,6 @@ | |||
4 | #include <asm/uaccess.h> | 4 | #include <asm/uaccess.h> |
5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
6 | 6 | ||
7 | static int __init parse_savemaxmem(char *p) | ||
8 | { | ||
9 | if (p) | ||
10 | saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; | ||
11 | |||
12 | return 1; | ||
13 | } | ||
14 | __setup("savemaxmem=", parse_savemaxmem); | ||
15 | |||
16 | |||
17 | static void *kdump_buf_page; | 7 | static void *kdump_buf_page; |
18 | 8 | ||
19 | /** | 9 | /** |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 9b973e0af9cb..4e73f10a7519 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -359,11 +359,24 @@ void __init paging_init(void) | |||
359 | static struct kcore_list kcore_kseg0; | 359 | static struct kcore_list kcore_kseg0; |
360 | #endif | 360 | #endif |
361 | 361 | ||
362 | void __init mem_init(void) | 362 | static inline void mem_init_free_highmem(void) |
363 | { | 363 | { |
364 | unsigned long codesize, reservedpages, datasize, initsize; | 364 | #ifdef CONFIG_HIGHMEM |
365 | unsigned long tmp, ram; | 365 | unsigned long tmp; |
366 | 366 | ||
367 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { | ||
368 | struct page *page = pfn_to_page(tmp); | ||
369 | |||
370 | if (!page_is_ram(tmp)) | ||
371 | SetPageReserved(page); | ||
372 | else | ||
373 | free_highmem_page(page); | ||
374 | } | ||
375 | #endif | ||
376 | } | ||
377 | |||
378 | void __init mem_init(void) | ||
379 | { | ||
367 | #ifdef CONFIG_HIGHMEM | 380 | #ifdef CONFIG_HIGHMEM |
368 | #ifdef CONFIG_DISCONTIGMEM | 381 | #ifdef CONFIG_DISCONTIGMEM |
369 | #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" | 382 | #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" |
@@ -374,34 +387,10 @@ void __init mem_init(void) | |||
374 | #endif | 387 | #endif |
375 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | 388 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
376 | 389 | ||
377 | totalram_pages += free_all_bootmem(); | 390 | free_all_bootmem(); |
378 | setup_zero_pages(); /* Setup zeroed pages. */ | 391 | setup_zero_pages(); /* Setup zeroed pages. */ |
379 | 392 | mem_init_free_highmem(); | |
380 | reservedpages = ram = 0; | 393 | mem_init_print_info(NULL); |
381 | for (tmp = 0; tmp < max_low_pfn; tmp++) | ||
382 | if (page_is_ram(tmp) && pfn_valid(tmp)) { | ||
383 | ram++; | ||
384 | if (PageReserved(pfn_to_page(tmp))) | ||
385 | reservedpages++; | ||
386 | } | ||
387 | num_physpages = ram; | ||
388 | |||
389 | #ifdef CONFIG_HIGHMEM | ||
390 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { | ||
391 | struct page *page = pfn_to_page(tmp); | ||
392 | |||
393 | if (!page_is_ram(tmp)) { | ||
394 | SetPageReserved(page); | ||
395 | continue; | ||
396 | } | ||
397 | free_highmem_page(page); | ||
398 | } | ||
399 | num_physpages += totalhigh_pages; | ||
400 | #endif | ||
401 | |||
402 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
403 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
404 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
405 | 394 | ||
406 | #ifdef CONFIG_64BIT | 395 | #ifdef CONFIG_64BIT |
407 | if ((unsigned long) &_text > (unsigned long) CKSEG0) | 396 | if ((unsigned long) &_text > (unsigned long) CKSEG0) |
@@ -410,16 +399,6 @@ void __init mem_init(void) | |||
410 | kclist_add(&kcore_kseg0, (void *) CKSEG0, | 399 | kclist_add(&kcore_kseg0, (void *) CKSEG0, |
411 | 0x80000000 - 4, KCORE_TEXT); | 400 | 0x80000000 - 4, KCORE_TEXT); |
412 | #endif | 401 | #endif |
413 | |||
414 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | ||
415 | "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", | ||
416 | nr_free_pages() << (PAGE_SHIFT-10), | ||
417 | ram << (PAGE_SHIFT-10), | ||
418 | codesize >> 10, | ||
419 | reservedpages << (PAGE_SHIFT-10), | ||
420 | datasize >> 10, | ||
421 | initsize >> 10, | ||
422 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
423 | } | 402 | } |
424 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 403 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
425 | 404 | ||
@@ -440,7 +419,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) | |||
440 | #ifdef CONFIG_BLK_DEV_INITRD | 419 | #ifdef CONFIG_BLK_DEV_INITRD |
441 | void free_initrd_mem(unsigned long start, unsigned long end) | 420 | void free_initrd_mem(unsigned long start, unsigned long end) |
442 | { | 421 | { |
443 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); | 422 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
423 | "initrd"); | ||
444 | } | 424 | } |
445 | #endif | 425 | #endif |
446 | 426 | ||
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 879077b01155..cb1ef9984069 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c | |||
@@ -89,7 +89,7 @@ static inline u32 ltq_calc_bar11mask(void) | |||
89 | u32 mem, bar11mask; | 89 | u32 mem, bar11mask; |
90 | 90 | ||
91 | /* BAR11MASK value depends on available memory on system. */ | 91 | /* BAR11MASK value depends on available memory on system. */ |
92 | mem = num_physpages * PAGE_SIZE; | 92 | mem = get_num_physpages() * PAGE_SIZE; |
93 | bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; | 93 | bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; |
94 | 94 | ||
95 | return bar11mask; | 95 | return bar11mask; |
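pci-lantiq previously sized BAR11MASK from the global num_physpages, which this series removes; it now asks the MM layer via get_num_physpages(), a helper introduced by the same patch set that reports the number of present physical pages. The computation itself is unchanged, as in this usage sketch taken from the hunk above:

	/* total system RAM in bytes, from the generic page accounting */
	u32 mem = get_num_physpages() * PAGE_SIZE;
	u32 bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8;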
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 1230f56429d7..a95c00f5fb96 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c | |||
@@ -357,8 +357,6 @@ static void __init szmem(void) | |||
357 | int slot; | 357 | int slot; |
358 | cnodeid_t node; | 358 | cnodeid_t node; |
359 | 359 | ||
360 | num_physpages = 0; | ||
361 | |||
362 | for_each_online_node(node) { | 360 | for_each_online_node(node) { |
363 | nodebytes = 0; | 361 | nodebytes = 0; |
364 | for (slot = 0; slot < MAX_MEM_SLOTS; slot++) { | 362 | for (slot = 0; slot < MAX_MEM_SLOTS; slot++) { |
@@ -381,7 +379,6 @@ static void __init szmem(void) | |||
381 | slot = MAX_MEM_SLOTS; | 379 | slot = MAX_MEM_SLOTS; |
382 | continue; | 380 | continue; |
383 | } | 381 | } |
384 | num_physpages += slot_psize; | ||
385 | memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), | 382 | memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), |
386 | PFN_PHYS(slot_psize), node); | 383 | PFN_PHYS(slot_psize), node); |
387 | } | 384 | } |
@@ -480,32 +477,8 @@ void __init paging_init(void) | |||
480 | 477 | ||
481 | void __init mem_init(void) | 478 | void __init mem_init(void) |
482 | { | 479 | { |
483 | unsigned long codesize, datasize, initsize, tmp; | 480 | high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); |
484 | unsigned node; | 481 | free_all_bootmem(); |
485 | |||
486 | high_memory = (void *) __va(num_physpages << PAGE_SHIFT); | ||
487 | |||
488 | for_each_online_node(node) { | ||
489 | /* | ||
490 | * This will free up the bootmem, ie, slot 0 memory. | ||
491 | */ | ||
492 | totalram_pages += free_all_bootmem_node(NODE_DATA(node)); | ||
493 | } | ||
494 | |||
495 | setup_zero_pages(); /* This comes from node 0 */ | 482 | setup_zero_pages(); /* This comes from node 0 */ |
496 | 483 | mem_init_print_info(NULL); | |
497 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
498 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
499 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
500 | |||
501 | tmp = nr_free_pages(); | ||
502 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | ||
503 | "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", | ||
504 | tmp << (PAGE_SHIFT-10), | ||
505 | num_physpages << (PAGE_SHIFT-10), | ||
506 | codesize >> 10, | ||
507 | (num_physpages - tmp) << (PAGE_SHIFT-10), | ||
508 | datasize >> 10, | ||
509 | initsize >> 10, | ||
510 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
511 | } | 484 | } |
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index 5a8ace63a6b4..97a1ec0beeec 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c | |||
@@ -99,43 +99,21 @@ void __init paging_init(void) | |||
99 | */ | 99 | */ |
100 | void __init mem_init(void) | 100 | void __init mem_init(void) |
101 | { | 101 | { |
102 | int codesize, reservedpages, datasize, initsize; | ||
103 | int tmp; | ||
104 | |||
105 | BUG_ON(!mem_map); | 102 | BUG_ON(!mem_map); |
106 | 103 | ||
107 | #define START_PFN (contig_page_data.bdata->node_min_pfn) | 104 | #define START_PFN (contig_page_data.bdata->node_min_pfn) |
108 | #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) | 105 | #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) |
109 | 106 | ||
110 | max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; | 107 | max_mapnr = MAX_LOW_PFN - START_PFN; |
111 | high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); | 108 | high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); |
112 | 109 | ||
113 | /* clear the zero-page */ | 110 | /* clear the zero-page */ |
114 | memset(empty_zero_page, 0, PAGE_SIZE); | 111 | memset(empty_zero_page, 0, PAGE_SIZE); |
115 | 112 | ||
116 | /* this will put all low memory onto the freelists */ | 113 | /* this will put all low memory onto the freelists */ |
117 | totalram_pages += free_all_bootmem(); | 114 | free_all_bootmem(); |
118 | 115 | ||
119 | reservedpages = 0; | 116 | mem_init_print_info(NULL); |
120 | for (tmp = 0; tmp < num_physpages; tmp++) | ||
121 | if (PageReserved(&mem_map[tmp])) | ||
122 | reservedpages++; | ||
123 | |||
124 | codesize = (unsigned long) &_etext - (unsigned long) &_stext; | ||
125 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
126 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
127 | |||
128 | printk(KERN_INFO | ||
129 | "Memory: %luk/%luk available" | ||
130 | " (%dk kernel code, %dk reserved, %dk data, %dk init," | ||
131 | " %ldk highmem)\n", | ||
132 | nr_free_pages() << (PAGE_SHIFT - 10), | ||
133 | max_mapnr << (PAGE_SHIFT - 10), | ||
134 | codesize >> 10, | ||
135 | reservedpages << (PAGE_SHIFT - 10), | ||
136 | datasize >> 10, | ||
137 | initsize >> 10, | ||
138 | totalhigh_pages << (PAGE_SHIFT - 10)); | ||
139 | } | 117 | } |
140 | 118 | ||
141 | /* | 119 | /* |
@@ -152,6 +130,7 @@ void free_initmem(void) | |||
152 | #ifdef CONFIG_BLK_DEV_INITRD | 130 | #ifdef CONFIG_BLK_DEV_INITRD |
153 | void free_initrd_mem(unsigned long start, unsigned long end) | 131 | void free_initrd_mem(unsigned long start, unsigned long end) |
154 | { | 132 | { |
155 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); | 133 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
134 | "initrd"); | ||
156 | } | 135 | } |
157 | #endif | 136 | #endif |
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index b3cbc6703837..7f94652311d7 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c | |||
@@ -202,56 +202,20 @@ void __init paging_init(void) | |||
202 | 202 | ||
203 | /* References to section boundaries */ | 203 | /* References to section boundaries */ |
204 | 204 | ||
205 | static int __init free_pages_init(void) | ||
206 | { | ||
207 | int reservedpages, pfn; | ||
208 | |||
209 | /* this will put all low memory onto the freelists */ | ||
210 | totalram_pages = free_all_bootmem(); | ||
211 | |||
212 | reservedpages = 0; | ||
213 | for (pfn = 0; pfn < max_low_pfn; pfn++) { | ||
214 | /* | ||
215 | * Only count reserved RAM pages | ||
216 | */ | ||
217 | if (PageReserved(mem_map + pfn)) | ||
218 | reservedpages++; | ||
219 | } | ||
220 | |||
221 | return reservedpages; | ||
222 | } | ||
223 | |||
224 | static void __init set_max_mapnr_init(void) | ||
225 | { | ||
226 | max_mapnr = num_physpages = max_low_pfn; | ||
227 | } | ||
228 | |||
229 | void __init mem_init(void) | 205 | void __init mem_init(void) |
230 | { | 206 | { |
231 | int codesize, reservedpages, datasize, initsize; | ||
232 | |||
233 | BUG_ON(!mem_map); | 207 | BUG_ON(!mem_map); |
234 | 208 | ||
235 | set_max_mapnr_init(); | 209 | max_mapnr = max_low_pfn; |
236 | |||
237 | high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); | 210 | high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); |
238 | 211 | ||
239 | /* clear the zero-page */ | 212 | /* clear the zero-page */ |
240 | memset((void *)empty_zero_page, 0, PAGE_SIZE); | 213 | memset((void *)empty_zero_page, 0, PAGE_SIZE); |
241 | 214 | ||
242 | reservedpages = free_pages_init(); | 215 | /* this will put all low memory onto the freelists */ |
243 | 216 | free_all_bootmem(); | |
244 | codesize = (unsigned long)&_etext - (unsigned long)&_stext; | ||
245 | datasize = (unsigned long)&_edata - (unsigned long)&_etext; | ||
246 | initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; | ||
247 | 217 | ||
248 | printk(KERN_INFO | 218 | mem_init_print_info(NULL); |
249 | "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", | ||
250 | (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), | ||
251 | max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, | ||
252 | reservedpages << (PAGE_SHIFT - 10), datasize >> 10, | ||
253 | initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10)) | ||
254 | ); | ||
255 | 219 | ||
256 | printk("mem_init_done ...........................................\n"); | 220 | printk("mem_init_done ...........................................\n"); |
257 | mem_init_done = 1; | 221 | mem_init_done = 1; |
@@ -261,11 +225,11 @@ void __init mem_init(void) | |||
261 | #ifdef CONFIG_BLK_DEV_INITRD | 225 | #ifdef CONFIG_BLK_DEV_INITRD |
262 | void free_initrd_mem(unsigned long start, unsigned long end) | 226 | void free_initrd_mem(unsigned long start, unsigned long end) |
263 | { | 227 | { |
264 | free_reserved_area(start, end, 0, "initrd"); | 228 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
265 | } | 229 | } |
266 | #endif | 230 | #endif |
267 | 231 | ||
268 | void free_initmem(void) | 232 | void free_initmem(void) |
269 | { | 233 | { |
270 | free_initmem_default(0); | 234 | free_initmem_default(-1); |
271 | } | 235 | } |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 505b56c6b9b9..b0f96c0e6316 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -214,7 +214,6 @@ static void __init setup_bootmem(void) | |||
214 | mem_limit_func(); /* check for "mem=" argument */ | 214 | mem_limit_func(); /* check for "mem=" argument */ |
215 | 215 | ||
216 | mem_max = 0; | 216 | mem_max = 0; |
217 | num_physpages = 0; | ||
218 | for (i = 0; i < npmem_ranges; i++) { | 217 | for (i = 0; i < npmem_ranges; i++) { |
219 | unsigned long rsize; | 218 | unsigned long rsize; |
220 | 219 | ||
@@ -229,10 +228,8 @@ static void __init setup_bootmem(void) | |||
229 | npmem_ranges = i + 1; | 228 | npmem_ranges = i + 1; |
230 | mem_max = mem_limit; | 229 | mem_max = mem_limit; |
231 | } | 230 | } |
232 | num_physpages += pmem_ranges[i].pages; | ||
233 | break; | 231 | break; |
234 | } | 232 | } |
235 | num_physpages += pmem_ranges[i].pages; | ||
236 | mem_max += rsize; | 233 | mem_max += rsize; |
237 | } | 234 | } |
238 | 235 | ||
@@ -532,7 +529,7 @@ void free_initmem(void) | |||
532 | * pages are no-longer executable */ | 529 | * pages are no-longer executable */ |
533 | flush_icache_range(init_begin, init_end); | 530 | flush_icache_range(init_begin, init_end); |
534 | 531 | ||
535 | num_physpages += free_initmem_default(0); | 532 | free_initmem_default(-1); |
536 | 533 | ||
537 | /* set up a new led state on systems shipped LED State panel */ | 534 | /* set up a new led state on systems shipped LED State panel */ |
538 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); | 535 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); |
@@ -580,8 +577,6 @@ unsigned long pcxl_dma_start __read_mostly; | |||
580 | 577 | ||
581 | void __init mem_init(void) | 578 | void __init mem_init(void) |
582 | { | 579 | { |
583 | int codesize, reservedpages, datasize, initsize; | ||
584 | |||
585 | /* Do sanity checks on page table constants */ | 580 | /* Do sanity checks on page table constants */ |
586 | BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t)); | 581 | BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t)); |
587 | BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t)); | 582 | BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t)); |
@@ -590,45 +585,8 @@ void __init mem_init(void) | |||
590 | > BITS_PER_LONG); | 585 | > BITS_PER_LONG); |
591 | 586 | ||
592 | high_memory = __va((max_pfn << PAGE_SHIFT)); | 587 | high_memory = __va((max_pfn << PAGE_SHIFT)); |
593 | 588 | set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); | |
594 | #ifndef CONFIG_DISCONTIGMEM | 589 | free_all_bootmem(); |
595 | max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1; | ||
596 | totalram_pages += free_all_bootmem(); | ||
597 | #else | ||
598 | { | ||
599 | int i; | ||
600 | |||
601 | for (i = 0; i < npmem_ranges; i++) | ||
602 | totalram_pages += free_all_bootmem_node(NODE_DATA(i)); | ||
603 | } | ||
604 | #endif | ||
605 | |||
606 | codesize = (unsigned long)_etext - (unsigned long)_text; | ||
607 | datasize = (unsigned long)_edata - (unsigned long)_etext; | ||
608 | initsize = (unsigned long)__init_end - (unsigned long)__init_begin; | ||
609 | |||
610 | reservedpages = 0; | ||
611 | { | ||
612 | unsigned long pfn; | ||
613 | #ifdef CONFIG_DISCONTIGMEM | ||
614 | int i; | ||
615 | |||
616 | for (i = 0; i < npmem_ranges; i++) { | ||
617 | for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) { | ||
618 | if (PageReserved(pfn_to_page(pfn))) | ||
619 | reservedpages++; | ||
620 | } | ||
621 | } | ||
622 | #else /* !CONFIG_DISCONTIGMEM */ | ||
623 | for (pfn = 0; pfn < max_pfn; pfn++) { | ||
624 | /* | ||
625 | * Only count reserved RAM pages | ||
626 | */ | ||
627 | if (PageReserved(pfn_to_page(pfn))) | ||
628 | reservedpages++; | ||
629 | } | ||
630 | #endif | ||
631 | } | ||
632 | 590 | ||
633 | #ifdef CONFIG_PA11 | 591 | #ifdef CONFIG_PA11 |
634 | if (hppa_dma_ops == &pcxl_dma_ops) { | 592 | if (hppa_dma_ops == &pcxl_dma_ops) { |
@@ -643,15 +601,7 @@ void __init mem_init(void) | |||
643 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); | 601 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
644 | #endif | 602 | #endif |
645 | 603 | ||
646 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | 604 | mem_init_print_info(NULL); |
647 | nr_free_pages() << (PAGE_SHIFT-10), | ||
648 | num_physpages << (PAGE_SHIFT-10), | ||
649 | codesize >> 10, | ||
650 | reservedpages << (PAGE_SHIFT-10), | ||
651 | datasize >> 10, | ||
652 | initsize >> 10 | ||
653 | ); | ||
654 | |||
655 | #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ | 605 | #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ |
656 | printk("virtual kernel memory layout:\n" | 606 | printk("virtual kernel memory layout:\n" |
657 | " vmalloc : 0x%p - 0x%p (%4ld MB)\n" | 607 | " vmalloc : 0x%p - 0x%p (%4ld MB)\n" |
@@ -1101,6 +1051,6 @@ void flush_tlb_all(void) | |||
1101 | #ifdef CONFIG_BLK_DEV_INITRD | 1051 | #ifdef CONFIG_BLK_DEV_INITRD |
1102 | void free_initrd_mem(unsigned long start, unsigned long end) | 1052 | void free_initrd_mem(unsigned long start, unsigned long end) |
1103 | { | 1053 | { |
1104 | num_physpages += free_reserved_area(start, end, 0, "initrd"); | 1054 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
1105 | } | 1055 | } |
1106 | #endif | 1056 | #endif |
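The other recurring conversion, visible in the parisc hunk above and repeated across the architectures below, is in mem_init() itself: the hand-maintained codesize/datasize/initsize/reservedpages bookkeeping and the per-arch "Memory: ...k/...k available" printk are dropped, and the function reduces to releasing bootmem and calling the new generic reporter. mem_init_print_info() derives the section sizes itself, and the NULL argument means no extra arch-specific string is appended to the banner. A generic sketch of the converted shape (illustrative only, not any particular architecture's code):

	void __init mem_init(void)
	{
		/* establish the lowmem boundary before freeing bootmem */
		high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

		/* hand all boot memory over to the page allocator */
		free_all_bootmem();

		/* generic replacement for the old hand-rolled printk */
		mem_init_print_info(NULL);
	}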
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 9ec3fe174cba..779a78c26435 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -69,16 +69,6 @@ void __init setup_kdump_trampoline(void) | |||
69 | } | 69 | } |
70 | #endif /* CONFIG_NONSTATIC_KERNEL */ | 70 | #endif /* CONFIG_NONSTATIC_KERNEL */ |
71 | 71 | ||
72 | static int __init parse_savemaxmem(char *p) | ||
73 | { | ||
74 | if (p) | ||
75 | saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; | ||
76 | |||
77 | return 1; | ||
78 | } | ||
79 | __setup("savemaxmem=", parse_savemaxmem); | ||
80 | |||
81 | |||
82 | static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, | 72 | static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, |
83 | unsigned long offset, int userbuf) | 73 | unsigned long offset, int userbuf) |
84 | { | 74 | { |
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6782221d49bd..db28032e320e 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c | |||
@@ -750,13 +750,8 @@ EXPORT_SYMBOL_GPL(kvm_hypercall); | |||
750 | 750 | ||
751 | static __init void kvm_free_tmp(void) | 751 | static __init void kvm_free_tmp(void) |
752 | { | 752 | { |
753 | unsigned long start, end; | 753 | free_reserved_area(&kvm_tmp[kvm_tmp_index], |
754 | 754 | &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); | |
755 | start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK; | ||
756 | end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; | ||
757 | |||
758 | /* Free the tmp space we don't need */ | ||
759 | free_reserved_area(start, end, 0, NULL); | ||
760 | } | 755 | } |
761 | 756 | ||
762 | static int __init kvm_guest_init(void) | 757 | static int __init kvm_guest_init(void) |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 77fdd2cef33b..4210549ac95e 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -357,7 +357,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) | |||
357 | int alloc_bootmem_huge_page(struct hstate *hstate) | 357 | int alloc_bootmem_huge_page(struct hstate *hstate) |
358 | { | 358 | { |
359 | struct huge_bootmem_page *m; | 359 | struct huge_bootmem_page *m; |
360 | int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT); | 360 | int idx = shift_to_mmu_psize(huge_page_shift(hstate)); |
361 | int nr_gpages = gpage_freearray[idx].nr_gpages; | 361 | int nr_gpages = gpage_freearray[idx].nr_gpages; |
362 | 362 | ||
363 | if (nr_gpages == 0) | 363 | if (nr_gpages == 0) |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0988a26e0413..1cb1ea133a2c 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -299,47 +299,13 @@ void __init paging_init(void) | |||
299 | 299 | ||
300 | void __init mem_init(void) | 300 | void __init mem_init(void) |
301 | { | 301 | { |
302 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
303 | int nid; | ||
304 | #endif | ||
305 | pg_data_t *pgdat; | ||
306 | unsigned long i; | ||
307 | struct page *page; | ||
308 | unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; | ||
309 | |||
310 | #ifdef CONFIG_SWIOTLB | 302 | #ifdef CONFIG_SWIOTLB |
311 | swiotlb_init(0); | 303 | swiotlb_init(0); |
312 | #endif | 304 | #endif |
313 | 305 | ||
314 | num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; | ||
315 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 306 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
316 | 307 | set_max_mapnr(max_pfn); | |
317 | #ifdef CONFIG_NEED_MULTIPLE_NODES | 308 | free_all_bootmem(); |
318 | for_each_online_node(nid) { | ||
319 | if (NODE_DATA(nid)->node_spanned_pages != 0) { | ||
320 | printk("freeing bootmem node %d\n", nid); | ||
321 | totalram_pages += | ||
322 | free_all_bootmem_node(NODE_DATA(nid)); | ||
323 | } | ||
324 | } | ||
325 | #else | ||
326 | max_mapnr = max_pfn; | ||
327 | totalram_pages += free_all_bootmem(); | ||
328 | #endif | ||
329 | for_each_online_pgdat(pgdat) { | ||
330 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
331 | if (!pfn_valid(pgdat->node_start_pfn + i)) | ||
332 | continue; | ||
333 | page = pgdat_page_nr(pgdat, i); | ||
334 | if (PageReserved(page)) | ||
335 | reservedpages++; | ||
336 | } | ||
337 | } | ||
338 | |||
339 | codesize = (unsigned long)&_sdata - (unsigned long)&_stext; | ||
340 | datasize = (unsigned long)&_edata - (unsigned long)&_sdata; | ||
341 | initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; | ||
342 | bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; | ||
343 | 309 | ||
344 | #ifdef CONFIG_HIGHMEM | 310 | #ifdef CONFIG_HIGHMEM |
345 | { | 311 | { |
@@ -349,13 +315,9 @@ void __init mem_init(void) | |||
349 | for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { | 315 | for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { |
350 | phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; | 316 | phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; |
351 | struct page *page = pfn_to_page(pfn); | 317 | struct page *page = pfn_to_page(pfn); |
352 | if (memblock_is_reserved(paddr)) | 318 | if (!memblock_is_reserved(paddr)) |
353 | continue; | 319 | free_highmem_page(page); |
354 | free_highmem_page(page); | ||
355 | reservedpages--; | ||
356 | } | 320 | } |
357 | printk(KERN_DEBUG "High memory: %luk\n", | ||
358 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
359 | } | 321 | } |
360 | #endif /* CONFIG_HIGHMEM */ | 322 | #endif /* CONFIG_HIGHMEM */ |
361 | 323 | ||
@@ -368,16 +330,7 @@ void __init mem_init(void) | |||
368 | (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; | 330 | (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; |
369 | #endif | 331 | #endif |
370 | 332 | ||
371 | printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " | 333 | mem_init_print_info(NULL); |
372 | "%luk reserved, %luk data, %luk bss, %luk init)\n", | ||
373 | nr_free_pages() << (PAGE_SHIFT-10), | ||
374 | num_physpages << (PAGE_SHIFT-10), | ||
375 | codesize >> 10, | ||
376 | reservedpages << (PAGE_SHIFT-10), | ||
377 | datasize >> 10, | ||
378 | bsssize >> 10, | ||
379 | initsize >> 10); | ||
380 | |||
381 | #ifdef CONFIG_PPC32 | 334 | #ifdef CONFIG_PPC32 |
382 | pr_info("Kernel virtual memory layout:\n"); | 335 | pr_info("Kernel virtual memory layout:\n"); |
383 | pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); | 336 | pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); |
@@ -407,7 +360,7 @@ void free_initmem(void) | |||
407 | #ifdef CONFIG_BLK_DEV_INITRD | 360 | #ifdef CONFIG_BLK_DEV_INITRD |
408 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 361 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
409 | { | 362 | { |
410 | free_reserved_area(start, end, 0, "initrd"); | 363 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
411 | } | 364 | } |
412 | #endif | 365 | #endif |
413 | 366 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0a49095104c9..497451ec5e26 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -719,10 +719,6 @@ static void reserve_oldmem(void) | |||
719 | } | 719 | } |
720 | create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE); | 720 | create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE); |
721 | create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE); | 721 | create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE); |
722 | if (OLDMEM_BASE + OLDMEM_SIZE == real_size) | ||
723 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; | ||
724 | else | ||
725 | saved_max_pfn = PFN_DOWN(real_size) - 1; | ||
726 | #endif | 722 | #endif |
727 | } | 723 | } |
728 | 724 | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 89ebae4008f2..ce36ea80e4f9 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -135,30 +135,17 @@ void __init paging_init(void) | |||
135 | 135 | ||
136 | void __init mem_init(void) | 136 | void __init mem_init(void) |
137 | { | 137 | { |
138 | unsigned long codesize, reservedpages, datasize, initsize; | 138 | max_mapnr = max_low_pfn; |
139 | |||
140 | max_mapnr = num_physpages = max_low_pfn; | ||
141 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 139 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
142 | 140 | ||
143 | /* Setup guest page hinting */ | 141 | /* Setup guest page hinting */ |
144 | cmma_init(); | 142 | cmma_init(); |
145 | 143 | ||
146 | /* this will put all low memory onto the freelists */ | 144 | /* this will put all low memory onto the freelists */ |
147 | totalram_pages += free_all_bootmem(); | 145 | free_all_bootmem(); |
148 | setup_zero_pages(); /* Setup zeroed pages. */ | 146 | setup_zero_pages(); /* Setup zeroed pages. */ |
149 | 147 | ||
150 | reservedpages = 0; | 148 | mem_init_print_info(NULL); |
151 | |||
152 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
153 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
154 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
155 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", | ||
156 | nr_free_pages() << (PAGE_SHIFT-10), | ||
157 | max_mapnr << (PAGE_SHIFT-10), | ||
158 | codesize >> 10, | ||
159 | reservedpages << (PAGE_SHIFT-10), | ||
160 | datasize >>10, | ||
161 | initsize >> 10); | ||
162 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | 149 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
163 | (unsigned long)&_stext, | 150 | (unsigned long)&_stext, |
164 | PFN_ALIGN((unsigned long)&_eshared) - 1); | 151 | PFN_ALIGN((unsigned long)&_eshared) - 1); |
@@ -166,13 +153,14 @@ void __init mem_init(void) | |||
166 | 153 | ||
167 | void free_initmem(void) | 154 | void free_initmem(void) |
168 | { | 155 | { |
169 | free_initmem_default(0); | 156 | free_initmem_default(POISON_FREE_INITMEM); |
170 | } | 157 | } |
171 | 158 | ||
172 | #ifdef CONFIG_BLK_DEV_INITRD | 159 | #ifdef CONFIG_BLK_DEV_INITRD |
173 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 160 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
174 | { | 161 | { |
175 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); | 162 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
163 | "initrd"); | ||
176 | } | 164 | } |
177 | #endif | 165 | #endif |
178 | 166 | ||
diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S index eebcbaa4e978..7274b5c4287e 100644 --- a/arch/score/kernel/vmlinux.lds.S +++ b/arch/score/kernel/vmlinux.lds.S | |||
@@ -49,6 +49,7 @@ SECTIONS | |||
49 | } | 49 | } |
50 | 50 | ||
51 | . = ALIGN(16); | 51 | . = ALIGN(16); |
52 | _sdata = .; /* Start of data section */ | ||
52 | RODATA | 53 | RODATA |
53 | 54 | ||
54 | EXCEPTION_TABLE(16) | 55 | EXCEPTION_TABLE(16) |
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index 0940682ab38b..9fbce49ad3bd 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c | |||
@@ -75,40 +75,19 @@ void __init paging_init(void) | |||
75 | 75 | ||
76 | void __init mem_init(void) | 76 | void __init mem_init(void) |
77 | { | 77 | { |
78 | unsigned long codesize, reservedpages, datasize, initsize; | ||
79 | unsigned long tmp, ram = 0; | ||
80 | |||
81 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | 78 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
82 | totalram_pages += free_all_bootmem(); | 79 | free_all_bootmem(); |
83 | setup_zero_page(); /* Setup zeroed pages. */ | 80 | setup_zero_page(); /* Setup zeroed pages. */ |
84 | reservedpages = 0; | 81 | |
85 | 82 | mem_init_print_info(NULL); | |
86 | for (tmp = 0; tmp < max_low_pfn; tmp++) | ||
87 | if (page_is_ram(tmp)) { | ||
88 | ram++; | ||
89 | if (PageReserved(pfn_to_page(tmp))) | ||
90 | reservedpages++; | ||
91 | } | ||
92 | |||
93 | num_physpages = ram; | ||
94 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
95 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
96 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
97 | |||
98 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | ||
99 | "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", | ||
100 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
101 | ram << (PAGE_SHIFT-10), codesize >> 10, | ||
102 | reservedpages << (PAGE_SHIFT-10), datasize >> 10, | ||
103 | initsize >> 10, | ||
104 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
105 | } | 83 | } |
106 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 84 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
107 | 85 | ||
108 | #ifdef CONFIG_BLK_DEV_INITRD | 86 | #ifdef CONFIG_BLK_DEV_INITRD |
109 | void free_initrd_mem(unsigned long start, unsigned long end) | 87 | void free_initrd_mem(unsigned long start, unsigned long end) |
110 | { | 88 | { |
111 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); | 89 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
90 | "initrd"); | ||
112 | } | 91 | } |
113 | #endif | 92 | #endif |
114 | 93 | ||
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 20f9ead650d3..33890fd267cb 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -407,30 +407,16 @@ unsigned int mem_init_done = 0; | |||
407 | 407 | ||
408 | void __init mem_init(void) | 408 | void __init mem_init(void) |
409 | { | 409 | { |
410 | int codesize, datasize, initsize; | 410 | pg_data_t *pgdat; |
411 | int nid; | ||
412 | 411 | ||
413 | iommu_init(); | 412 | iommu_init(); |
414 | 413 | ||
415 | num_physpages = 0; | ||
416 | high_memory = NULL; | 414 | high_memory = NULL; |
415 | for_each_online_pgdat(pgdat) | ||
416 | high_memory = max_t(void *, high_memory, | ||
417 | __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); | ||
417 | 418 | ||
418 | for_each_online_node(nid) { | 419 | free_all_bootmem(); |
419 | pg_data_t *pgdat = NODE_DATA(nid); | ||
420 | void *node_high_memory; | ||
421 | |||
422 | num_physpages += pgdat->node_present_pages; | ||
423 | |||
424 | if (pgdat->node_spanned_pages) | ||
425 | totalram_pages += free_all_bootmem_node(pgdat); | ||
426 | |||
427 | |||
428 | node_high_memory = (void *)__va((pgdat->node_start_pfn + | ||
429 | pgdat->node_spanned_pages) << | ||
430 | PAGE_SHIFT); | ||
431 | if (node_high_memory > high_memory) | ||
432 | high_memory = node_high_memory; | ||
433 | } | ||
434 | 420 | ||
435 | /* Set this up early, so we can take care of the zero page */ | 421 | /* Set this up early, so we can take care of the zero page */ |
436 | cpu_cache_init(); | 422 | cpu_cache_init(); |
@@ -441,19 +427,8 @@ void __init mem_init(void) | |||
441 | 427 | ||
442 | vsyscall_init(); | 428 | vsyscall_init(); |
443 | 429 | ||
444 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 430 | mem_init_print_info(NULL); |
445 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 431 | pr_info("virtual kernel memory layout:\n" |
446 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
447 | |||
448 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | ||
449 | "%dk data, %dk init)\n", | ||
450 | nr_free_pages() << (PAGE_SHIFT-10), | ||
451 | num_physpages << (PAGE_SHIFT-10), | ||
452 | codesize >> 10, | ||
453 | datasize >> 10, | ||
454 | initsize >> 10); | ||
455 | |||
456 | printk(KERN_INFO "virtual kernel memory layout:\n" | ||
457 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 432 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
458 | #ifdef CONFIG_HIGHMEM | 433 | #ifdef CONFIG_HIGHMEM |
459 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 434 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
@@ -499,13 +474,13 @@ void __init mem_init(void) | |||
499 | 474 | ||
500 | void free_initmem(void) | 475 | void free_initmem(void) |
501 | { | 476 | { |
502 | free_initmem_default(0); | 477 | free_initmem_default(-1); |
503 | } | 478 | } |
504 | 479 | ||
505 | #ifdef CONFIG_BLK_DEV_INITRD | 480 | #ifdef CONFIG_BLK_DEV_INITRD |
506 | void free_initrd_mem(unsigned long start, unsigned long end) | 481 | void free_initrd_mem(unsigned long start, unsigned long end) |
507 | { | 482 | { |
508 | free_reserved_area(start, end, 0, "initrd"); | 483 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
509 | } | 484 | } |
510 | #endif | 485 | #endif |
511 | 486 | ||
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 6cfc1b09ec25..d7aa524b7283 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c | |||
@@ -254,15 +254,12 @@ void __init leon_smp_done(void) | |||
254 | /* Free unneeded trap tables */ | 254 | /* Free unneeded trap tables */ |
255 | if (!cpu_present(1)) { | 255 | if (!cpu_present(1)) { |
256 | free_reserved_page(virt_to_page(&trapbase_cpu1)); | 256 | free_reserved_page(virt_to_page(&trapbase_cpu1)); |
257 | num_physpages++; | ||
258 | } | 257 | } |
259 | if (!cpu_present(2)) { | 258 | if (!cpu_present(2)) { |
260 | free_reserved_page(virt_to_page(&trapbase_cpu2)); | 259 | free_reserved_page(virt_to_page(&trapbase_cpu2)); |
261 | num_physpages++; | ||
262 | } | 260 | } |
263 | if (!cpu_present(3)) { | 261 | if (!cpu_present(3)) { |
264 | free_reserved_page(virt_to_page(&trapbase_cpu3)); | 262 | free_reserved_page(virt_to_page(&trapbase_cpu3)); |
265 | num_physpages++; | ||
266 | } | 263 | } |
267 | /* Ok, they are spinning and ready to go. */ | 264 | /* Ok, they are spinning and ready to go. */ |
268 | smp_processors_ready = 1; | 265 | smp_processors_ready = 1; |
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index af472cf7c69a..db6987082805 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c | |||
@@ -288,10 +288,6 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) | |||
288 | 288 | ||
289 | void __init mem_init(void) | 289 | void __init mem_init(void) |
290 | { | 290 | { |
291 | int codepages = 0; | ||
292 | int datapages = 0; | ||
293 | int initpages = 0; | ||
294 | int reservedpages = 0; | ||
295 | int i; | 291 | int i; |
296 | 292 | ||
297 | if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { | 293 | if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { |
@@ -323,15 +319,12 @@ void __init mem_init(void) | |||
323 | 319 | ||
324 | max_mapnr = last_valid_pfn - pfn_base; | 320 | max_mapnr = last_valid_pfn - pfn_base; |
325 | high_memory = __va(max_low_pfn << PAGE_SHIFT); | 321 | high_memory = __va(max_low_pfn << PAGE_SHIFT); |
326 | 322 | free_all_bootmem(); | |
327 | totalram_pages = free_all_bootmem(); | ||
328 | 323 | ||
329 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 324 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { |
330 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | 325 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; |
331 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | 326 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; |
332 | 327 | ||
333 | num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT; | ||
334 | |||
335 | if (end_pfn <= highstart_pfn) | 328 | if (end_pfn <= highstart_pfn) |
336 | continue; | 329 | continue; |
337 | 330 | ||
@@ -341,39 +334,19 @@ void __init mem_init(void) | |||
341 | map_high_region(start_pfn, end_pfn); | 334 | map_high_region(start_pfn, end_pfn); |
342 | } | 335 | } |
343 | 336 | ||
344 | codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); | 337 | mem_init_print_info(NULL); |
345 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | ||
346 | datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); | ||
347 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | ||
348 | initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); | ||
349 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | ||
350 | |||
351 | /* Ignore memory holes for the purpose of counting reserved pages */ | ||
352 | for (i=0; i < max_low_pfn; i++) | ||
353 | if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap) | ||
354 | && PageReserved(pfn_to_page(i))) | ||
355 | reservedpages++; | ||
356 | |||
357 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", | ||
358 | nr_free_pages() << (PAGE_SHIFT-10), | ||
359 | num_physpages << (PAGE_SHIFT - 10), | ||
360 | codepages << (PAGE_SHIFT-10), | ||
361 | reservedpages << (PAGE_SHIFT - 10), | ||
362 | datapages << (PAGE_SHIFT-10), | ||
363 | initpages << (PAGE_SHIFT-10), | ||
364 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
365 | } | 338 | } |
366 | 339 | ||
367 | void free_initmem (void) | 340 | void free_initmem (void) |
368 | { | 341 | { |
369 | num_physpages += free_initmem_default(POISON_FREE_INITMEM); | 342 | free_initmem_default(POISON_FREE_INITMEM); |
370 | } | 343 | } |
371 | 344 | ||
372 | #ifdef CONFIG_BLK_DEV_INITRD | 345 | #ifdef CONFIG_BLK_DEV_INITRD |
373 | void free_initrd_mem(unsigned long start, unsigned long end) | 346 | void free_initrd_mem(unsigned long start, unsigned long end) |
374 | { | 347 | { |
375 | num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM, | 348 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
376 | "initrd"); | 349 | "initrd"); |
377 | } | 350 | } |
378 | #endif | 351 | #endif |
379 | 352 | ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 04fd55a6e461..a9c42a7ffb6a 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -2045,7 +2045,6 @@ static void __init register_page_bootmem_info(void) | |||
2045 | } | 2045 | } |
2046 | void __init mem_init(void) | 2046 | void __init mem_init(void) |
2047 | { | 2047 | { |
2048 | unsigned long codepages, datapages, initpages; | ||
2049 | unsigned long addr, last; | 2048 | unsigned long addr, last; |
2050 | 2049 | ||
2051 | addr = PAGE_OFFSET + kern_base; | 2050 | addr = PAGE_OFFSET + kern_base; |
@@ -2061,12 +2060,7 @@ void __init mem_init(void) | |||
2061 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | 2060 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
2062 | 2061 | ||
2063 | register_page_bootmem_info(); | 2062 | register_page_bootmem_info(); |
2064 | totalram_pages = free_all_bootmem(); | 2063 | free_all_bootmem(); |
2065 | |||
2066 | /* We subtract one to account for the mem_map_zero page | ||
2067 | * allocated below. | ||
2068 | */ | ||
2069 | num_physpages = totalram_pages - 1; | ||
2070 | 2064 | ||
2071 | /* | 2065 | /* |
2072 | * Set up the zero page, mark it reserved, so that page count | 2066 | * Set up the zero page, mark it reserved, so that page count |
@@ -2079,19 +2073,7 @@ void __init mem_init(void) | |||
2079 | } | 2073 | } |
2080 | mark_page_reserved(mem_map_zero); | 2074 | mark_page_reserved(mem_map_zero); |
2081 | 2075 | ||
2082 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | 2076 | mem_init_print_info(NULL); |
2083 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | ||
2084 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | ||
2085 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | ||
2086 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | ||
2087 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | ||
2088 | |||
2089 | printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", | ||
2090 | nr_free_pages() << (PAGE_SHIFT-10), | ||
2091 | codepages << (PAGE_SHIFT-10), | ||
2092 | datapages << (PAGE_SHIFT-10), | ||
2093 | initpages << (PAGE_SHIFT-10), | ||
2094 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | ||
2095 | 2077 | ||
2096 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | 2078 | if (tlb_type == cheetah || tlb_type == cheetah_plus) |
2097 | cheetah_ecache_flush_init(); | 2079 | cheetah_ecache_flush_init(); |
@@ -2131,8 +2113,8 @@ void free_initmem(void) | |||
2131 | #ifdef CONFIG_BLK_DEV_INITRD | 2113 | #ifdef CONFIG_BLK_DEV_INITRD |
2132 | void free_initrd_mem(unsigned long start, unsigned long end) | 2114 | void free_initrd_mem(unsigned long start, unsigned long end) |
2133 | { | 2115 | { |
2134 | num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM, | 2116 | free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
2135 | "initrd"); | 2117 | "initrd"); |
2136 | } | 2118 | } |
2137 | #endif | 2119 | #endif |
2138 | 2120 | ||
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h index d062d463fca9..7d8a935a9238 100644 --- a/arch/tile/include/asm/sections.h +++ b/arch/tile/include/asm/sections.h | |||
@@ -34,7 +34,7 @@ extern char __sys_cmpxchg_grab_lock[]; | |||
34 | extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; | 34 | extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | /* Handle the discontiguity between _sdata and _stext. */ | 37 | /* Handle the discontiguity between _sdata and _text. */ |
38 | static inline int arch_is_kernel_data(unsigned long addr) | 38 | static inline int arch_is_kernel_data(unsigned long addr) |
39 | { | 39 | { |
40 | return addr >= (unsigned long)_sdata && | 40 | return addr >= (unsigned long)_sdata && |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 7a5aa1a7864e..68b542677f6a 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -307,8 +307,8 @@ static void __cpuinit store_permanent_mappings(void) | |||
307 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); | 307 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); |
308 | } | 308 | } |
309 | 309 | ||
310 | hv_store_mapping((HV_VirtAddr)_stext, | 310 | hv_store_mapping((HV_VirtAddr)_text, |
311 | (uint32_t)(_einittext - _stext), 0); | 311 | (uint32_t)(_einittext - _text), 0); |
312 | } | 312 | } |
313 | 313 | ||
314 | /* | 314 | /* |
@@ -329,6 +329,7 @@ static void __init setup_memory(void) | |||
329 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) | 329 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) |
330 | long lowmem_pages; | 330 | long lowmem_pages; |
331 | #endif | 331 | #endif |
332 | unsigned long physpages = 0; | ||
332 | 333 | ||
333 | /* We are using a char to hold the cpu_2_node[] mapping */ | 334 | /* We are using a char to hold the cpu_2_node[] mapping */ |
334 | BUILD_BUG_ON(MAX_NUMNODES > 127); | 335 | BUILD_BUG_ON(MAX_NUMNODES > 127); |
@@ -388,8 +389,8 @@ static void __init setup_memory(void) | |||
388 | continue; | 389 | continue; |
389 | } | 390 | } |
390 | } | 391 | } |
391 | if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { | 392 | if (physpages + PFN_DOWN(range.size) > maxmem_pfn) { |
392 | int max_size = maxmem_pfn - num_physpages; | 393 | int max_size = maxmem_pfn - physpages; |
393 | if (max_size > 0) { | 394 | if (max_size > 0) { |
394 | pr_err("Maxmem reduced node %d to %d pages\n", | 395 | pr_err("Maxmem reduced node %d to %d pages\n", |
395 | i, max_size); | 396 | i, max_size); |
@@ -446,7 +447,7 @@ static void __init setup_memory(void) | |||
446 | node_start_pfn[i] = start; | 447 | node_start_pfn[i] = start; |
447 | node_end_pfn[i] = end; | 448 | node_end_pfn[i] = end; |
448 | node_controller[i] = range.controller; | 449 | node_controller[i] = range.controller; |
449 | num_physpages += size; | 450 | physpages += size; |
450 | max_pfn = end; | 451 | max_pfn = end; |
451 | 452 | ||
452 | /* Mark node as online */ | 453 | /* Mark node as online */ |
@@ -465,7 +466,7 @@ static void __init setup_memory(void) | |||
465 | * we're willing to use at 8 million pages (32GB of 4KB pages). | 466 | * we're willing to use at 8 million pages (32GB of 4KB pages). |
466 | */ | 467 | */ |
467 | cap = 8 * 1024 * 1024; /* 8 million pages */ | 468 | cap = 8 * 1024 * 1024; /* 8 million pages */ |
468 | if (num_physpages > cap) { | 469 | if (physpages > cap) { |
469 | int num_nodes = num_online_nodes(); | 470 | int num_nodes = num_online_nodes(); |
470 | int cap_each = cap / num_nodes; | 471 | int cap_each = cap / num_nodes; |
471 | unsigned long dropped_pages = 0; | 472 | unsigned long dropped_pages = 0; |
@@ -476,10 +477,10 @@ static void __init setup_memory(void) | |||
476 | node_end_pfn[i] = node_start_pfn[i] + cap_each; | 477 | node_end_pfn[i] = node_start_pfn[i] + cap_each; |
477 | } | 478 | } |
478 | } | 479 | } |
479 | num_physpages -= dropped_pages; | 480 | physpages -= dropped_pages; |
480 | pr_warning("Only using %ldMB memory;" | 481 | pr_warning("Only using %ldMB memory;" |
481 | " ignoring %ldMB.\n", | 482 | " ignoring %ldMB.\n", |
482 | num_physpages >> (20 - PAGE_SHIFT), | 483 | physpages >> (20 - PAGE_SHIFT), |
483 | dropped_pages >> (20 - PAGE_SHIFT)); | 484 | dropped_pages >> (20 - PAGE_SHIFT)); |
484 | pr_warning("Consider using a larger page size.\n"); | 485 | pr_warning("Consider using a larger page size.\n"); |
485 | } | 486 | } |
@@ -497,7 +498,7 @@ static void __init setup_memory(void) | |||
497 | 498 | ||
498 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? | 499 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? |
499 | MAXMEM_PFN : mappable_physpages; | 500 | MAXMEM_PFN : mappable_physpages; |
500 | highmem_pages = (long) (num_physpages - lowmem_pages); | 501 | highmem_pages = (long) (physpages - lowmem_pages); |
501 | 502 | ||
502 | pr_notice("%ldMB HIGHMEM available.\n", | 503 | pr_notice("%ldMB HIGHMEM available.\n", |
503 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); | 504 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); |
@@ -514,7 +515,6 @@ static void __init setup_memory(void) | |||
514 | pr_warning("Use a HIGHMEM enabled kernel.\n"); | 515 | pr_warning("Use a HIGHMEM enabled kernel.\n"); |
515 | max_low_pfn = MAXMEM_PFN; | 516 | max_low_pfn = MAXMEM_PFN; |
516 | max_pfn = MAXMEM_PFN; | 517 | max_pfn = MAXMEM_PFN; |
517 | num_physpages = MAXMEM_PFN; | ||
518 | node_end_pfn[0] = MAXMEM_PFN; | 518 | node_end_pfn[0] = MAXMEM_PFN; |
519 | } else { | 519 | } else { |
520 | pr_notice("%ldMB memory available.\n", | 520 | pr_notice("%ldMB memory available.\n", |
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 631f10de12fe..a13ed902afbb 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -27,7 +27,6 @@ SECTIONS | |||
27 | .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ | 27 | .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ |
28 | { | 28 | { |
29 | _text = .; | 29 | _text = .; |
30 | _stext = .; | ||
31 | *(.intrpt1) | 30 | *(.intrpt1) |
32 | } :intrpt1 =0 | 31 | } :intrpt1 =0 |
33 | 32 | ||
@@ -36,6 +35,7 @@ SECTIONS | |||
36 | 35 | ||
37 | /* Now the real code */ | 36 | /* Now the real code */ |
38 | . = ALIGN(0x20000); | 37 | . = ALIGN(0x20000); |
38 | _stext = .; | ||
39 | .text : AT (ADDR(.text) - LOAD_OFFSET) { | 39 | .text : AT (ADDR(.text) - LOAD_OFFSET) { |
40 | HEAD_TEXT | 40 | HEAD_TEXT |
41 | SCHED_TEXT | 41 | SCHED_TEXT |
@@ -58,11 +58,13 @@ SECTIONS | |||
58 | #define LOAD_OFFSET PAGE_OFFSET | 58 | #define LOAD_OFFSET PAGE_OFFSET |
59 | 59 | ||
60 | . = ALIGN(PAGE_SIZE); | 60 | . = ALIGN(PAGE_SIZE); |
61 | __init_begin = .; | ||
61 | VMLINUX_SYMBOL(_sinitdata) = .; | 62 | VMLINUX_SYMBOL(_sinitdata) = .; |
62 | INIT_DATA_SECTION(16) :data =0 | 63 | INIT_DATA_SECTION(16) :data =0 |
63 | PERCPU_SECTION(L2_CACHE_BYTES) | 64 | PERCPU_SECTION(L2_CACHE_BYTES) |
64 | . = ALIGN(PAGE_SIZE); | 65 | . = ALIGN(PAGE_SIZE); |
65 | VMLINUX_SYMBOL(_einitdata) = .; | 66 | VMLINUX_SYMBOL(_einitdata) = .; |
67 | __init_end = .; | ||
66 | 68 | ||
67 | _sdata = .; /* Start of data section */ | 69 | _sdata = .; /* Start of data section */ |
68 | 70 | ||
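The relocated _stext and the new __init_begin/__init_end markers are ordinary linker-provided symbols; generic code sees them as zero-sized objects whose addresses delimit the sections. A minimal sketch of how they are consumed on the C side (the declarations here are illustrative; the canonical ones live in include/asm-generic/sections.h):

    /* Sketch: linker-script symbols appear to C as arrays whose *addresses*
     * mark section boundaries; only the addresses are meaningful.
     */
    extern char _stext[], _etext[];               /* start/end of kernel text */
    extern char __init_begin[], __init_end[];     /* start/end of init code and data */

    static inline unsigned long example_init_section_bytes(void)
    {
            return (unsigned long)__init_end - (unsigned long)__init_begin;
    }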
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 2749515a0547..e182958c707d 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c | |||
@@ -562,7 +562,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
562 | prot = ktext_set_nocache(prot); | 562 | prot = ktext_set_nocache(prot); |
563 | } | 563 | } |
564 | 564 | ||
565 | BUG_ON(address != (unsigned long)_stext); | 565 | BUG_ON(address != (unsigned long)_text); |
566 | pte = NULL; | 566 | pte = NULL; |
567 | for (; address < (unsigned long)_einittext; | 567 | for (; address < (unsigned long)_einittext; |
568 | pfn++, address += PAGE_SIZE) { | 568 | pfn++, address += PAGE_SIZE) { |
@@ -720,7 +720,7 @@ static void __init init_free_pfn_range(unsigned long start, unsigned long end) | |||
720 | } | 720 | } |
721 | init_page_count(page); | 721 | init_page_count(page); |
722 | __free_pages(page, order); | 722 | __free_pages(page, order); |
723 | totalram_pages += count; | 723 | adjust_managed_page_count(page, count); |
724 | 724 | ||
725 | page += count; | 725 | page += count; |
726 | pfn += count; | 726 | pfn += count; |
@@ -821,7 +821,6 @@ static void __init set_max_mapnr_init(void) | |||
821 | 821 | ||
822 | void __init mem_init(void) | 822 | void __init mem_init(void) |
823 | { | 823 | { |
824 | int codesize, datasize, initsize; | ||
825 | int i; | 824 | int i; |
826 | #ifndef __tilegx__ | 825 | #ifndef __tilegx__ |
827 | void *last; | 826 | void *last; |
@@ -846,26 +845,14 @@ void __init mem_init(void) | |||
846 | set_max_mapnr_init(); | 845 | set_max_mapnr_init(); |
847 | 846 | ||
848 | /* this will put all bootmem onto the freelists */ | 847 | /* this will put all bootmem onto the freelists */ |
849 | totalram_pages += free_all_bootmem(); | 848 | free_all_bootmem(); |
850 | 849 | ||
851 | #ifndef CONFIG_64BIT | 850 | #ifndef CONFIG_64BIT |
852 | /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ | 851 | /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ |
853 | set_non_bootmem_pages_init(); | 852 | set_non_bootmem_pages_init(); |
854 | #endif | 853 | #endif |
855 | 854 | ||
856 | codesize = (unsigned long)&_etext - (unsigned long)&_text; | 855 | mem_init_print_info(NULL); |
857 | datasize = (unsigned long)&_end - (unsigned long)&_sdata; | ||
858 | initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext; | ||
859 | initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata; | ||
860 | |||
861 | pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n", | ||
862 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
863 | num_physpages << (PAGE_SHIFT-10), | ||
864 | codesize >> 10, | ||
865 | datasize >> 10, | ||
866 | initsize >> 10, | ||
867 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) | ||
868 | ); | ||
869 | 856 | ||
870 | /* | 857 | /* |
871 | * In debug mode, dump some interesting memory mappings. | 858 | * In debug mode, dump some interesting memory mappings. |
@@ -1024,16 +1011,13 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
1024 | pte_clear(&init_mm, addr, ptep); | 1011 | pte_clear(&init_mm, addr, ptep); |
1025 | continue; | 1012 | continue; |
1026 | } | 1013 | } |
1027 | __ClearPageReserved(page); | ||
1028 | init_page_count(page); | ||
1029 | if (pte_huge(*ptep)) | 1014 | if (pte_huge(*ptep)) |
1030 | BUG_ON(!kdata_huge); | 1015 | BUG_ON(!kdata_huge); |
1031 | else | 1016 | else |
1032 | set_pte_at(&init_mm, addr, ptep, | 1017 | set_pte_at(&init_mm, addr, ptep, |
1033 | pfn_pte(pfn, PAGE_KERNEL)); | 1018 | pfn_pte(pfn, PAGE_KERNEL)); |
1034 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | 1019 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
1035 | free_page(addr); | 1020 | free_reserved_page(page); |
1036 | totalram_pages++; | ||
1037 | } | 1021 | } |
1038 | pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); | 1022 | pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); |
1039 | } | 1023 | } |
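The open-coded totalram_pages updates in this file are replaced by the new mm helpers; a rough sketch of what they do when a reserved boot-time page is handed back to the page allocator (simplified from include/linux/mm.h as of this series, not a drop-in implementation):

    static inline void example_free_reserved_page(struct page *page)
    {
            ClearPageReserved(page);              /* boot memory is marked PG_reserved */
            init_page_count(page);                /* give it a reference count of 1 ... */
            __free_page(page);                    /* ... and release it to the buddy allocator */
            adjust_managed_page_count(page, 1);   /* keep totalram_pages and the zone's
                                                   * managed_pages counter in sync */
    }

Batch callers such as init_free_pfn_range() above keep freeing the pages directly and then call adjust_managed_page_count(page, count) once for the whole run.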
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index 4938de5512d2..1dd5bd8a8c59 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S | |||
@@ -57,7 +57,6 @@ | |||
57 | *(.uml.initcall.init) | 57 | *(.uml.initcall.init) |
58 | __uml_initcall_end = .; | 58 | __uml_initcall_end = .; |
59 | } | 59 | } |
60 | __init_end = .; | ||
61 | 60 | ||
62 | SECURITY_INIT | 61 | SECURITY_INIT |
63 | 62 | ||
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index fb8fd6fb6563..adde088aeeff 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S | |||
@@ -14,8 +14,6 @@ SECTIONS | |||
14 | __binary_start = .; | 14 | __binary_start = .; |
15 | . = ALIGN(4096); /* Init code and data */ | 15 | . = ALIGN(4096); /* Init code and data */ |
16 | _text = .; | 16 | _text = .; |
17 | _stext = .; | ||
18 | __init_begin = .; | ||
19 | INIT_TEXT_SECTION(PAGE_SIZE) | 17 | INIT_TEXT_SECTION(PAGE_SIZE) |
20 | 18 | ||
21 | . = ALIGN(PAGE_SIZE); | 19 | . = ALIGN(PAGE_SIZE); |
@@ -67,6 +65,7 @@ SECTIONS | |||
67 | } =0x90909090 | 65 | } =0x90909090 |
68 | .plt : { *(.plt) } | 66 | .plt : { *(.plt) } |
69 | .text : { | 67 | .text : { |
68 | _stext = .; | ||
70 | TEXT_TEXT | 69 | TEXT_TEXT |
71 | SCHED_TEXT | 70 | SCHED_TEXT |
72 | LOCK_TEXT | 71 | LOCK_TEXT |
@@ -91,7 +90,9 @@ SECTIONS | |||
91 | 90 | ||
92 | #include <asm/common.lds.S> | 91 | #include <asm/common.lds.S> |
93 | 92 | ||
93 | __init_begin = .; | ||
94 | init.data : { INIT_DATA } | 94 | init.data : { INIT_DATA } |
95 | __init_end = .; | ||
95 | 96 | ||
96 | /* Ensure the __preinit_array_start label is properly aligned. We | 97 | /* Ensure the __preinit_array_start label is properly aligned. We |
97 | could instead move the label definition inside the section, but | 98 | could instead move the label definition inside the section, but |
@@ -155,6 +156,7 @@ SECTIONS | |||
155 | . = ALIGN(32 / 8); | 156 | . = ALIGN(32 / 8); |
156 | . = ALIGN(32 / 8); | 157 | . = ALIGN(32 / 8); |
157 | } | 158 | } |
159 | __bss_stop = .; | ||
158 | _end = .; | 160 | _end = .; |
159 | PROVIDE (end = .); | 161 | PROVIDE (end = .); |
160 | 162 | ||
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9df292b270a8..7ddb64baf327 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c | |||
@@ -65,15 +65,13 @@ void __init mem_init(void) | |||
65 | uml_reserved = brk_end; | 65 | uml_reserved = brk_end; |
66 | 66 | ||
67 | /* this will put all low memory onto the freelists */ | 67 | /* this will put all low memory onto the freelists */ |
68 | totalram_pages = free_all_bootmem(); | 68 | free_all_bootmem(); |
69 | max_low_pfn = totalram_pages; | 69 | max_low_pfn = totalram_pages; |
70 | #ifdef CONFIG_HIGHMEM | 70 | #ifdef CONFIG_HIGHMEM |
71 | setup_highmem(end_iomem, highmem); | 71 | setup_highmem(end_iomem, highmem); |
72 | #endif | 72 | #endif |
73 | num_physpages = totalram_pages; | ||
74 | max_pfn = totalram_pages; | 73 | max_pfn = totalram_pages; |
75 | printk(KERN_INFO "Memory: %luk available\n", | 74 | mem_init_print_info(NULL); |
76 | nr_free_pages() << (PAGE_SHIFT-10)); | ||
77 | kmalloc_ok = 1; | 75 | kmalloc_ok = 1; |
78 | } | 76 | } |
79 | 77 | ||
@@ -244,7 +242,7 @@ void free_initmem(void) | |||
244 | #ifdef CONFIG_BLK_DEV_INITRD | 242 | #ifdef CONFIG_BLK_DEV_INITRD |
245 | void free_initrd_mem(unsigned long start, unsigned long end) | 243 | void free_initrd_mem(unsigned long start, unsigned long end) |
246 | { | 244 | { |
247 | free_reserved_area(start, end, 0, "initrd"); | 245 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
248 | } | 246 | } |
249 | #endif | 247 | #endif |
250 | 248 | ||
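free_reserved_area() now takes void * boundaries plus a poison byte, and the converted callers pass -1, which skips the poison memset (any value in 0..255 would be written over each page first). A sketch of the new calling convention, assuming the 3.11-era prototype unsigned long free_reserved_area(void *start, void *end, int poison, char *s):

    #include <linux/mm.h>

    void example_free_initrd_mem(unsigned long start, unsigned long end)
    {
            /* -1: do not poison; "initrd" is echoed in the "Freeing ..." log line */
            free_reserved_area((void *)start, (void *)end, -1, "initrd");
    }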
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index ff65fb4f1a95..6899195602b7 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S | |||
@@ -20,13 +20,12 @@ SECTIONS | |||
20 | . = START + SIZEOF_HEADERS; | 20 | . = START + SIZEOF_HEADERS; |
21 | 21 | ||
22 | _text = .; | 22 | _text = .; |
23 | _stext = .; | ||
24 | __init_begin = .; | ||
25 | INIT_TEXT_SECTION(0) | 23 | INIT_TEXT_SECTION(0) |
26 | . = ALIGN(PAGE_SIZE); | 24 | . = ALIGN(PAGE_SIZE); |
27 | 25 | ||
28 | .text : | 26 | .text : |
29 | { | 27 | { |
28 | _stext = .; | ||
30 | TEXT_TEXT | 29 | TEXT_TEXT |
31 | SCHED_TEXT | 30 | SCHED_TEXT |
32 | LOCK_TEXT | 31 | LOCK_TEXT |
@@ -62,7 +61,10 @@ SECTIONS | |||
62 | 61 | ||
63 | #include <asm/common.lds.S> | 62 | #include <asm/common.lds.S> |
64 | 63 | ||
64 | __init_begin = .; | ||
65 | init.data : { INIT_DATA } | 65 | init.data : { INIT_DATA } |
66 | __init_end = .; | ||
67 | |||
66 | .data : | 68 | .data : |
67 | { | 69 | { |
68 | INIT_TASK_DATA(KERNEL_STACK_SIZE) | 70 | INIT_TASK_DATA(KERNEL_STACK_SIZE) |
@@ -97,6 +99,7 @@ SECTIONS | |||
97 | PROVIDE(_bss_start = .); | 99 | PROVIDE(_bss_start = .); |
98 | SBSS(0) | 100 | SBSS(0) |
99 | BSS(0) | 101 | BSS(0) |
102 | __bss_stop = .; | ||
100 | _end = .; | 103 | _end = .; |
101 | PROVIDE (end = .); | 104 | PROVIDE (end = .); |
102 | 105 | ||
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h index 5eddb997defe..debafc40200a 100644 --- a/arch/unicore32/include/asm/memory.h +++ b/arch/unicore32/include/asm/memory.h | |||
@@ -98,12 +98,6 @@ | |||
98 | /* | 98 | /* |
99 | * Conversion between a struct page and a physical address. | 99 | * Conversion between a struct page and a physical address. |
100 | * | 100 | * |
101 | * Note: when converting an unknown physical address to a | ||
102 | * struct page, the resulting pointer must be validated | ||
103 | * using VALID_PAGE(). It must return an invalid struct page | ||
104 | * for any physical address not corresponding to a system | ||
105 | * RAM address. | ||
106 | * | ||
107 | * page_to_pfn(page) convert a struct page * to a PFN number | 101 | * page_to_pfn(page) convert a struct page * to a PFN number |
108 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * | 102 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * |
109 | * | 103 | * |
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 63df12d71ce3..ae6bc036db92 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c | |||
@@ -383,59 +383,14 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
383 | */ | 383 | */ |
384 | void __init mem_init(void) | 384 | void __init mem_init(void) |
385 | { | 385 | { |
386 | unsigned long reserved_pages, free_pages; | ||
387 | struct memblock_region *reg; | ||
388 | int i; | ||
389 | |||
390 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; | 386 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; |
391 | 387 | ||
392 | free_unused_memmap(&meminfo); | 388 | free_unused_memmap(&meminfo); |
393 | 389 | ||
394 | /* this will put all unused low memory onto the freelists */ | 390 | /* this will put all unused low memory onto the freelists */ |
395 | totalram_pages += free_all_bootmem(); | 391 | free_all_bootmem(); |
396 | |||
397 | reserved_pages = free_pages = 0; | ||
398 | |||
399 | for_each_bank(i, &meminfo) { | ||
400 | struct membank *bank = &meminfo.bank[i]; | ||
401 | unsigned int pfn1, pfn2; | ||
402 | struct page *page, *end; | ||
403 | |||
404 | pfn1 = bank_pfn_start(bank); | ||
405 | pfn2 = bank_pfn_end(bank); | ||
406 | |||
407 | page = pfn_to_page(pfn1); | ||
408 | end = pfn_to_page(pfn2 - 1) + 1; | ||
409 | |||
410 | do { | ||
411 | if (PageReserved(page)) | ||
412 | reserved_pages++; | ||
413 | else if (!page_count(page)) | ||
414 | free_pages++; | ||
415 | page++; | ||
416 | } while (page < end); | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * Since our memory may not be contiguous, calculate the | ||
421 | * real number of pages we have in this system | ||
422 | */ | ||
423 | printk(KERN_INFO "Memory:"); | ||
424 | num_physpages = 0; | ||
425 | for_each_memblock(memory, reg) { | ||
426 | unsigned long pages = memblock_region_memory_end_pfn(reg) - | ||
427 | memblock_region_memory_base_pfn(reg); | ||
428 | num_physpages += pages; | ||
429 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
430 | } | ||
431 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | ||
432 | |||
433 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", | ||
434 | nr_free_pages() << (PAGE_SHIFT-10), | ||
435 | free_pages << (PAGE_SHIFT-10), | ||
436 | reserved_pages << (PAGE_SHIFT-10), | ||
437 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
438 | 392 | ||
393 | mem_init_print_info(NULL); | ||
439 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" | 394 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" |
440 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" | 395 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" |
441 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | 396 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" |
@@ -464,7 +419,7 @@ void __init mem_init(void) | |||
464 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | 419 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); |
465 | BUG_ON(TASK_SIZE > MODULES_VADDR); | 420 | BUG_ON(TASK_SIZE > MODULES_VADDR); |
466 | 421 | ||
467 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 422 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
468 | /* | 423 | /* |
469 | * On a machine this small we won't get | 424 | * On a machine this small we won't get |
470 | * anywhere without overcommit, so turn | 425 | * anywhere without overcommit, so turn |
@@ -476,7 +431,7 @@ void __init mem_init(void) | |||
476 | 431 | ||
477 | void free_initmem(void) | 432 | void free_initmem(void) |
478 | { | 433 | { |
479 | free_initmem_default(0); | 434 | free_initmem_default(-1); |
480 | } | 435 | } |
481 | 436 | ||
482 | #ifdef CONFIG_BLK_DEV_INITRD | 437 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -486,7 +441,7 @@ static int keep_initrd; | |||
486 | void free_initrd_mem(unsigned long start, unsigned long end) | 441 | void free_initrd_mem(unsigned long start, unsigned long end) |
487 | { | 442 | { |
488 | if (!keep_initrd) | 443 | if (!keep_initrd) |
489 | free_reserved_area(start, end, 0, "initrd"); | 444 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
490 | } | 445 | } |
491 | 446 | ||
492 | static int __init keepinitrd_setup(char *__unused) | 447 | static int __init keepinitrd_setup(char *__unused) |
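With the global num_physpages gone, small-memory heuristics such as the one above ask get_num_physpages() instead, which (as introduced by this series) sums the present pages of the online nodes. A sketch of the check, assuming the helper keeps the semantics of the old global (128 pages at a 16 KB page size is roughly 2 MB of RAM):

    #include <linux/mm.h>
    #include <linux/mman.h>

    static void example_tiny_machine_tuning(void)
    {
            /* On a machine this small, nothing runs without overcommit. */
            if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128)
                    sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
    }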
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b094816a7e0f..2775023a0744 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -102,6 +102,7 @@ config X86 | |||
102 | select HAVE_ARCH_SECCOMP_FILTER | 102 | select HAVE_ARCH_SECCOMP_FILTER |
103 | select BUILDTIME_EXTABLE_SORT | 103 | select BUILDTIME_EXTABLE_SORT |
104 | select GENERIC_CMOS_UPDATE | 104 | select GENERIC_CMOS_UPDATE |
105 | select HAVE_ARCH_SOFT_DIRTY | ||
105 | select CLOCKSOURCE_WATCHDOG | 106 | select CLOCKSOURCE_WATCHDOG |
106 | select GENERIC_CLOCKEVENTS | 107 | select GENERIC_CLOCKEVENTS |
107 | select ARCH_CLOCKSOURCE_DATA if X86_64 | 108 | select ARCH_CLOCKSOURCE_DATA if X86_64 |
@@ -2258,11 +2259,11 @@ source "drivers/pcmcia/Kconfig" | |||
2258 | source "drivers/pci/hotplug/Kconfig" | 2259 | source "drivers/pci/hotplug/Kconfig" |
2259 | 2260 | ||
2260 | config RAPIDIO | 2261 | config RAPIDIO |
2261 | bool "RapidIO support" | 2262 | tristate "RapidIO support" |
2262 | depends on PCI | 2263 | depends on PCI |
2263 | default n | 2264 | default n |
2264 | help | 2265 | help |
2265 | If you say Y here, the kernel will include drivers and | 2266 | If enabled this option will include drivers and the core |
2266 | infrastructure code to support RapidIO interconnect devices. | 2267 | infrastructure code to support RapidIO interconnect devices. |
2267 | 2268 | ||
2268 | source "drivers/rapidio/Kconfig" | 2269 | source "drivers/rapidio/Kconfig" |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5b0818bc8963..7dc305a46058 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -207,7 +207,7 @@ static inline pte_t pte_mkexec(pte_t pte) | |||
207 | 207 | ||
208 | static inline pte_t pte_mkdirty(pte_t pte) | 208 | static inline pte_t pte_mkdirty(pte_t pte) |
209 | { | 209 | { |
210 | return pte_set_flags(pte, _PAGE_DIRTY); | 210 | return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); |
211 | } | 211 | } |
212 | 212 | ||
213 | static inline pte_t pte_mkyoung(pte_t pte) | 213 | static inline pte_t pte_mkyoung(pte_t pte) |
@@ -271,7 +271,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd) | |||
271 | 271 | ||
272 | static inline pmd_t pmd_mkdirty(pmd_t pmd) | 272 | static inline pmd_t pmd_mkdirty(pmd_t pmd) |
273 | { | 273 | { |
274 | return pmd_set_flags(pmd, _PAGE_DIRTY); | 274 | return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | 277 | static inline pmd_t pmd_mkhuge(pmd_t pmd) |
@@ -294,6 +294,26 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd) | |||
294 | return pmd_clear_flags(pmd, _PAGE_PRESENT); | 294 | return pmd_clear_flags(pmd, _PAGE_PRESENT); |
295 | } | 295 | } |
296 | 296 | ||
297 | static inline int pte_soft_dirty(pte_t pte) | ||
298 | { | ||
299 | return pte_flags(pte) & _PAGE_SOFT_DIRTY; | ||
300 | } | ||
301 | |||
302 | static inline int pmd_soft_dirty(pmd_t pmd) | ||
303 | { | ||
304 | return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; | ||
305 | } | ||
306 | |||
307 | static inline pte_t pte_mksoft_dirty(pte_t pte) | ||
308 | { | ||
309 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | ||
310 | } | ||
311 | |||
312 | static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | ||
313 | { | ||
314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | ||
315 | } | ||
316 | |||
297 | /* | 317 | /* |
298 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | 318 | * Mask out unsupported bits in a present pgprot. Non-present pgprots |
299 | * can use those bits for other purposes, so leave them be. | 319 | * can use those bits for other purposes, so leave them be. |
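The new pte_soft_dirty()/pte_mksoft_dirty() pair (and the pmd variants) give the soft-dirty engine a per-entry "written since last clear" bit. A condensed sketch of the clear-and-rearm step on the consumer side, using the x86-specific pte_clear_flags() from this header; the function name is illustrative, the real user in this series is the clear_refs/pagemap code in fs/proc/task_mmu.c:

    /* Illustrative: write-protect the PTE and drop its soft-dirty bit so the
     * next store faults and re-marks the page as soft-dirty.
     */
    static inline void example_clear_soft_dirty(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
    {
            pte_t ptent = *ptep;

            ptent = pte_wrprotect(ptent);
            ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
            set_pte_at(vma->vm_mm, addr, ptep, ptent);
    }

With CONFIG_MEM_SOFT_DIRTY disabled, _PAGE_SOFT_DIRTY is 0, so pte_soft_dirty() is always false and the clear is a no-op.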
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index e6423002c10b..c98ac63aae48 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -55,6 +55,18 @@ | |||
55 | #define _PAGE_HIDDEN (_AT(pteval_t, 0)) | 55 | #define _PAGE_HIDDEN (_AT(pteval_t, 0)) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | /* | ||
59 | * The same hidden bit is used by kmemcheck, but since kmemcheck | ||
60 | * works on kernel pages while soft-dirty engine on user space, | ||
61 | * they do not conflict with each other. | ||
62 | */ | ||
63 | |||
64 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
65 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) | ||
66 | #else | ||
67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) | ||
68 | #endif | ||
69 | |||
58 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 70 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
59 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 71 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
60 | #else | 72 | #else |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index a1df6e84691f..27811190cbd7 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -89,7 +89,6 @@ struct thread_info { | |||
89 | #define TIF_FORK 18 /* ret_from_fork */ | 89 | #define TIF_FORK 18 /* ret_from_fork */ |
90 | #define TIF_NOHZ 19 /* in adaptive nohz mode */ | 90 | #define TIF_NOHZ 19 /* in adaptive nohz mode */ |
91 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ | 91 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ |
92 | #define TIF_DEBUG 21 /* uses debug registers */ | ||
93 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | 92 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ |
94 | #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ | 93 | #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ |
95 | #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ | 94 | #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ |
@@ -113,7 +112,6 @@ struct thread_info { | |||
113 | #define _TIF_IA32 (1 << TIF_IA32) | 112 | #define _TIF_IA32 (1 << TIF_IA32) |
114 | #define _TIF_FORK (1 << TIF_FORK) | 113 | #define _TIF_FORK (1 << TIF_FORK) |
115 | #define _TIF_NOHZ (1 << TIF_NOHZ) | 114 | #define _TIF_NOHZ (1 << TIF_NOHZ) |
116 | #define _TIF_DEBUG (1 << TIF_DEBUG) | ||
117 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | 115 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
118 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) | 116 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
119 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | 117 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) |
@@ -154,7 +152,7 @@ struct thread_info { | |||
154 | (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) | 152 | (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) |
155 | 153 | ||
156 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) | 154 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
157 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 155 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
158 | 156 | ||
159 | #define PREEMPT_ACTIVE 0x10000000 | 157 | #define PREEMPT_ACTIVE 0x10000000 |
160 | 158 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5013a48d1aff..c587a8757227 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -90,7 +90,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |||
90 | static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | 90 | static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) |
91 | { | 91 | { |
92 | u32 l, h; | 92 | u32 l, h; |
93 | int mbytes = num_physpages >> (20-PAGE_SHIFT); | 93 | int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); |
94 | 94 | ||
95 | if (c->x86_model < 6) { | 95 | if (c->x86_model < 6) { |
96 | /* Based on AMD doc 20734R - June 2000 */ | 96 | /* Based on AMD doc 20734R - June 2000 */ |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 56f7fcfe7fa2..e68709da8251 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1040,8 +1040,6 @@ void __init setup_arch(char **cmdline_p) | |||
1040 | /* max_low_pfn get updated here */ | 1040 | /* max_low_pfn get updated here */ |
1041 | find_low_pfn_range(); | 1041 | find_low_pfn_range(); |
1042 | #else | 1042 | #else |
1043 | num_physpages = max_pfn; | ||
1044 | |||
1045 | check_x2apic(); | 1043 | check_x2apic(); |
1046 | 1044 | ||
1047 | /* How many end-of-memory variables you have, grandma! */ | 1045 | /* How many end-of-memory variables you have, grandma! */ |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 252b8f5489ba..4500142bc4aa 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/highmem.h> | 1 | #include <linux/highmem.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/swap.h> /* for totalram_pages */ | 3 | #include <linux/swap.h> /* for totalram_pages */ |
4 | #include <linux/bootmem.h> | ||
4 | 5 | ||
5 | void *kmap(struct page *page) | 6 | void *kmap(struct page *page) |
6 | { | 7 | { |
@@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void) | |||
121 | struct zone *zone; | 122 | struct zone *zone; |
122 | int nid; | 123 | int nid; |
123 | 124 | ||
125 | /* | ||
126 | * Explicitly reset zone->managed_pages because set_highmem_pages_init() | ||
127 | * is invoked before free_all_bootmem() | ||
128 | */ | ||
129 | reset_all_zones_managed_pages(); | ||
124 | for_each_zone(zone) { | 130 | for_each_zone(zone) { |
125 | unsigned long zone_start_pfn, zone_end_pfn; | 131 | unsigned long zone_start_pfn, zone_end_pfn; |
126 | 132 | ||
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 1f34e9219775..2ec29ac78ae6 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -494,7 +494,6 @@ int devmem_is_allowed(unsigned long pagenr) | |||
494 | 494 | ||
495 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | 495 | void free_init_pages(char *what, unsigned long begin, unsigned long end) |
496 | { | 496 | { |
497 | unsigned long addr; | ||
498 | unsigned long begin_aligned, end_aligned; | 497 | unsigned long begin_aligned, end_aligned; |
499 | 498 | ||
500 | /* Make sure boundaries are page aligned */ | 499 | /* Make sure boundaries are page aligned */ |
@@ -509,8 +508,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
509 | if (begin >= end) | 508 | if (begin >= end) |
510 | return; | 509 | return; |
511 | 510 | ||
512 | addr = begin; | ||
513 | |||
514 | /* | 511 | /* |
515 | * If debugging page accesses then do not free this memory but | 512 | * If debugging page accesses then do not free this memory but |
516 | * mark them not present - any buggy init-section access will | 513 | * mark them not present - any buggy init-section access will |
@@ -529,18 +526,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
529 | set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); | 526 | set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); |
530 | set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); | 527 | set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); |
531 | 528 | ||
532 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | 529 | free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what); |
533 | |||
534 | for (; addr < end; addr += PAGE_SIZE) { | ||
535 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
536 | free_reserved_page(virt_to_page(addr)); | ||
537 | } | ||
538 | #endif | 530 | #endif |
539 | } | 531 | } |
540 | 532 | ||
541 | void free_initmem(void) | 533 | void free_initmem(void) |
542 | { | 534 | { |
543 | free_init_pages("unused kernel memory", | 535 | free_init_pages("unused kernel", |
544 | (unsigned long)(&__init_begin), | 536 | (unsigned long)(&__init_begin), |
545 | (unsigned long)(&__init_end)); | 537 | (unsigned long)(&__init_end)); |
546 | } | 538 | } |
@@ -566,7 +558,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
566 | * - relocate_initrd() | 558 | * - relocate_initrd() |
567 | * So here We can do PAGE_ALIGN() safely to get partial page to be freed | 559 | * So here We can do PAGE_ALIGN() safely to get partial page to be freed |
568 | */ | 560 | */ |
569 | free_init_pages("initrd memory", start, PAGE_ALIGN(end)); | 561 | free_init_pages("initrd", start, PAGE_ALIGN(end)); |
570 | } | 562 | } |
571 | #endif | 563 | #endif |
572 | 564 | ||
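free_init_pages() now funnels into free_reserved_area(), which poisons each page with POISON_FREE_INITMEM before releasing it and prints its own "Freeing %s memory: ..." line, which is presumably why the labels above drop the trailing word "memory". An illustrative, compressed version of the helper's core loop (based on the mm/page_alloc.c version in this series; the real code also page-aligns the range first):

    unsigned long example_free_reserved_area(void *start, void *end, int poison, char *s)
    {
            void *pos;
            unsigned long pages = 0;

            for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
                    if ((unsigned int)poison <= 0xFF)
                            memset(pos, poison, PAGE_SIZE);   /* e.g. POISON_FREE_INITMEM */
                    free_reserved_page(virt_to_page(pos));
            }
            if (pages && s)
                    pr_info("Freeing %s memory: %ldK\n",
                            s, pages << (PAGE_SHIFT - 10));
            return pages;
    }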
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 3ac7e319918d..4287f1ffba7e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -660,10 +660,8 @@ void __init initmem_init(void) | |||
660 | highstart_pfn = max_low_pfn; | 660 | highstart_pfn = max_low_pfn; |
661 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 661 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
662 | pages_to_mb(highend_pfn - highstart_pfn)); | 662 | pages_to_mb(highend_pfn - highstart_pfn)); |
663 | num_physpages = highend_pfn; | ||
664 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | 663 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; |
665 | #else | 664 | #else |
666 | num_physpages = max_low_pfn; | ||
667 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | 665 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; |
668 | #endif | 666 | #endif |
669 | 667 | ||
@@ -671,7 +669,7 @@ void __init initmem_init(void) | |||
671 | sparse_memory_present_with_active_regions(0); | 669 | sparse_memory_present_with_active_regions(0); |
672 | 670 | ||
673 | #ifdef CONFIG_FLATMEM | 671 | #ifdef CONFIG_FLATMEM |
674 | max_mapnr = num_physpages; | 672 | max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; |
675 | #endif | 673 | #endif |
676 | __vmalloc_start_set = true; | 674 | __vmalloc_start_set = true; |
677 | 675 | ||
@@ -739,9 +737,6 @@ static void __init test_wp_bit(void) | |||
739 | 737 | ||
740 | void __init mem_init(void) | 738 | void __init mem_init(void) |
741 | { | 739 | { |
742 | int codesize, reservedpages, datasize, initsize; | ||
743 | int tmp; | ||
744 | |||
745 | pci_iommu_alloc(); | 740 | pci_iommu_alloc(); |
746 | 741 | ||
747 | #ifdef CONFIG_FLATMEM | 742 | #ifdef CONFIG_FLATMEM |
@@ -759,32 +754,11 @@ void __init mem_init(void) | |||
759 | set_highmem_pages_init(); | 754 | set_highmem_pages_init(); |
760 | 755 | ||
761 | /* this will put all low memory onto the freelists */ | 756 | /* this will put all low memory onto the freelists */ |
762 | totalram_pages += free_all_bootmem(); | 757 | free_all_bootmem(); |
763 | |||
764 | reservedpages = 0; | ||
765 | for (tmp = 0; tmp < max_low_pfn; tmp++) | ||
766 | /* | ||
767 | * Only count reserved RAM pages: | ||
768 | */ | ||
769 | if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) | ||
770 | reservedpages++; | ||
771 | 758 | ||
772 | after_bootmem = 1; | 759 | after_bootmem = 1; |
773 | 760 | ||
774 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 761 | mem_init_print_info(NULL); |
775 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
776 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
777 | |||
778 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | ||
779 | "%dk reserved, %dk data, %dk init, %ldk highmem)\n", | ||
780 | nr_free_pages() << (PAGE_SHIFT-10), | ||
781 | num_physpages << (PAGE_SHIFT-10), | ||
782 | codesize >> 10, | ||
783 | reservedpages << (PAGE_SHIFT-10), | ||
784 | datasize >> 10, | ||
785 | initsize >> 10, | ||
786 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
787 | |||
788 | printk(KERN_INFO "virtual kernel memory layout:\n" | 762 | printk(KERN_INFO "virtual kernel memory layout:\n" |
789 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 763 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
790 | #ifdef CONFIG_HIGHMEM | 764 | #ifdef CONFIG_HIGHMEM |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index b3940b6b4d7e..104d56a9245f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -712,36 +712,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory); | |||
712 | 712 | ||
713 | static void __meminit free_pagetable(struct page *page, int order) | 713 | static void __meminit free_pagetable(struct page *page, int order) |
714 | { | 714 | { |
715 | struct zone *zone; | ||
716 | bool bootmem = false; | ||
717 | unsigned long magic; | 715 | unsigned long magic; |
718 | unsigned int nr_pages = 1 << order; | 716 | unsigned int nr_pages = 1 << order; |
719 | 717 | ||
720 | /* bootmem page has reserved flag */ | 718 | /* bootmem page has reserved flag */ |
721 | if (PageReserved(page)) { | 719 | if (PageReserved(page)) { |
722 | __ClearPageReserved(page); | 720 | __ClearPageReserved(page); |
723 | bootmem = true; | ||
724 | 721 | ||
725 | magic = (unsigned long)page->lru.next; | 722 | magic = (unsigned long)page->lru.next; |
726 | if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { | 723 | if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { |
727 | while (nr_pages--) | 724 | while (nr_pages--) |
728 | put_page_bootmem(page++); | 725 | put_page_bootmem(page++); |
729 | } else | 726 | } else |
730 | __free_pages_bootmem(page, order); | 727 | while (nr_pages--) |
728 | free_reserved_page(page++); | ||
731 | } else | 729 | } else |
732 | free_pages((unsigned long)page_address(page), order); | 730 | free_pages((unsigned long)page_address(page), order); |
733 | |||
734 | /* | ||
735 | * SECTION_INFO pages and MIX_SECTION_INFO pages | ||
736 | * are all allocated by bootmem. | ||
737 | */ | ||
738 | if (bootmem) { | ||
739 | zone = page_zone(page); | ||
740 | zone_span_writelock(zone); | ||
741 | zone->present_pages += nr_pages; | ||
742 | zone_span_writeunlock(zone); | ||
743 | totalram_pages += nr_pages; | ||
744 | } | ||
745 | } | 731 | } |
746 | 732 | ||
747 | static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) | 733 | static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) |
@@ -1058,9 +1044,6 @@ static void __init register_page_bootmem_info(void) | |||
1058 | 1044 | ||
1059 | void __init mem_init(void) | 1045 | void __init mem_init(void) |
1060 | { | 1046 | { |
1061 | long codesize, reservedpages, datasize, initsize; | ||
1062 | unsigned long absent_pages; | ||
1063 | |||
1064 | pci_iommu_alloc(); | 1047 | pci_iommu_alloc(); |
1065 | 1048 | ||
1066 | /* clear_bss() already clear the empty_zero_page */ | 1049 | /* clear_bss() already clear the empty_zero_page */ |
@@ -1068,29 +1051,14 @@ void __init mem_init(void) | |||
1068 | register_page_bootmem_info(); | 1051 | register_page_bootmem_info(); |
1069 | 1052 | ||
1070 | /* this will put all memory onto the freelists */ | 1053 | /* this will put all memory onto the freelists */ |
1071 | totalram_pages = free_all_bootmem(); | 1054 | free_all_bootmem(); |
1072 | |||
1073 | absent_pages = absent_pages_in_range(0, max_pfn); | ||
1074 | reservedpages = max_pfn - totalram_pages - absent_pages; | ||
1075 | after_bootmem = 1; | 1055 | after_bootmem = 1; |
1076 | 1056 | ||
1077 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
1078 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
1079 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
1080 | |||
1081 | /* Register memory areas for /proc/kcore */ | 1057 | /* Register memory areas for /proc/kcore */ |
1082 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, | 1058 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, |
1083 | VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); | 1059 | VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); |
1084 | 1060 | ||
1085 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | 1061 | mem_init_print_info(NULL); |
1086 | "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", | ||
1087 | nr_free_pages() << (PAGE_SHIFT-10), | ||
1088 | max_pfn << (PAGE_SHIFT-10), | ||
1089 | codesize >> 10, | ||
1090 | absent_pages << (PAGE_SHIFT-10), | ||
1091 | reservedpages << (PAGE_SHIFT-10), | ||
1092 | datasize >> 10, | ||
1093 | initsize >> 10); | ||
1094 | } | 1062 | } |
1095 | 1063 | ||
1096 | #ifdef CONFIG_DEBUG_RODATA | 1064 | #ifdef CONFIG_DEBUG_RODATA |
@@ -1166,11 +1134,10 @@ void mark_rodata_ro(void) | |||
1166 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); | 1134 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); |
1167 | #endif | 1135 | #endif |
1168 | 1136 | ||
1169 | free_init_pages("unused kernel memory", | 1137 | free_init_pages("unused kernel", |
1170 | (unsigned long) __va(__pa_symbol(text_end)), | 1138 | (unsigned long) __va(__pa_symbol(text_end)), |
1171 | (unsigned long) __va(__pa_symbol(rodata_start))); | 1139 | (unsigned long) __va(__pa_symbol(rodata_start))); |
1172 | | 1140 | free_init_pages("unused kernel", |
1173 | free_init_pages("unused kernel memory", | ||
1174 | (unsigned long) __va(__pa_symbol(rodata_end)), | 1141 | (unsigned long) __va(__pa_symbol(rodata_end)), |
1175 | (unsigned long) __va(__pa_symbol(_sdata))); | 1142 | (unsigned long) __va(__pa_symbol(_sdata))); |
1176 | } | 1143 | } |
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 73a6d7395bd3..0342d27ca798 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -83,10 +83,8 @@ void __init initmem_init(void) | |||
83 | highstart_pfn = max_low_pfn; | 83 | highstart_pfn = max_low_pfn; |
84 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 84 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
85 | pages_to_mb(highend_pfn - highstart_pfn)); | 85 | pages_to_mb(highend_pfn - highstart_pfn)); |
86 | num_physpages = highend_pfn; | ||
87 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | 86 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; |
88 | #else | 87 | #else |
89 | num_physpages = max_low_pfn; | ||
90 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | 88 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; |
91 | #endif | 89 | #endif |
92 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", | 90 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", |
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index bba125b4bb06..479d7537a32a 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -173,39 +173,16 @@ void __init zones_init(void) | |||
173 | 173 | ||
174 | void __init mem_init(void) | 174 | void __init mem_init(void) |
175 | { | 175 | { |
176 | unsigned long codesize, reservedpages, datasize, initsize; | 176 | max_mapnr = max_low_pfn - ARCH_PFN_OFFSET; |
177 | unsigned long highmemsize, tmp, ram; | ||
178 | |||
179 | max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET; | ||
180 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | 177 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
181 | highmemsize = 0; | ||
182 | 178 | ||
183 | #ifdef CONFIG_HIGHMEM | 179 | #ifdef CONFIG_HIGHMEM |
184 | #error HIGHGMEM not implemented in init.c | 180 | #error HIGHGMEM not implemented in init.c |
185 | #endif | 181 | #endif |
186 | 182 | ||
187 | totalram_pages += free_all_bootmem(); | 183 | free_all_bootmem(); |
188 | |||
189 | reservedpages = ram = 0; | ||
190 | for (tmp = 0; tmp < max_mapnr; tmp++) { | ||
191 | ram++; | ||
192 | if (PageReserved(mem_map+tmp)) | ||
193 | reservedpages++; | ||
194 | } | ||
195 | 184 | ||
196 | codesize = (unsigned long) _etext - (unsigned long) _stext; | 185 | mem_init_print_info(NULL); |
197 | datasize = (unsigned long) _edata - (unsigned long) _sdata; | ||
198 | initsize = (unsigned long) __init_end - (unsigned long) __init_begin; | ||
199 | |||
200 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " | ||
201 | "%ldk data, %ldk init %ldk highmem)\n", | ||
202 | nr_free_pages() << (PAGE_SHIFT-10), | ||
203 | ram << (PAGE_SHIFT-10), | ||
204 | codesize >> 10, | ||
205 | reservedpages << (PAGE_SHIFT-10), | ||
206 | datasize >> 10, | ||
207 | initsize >> 10, | ||
208 | highmemsize >> 10); | ||
209 | } | 186 | } |
210 | 187 | ||
211 | #ifdef CONFIG_BLK_DEV_INITRD | 188 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -214,11 +191,11 @@ extern int initrd_is_mapped; | |||
214 | void free_initrd_mem(unsigned long start, unsigned long end) | 191 | void free_initrd_mem(unsigned long start, unsigned long end) |
215 | { | 192 | { |
216 | if (initrd_is_mapped) | 193 | if (initrd_is_mapped) |
217 | free_reserved_area(start, end, 0, "initrd"); | 194 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
218 | } | 195 | } |
219 | #endif | 196 | #endif |
220 | 197 | ||
221 | void free_initmem(void) | 198 | void free_initmem(void) |
222 | { | 199 | { |
223 | free_initmem_default(0); | 200 | free_initmem_default(-1); |
224 | } | 201 | } |
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 7c668c8a6f95..7e5d474dc6ba 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
@@ -59,6 +59,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev, | |||
59 | if (!disk->fops->getgeo) | 59 | if (!disk->fops->getgeo) |
60 | return -ENOTTY; | 60 | return -ENOTTY; |
61 | 61 | ||
62 | memset(&geo, 0, sizeof(geo)); | ||
62 | /* | 63 | /* |
63 | * We need to set the startsect first, the driver may | 64 | * We need to set the startsect first, the driver may |
64 | * want to override it. | 65 | * want to override it. |
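The added memset() closes a small information leak: compat_hdio_getgeo() fills only some members of the on-stack geometry before it is copied out, so any padding or driver-untouched field would otherwise carry stale kernel stack bytes to user space. The general pattern, sketched (assumes <linux/hdreg.h>, <linux/genhd.h> and <linux/uaccess.h>; the real compat path converts to a compat_hd_geometry before copying):

    static int example_fill_geo(struct hd_geometry __user *ugeo,
                                struct gendisk *disk, struct block_device *bdev)
    {
            struct hd_geometry geo;
            int ret;

            memset(&geo, 0, sizeof(geo));   /* no uninitialized bytes can reach user space */
            geo.start = get_start_sect(bdev);
            ret = disk->fops->getgeo(bdev, &geo);
            if (ret)
                    return ret;
            return copy_to_user(ugeo, &geo, sizeof(geo)) ? -EFAULT : 0;
    }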
diff --git a/block/genhd.c b/block/genhd.c index e9094b375c05..dadf42b454a3 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -512,7 +512,7 @@ static void register_disk(struct gendisk *disk) | |||
512 | 512 | ||
513 | ddev->parent = disk->driverfs_dev; | 513 | ddev->parent = disk->driverfs_dev; |
514 | 514 | ||
515 | dev_set_name(ddev, disk->disk_name); | 515 | dev_set_name(ddev, "%s", disk->disk_name); |
516 | 516 | ||
517 | /* delay uevents, until we scanned partition table */ | 517 | /* delay uevents, until we scanned partition table */ |
518 | dev_set_uevent_suppress(ddev, 1); | 518 | dev_set_uevent_suppress(ddev, 1); |
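This one-liner is part of a series-wide cleanup (dev_set_name(), alloc_workqueue(), kthread_run() and try_then_request_module() below all get the same treatment): a string that is not a compile-time literal should never be passed as the format argument of a printf-style API, because any '%' in it would be interpreted as a conversion specifier. Routing it through an explicit "%s" makes it plain data:

    /* Before: disk->disk_name is used as the format string itself. */
    dev_set_name(ddev, disk->disk_name);

    /* After: the name is only ever substituted as an argument. */
    dev_set_name(ddev, "%s", disk->disk_name);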
diff --git a/crypto/algapi.c b/crypto/algapi.c index 6149a6e09643..7a1ae87f1683 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -495,7 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name) | |||
495 | 495 | ||
496 | struct crypto_template *crypto_lookup_template(const char *name) | 496 | struct crypto_template *crypto_lookup_template(const char *name) |
497 | { | 497 | { |
498 | return try_then_request_module(__crypto_lookup_template(name), name); | 498 | return try_then_request_module(__crypto_lookup_template(name), "%s", |
499 | name); | ||
499 | } | 500 | } |
500 | EXPORT_SYMBOL_GPL(crypto_lookup_template); | 501 | EXPORT_SYMBOL_GPL(crypto_lookup_template); |
501 | 502 | ||
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index 1b11abbb5c91..f38a58aef3ec 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig | |||
@@ -10,10 +10,6 @@ config ASYNC_XOR | |||
10 | select ASYNC_CORE | 10 | select ASYNC_CORE |
11 | select XOR_BLOCKS | 11 | select XOR_BLOCKS |
12 | 12 | ||
13 | config ASYNC_MEMSET | ||
14 | tristate | ||
15 | select ASYNC_CORE | ||
16 | |||
17 | config ASYNC_PQ | 13 | config ASYNC_PQ |
18 | tristate | 14 | tristate |
19 | select ASYNC_CORE | 15 | select ASYNC_CORE |
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index d1e0e6f72bc1..462e4abbfe69 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile | |||
@@ -1,6 +1,5 @@ | |||
1 | obj-$(CONFIG_ASYNC_CORE) += async_tx.o | 1 | obj-$(CONFIG_ASYNC_CORE) += async_tx.o |
2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o | 2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o |
3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o | ||
4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o | 3 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o |
5 | obj-$(CONFIG_ASYNC_PQ) += async_pq.o | 4 | obj-$(CONFIG_ASYNC_PQ) += async_pq.o |
6 | obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o | 5 | obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o |
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c deleted file mode 100644 index 05a4d1e00148..000000000000 --- a/crypto/async_tx/async_memset.c +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | /* | ||
2 | * memory fill offload engine support | ||
3 | * | ||
4 | * Copyright © 2006, Intel Corporation. | ||
5 | * | ||
6 | * Dan Williams <dan.j.williams@intel.com> | ||
7 | * | ||
8 | * with architecture considerations by: | ||
9 | * Neil Brown <neilb@suse.de> | ||
10 | * Jeff Garzik <jeff@garzik.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms and conditions of the GNU General Public License, | ||
14 | * version 2, as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
19 | * more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along with | ||
22 | * this program; if not, write to the Free Software Foundation, Inc., | ||
23 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | * | ||
25 | */ | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/async_tx.h> | ||
32 | |||
33 | /** | ||
34 | * async_memset - attempt to fill memory with a dma engine. | ||
35 | * @dest: destination page | ||
36 | * @val: fill value | ||
37 | * @offset: offset in pages to start transaction | ||
38 | * @len: length in bytes | ||
39 | * | ||
40 | * honored flags: ASYNC_TX_ACK | ||
41 | */ | ||
42 | struct dma_async_tx_descriptor * | ||
43 | async_memset(struct page *dest, int val, unsigned int offset, size_t len, | ||
44 | struct async_submit_ctl *submit) | ||
45 | { | ||
46 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, | ||
47 | &dest, 1, NULL, 0, len); | ||
48 | struct dma_device *device = chan ? chan->device : NULL; | ||
49 | struct dma_async_tx_descriptor *tx = NULL; | ||
50 | |||
51 | if (device && is_dma_fill_aligned(device, offset, 0, len)) { | ||
52 | dma_addr_t dma_dest; | ||
53 | unsigned long dma_prep_flags = 0; | ||
54 | |||
55 | if (submit->cb_fn) | ||
56 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
57 | if (submit->flags & ASYNC_TX_FENCE) | ||
58 | dma_prep_flags |= DMA_PREP_FENCE; | ||
59 | dma_dest = dma_map_page(device->dev, dest, offset, len, | ||
60 | DMA_FROM_DEVICE); | ||
61 | |||
62 | tx = device->device_prep_dma_memset(chan, dma_dest, val, len, | ||
63 | dma_prep_flags); | ||
64 | } | ||
65 | |||
66 | if (tx) { | ||
67 | pr_debug("%s: (async) len: %zu\n", __func__, len); | ||
68 | async_tx_submit(chan, tx, submit); | ||
69 | } else { /* run the memset synchronously */ | ||
70 | void *dest_buf; | ||
71 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | ||
72 | |||
73 | dest_buf = page_address(dest) + offset; | ||
74 | |||
75 | /* wait for any prerequisite operations */ | ||
76 | async_tx_quiesce(&submit->depend_tx); | ||
77 | |||
78 | memset(dest_buf, val, len); | ||
79 | |||
80 | async_tx_sync_epilog(submit); | ||
81 | } | ||
82 | |||
83 | return tx; | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(async_memset); | ||
86 | |||
87 | MODULE_AUTHOR("Intel Corporation"); | ||
88 | MODULE_DESCRIPTION("asynchronous memset api"); | ||
89 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index b2c99dc1c5e2..f8c920cafe63 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, | |||
455 | 455 | ||
456 | get_online_cpus(); | 456 | get_online_cpus(); |
457 | 457 | ||
458 | pcrypt->wq = alloc_workqueue(name, | 458 | pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, |
459 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); | 459 | 1, name); |
460 | if (!pcrypt->wq) | 460 | if (!pcrypt->wq) |
461 | goto err; | 461 | goto err; |
462 | 462 | ||
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 5c5d1624fa2c..05306a59aedc 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -677,10 +677,9 @@ void acpi_irq_stats_init(void) | |||
677 | else | 677 | else |
678 | sprintf(buffer, "bug%02X", i); | 678 | sprintf(buffer, "bug%02X", i); |
679 | 679 | ||
680 | name = kzalloc(strlen(buffer) + 1, GFP_KERNEL); | 680 | name = kstrdup(buffer, GFP_KERNEL); |
681 | if (name == NULL) | 681 | if (name == NULL) |
682 | goto fail; | 682 | goto fail; |
683 | strncpy(name, buffer, strlen(buffer) + 1); | ||
684 | 683 | ||
685 | sysfs_attr_init(&counter_attrs[i].attr); | 684 | sysfs_attr_init(&counter_attrs[i].attr); |
686 | counter_attrs[i].attr.name = name; | 685 | counter_attrs[i].attr.name = name; |
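kstrdup() is the idiomatic replacement for the kzalloc() + strncpy() pair: it allocates strlen(s) + 1 bytes and copies the terminator in a single call, so the length bookkeeping cannot drift out of sync. The before/after, condensed from the hunk above:

    /* Before: manual sizing; the "+ 1" and the copy length must agree by hand. */
    name = kzalloc(strlen(buffer) + 1, GFP_KERNEL);
    if (name == NULL)
            goto fail;
    strncpy(name, buffer, strlen(buffer) + 1);

    /* After: one call, same allocation size, guaranteed NUL-terminated copy. */
    name = kstrdup(buffer, GFP_KERNEL);
    if (name == NULL)
            goto fail;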
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c index d78b204e65c1..ecc1929d7f6a 100644 --- a/drivers/base/attribute_container.c +++ b/drivers/base/attribute_container.c | |||
@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev, | |||
167 | ic->classdev.parent = get_device(dev); | 167 | ic->classdev.parent = get_device(dev); |
168 | ic->classdev.class = cont->class; | 168 | ic->classdev.class = cont->class; |
169 | cont->class->dev_release = attribute_container_release; | 169 | cont->class->dev_release = attribute_container_release; |
170 | dev_set_name(&ic->classdev, dev_name(dev)); | 170 | dev_set_name(&ic->classdev, "%s", dev_name(dev)); |
171 | if (fn) | 171 | if (fn) |
172 | fn(cont, dev, &ic->classdev); | 172 | fn(cont, dev, &ic->classdev); |
173 | else | 173 | else |
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 175649468c95..025c41d3cb33 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ |
2 | #define VERSION "81" | 2 | #define VERSION "83" |
3 | #define AOE_MAJOR 152 | 3 | #define AOE_MAJOR 152 |
4 | #define DEVICE_NAME "aoe" | 4 | #define DEVICE_NAME "aoe" |
5 | 5 | ||
@@ -196,9 +196,11 @@ struct ktstate { | |||
196 | struct completion rendez; | 196 | struct completion rendez; |
197 | struct task_struct *task; | 197 | struct task_struct *task; |
198 | wait_queue_head_t *waitq; | 198 | wait_queue_head_t *waitq; |
199 | int (*fn) (void); | 199 | int (*fn) (int); |
200 | char *name; | 200 | char name[12]; |
201 | spinlock_t *lock; | 201 | spinlock_t *lock; |
202 | int id; | ||
203 | int active; | ||
202 | }; | 204 | }; |
203 | 205 | ||
204 | int aoeblk_init(void); | 206 | int aoeblk_init(void); |
@@ -222,6 +224,7 @@ int aoecmd_init(void); | |||
222 | struct sk_buff *aoecmd_ata_id(struct aoedev *); | 224 | struct sk_buff *aoecmd_ata_id(struct aoedev *); |
223 | void aoe_freetframe(struct frame *); | 225 | void aoe_freetframe(struct frame *); |
224 | void aoe_flush_iocq(void); | 226 | void aoe_flush_iocq(void); |
227 | void aoe_flush_iocq_by_index(int); | ||
225 | void aoe_end_request(struct aoedev *, struct request *, int); | 228 | void aoe_end_request(struct aoedev *, struct request *, int); |
226 | int aoe_ktstart(struct ktstate *k); | 229 | int aoe_ktstart(struct ktstate *k); |
227 | void aoe_ktstop(struct ktstate *k); | 230 | void aoe_ktstop(struct ktstate *k); |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index fc803ecbbce4..99cb944a002d 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoecmd.c | 3 | * aoecmd.c |
4 | * Filesystem request handling methods | 4 | * Filesystem request handling methods |
@@ -35,14 +35,27 @@ module_param(aoe_maxout, int, 0644); | |||
35 | MODULE_PARM_DESC(aoe_maxout, | 35 | MODULE_PARM_DESC(aoe_maxout, |
36 | "Only aoe_maxout outstanding packets for every MAC on eX.Y."); | 36 | "Only aoe_maxout outstanding packets for every MAC on eX.Y."); |
37 | 37 | ||
38 | static wait_queue_head_t ktiowq; | 38 | /* The number of online cpus during module initialization gives us a |
39 | static struct ktstate kts; | 39 | * convenient heuristic cap on the parallelism used for ktio threads |
40 | * doing I/O completion. It is not important that the cap equal the | ||
41 | * actual number of running CPUs at any given time, but because of CPU | ||
42 | * hotplug, we take care to use ncpus instead of using | ||
43 | * num_online_cpus() after module initialization. | ||
44 | */ | ||
45 | static int ncpus; | ||
46 | |||
47 | /* mutex lock used for synchronization while thread spawning */ | ||
48 | static DEFINE_MUTEX(ktio_spawn_lock); | ||
49 | |||
50 | static wait_queue_head_t *ktiowq; | ||
51 | static struct ktstate *kts; | ||
40 | 52 | ||
41 | /* io completion queue */ | 53 | /* io completion queue */ |
42 | static struct { | 54 | struct iocq_ktio { |
43 | struct list_head head; | 55 | struct list_head head; |
44 | spinlock_t lock; | 56 | spinlock_t lock; |
45 | } iocq; | 57 | }; |
58 | static struct iocq_ktio *iocq; | ||
46 | 59 | ||
47 | static struct page *empty_page; | 60 | static struct page *empty_page; |
48 | 61 | ||
@@ -1278,23 +1291,36 @@ out: | |||
1278 | * Returns true iff responses needing processing remain. | 1291 | * Returns true iff responses needing processing remain. |
1279 | */ | 1292 | */ |
1280 | static int | 1293 | static int |
1281 | ktio(void) | 1294 | ktio(int id) |
1282 | { | 1295 | { |
1283 | struct frame *f; | 1296 | struct frame *f; |
1284 | struct list_head *pos; | 1297 | struct list_head *pos; |
1285 | int i; | 1298 | int i; |
1299 | int actual_id; | ||
1286 | 1300 | ||
1287 | for (i = 0; ; ++i) { | 1301 | for (i = 0; ; ++i) { |
1288 | if (i == MAXIOC) | 1302 | if (i == MAXIOC) |
1289 | return 1; | 1303 | return 1; |
1290 | if (list_empty(&iocq.head)) | 1304 | if (list_empty(&iocq[id].head)) |
1291 | return 0; | 1305 | return 0; |
1292 | pos = iocq.head.next; | 1306 | pos = iocq[id].head.next; |
1293 | list_del(pos); | 1307 | list_del(pos); |
1294 | spin_unlock_irq(&iocq.lock); | ||
1295 | f = list_entry(pos, struct frame, head); | 1308 | f = list_entry(pos, struct frame, head); |
1309 | spin_unlock_irq(&iocq[id].lock); | ||
1296 | ktiocomplete(f); | 1310 | ktiocomplete(f); |
1297 | spin_lock_irq(&iocq.lock); | 1311 | |
1312 | /* Figure out if extra threads are required. */ | ||
1313 | actual_id = f->t->d->aoeminor % ncpus; | ||
1314 | |||
1315 | if (!kts[actual_id].active) { | ||
1316 | BUG_ON(id != 0); | ||
1317 | mutex_lock(&ktio_spawn_lock); | ||
1318 | if (!kts[actual_id].active | ||
1319 | && aoe_ktstart(&kts[actual_id]) == 0) | ||
1320 | kts[actual_id].active = 1; | ||
1321 | mutex_unlock(&ktio_spawn_lock); | ||
1322 | } | ||
1323 | spin_lock_irq(&iocq[id].lock); | ||
1298 | } | 1324 | } |
1299 | } | 1325 | } |
1300 | 1326 | ||
@@ -1311,7 +1337,7 @@ kthread(void *vp) | |||
1311 | complete(&k->rendez); /* tell spawner we're running */ | 1337 | complete(&k->rendez); /* tell spawner we're running */ |
1312 | do { | 1338 | do { |
1313 | spin_lock_irq(k->lock); | 1339 | spin_lock_irq(k->lock); |
1314 | more = k->fn(); | 1340 | more = k->fn(k->id); |
1315 | if (!more) { | 1341 | if (!more) { |
1316 | add_wait_queue(k->waitq, &wait); | 1342 | add_wait_queue(k->waitq, &wait); |
1317 | __set_current_state(TASK_INTERRUPTIBLE); | 1343 | __set_current_state(TASK_INTERRUPTIBLE); |
@@ -1340,7 +1366,7 @@ aoe_ktstart(struct ktstate *k) | |||
1340 | struct task_struct *task; | 1366 | struct task_struct *task; |
1341 | 1367 | ||
1342 | init_completion(&k->rendez); | 1368 | init_completion(&k->rendez); |
1343 | task = kthread_run(kthread, k, k->name); | 1369 | task = kthread_run(kthread, k, "%s", k->name); |
1344 | if (task == NULL || IS_ERR(task)) | 1370 | if (task == NULL || IS_ERR(task)) |
1345 | return -ENOMEM; | 1371 | return -ENOMEM; |
1346 | k->task = task; | 1372 | k->task = task; |
@@ -1353,13 +1379,24 @@ aoe_ktstart(struct ktstate *k) | |||
1353 | static void | 1379 | static void |
1354 | ktcomplete(struct frame *f, struct sk_buff *skb) | 1380 | ktcomplete(struct frame *f, struct sk_buff *skb) |
1355 | { | 1381 | { |
1382 | int id; | ||
1356 | ulong flags; | 1383 | ulong flags; |
1357 | 1384 | ||
1358 | f->r_skb = skb; | 1385 | f->r_skb = skb; |
1359 | spin_lock_irqsave(&iocq.lock, flags); | 1386 | id = f->t->d->aoeminor % ncpus; |
1360 | list_add_tail(&f->head, &iocq.head); | 1387 | spin_lock_irqsave(&iocq[id].lock, flags); |
1361 | spin_unlock_irqrestore(&iocq.lock, flags); | 1388 | if (!kts[id].active) { |
1362 | wake_up(&ktiowq); | 1389 | spin_unlock_irqrestore(&iocq[id].lock, flags); |
1390 | /* The thread with id has not been spawned yet, | ||
1391 | * so delegate the work to the main thread and | ||
1392 | * try spawning a new thread. | ||
1393 | */ | ||
1394 | id = 0; | ||
1395 | spin_lock_irqsave(&iocq[id].lock, flags); | ||
1396 | } | ||
1397 | list_add_tail(&f->head, &iocq[id].head); | ||
1398 | spin_unlock_irqrestore(&iocq[id].lock, flags); | ||
1399 | wake_up(&ktiowq[id]); | ||
1363 | } | 1400 | } |
1364 | 1401 | ||
1365 | struct sk_buff * | 1402 | struct sk_buff * |
@@ -1706,6 +1743,17 @@ aoe_failbuf(struct aoedev *d, struct buf *buf) | |||
1706 | void | 1743 | void |
1707 | aoe_flush_iocq(void) | 1744 | aoe_flush_iocq(void) |
1708 | { | 1745 | { |
1746 | int i; | ||
1747 | |||
1748 | for (i = 0; i < ncpus; i++) { | ||
1749 | if (kts[i].active) | ||
1750 | aoe_flush_iocq_by_index(i); | ||
1751 | } | ||
1752 | } | ||
1753 | |||
1754 | void | ||
1755 | aoe_flush_iocq_by_index(int id) | ||
1756 | { | ||
1709 | struct frame *f; | 1757 | struct frame *f; |
1710 | struct aoedev *d; | 1758 | struct aoedev *d; |
1711 | LIST_HEAD(flist); | 1759 | LIST_HEAD(flist); |
@@ -1713,9 +1761,9 @@ aoe_flush_iocq(void) | |||
1713 | struct sk_buff *skb; | 1761 | struct sk_buff *skb; |
1714 | ulong flags; | 1762 | ulong flags; |
1715 | 1763 | ||
1716 | spin_lock_irqsave(&iocq.lock, flags); | 1764 | spin_lock_irqsave(&iocq[id].lock, flags); |
1717 | list_splice_init(&iocq.head, &flist); | 1765 | list_splice_init(&iocq[id].head, &flist); |
1718 | spin_unlock_irqrestore(&iocq.lock, flags); | 1766 | spin_unlock_irqrestore(&iocq[id].lock, flags); |
1719 | while (!list_empty(&flist)) { | 1767 | while (!list_empty(&flist)) { |
1720 | pos = flist.next; | 1768 | pos = flist.next; |
1721 | list_del(pos); | 1769 | list_del(pos); |
@@ -1738,6 +1786,8 @@ int __init | |||
1738 | aoecmd_init(void) | 1786 | aoecmd_init(void) |
1739 | { | 1787 | { |
1740 | void *p; | 1788 | void *p; |
1789 | int i; | ||
1790 | int ret; | ||
1741 | 1791 | ||
1742 | /* get_zeroed_page returns page with ref count 1 */ | 1792 | /* get_zeroed_page returns page with ref count 1 */ |
1743 | p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | 1793 | p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); |
@@ -1745,22 +1795,72 @@ aoecmd_init(void) | |||
1745 | return -ENOMEM; | 1795 | return -ENOMEM; |
1746 | empty_page = virt_to_page(p); | 1796 | empty_page = virt_to_page(p); |
1747 | 1797 | ||
1748 | INIT_LIST_HEAD(&iocq.head); | 1798 | ncpus = num_online_cpus(); |
1749 | spin_lock_init(&iocq.lock); | 1799 | |
1750 | init_waitqueue_head(&ktiowq); | 1800 | iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL); |
1751 | kts.name = "aoe_ktio"; | 1801 | if (!iocq) |
1752 | kts.fn = ktio; | 1802 | return -ENOMEM; |
1753 | kts.waitq = &ktiowq; | 1803 | |
1754 | kts.lock = &iocq.lock; | 1804 | kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL); |
1755 | return aoe_ktstart(&kts); | 1805 | if (!kts) { |
1806 | ret = -ENOMEM; | ||
1807 | goto kts_fail; | ||
1808 | } | ||
1809 | |||
1810 | ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL); | ||
1811 | if (!ktiowq) { | ||
1812 | ret = -ENOMEM; | ||
1813 | goto ktiowq_fail; | ||
1814 | } | ||
1815 | |||
1816 | mutex_init(&ktio_spawn_lock); | ||
1817 | |||
1818 | for (i = 0; i < ncpus; i++) { | ||
1819 | INIT_LIST_HEAD(&iocq[i].head); | ||
1820 | spin_lock_init(&iocq[i].lock); | ||
1821 | init_waitqueue_head(&ktiowq[i]); | ||
1822 | snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i); | ||
1823 | kts[i].fn = ktio; | ||
1824 | kts[i].waitq = &ktiowq[i]; | ||
1825 | kts[i].lock = &iocq[i].lock; | ||
1826 | kts[i].id = i; | ||
1827 | kts[i].active = 0; | ||
1828 | } | ||
1829 | kts[0].active = 1; | ||
1830 | if (aoe_ktstart(&kts[0])) { | ||
1831 | ret = -ENOMEM; | ||
1832 | goto ktstart_fail; | ||
1833 | } | ||
1834 | return 0; | ||
1835 | |||
1836 | ktstart_fail: | ||
1837 | kfree(ktiowq); | ||
1838 | ktiowq_fail: | ||
1839 | kfree(kts); | ||
1840 | kts_fail: | ||
1841 | kfree(iocq); | ||
1842 | |||
1843 | return ret; | ||
1756 | } | 1844 | } |
1757 | 1845 | ||
1758 | void | 1846 | void |
1759 | aoecmd_exit(void) | 1847 | aoecmd_exit(void) |
1760 | { | 1848 | { |
1761 | aoe_ktstop(&kts); | 1849 | int i; |
1850 | |||
1851 | for (i = 0; i < ncpus; i++) | ||
1852 | if (kts[i].active) | ||
1853 | aoe_ktstop(&kts[i]); | ||
1854 | |||
1762 | aoe_flush_iocq(); | 1855 | aoe_flush_iocq(); |
1763 | 1856 | ||
1857 | /* Free up the iocq and thread-specific configuration | ||
1858 | * allocated during startup. | ||
1859 | */ | ||
1860 | kfree(iocq); | ||
1861 | kfree(kts); | ||
1862 | kfree(ktiowq); | ||
1863 | |||
1764 | free_page((unsigned long) page_address(empty_page)); | 1864 | free_page((unsigned long) page_address(empty_page)); |
1765 | empty_page = NULL; | 1865 | empty_page = NULL; |
1766 | } | 1866 | } |
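The aoecmd.c hunks above split the single completion queue into per-CPU arrays (iocq[], ktiowq[], kts[]) and route each completion by the device minor number; when the target thread has not been started yet, the work is delegated to thread 0, which spawns the missing thread under ktio_spawn_lock. A minimal standalone sketch of that routing decision, using simplified stand-in types rather than the real aoe structures:

	#include <stdbool.h>

	#define NCPUS 4

	/* Stand-in for the per-thread state kept in kts[]. */
	struct worker {
		bool active;
	};

	static struct worker workers[NCPUS];

	/* Route a completion to the queue for (aoeminor % NCPUS); if that
	 * worker is not running yet, fall back to worker 0, which is started
	 * at init time and is responsible for spawning the others on demand. */
	static int pick_worker(int aoeminor)
	{
		int id = aoeminor % NCPUS;

		if (!workers[id].active)
			id = 0;
		return id;
	}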
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 98f2965778b9..784c92e038d1 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoedev.c | 3 | * aoedev.c |
4 | * AoE device utility functions; maintains device list. | 4 | * AoE device utility functions; maintains device list. |
@@ -518,7 +518,6 @@ void | |||
518 | aoedev_exit(void) | 518 | aoedev_exit(void) |
519 | { | 519 | { |
520 | flush_scheduled_work(); | 520 | flush_scheduled_work(); |
521 | aoe_flush_iocq(); | ||
522 | flush(NULL, 0, EXITING); | 521 | flush(NULL, 0, EXITING); |
523 | } | 522 | } |
524 | 523 | ||
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 71d3ea8d3006..63773a90581d 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoenet.c | 3 | * aoenet.c |
4 | * Ethernet portion of AoE driver | 4 | * Ethernet portion of AoE driver |
@@ -52,7 +52,7 @@ static struct sk_buff_head skbtxq; | |||
52 | 52 | ||
53 | /* enters with txlock held */ | 53 | /* enters with txlock held */ |
54 | static int | 54 | static int |
55 | tx(void) __must_hold(&txlock) | 55 | tx(int id) __must_hold(&txlock) |
56 | { | 56 | { |
57 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
58 | struct net_device *ifp; | 58 | struct net_device *ifp; |
@@ -205,7 +205,8 @@ aoenet_init(void) | |||
205 | kts.lock = &txlock; | 205 | kts.lock = &txlock; |
206 | kts.fn = tx; | 206 | kts.fn = tx; |
207 | kts.waitq = &txwq; | 207 | kts.waitq = &txwq; |
208 | kts.name = "aoe_tx"; | 208 | kts.id = 0; |
209 | snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id); | ||
209 | if (aoe_ktstart(&kts)) | 210 | if (aoe_ktstart(&kts)) |
210 | return -EAGAIN; | 211 | return -EAGAIN; |
211 | dev_add_pack(&aoe_pt); | 212 | dev_add_pack(&aoe_pt); |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 20dd52a2f92f..952dbfe22126 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -4087,7 +4087,8 @@ skip_create_disk: | |||
4087 | start_service_thread: | 4087 | start_service_thread: |
4088 | sprintf(thd_name, "mtip_svc_thd_%02d", index); | 4088 | sprintf(thd_name, "mtip_svc_thd_%02d", index); |
4089 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, | 4089 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, |
4090 | dd, dd->numa_node, thd_name); | 4090 | dd, dd->numa_node, "%s", |
4091 | thd_name); | ||
4091 | 4092 | ||
4092 | if (IS_ERR(dd->mtip_svc_handler)) { | 4093 | if (IS_ERR(dd->mtip_svc_handler)) { |
4093 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); | 4094 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); |
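Several hunks in this series (aoecmd.c, mtip32xx.c, nbd.c, xen-blkback, adt7470, and the dev_set_name()/device_create() callers further down) apply the same hardening: the name argument of these calls is a printf-style format, so a runtime string must be passed as the argument to "%s", never as the format itself, or any '%' in it is interpreted as a conversion. A small sketch of the safe form, with a hypothetical helper name not taken from the patch:

	#include <linux/kthread.h>

	/* Hypothetical helper: spawn a worker whose name comes from a runtime
	 * string.  kthread_run()'s namefmt argument is printf-style, so the
	 * caller-supplied name is passed as data for "%s", never as the format. */
	static struct task_struct *spawn_named_worker(int (*fn)(void *data),
						      void *data, const char *name)
	{
		return kthread_run(fn, data, "%s", name);
	}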
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 037288e7874d..2dc3b5153f0d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -623,8 +623,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
623 | if (!nbd->sock) | 623 | if (!nbd->sock) |
624 | return -EINVAL; | 624 | return -EINVAL; |
625 | 625 | ||
626 | nbd->disconnect = 1; | ||
627 | |||
626 | nbd_send_req(nbd, &sreq); | 628 | nbd_send_req(nbd, &sreq); |
627 | return 0; | 629 | return 0; |
628 | } | 630 | } |
629 | 631 | ||
630 | case NBD_CLEAR_SOCK: { | 632 | case NBD_CLEAR_SOCK: { |
@@ -654,6 +656,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
654 | nbd->sock = SOCKET_I(inode); | 656 | nbd->sock = SOCKET_I(inode); |
655 | if (max_part > 0) | 657 | if (max_part > 0) |
656 | bdev->bd_invalidated = 1; | 658 | bdev->bd_invalidated = 1; |
659 | nbd->disconnect = 0; /* we're connected now */ | ||
657 | return 0; | 660 | return 0; |
658 | } else { | 661 | } else { |
659 | fput(file); | 662 | fput(file); |
@@ -714,7 +717,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
714 | else | 717 | else |
715 | blk_queue_flush(nbd->disk->queue, 0); | 718 | blk_queue_flush(nbd->disk->queue, 0); |
716 | 719 | ||
717 | thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name); | 720 | thread = kthread_create(nbd_thread, nbd, "%s", |
721 | nbd->disk->disk_name); | ||
718 | if (IS_ERR(thread)) { | 722 | if (IS_ERR(thread)) { |
719 | mutex_lock(&nbd->tx_lock); | 723 | mutex_lock(&nbd->tx_lock); |
720 | return PTR_ERR(thread); | 724 | return PTR_ERR(thread); |
@@ -742,6 +746,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
742 | set_capacity(nbd->disk, 0); | 746 | set_capacity(nbd->disk, 0); |
743 | if (max_part > 0) | 747 | if (max_part > 0) |
744 | ioctl_by_bdev(bdev, BLKRRPART, 0); | 748 | ioctl_by_bdev(bdev, BLKRRPART, 0); |
749 | if (nbd->disconnect) /* user requested, ignore socket errors */ | ||
750 | return 0; | ||
745 | return nbd->harderror; | 751 | return nbd->harderror; |
746 | } | 752 | } |
747 | 753 | ||
@@ -750,7 +756,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
750 | * This is for compatibility only. The queue is always cleared | 756 | * This is for compatibility only. The queue is always cleared |
751 | * by NBD_DO_IT or NBD_CLEAR_SOCK. | 757 | * by NBD_DO_IT or NBD_CLEAR_SOCK. |
752 | */ | 758 | */ |
753 | BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head)); | ||
754 | return 0; | 759 | return 0; |
755 | 760 | ||
756 | case NBD_PRINT_DEBUG: | 761 | case NBD_PRINT_DEBUG: |
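Taken together, the three nbd.c hunks above add a disconnect flag so that a user-requested NBD_DISCONNECT does not surface as an error: the flag is set in the disconnect ioctl, cleared when a new socket is attached, and checked when NBD_DO_IT unwinds. A condensed, non-compilable sketch of that flow, using the field and ioctl names from the hunks:

	switch (cmd) {
	case NBD_DISCONNECT:
		nbd->disconnect = 1;		/* remember the user asked for this */
		nbd_send_req(nbd, &sreq);
		return 0;

	case NBD_SET_SOCK:
		/* ... attach the new socket ... */
		nbd->disconnect = 0;		/* we're connected now */
		return 0;

	case NBD_DO_IT:
		/* ... run until the socket goes away ... */
		if (nbd->disconnect)		/* user requested, ignore socket errors */
			return 0;
		return nbd->harderror;
	}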
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 8bfd1bcf95ec..04608a6502d7 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -93,7 +93,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) | |||
93 | } | 93 | } |
94 | invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); | 94 | invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); |
95 | 95 | ||
96 | blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name); | 96 | blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name); |
97 | if (IS_ERR(blkif->xenblkd)) { | 97 | if (IS_ERR(blkif->xenblkd)) { |
98 | err = PTR_ERR(blkif->xenblkd); | 98 | err = PTR_ERR(blkif->xenblkd); |
99 | blkif->xenblkd = NULL; | 99 | blkif->xenblkd = NULL; |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index d620b4495745..8a3aff724d98 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -2882,7 +2882,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, | |||
2882 | if (lba < 0) | 2882 | if (lba < 0) |
2883 | return -EINVAL; | 2883 | return -EINVAL; |
2884 | 2884 | ||
2885 | cgc->buffer = kmalloc(blocksize, GFP_KERNEL); | 2885 | cgc->buffer = kzalloc(blocksize, GFP_KERNEL); |
2886 | if (cgc->buffer == NULL) | 2886 | if (cgc->buffer == NULL) |
2887 | return -ENOMEM; | 2887 | return -ENOMEM; |
2888 | 2888 | ||
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 4afcb65cc623..5980cb9af857 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -830,9 +830,9 @@ probe_fail_cdrom_register: | |||
830 | del_gendisk(gd.disk); | 830 | del_gendisk(gd.disk); |
831 | probe_fail_no_disk: | 831 | probe_fail_no_disk: |
832 | kfree(gd.cd_info); | 832 | kfree(gd.cd_info); |
833 | probe_fail_no_mem: | ||
833 | unregister_blkdev(gdrom_major, GDROM_DEV_NAME); | 834 | unregister_blkdev(gdrom_major, GDROM_DEV_NAME); |
834 | gdrom_major = 0; | 835 | gdrom_major = 0; |
835 | probe_fail_no_mem: | ||
836 | pr_warning("Probe failed - error is 0x%X\n", err); | 836 | pr_warning("Probe failed - error is 0x%X\n", err); |
837 | return err; | 837 | return err; |
838 | } | 838 | } |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 2ca6d7844ad9..f895a8c8a244 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/crash_dump.h> | ||
25 | #include <linux/backing-dev.h> | 24 | #include <linux/backing-dev.h> |
26 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
27 | #include <linux/splice.h> | 26 | #include <linux/splice.h> |
@@ -357,40 +356,6 @@ static int mmap_kmem(struct file *file, struct vm_area_struct *vma) | |||
357 | } | 356 | } |
358 | #endif | 357 | #endif |
359 | 358 | ||
360 | #ifdef CONFIG_CRASH_DUMP | ||
361 | /* | ||
362 | * Read memory corresponding to the old kernel. | ||
363 | */ | ||
364 | static ssize_t read_oldmem(struct file *file, char __user *buf, | ||
365 | size_t count, loff_t *ppos) | ||
366 | { | ||
367 | unsigned long pfn, offset; | ||
368 | size_t read = 0, csize; | ||
369 | int rc = 0; | ||
370 | |||
371 | while (count) { | ||
372 | pfn = *ppos / PAGE_SIZE; | ||
373 | if (pfn > saved_max_pfn) | ||
374 | return read; | ||
375 | |||
376 | offset = (unsigned long)(*ppos % PAGE_SIZE); | ||
377 | if (count > PAGE_SIZE - offset) | ||
378 | csize = PAGE_SIZE - offset; | ||
379 | else | ||
380 | csize = count; | ||
381 | |||
382 | rc = copy_oldmem_page(pfn, buf, csize, offset, 1); | ||
383 | if (rc < 0) | ||
384 | return rc; | ||
385 | buf += csize; | ||
386 | *ppos += csize; | ||
387 | read += csize; | ||
388 | count -= csize; | ||
389 | } | ||
390 | return read; | ||
391 | } | ||
392 | #endif | ||
393 | |||
394 | #ifdef CONFIG_DEVKMEM | 359 | #ifdef CONFIG_DEVKMEM |
395 | /* | 360 | /* |
396 | * This function reads the *virtual* memory as seen by the kernel. | 361 | * This function reads the *virtual* memory as seen by the kernel. |
@@ -772,7 +737,6 @@ static int open_port(struct inode *inode, struct file *filp) | |||
772 | #define aio_write_zero aio_write_null | 737 | #define aio_write_zero aio_write_null |
773 | #define open_mem open_port | 738 | #define open_mem open_port |
774 | #define open_kmem open_mem | 739 | #define open_kmem open_mem |
775 | #define open_oldmem open_mem | ||
776 | 740 | ||
777 | static const struct file_operations mem_fops = { | 741 | static const struct file_operations mem_fops = { |
778 | .llseek = memory_lseek, | 742 | .llseek = memory_lseek, |
@@ -837,14 +801,6 @@ static const struct file_operations full_fops = { | |||
837 | .write = write_full, | 801 | .write = write_full, |
838 | }; | 802 | }; |
839 | 803 | ||
840 | #ifdef CONFIG_CRASH_DUMP | ||
841 | static const struct file_operations oldmem_fops = { | ||
842 | .read = read_oldmem, | ||
843 | .open = open_oldmem, | ||
844 | .llseek = default_llseek, | ||
845 | }; | ||
846 | #endif | ||
847 | |||
848 | static const struct memdev { | 804 | static const struct memdev { |
849 | const char *name; | 805 | const char *name; |
850 | umode_t mode; | 806 | umode_t mode; |
@@ -866,9 +822,6 @@ static const struct memdev { | |||
866 | #ifdef CONFIG_PRINTK | 822 | #ifdef CONFIG_PRINTK |
867 | [11] = { "kmsg", 0644, &kmsg_fops, NULL }, | 823 | [11] = { "kmsg", 0644, &kmsg_fops, NULL }, |
868 | #endif | 824 | #endif |
869 | #ifdef CONFIG_CRASH_DUMP | ||
870 | [12] = { "oldmem", 0, &oldmem_fops, NULL }, | ||
871 | #endif | ||
872 | }; | 825 | }; |
873 | 826 | ||
874 | static int memory_open(struct inode *inode, struct file *filp) | 827 | static int memory_open(struct inode *inode, struct file *filp) |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 44c407986f64..e94e619fe050 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -486,7 +486,7 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
486 | GFP_KERNEL); | 486 | GFP_KERNEL); |
487 | devfreq->last_stat_updated = jiffies; | 487 | devfreq->last_stat_updated = jiffies; |
488 | 488 | ||
489 | dev_set_name(&devfreq->dev, dev_name(dev)); | 489 | dev_set_name(&devfreq->dev, "%s", dev_name(dev)); |
490 | err = device_register(&devfreq->dev); | 490 | err = device_register(&devfreq->dev); |
491 | if (err) { | 491 | if (err) { |
492 | put_device(&devfreq->dev); | 492 | put_device(&devfreq->dev); |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 93f7992bee5c..9e56745f87bf 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -663,11 +663,6 @@ static bool device_has_all_tx_types(struct dma_device *device) | |||
663 | return false; | 663 | return false; |
664 | #endif | 664 | #endif |
665 | 665 | ||
666 | #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) | ||
667 | if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) | ||
668 | return false; | ||
669 | #endif | ||
670 | |||
671 | #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) | 666 | #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) |
672 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) | 667 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) |
673 | return false; | 668 | return false; |
@@ -729,8 +724,6 @@ int dma_async_device_register(struct dma_device *device) | |||
729 | !device->device_prep_dma_pq); | 724 | !device->device_prep_dma_pq); |
730 | BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && | 725 | BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && |
731 | !device->device_prep_dma_pq_val); | 726 | !device->device_prep_dma_pq_val); |
732 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | ||
733 | !device->device_prep_dma_memset); | ||
734 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 727 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
735 | !device->device_prep_dma_interrupt); | 728 | !device->device_prep_dma_interrupt); |
736 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | 729 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 17a2393b3e25..5ff6fc1819dc 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -1105,12 +1105,11 @@ static ssize_t cap_show(struct dma_chan *c, char *page) | |||
1105 | { | 1105 | { |
1106 | struct dma_device *dma = c->device; | 1106 | struct dma_device *dma = c->device; |
1107 | 1107 | ||
1108 | return sprintf(page, "copy%s%s%s%s%s%s\n", | 1108 | return sprintf(page, "copy%s%s%s%s%s\n", |
1109 | dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", | 1109 | dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", |
1110 | dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", | 1110 | dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", |
1111 | dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", | 1111 | dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", |
1112 | dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", | 1112 | dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", |
1113 | dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "", | ||
1114 | dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); | 1113 | dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); |
1115 | 1114 | ||
1116 | } | 1115 | } |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 29bf9448035d..212d584fe427 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
@@ -123,7 +123,6 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len | |||
123 | struct ioat_ring_ent { | 123 | struct ioat_ring_ent { |
124 | union { | 124 | union { |
125 | struct ioat_dma_descriptor *hw; | 125 | struct ioat_dma_descriptor *hw; |
126 | struct ioat_fill_descriptor *fill; | ||
127 | struct ioat_xor_descriptor *xor; | 126 | struct ioat_xor_descriptor *xor; |
128 | struct ioat_xor_ext_descriptor *xor_ex; | 127 | struct ioat_xor_ext_descriptor *xor_ex; |
129 | struct ioat_pq_descriptor *pq; | 128 | struct ioat_pq_descriptor *pq; |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index ca6ea9b3551b..b642e035579b 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -311,14 +311,6 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | |||
311 | if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ | 311 | if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ |
312 | ioat_dma_unmap(chan, flags, len, desc->hw); | 312 | ioat_dma_unmap(chan, flags, len, desc->hw); |
313 | break; | 313 | break; |
314 | case IOAT_OP_FILL: { | ||
315 | struct ioat_fill_descriptor *hw = desc->fill; | ||
316 | |||
317 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
318 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
319 | PCI_DMA_FROMDEVICE, flags, 1); | ||
320 | break; | ||
321 | } | ||
322 | case IOAT_OP_XOR_VAL: | 314 | case IOAT_OP_XOR_VAL: |
323 | case IOAT_OP_XOR: { | 315 | case IOAT_OP_XOR: { |
324 | struct ioat_xor_descriptor *xor = desc->xor; | 316 | struct ioat_xor_descriptor *xor = desc->xor; |
@@ -824,51 +816,6 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
824 | } | 816 | } |
825 | 817 | ||
826 | static struct dma_async_tx_descriptor * | 818 | static struct dma_async_tx_descriptor * |
827 | ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, | ||
828 | size_t len, unsigned long flags) | ||
829 | { | ||
830 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
831 | struct ioat_ring_ent *desc; | ||
832 | size_t total_len = len; | ||
833 | struct ioat_fill_descriptor *fill; | ||
834 | u64 src_data = (0x0101010101010101ULL) * (value & 0xff); | ||
835 | int num_descs, idx, i; | ||
836 | |||
837 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
838 | if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) | ||
839 | idx = ioat->head; | ||
840 | else | ||
841 | return NULL; | ||
842 | i = 0; | ||
843 | do { | ||
844 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
845 | |||
846 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
847 | fill = desc->fill; | ||
848 | |||
849 | fill->size = xfer_size; | ||
850 | fill->src_data = src_data; | ||
851 | fill->dst_addr = dest; | ||
852 | fill->ctl = 0; | ||
853 | fill->ctl_f.op = IOAT_OP_FILL; | ||
854 | |||
855 | len -= xfer_size; | ||
856 | dest += xfer_size; | ||
857 | dump_desc_dbg(ioat, desc); | ||
858 | } while (++i < num_descs); | ||
859 | |||
860 | desc->txd.flags = flags; | ||
861 | desc->len = total_len; | ||
862 | fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
863 | fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
864 | fill->ctl_f.compl_write = 1; | ||
865 | dump_desc_dbg(ioat, desc); | ||
866 | |||
867 | /* we leave the channel locked to ensure in order submission */ | ||
868 | return &desc->txd; | ||
869 | } | ||
870 | |||
871 | static struct dma_async_tx_descriptor * | ||
872 | __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | 819 | __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, |
873 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | 820 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, |
874 | size_t len, unsigned long flags) | 821 | size_t len, unsigned long flags) |
@@ -1431,7 +1378,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1431 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; | 1378 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; |
1432 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; | 1379 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; |
1433 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; | 1380 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; |
1434 | dma_addr_t dma_addr, dest_dma; | 1381 | dma_addr_t dest_dma; |
1435 | struct dma_async_tx_descriptor *tx; | 1382 | struct dma_async_tx_descriptor *tx; |
1436 | struct dma_chan *dma_chan; | 1383 | struct dma_chan *dma_chan; |
1437 | dma_cookie_t cookie; | 1384 | dma_cookie_t cookie; |
@@ -1598,56 +1545,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1598 | goto free_resources; | 1545 | goto free_resources; |
1599 | } | 1546 | } |
1600 | 1547 | ||
1601 | /* skip memset if the capability is not present */ | ||
1602 | if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) | ||
1603 | goto free_resources; | ||
1604 | |||
1605 | /* test memset */ | ||
1606 | op = IOAT_OP_FILL; | ||
1607 | |||
1608 | dma_addr = dma_map_page(dev, dest, 0, | ||
1609 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1610 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | ||
1611 | DMA_PREP_INTERRUPT | | ||
1612 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1613 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1614 | if (!tx) { | ||
1615 | dev_err(dev, "Self-test memset prep failed\n"); | ||
1616 | err = -ENODEV; | ||
1617 | goto dma_unmap; | ||
1618 | } | ||
1619 | |||
1620 | async_tx_ack(tx); | ||
1621 | init_completion(&cmp); | ||
1622 | tx->callback = ioat3_dma_test_callback; | ||
1623 | tx->callback_param = &cmp; | ||
1624 | cookie = tx->tx_submit(tx); | ||
1625 | if (cookie < 0) { | ||
1626 | dev_err(dev, "Self-test memset setup failed\n"); | ||
1627 | err = -ENODEV; | ||
1628 | goto dma_unmap; | ||
1629 | } | ||
1630 | dma->device_issue_pending(dma_chan); | ||
1631 | |||
1632 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1633 | |||
1634 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | ||
1635 | dev_err(dev, "Self-test memset timed out\n"); | ||
1636 | err = -ENODEV; | ||
1637 | goto dma_unmap; | ||
1638 | } | ||
1639 | |||
1640 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1641 | |||
1642 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | ||
1643 | u32 *ptr = page_address(dest); | ||
1644 | if (ptr[i]) { | ||
1645 | dev_err(dev, "Self-test memset failed compare\n"); | ||
1646 | err = -ENODEV; | ||
1647 | goto free_resources; | ||
1648 | } | ||
1649 | } | ||
1650 | |||
1651 | /* test for non-zero parity sum */ | 1548 | /* test for non-zero parity sum */ |
1652 | op = IOAT_OP_XOR_VAL; | 1549 | op = IOAT_OP_XOR_VAL; |
1653 | 1550 | ||
@@ -1706,8 +1603,7 @@ dma_unmap: | |||
1706 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | 1603 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) |
1707 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | 1604 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, |
1708 | DMA_TO_DEVICE); | 1605 | DMA_TO_DEVICE); |
1709 | } else if (op == IOAT_OP_FILL) | 1606 | } |
1710 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1711 | free_resources: | 1607 | free_resources: |
1712 | dma->device_free_chan_resources(dma_chan); | 1608 | dma->device_free_chan_resources(dma_chan); |
1713 | out: | 1609 | out: |
@@ -1944,12 +1840,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1944 | } | 1840 | } |
1945 | } | 1841 | } |
1946 | 1842 | ||
1947 | if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { | ||
1948 | dma_cap_set(DMA_MEMSET, dma->cap_mask); | ||
1949 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; | ||
1950 | } | ||
1951 | |||
1952 | |||
1953 | dma->device_tx_status = ioat3_tx_status; | 1843 | dma->device_tx_status = ioat3_tx_status; |
1954 | device->cleanup_fn = ioat3_cleanup_event; | 1844 | device->cleanup_fn = ioat3_cleanup_event; |
1955 | device->timer_fn = ioat3_timer_event; | 1845 | device->timer_fn = ioat3_timer_event; |
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 5ee57d402a6e..62f83e983d8d 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
@@ -100,33 +100,6 @@ struct ioat_dma_descriptor { | |||
100 | uint64_t user2; | 100 | uint64_t user2; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct ioat_fill_descriptor { | ||
104 | uint32_t size; | ||
105 | union { | ||
106 | uint32_t ctl; | ||
107 | struct { | ||
108 | unsigned int int_en:1; | ||
109 | unsigned int rsvd:1; | ||
110 | unsigned int dest_snoop_dis:1; | ||
111 | unsigned int compl_write:1; | ||
112 | unsigned int fence:1; | ||
113 | unsigned int rsvd2:2; | ||
114 | unsigned int dest_brk:1; | ||
115 | unsigned int bundle:1; | ||
116 | unsigned int rsvd4:15; | ||
117 | #define IOAT_OP_FILL 0x01 | ||
118 | unsigned int op:8; | ||
119 | } ctl_f; | ||
120 | }; | ||
121 | uint64_t src_data; | ||
122 | uint64_t dst_addr; | ||
123 | uint64_t next; | ||
124 | uint64_t rsv1; | ||
125 | uint64_t next_dst_addr; | ||
126 | uint64_t user1; | ||
127 | uint64_t user2; | ||
128 | }; | ||
129 | |||
130 | struct ioat_xor_descriptor { | 103 | struct ioat_xor_descriptor { |
131 | uint32_t size; | 104 | uint32_t size; |
132 | union { | 105 | union { |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 7dafb9f3785f..c9cc08c2dbba 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -633,39 +633,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
633 | } | 633 | } |
634 | 634 | ||
635 | static struct dma_async_tx_descriptor * | 635 | static struct dma_async_tx_descriptor * |
636 | iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | ||
637 | int value, size_t len, unsigned long flags) | ||
638 | { | ||
639 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | ||
640 | struct iop_adma_desc_slot *sw_desc, *grp_start; | ||
641 | int slot_cnt, slots_per_op; | ||
642 | |||
643 | if (unlikely(!len)) | ||
644 | return NULL; | ||
645 | BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); | ||
646 | |||
647 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | ||
648 | __func__, len); | ||
649 | |||
650 | spin_lock_bh(&iop_chan->lock); | ||
651 | slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); | ||
652 | sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); | ||
653 | if (sw_desc) { | ||
654 | grp_start = sw_desc->group_head; | ||
655 | iop_desc_init_memset(grp_start, flags); | ||
656 | iop_desc_set_byte_count(grp_start, iop_chan, len); | ||
657 | iop_desc_set_block_fill_val(grp_start, value); | ||
658 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | ||
659 | sw_desc->unmap_src_cnt = 1; | ||
660 | sw_desc->unmap_len = len; | ||
661 | sw_desc->async_tx.flags = flags; | ||
662 | } | ||
663 | spin_unlock_bh(&iop_chan->lock); | ||
664 | |||
665 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
666 | } | ||
667 | |||
668 | static struct dma_async_tx_descriptor * | ||
669 | iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | 636 | iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, |
670 | dma_addr_t *dma_src, unsigned int src_cnt, size_t len, | 637 | dma_addr_t *dma_src, unsigned int src_cnt, size_t len, |
671 | unsigned long flags) | 638 | unsigned long flags) |
@@ -1176,33 +1143,6 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1176 | goto free_resources; | 1143 | goto free_resources; |
1177 | } | 1144 | } |
1178 | 1145 | ||
1179 | /* test memset */ | ||
1180 | dma_addr = dma_map_page(dma_chan->device->dev, dest, 0, | ||
1181 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1182 | tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | ||
1183 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1184 | |||
1185 | cookie = iop_adma_tx_submit(tx); | ||
1186 | iop_adma_issue_pending(dma_chan); | ||
1187 | msleep(8); | ||
1188 | |||
1189 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | ||
1190 | dev_err(dma_chan->device->dev, | ||
1191 | "Self-test memset timed out, disabling\n"); | ||
1192 | err = -ENODEV; | ||
1193 | goto free_resources; | ||
1194 | } | ||
1195 | |||
1196 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | ||
1197 | u32 *ptr = page_address(dest); | ||
1198 | if (ptr[i]) { | ||
1199 | dev_err(dma_chan->device->dev, | ||
1200 | "Self-test memset failed compare, disabling\n"); | ||
1201 | err = -ENODEV; | ||
1202 | goto free_resources; | ||
1203 | } | ||
1204 | } | ||
1205 | |||
1206 | /* test for non-zero parity sum */ | 1146 | /* test for non-zero parity sum */ |
1207 | zero_sum_result = 0; | 1147 | zero_sum_result = 0; |
1208 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) | 1148 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) |
@@ -1487,8 +1427,6 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1487 | /* set prep routines based on capability */ | 1427 | /* set prep routines based on capability */ |
1488 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1428 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1489 | dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; | 1429 | dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; |
1490 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) | ||
1491 | dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset; | ||
1492 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1430 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1493 | dma_dev->max_xor = iop_adma_get_max_xor(); | 1431 | dma_dev->max_xor = iop_adma_get_max_xor(); |
1494 | dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; | 1432 | dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; |
@@ -1556,8 +1494,7 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1556 | goto err_free_iop_chan; | 1494 | goto err_free_iop_chan; |
1557 | } | 1495 | } |
1558 | 1496 | ||
1559 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || | 1497 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1560 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { | ||
1561 | ret = iop_adma_xor_val_self_test(adev); | 1498 | ret = iop_adma_xor_val_self_test(adev); |
1562 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | 1499 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); |
1563 | if (ret) | 1500 | if (ret) |
@@ -1584,7 +1521,6 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1584 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", | 1521 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", |
1585 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1522 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1586 | dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", | 1523 | dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", |
1587 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | ||
1588 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1524 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1589 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1525 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1590 | 1526 | ||
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index d64ae14f2706..200f1a3c9a44 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) | |||
89 | hw_desc->phy_next_desc = 0; | 89 | hw_desc->phy_next_desc = 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) | ||
93 | { | ||
94 | desc->value = val; | ||
95 | } | ||
96 | |||
97 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, | 92 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, |
98 | dma_addr_t addr) | 93 | dma_addr_t addr) |
99 | { | 94 | { |
@@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, | |||
128 | __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); | 123 | __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); |
129 | } | 124 | } |
130 | 125 | ||
131 | static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) | ||
132 | { | ||
133 | __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); | ||
134 | } | ||
135 | |||
136 | static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) | ||
137 | { | ||
138 | __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); | ||
139 | } | ||
140 | |||
141 | static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) | ||
142 | { | ||
143 | __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); | ||
144 | __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); | ||
145 | } | ||
146 | |||
147 | static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) | 126 | static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) |
148 | { | 127 | { |
149 | u32 val = __raw_readl(XOR_INTR_MASK(chan)); | 128 | u32 val = __raw_readl(XOR_INTR_MASK(chan)); |
@@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc) | |||
186 | 165 | ||
187 | if (chain_old_tail->type != desc->type) | 166 | if (chain_old_tail->type != desc->type) |
188 | return 0; | 167 | return 0; |
189 | if (desc->type == DMA_MEMSET) | ||
190 | return 0; | ||
191 | 168 | ||
192 | return 1; | 169 | return 1; |
193 | } | 170 | } |
@@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan, | |||
205 | case DMA_MEMCPY: | 182 | case DMA_MEMCPY: |
206 | op_mode = XOR_OPERATION_MODE_MEMCPY; | 183 | op_mode = XOR_OPERATION_MODE_MEMCPY; |
207 | break; | 184 | break; |
208 | case DMA_MEMSET: | ||
209 | op_mode = XOR_OPERATION_MODE_MEMSET; | ||
210 | break; | ||
211 | default: | 185 | default: |
212 | dev_err(mv_chan_to_devp(chan), | 186 | dev_err(mv_chan_to_devp(chan), |
213 | "error: unsupported operation %d\n", | 187 | "error: unsupported operation %d\n", |
@@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | |||
274 | if (sw_desc->type != mv_chan->current_type) | 248 | if (sw_desc->type != mv_chan->current_type) |
275 | mv_set_mode(mv_chan, sw_desc->type); | 249 | mv_set_mode(mv_chan, sw_desc->type); |
276 | 250 | ||
277 | if (sw_desc->type == DMA_MEMSET) { | 251 | /* set the hardware chain */ |
278 | /* for memset requests we need to program the engine, no | 252 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); |
279 | * descriptors used. | 253 | |
280 | */ | ||
281 | struct mv_xor_desc *hw_desc = sw_desc->hw_desc; | ||
282 | mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); | ||
283 | mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); | ||
284 | mv_chan_set_value(mv_chan, sw_desc->value); | ||
285 | } else { | ||
286 | /* set the hardware chain */ | ||
287 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | ||
288 | } | ||
289 | mv_chan->pending += sw_desc->slot_cnt; | 254 | mv_chan->pending += sw_desc->slot_cnt; |
290 | mv_xor_issue_pending(&mv_chan->dmachan); | 255 | mv_xor_issue_pending(&mv_chan->dmachan); |
291 | } | 256 | } |
@@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
688 | } | 653 | } |
689 | 654 | ||
690 | static struct dma_async_tx_descriptor * | 655 | static struct dma_async_tx_descriptor * |
691 | mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | ||
692 | size_t len, unsigned long flags) | ||
693 | { | ||
694 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
695 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
696 | int slot_cnt; | ||
697 | |||
698 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
699 | "%s dest: %x len: %u flags: %ld\n", | ||
700 | __func__, dest, len, flags); | ||
701 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
702 | return NULL; | ||
703 | |||
704 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | ||
705 | |||
706 | spin_lock_bh(&mv_chan->lock); | ||
707 | slot_cnt = mv_chan_memset_slot_count(len); | ||
708 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
709 | if (sw_desc) { | ||
710 | sw_desc->type = DMA_MEMSET; | ||
711 | sw_desc->async_tx.flags = flags; | ||
712 | grp_start = sw_desc->group_head; | ||
713 | mv_desc_init(grp_start, flags); | ||
714 | mv_desc_set_byte_count(grp_start, len); | ||
715 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
716 | mv_desc_set_block_fill_val(grp_start, value); | ||
717 | sw_desc->unmap_src_cnt = 1; | ||
718 | sw_desc->unmap_len = len; | ||
719 | } | ||
720 | spin_unlock_bh(&mv_chan->lock); | ||
721 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
722 | "%s sw_desc %p async_tx %p \n", | ||
723 | __func__, sw_desc, &sw_desc->async_tx); | ||
724 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
725 | } | ||
726 | |||
727 | static struct dma_async_tx_descriptor * | ||
728 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 656 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
729 | unsigned int src_cnt, size_t len, unsigned long flags) | 657 | unsigned int src_cnt, size_t len, unsigned long flags) |
730 | { | 658 | { |
@@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1137 | /* set prep routines based on capability */ | 1065 | /* set prep routines based on capability */ |
1138 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1066 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1139 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | 1067 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; |
1140 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) | ||
1141 | dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; | ||
1142 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1068 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1143 | dma_dev->max_xor = 8; | 1069 | dma_dev->max_xor = 8; |
1144 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; | 1070 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; |
@@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1187 | goto err_free_irq; | 1113 | goto err_free_irq; |
1188 | } | 1114 | } |
1189 | 1115 | ||
1190 | dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n", | 1116 | dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n", |
1191 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1117 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1192 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | ||
1193 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1118 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1194 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1119 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1195 | 1120 | ||
@@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1298 | dma_cap_set(DMA_MEMCPY, cap_mask); | 1223 | dma_cap_set(DMA_MEMCPY, cap_mask); |
1299 | if (of_property_read_bool(np, "dmacap,xor")) | 1224 | if (of_property_read_bool(np, "dmacap,xor")) |
1300 | dma_cap_set(DMA_XOR, cap_mask); | 1225 | dma_cap_set(DMA_XOR, cap_mask); |
1301 | if (of_property_read_bool(np, "dmacap,memset")) | ||
1302 | dma_cap_set(DMA_MEMSET, cap_mask); | ||
1303 | if (of_property_read_bool(np, "dmacap,interrupt")) | 1226 | if (of_property_read_bool(np, "dmacap,interrupt")) |
1304 | dma_cap_set(DMA_INTERRUPT, cap_mask); | 1227 | dma_cap_set(DMA_INTERRUPT, cap_mask); |
1305 | 1228 | ||
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index c632a4761fcf..c619359cb7fe 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -31,7 +31,6 @@ | |||
31 | 31 | ||
32 | #define XOR_OPERATION_MODE_XOR 0 | 32 | #define XOR_OPERATION_MODE_XOR 0 |
33 | #define XOR_OPERATION_MODE_MEMCPY 2 | 33 | #define XOR_OPERATION_MODE_MEMCPY 2 |
34 | #define XOR_OPERATION_MODE_MEMSET 4 | ||
35 | 34 | ||
36 | #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) | 35 | #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) |
37 | #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) | 36 | #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a17553f7c028..7ec82f0667eb 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2485,10 +2485,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan) | |||
2485 | struct dma_pl330_chan *pch = to_pchan(chan); | 2485 | struct dma_pl330_chan *pch = to_pchan(chan); |
2486 | unsigned long flags; | 2486 | unsigned long flags; |
2487 | 2487 | ||
2488 | spin_lock_irqsave(&pch->lock, flags); | ||
2489 | |||
2490 | tasklet_kill(&pch->task); | 2488 | tasklet_kill(&pch->task); |
2491 | 2489 | ||
2490 | spin_lock_irqsave(&pch->lock, flags); | ||
2491 | |||
2492 | pl330_release_channel(pch->pl330_chid); | 2492 | pl330_release_channel(pch->pl330_chid); |
2493 | pch->pl330_chid = NULL; | 2493 | pch->pl330_chid = NULL; |
2494 | 2494 | ||
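The pl330.c hunk above moves tasklet_kill() out from under the channel spinlock: tasklet_kill() may block waiting for a running tasklet to finish, so calling it with a spinlock held and interrupts disabled risks a deadlock. A simplified sketch of the corrected ordering, not the full driver teardown:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	/* Simplified teardown order: kill the tasklet first, then take the
	 * lock with IRQs off for the remaining channel cleanup. */
	static void chan_teardown(struct tasklet_struct *task, spinlock_t *lock)
	{
		unsigned long flags;

		tasklet_kill(task);		/* may block; never under the lock */

		spin_lock_irqsave(lock, flags);
		/* ... release channel state under the lock ... */
		spin_unlock_irqrestore(lock, flags);
	}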
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5d3d95569a1e..1e220f8dfd8c 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -2323,47 +2323,6 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( | |||
2323 | } | 2323 | } |
2324 | 2324 | ||
2325 | /** | 2325 | /** |
2326 | * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation | ||
2327 | */ | ||
2328 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( | ||
2329 | struct dma_chan *chan, dma_addr_t dma_dest, int value, | ||
2330 | size_t len, unsigned long flags) | ||
2331 | { | ||
2332 | struct ppc440spe_adma_chan *ppc440spe_chan; | ||
2333 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | ||
2334 | int slot_cnt, slots_per_op; | ||
2335 | |||
2336 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | ||
2337 | |||
2338 | if (unlikely(!len)) | ||
2339 | return NULL; | ||
2340 | |||
2341 | BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); | ||
2342 | |||
2343 | spin_lock_bh(&ppc440spe_chan->lock); | ||
2344 | |||
2345 | dev_dbg(ppc440spe_chan->device->common.dev, | ||
2346 | "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n", | ||
2347 | ppc440spe_chan->device->id, __func__, value, len, | ||
2348 | flags & DMA_PREP_INTERRUPT ? 1 : 0); | ||
2349 | |||
2350 | slot_cnt = slots_per_op = 1; | ||
2351 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | ||
2352 | slots_per_op); | ||
2353 | if (sw_desc) { | ||
2354 | group_start = sw_desc->group_head; | ||
2355 | ppc440spe_desc_init_memset(group_start, value, flags); | ||
2356 | ppc440spe_adma_set_dest(group_start, dma_dest, 0); | ||
2357 | ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); | ||
2358 | sw_desc->unmap_len = len; | ||
2359 | sw_desc->async_tx.flags = flags; | ||
2360 | } | ||
2361 | spin_unlock_bh(&ppc440spe_chan->lock); | ||
2362 | |||
2363 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
2364 | } | ||
2365 | |||
2366 | /** | ||
2367 | * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation | 2326 | * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation |
2368 | */ | 2327 | */ |
2369 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( | 2328 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( |
@@ -4125,7 +4084,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |||
4125 | case PPC440SPE_DMA1_ID: | 4084 | case PPC440SPE_DMA1_ID: |
4126 | dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); | 4085 | dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); |
4127 | dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); | 4086 | dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); |
4128 | dma_cap_set(DMA_MEMSET, adev->common.cap_mask); | ||
4129 | dma_cap_set(DMA_PQ, adev->common.cap_mask); | 4087 | dma_cap_set(DMA_PQ, adev->common.cap_mask); |
4130 | dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); | 4088 | dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); |
4131 | dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); | 4089 | dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); |
@@ -4151,10 +4109,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |||
4151 | adev->common.device_prep_dma_memcpy = | 4109 | adev->common.device_prep_dma_memcpy = |
4152 | ppc440spe_adma_prep_dma_memcpy; | 4110 | ppc440spe_adma_prep_dma_memcpy; |
4153 | } | 4111 | } |
4154 | if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) { | ||
4155 | adev->common.device_prep_dma_memset = | ||
4156 | ppc440spe_adma_prep_dma_memset; | ||
4157 | } | ||
4158 | if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { | 4112 | if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { |
4159 | adev->common.max_xor = XOR_MAX_OPS; | 4113 | adev->common.max_xor = XOR_MAX_OPS; |
4160 | adev->common.device_prep_dma_xor = | 4114 | adev->common.device_prep_dma_xor = |
@@ -4217,7 +4171,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |||
4217 | dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", | 4171 | dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", |
4218 | dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", | 4172 | dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", |
4219 | dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", | 4173 | dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", |
4220 | dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "", | ||
4221 | dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); | 4174 | dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); |
4222 | } | 4175 | } |
4223 | 4176 | ||
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 8c69803558fe..18ccadef43fd 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c | |||
@@ -602,7 +602,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev) | |||
602 | edev->dev->class = extcon_class; | 602 | edev->dev->class = extcon_class; |
603 | edev->dev->release = extcon_dev_release; | 603 | edev->dev->release = extcon_dev_release; |
604 | 604 | ||
605 | dev_set_name(edev->dev, edev->name ? edev->name : dev_name(dev)); | 605 | dev_set_name(edev->dev, "%s", edev->name ? edev->name : dev_name(dev)); |
606 | 606 | ||
607 | if (edev->max_supported) { | 607 | if (edev->max_supported) { |
608 | char buf[10]; | 608 | char buf[10]; |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index b95159b33c39..eb760a218da4 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -551,9 +551,15 @@ static bool dmi_matches(const struct dmi_system_id *dmi) | |||
551 | int s = dmi->matches[i].slot; | 551 | int s = dmi->matches[i].slot; |
552 | if (s == DMI_NONE) | 552 | if (s == DMI_NONE) |
553 | break; | 553 | break; |
554 | if (dmi_ident[s] | 554 | if (dmi_ident[s]) { |
555 | && strstr(dmi_ident[s], dmi->matches[i].substr)) | 555 | if (!dmi->matches[i].exact_match && |
556 | continue; | 556 | strstr(dmi_ident[s], dmi->matches[i].substr)) |
557 | continue; | ||
558 | else if (dmi->matches[i].exact_match && | ||
559 | !strcmp(dmi_ident[s], dmi->matches[i].substr)) | ||
560 | continue; | ||
561 | } | ||
562 | |||
557 | /* No match */ | 563 | /* No match */ |
558 | return false; | 564 | return false; |
559 | } | 565 | } |
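The dmi_scan.c change lets a table entry request exact string comparison instead of the default substring match; the intel_lvds.c entries that follow rely on it so that a board name such as "D510MO" does not also match "D510MOV". A small standalone sketch of the per-slot match rule, assuming only that each entry carries an exact_match flag as shown in the hunk:

	#include <stdbool.h>
	#include <string.h>

	/* Substring match by default, full-string compare when the entry
	 * sets exact_match. */
	static bool slot_matches(const char *ident, const char *substr,
				 bool exact_match)
	{
		if (!ident)
			return false;
		if (exact_match)
			return strcmp(ident, substr) == 0;
		return strstr(ident, substr) != NULL;
	}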
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 29412cc89c7a..817f936e2666 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -869,6 +869,22 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
869 | DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), | 869 | DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), |
870 | }, | 870 | }, |
871 | }, | 871 | }, |
872 | { | ||
873 | .callback = intel_no_lvds_dmi_callback, | ||
874 | .ident = "Intel D510MO", | ||
875 | .matches = { | ||
876 | DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), | ||
877 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"), | ||
878 | }, | ||
879 | }, | ||
880 | { | ||
881 | .callback = intel_no_lvds_dmi_callback, | ||
882 | .ident = "Intel D525MW", | ||
883 | .matches = { | ||
884 | DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), | ||
885 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), | ||
886 | }, | ||
887 | }, | ||
872 | 888 | ||
873 | { } /* terminating entry */ | 889 | { } /* terminating entry */ |
874 | }; | 890 | }; |
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 5a82b6b75849..af85299f2126 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -373,19 +373,6 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
373 | pos = pos->next) | 373 | pos = pos->next) |
374 | 374 | ||
375 | /** | 375 | /** |
376 | * __list_for_each - iterate over a list | ||
377 | * @pos: the &struct list_head to use as a loop cursor. | ||
378 | * @head: the head for your list. | ||
379 | * | ||
380 | * This variant differs from list_for_each() in that it's the | ||
381 | * simplest possible list iteration code, no prefetching is done. | ||
382 | * Use this for code that knows the list to be very short (empty | ||
383 | * or 1 entry) most of the time. | ||
384 | */ | ||
385 | #define __list_for_each(pos, head) \ | ||
386 | for (pos = (head)->next; pos != (head); pos = pos->next) | ||
387 | |||
388 | /** | ||
389 | * list_for_each_prev - iterate over a list backwards | 376 | * list_for_each_prev - iterate over a list backwards |
390 | * @pos: the &struct list_head to use as a loop cursor. | 377 | * @pos: the &struct list_head to use as a loop cursor. |
391 | * @head: the head for your list. | 378 | * @head: the head for your list. |
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 833dd1afbf46..66d44581e1b1 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c | |||
@@ -75,7 +75,7 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) | |||
75 | cl->device.bus = &hsi_bus_type; | 75 | cl->device.bus = &hsi_bus_type; |
76 | cl->device.parent = &port->device; | 76 | cl->device.parent = &port->device; |
77 | cl->device.release = hsi_client_release; | 77 | cl->device.release = hsi_client_release; |
78 | dev_set_name(&cl->device, info->name); | 78 | dev_set_name(&cl->device, "%s", info->name); |
79 | cl->device.platform_data = info->platform_data; | 79 | cl->device.platform_data = info->platform_data; |
80 | if (info->archdata) | 80 | if (info->archdata) |
81 | cl->device.archdata = *info->archdata; | 81 | cl->device.archdata = *info->archdata; |
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index b83bf4bb95eb..0f34bca9f5e5 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c | |||
@@ -1285,7 +1285,7 @@ static int adt7470_probe(struct i2c_client *client, | |||
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | init_completion(&data->auto_update_stop); | 1287 | init_completion(&data->auto_update_stop); |
1288 | data->auto_update = kthread_run(adt7470_update_thread, client, | 1288 | data->auto_update = kthread_run(adt7470_update_thread, client, "%s", |
1289 | dev_name(data->hwmon_dev)); | 1289 | dev_name(data->hwmon_dev)); |
1290 | if (IS_ERR(data->auto_update)) { | 1290 | if (IS_ERR(data->auto_update)) { |
1291 | err = PTR_ERR(data->auto_update); | 1291 | err = PTR_ERR(data->auto_update); |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 2ff620444930..0b510bafd90e 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -1756,7 +1756,7 @@ static int ide_cd_probe(ide_drive_t *drive) | |||
1756 | 1756 | ||
1757 | info->dev.parent = &drive->gendev; | 1757 | info->dev.parent = &drive->gendev; |
1758 | info->dev.release = ide_cd_release; | 1758 | info->dev.release = ide_cd_release; |
1759 | dev_set_name(&info->dev, dev_name(&drive->gendev)); | 1759 | dev_set_name(&info->dev, "%s", dev_name(&drive->gendev)); |
1760 | 1760 | ||
1761 | if (device_register(&info->dev)) | 1761 | if (device_register(&info->dev)) |
1762 | goto out_free_disk; | 1762 | goto out_free_disk; |
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index de86631e767d..838996a0039e 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c | |||
@@ -392,7 +392,7 @@ static int ide_gd_probe(ide_drive_t *drive) | |||
392 | 392 | ||
393 | idkp->dev.parent = &drive->gendev; | 393 | idkp->dev.parent = &drive->gendev; |
394 | idkp->dev.release = ide_disk_release; | 394 | idkp->dev.release = ide_disk_release; |
395 | dev_set_name(&idkp->dev, dev_name(&drive->gendev)); | 395 | dev_set_name(&idkp->dev, "%s", dev_name(&drive->gendev)); |
396 | 396 | ||
397 | if (device_register(&idkp->dev)) | 397 | if (device_register(&idkp->dev)) |
398 | goto out_free_disk; | 398 | goto out_free_disk; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 068cef0a987a..2a744a91370e 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -545,7 +545,7 @@ static int ide_register_port(ide_hwif_t *hwif) | |||
545 | int ret; | 545 | int ret; |
546 | 546 | ||
547 | /* register with global device tree */ | 547 | /* register with global device tree */ |
548 | dev_set_name(&hwif->gendev, hwif->name); | 548 | dev_set_name(&hwif->gendev, "%s", hwif->name); |
549 | dev_set_drvdata(&hwif->gendev, hwif); | 549 | dev_set_drvdata(&hwif->gendev, hwif); |
550 | if (hwif->gendev.parent == NULL) | 550 | if (hwif->gendev.parent == NULL) |
551 | hwif->gendev.parent = hwif->dev; | 551 | hwif->gendev.parent = hwif->dev; |
@@ -559,7 +559,7 @@ static int ide_register_port(ide_hwif_t *hwif) | |||
559 | } | 559 | } |
560 | 560 | ||
561 | hwif->portdev = device_create(ide_port_class, &hwif->gendev, | 561 | hwif->portdev = device_create(ide_port_class, &hwif->gendev, |
562 | MKDEV(0, 0), hwif, hwif->name); | 562 | MKDEV(0, 0), hwif, "%s", hwif->name); |
563 | if (IS_ERR(hwif->portdev)) { | 563 | if (IS_ERR(hwif->portdev)) { |
564 | ret = PTR_ERR(hwif->portdev); | 564 | ret = PTR_ERR(hwif->portdev); |
565 | device_unregister(&hwif->gendev); | 565 | device_unregister(&hwif->gendev); |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index c6c574bd5f59..1793aea4a7d2 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -1985,7 +1985,7 @@ static int ide_tape_probe(ide_drive_t *drive) | |||
1985 | 1985 | ||
1986 | tape->dev.parent = &drive->gendev; | 1986 | tape->dev.parent = &drive->gendev; |
1987 | tape->dev.release = ide_tape_release; | 1987 | tape->dev.release = ide_tape_release; |
1988 | dev_set_name(&tape->dev, dev_name(&drive->gendev)); | 1988 | dev_set_name(&tape->dev, "%s", dev_name(&drive->gendev)); |
1989 | 1989 | ||
1990 | if (device_register(&tape->dev)) | 1990 | if (device_register(&tape->dev)) |
1991 | goto out_free_disk; | 1991 | goto out_free_disk; |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 246fdc151652..99904f7d59e3 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -813,7 +813,7 @@ int ib_device_register_sysfs(struct ib_device *device, | |||
813 | 813 | ||
814 | class_dev->class = &ib_class; | 814 | class_dev->class = &ib_class; |
815 | class_dev->parent = device->dma_device; | 815 | class_dev->parent = device->dma_device; |
816 | dev_set_name(class_dev, device->name); | 816 | dev_set_name(class_dev, "%s", device->name); |
817 | dev_set_drvdata(class_dev, device); | 817 | dev_set_drvdata(class_dev, device); |
818 | 818 | ||
819 | INIT_LIST_HEAD(&device->port_list); | 819 | INIT_LIST_HEAD(&device->port_list); |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index b56c9428f3c5..9dd0bc89c3aa 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -2208,7 +2208,7 @@ int qib_cdev_init(int minor, const char *name, | |||
2208 | goto err_cdev; | 2208 | goto err_cdev; |
2209 | } | 2209 | } |
2210 | 2210 | ||
2211 | device = device_create(qib_class, NULL, dev, NULL, name); | 2211 | device = device_create(qib_class, NULL, dev, NULL, "%s", name); |
2212 | if (!IS_ERR(device)) | 2212 | if (!IS_ERR(device)) |
2213 | goto done; | 2213 | goto done; |
2214 | ret = PTR_ERR(device); | 2214 | ret = PTR_ERR(device); |
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index 88305c9cbff5..8b1a66c6ca8a 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c | |||
@@ -102,7 +102,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) | |||
102 | entry->dev.class = elements_class; | 102 | entry->dev.class = elements_class; |
103 | entry->dev.release = mISDN_dsp_dev_release; | 103 | entry->dev.release = mISDN_dsp_dev_release; |
104 | dev_set_drvdata(&entry->dev, elem); | 104 | dev_set_drvdata(&entry->dev, elem); |
105 | dev_set_name(&entry->dev, elem->name); | 105 | dev_set_name(&entry->dev, "%s", elem->name); |
106 | ret = device_register(&entry->dev); | 106 | ret = device_register(&entry->dev); |
107 | if (ret) { | 107 | if (ret) { |
108 | printk(KERN_ERR "%s: failed to register %s\n", | 108 | printk(KERN_ERR "%s: failed to register %s\n", |
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c index b72a59d3216a..e0634c8b7e0b 100644 --- a/drivers/media/i2c/tvaudio.c +++ b/drivers/media/i2c/tvaudio.c | |||
@@ -2020,7 +2020,8 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
2020 | /* start async thread */ | 2020 | /* start async thread */ |
2021 | chip->wt.function = chip_thread_wake; | 2021 | chip->wt.function = chip_thread_wake; |
2022 | chip->wt.data = (unsigned long)chip; | 2022 | chip->wt.data = (unsigned long)chip; |
2023 | chip->thread = kthread_run(chip_thread, chip, client->name); | 2023 | chip->thread = kthread_run(chip_thread, chip, "%s", |
2024 | client->name); | ||
2024 | if (IS_ERR(chip->thread)) { | 2025 | if (IS_ERR(chip->thread)) { |
2025 | v4l2_warn(sd, "failed to create kthread\n"); | 2026 | v4l2_warn(sd, "failed to create kthread\n"); |
2026 | chip->thread = NULL; | 2027 | chip->thread = NULL; |
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 67b61cf3e03a..004d8ace5019 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c | |||
@@ -695,7 +695,7 @@ static int cx18_create_in_workq(struct cx18 *cx) | |||
695 | { | 695 | { |
696 | snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in", | 696 | snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in", |
697 | cx->v4l2_dev.name); | 697 | cx->v4l2_dev.name); |
698 | cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0); | 698 | cx->in_work_queue = alloc_ordered_workqueue("%s", 0, cx->in_workq_name); |
699 | if (cx->in_work_queue == NULL) { | 699 | if (cx->in_work_queue == NULL) { |
700 | CX18_ERR("Unable to create incoming mailbox handler thread\n"); | 700 | CX18_ERR("Unable to create incoming mailbox handler thread\n"); |
701 | return -ENOMEM; | 701 | return -ENOMEM; |
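The cx18 hunk relies on the newer printf-style signature of the workqueue allocators, where the name format comes first and its arguments follow the flags and max_active parameters. A minimal sketch, with a hypothetical device-name argument:

    #include <linux/workqueue.h>

    /* Sketch: alloc_workqueue(fmt, flags, max_active, args...) builds the
     * workqueue name itself, so no temporary name buffer is needed and a
     * device name containing '%' is never misread as a format string. */
    static struct workqueue_struct *example_make_wq(const char *devname)
    {
            return alloc_workqueue("%s-events", WQ_MEM_RECLAIM, 1, devname);
    }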
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 07b8460953b6..b809bc868a9f 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c | |||
@@ -753,7 +753,7 @@ static int ivtv_init_struct1(struct ivtv *itv) | |||
753 | 753 | ||
754 | init_kthread_worker(&itv->irq_worker); | 754 | init_kthread_worker(&itv->irq_worker); |
755 | itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, | 755 | itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, |
756 | itv->v4l2_dev.name); | 756 | "%s", itv->v4l2_dev.name); |
757 | if (IS_ERR(itv->irq_worker_task)) { | 757 | if (IS_ERR(itv->irq_worker_task)) { |
758 | IVTV_ERR("Could not create ivtv task\n"); | 758 | IVTV_ERR("Could not create ivtv task\n"); |
759 | return -1; | 759 | return -1; |
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c index 85bc314382d3..1d3f11965196 100644 --- a/drivers/media/platform/vivi.c +++ b/drivers/media/platform/vivi.c | |||
@@ -768,7 +768,8 @@ static int vivi_start_generating(struct vivi_dev *dev) | |||
768 | 768 | ||
769 | dma_q->frame = 0; | 769 | dma_q->frame = 0; |
770 | dma_q->ini_jiffies = jiffies; | 770 | dma_q->ini_jiffies = jiffies; |
771 | dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name); | 771 | dma_q->kthread = kthread_run(vivi_thread, dev, "%s", |
772 | dev->v4l2_dev.name); | ||
772 | 773 | ||
773 | if (IS_ERR(dma_q->kthread)) { | 774 | if (IS_ERR(dma_q->kthread)) { |
774 | v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); | 775 | v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); |
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index c37d3756d8d2..aeabaa5aedf7 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c | |||
@@ -1046,20 +1046,9 @@ static struct pci_driver jmb38x_ms_driver = { | |||
1046 | .resume = jmb38x_ms_resume | 1046 | .resume = jmb38x_ms_resume |
1047 | }; | 1047 | }; |
1048 | 1048 | ||
1049 | static int __init jmb38x_ms_init(void) | 1049 | module_pci_driver(jmb38x_ms_driver); |
1050 | { | ||
1051 | return pci_register_driver(&jmb38x_ms_driver); | ||
1052 | } | ||
1053 | |||
1054 | static void __exit jmb38x_ms_exit(void) | ||
1055 | { | ||
1056 | pci_unregister_driver(&jmb38x_ms_driver); | ||
1057 | } | ||
1058 | 1050 | ||
1059 | MODULE_AUTHOR("Alex Dubov"); | 1051 | MODULE_AUTHOR("Alex Dubov"); |
1060 | MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver"); | 1052 | MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver"); |
1061 | MODULE_LICENSE("GPL"); | 1053 | MODULE_LICENSE("GPL"); |
1062 | MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl); | 1054 | MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl); |
1063 | |||
1064 | module_init(jmb38x_ms_init); | ||
1065 | module_exit(jmb38x_ms_exit); | ||
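The two memstick conversions here replace hand-written init/exit stubs with module_pci_driver(). A stripped-down sketch of the pattern, using an invented driver name:

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_driver example_pci_driver = {
            .name = "example",
            /* .id_table, .probe, .remove, ... */
    };

    /* Expands to module_init()/module_exit() functions that simply call
     * pci_register_driver()/pci_unregister_driver() on this driver. */
    module_pci_driver(example_pci_driver);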
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 9718661c1fb6..1b6e91345222 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c | |||
@@ -884,18 +884,7 @@ static struct pci_driver r852_pci_driver = { | |||
884 | .driver.pm = &r592_pm_ops, | 884 | .driver.pm = &r592_pm_ops, |
885 | }; | 885 | }; |
886 | 886 | ||
887 | static __init int r592_module_init(void) | 887 | module_pci_driver(r852_pci_driver); |
888 | { | ||
889 | return pci_register_driver(&r852_pci_driver); | ||
890 | } | ||
891 | |||
892 | static void __exit r592_module_exit(void) | ||
893 | { | ||
894 | pci_unregister_driver(&r852_pci_driver); | ||
895 | } | ||
896 | |||
897 | module_init(r592_module_init); | ||
898 | module_exit(r592_module_exit); | ||
899 | 888 | ||
900 | module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); | 889 | module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); |
901 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); | 890 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); |
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 8a5b2d8f4daf..813eaa33fa14 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c | |||
@@ -84,8 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv) | |||
84 | osm_debug("Register driver %s\n", drv->name); | 84 | osm_debug("Register driver %s\n", drv->name); |
85 | 85 | ||
86 | if (drv->event) { | 86 | if (drv->event) { |
87 | drv->event_queue = alloc_workqueue(drv->name, | 87 | drv->event_queue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, |
88 | WQ_MEM_RECLAIM, 1); | 88 | drv->name); |
89 | if (!drv->event_queue) { | 89 | if (!drv->event_queue) { |
90 | osm_err("Could not initialize event queue for driver " | 90 | osm_err("Could not initialize event queue for driver " |
91 | "%s\n", drv->name); | 91 | "%s\n", drv->name); |
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index c4acac74725c..f74fc0ca2ef9 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
@@ -876,8 +876,9 @@ int gru_set_context_option(unsigned long arg) | |||
876 | switch (req.op) { | 876 | switch (req.op) { |
877 | case sco_blade_chiplet: | 877 | case sco_blade_chiplet: |
878 | /* Select blade/chiplet for GRU context */ | 878 | /* Select blade/chiplet for GRU context */ |
879 | if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] || | 879 | if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB || |
880 | req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) { | 880 | req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || |
881 | (req.val1 >= 0 && !gru_base[req.val1])) { | ||
881 | ret = -EINVAL; | 882 | ret = -EINVAL; |
882 | } else { | 883 | } else { |
883 | gts->ts_user_blade_id = req.val1; | 884 | gts->ts_user_blade_id = req.val1; |
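The grufault.c change tightens validation of a user-supplied blade index: the range check now runs before gru_base[] is dereferenced, and the "-1 means any" sentinel is excluded from the table lookup. A simplified sketch of the same check-before-index pattern (types and names are invented):

    /* Validate a user-controlled index before using it as a subscript. */
    static int example_validate_blade(int blade_id, void **blade_table,
                                      int max_blades)
    {
            if (blade_id < -1 || blade_id >= max_blades)
                    return -EINVAL;                  /* out of range        */
            if (blade_id >= 0 && !blade_table[blade_id])
                    return -EINVAL;                  /* no such blade       */
            return 0;                                /* -1 or a valid entry */
    }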
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index c400c57c394a..048c823f5c51 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -1151,7 +1151,7 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) | |||
1151 | 1151 | ||
1152 | ret = bdi_init(bdi); | 1152 | ret = bdi_init(bdi); |
1153 | if (!ret) | 1153 | if (!ret) |
1154 | ret = bdi_register(bdi, NULL, name); | 1154 | ret = bdi_register(bdi, NULL, "%s", name); |
1155 | 1155 | ||
1156 | if (ret) | 1156 | if (ret) |
1157 | bdi_destroy(bdi); | 1157 | bdi_destroy(bdi); |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a56133585e92..0aaece9107c7 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -1005,7 +1005,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, | |||
1005 | if (err) | 1005 | if (err) |
1006 | goto out_uif; | 1006 | goto out_uif; |
1007 | 1007 | ||
1008 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); | 1008 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); |
1009 | if (IS_ERR(ubi->bgt_thread)) { | 1009 | if (IS_ERR(ubi->bgt_thread)) { |
1010 | err = PTR_ERR(ubi->bgt_thread); | 1010 | err = PTR_ERR(ubi->bgt_thread); |
1011 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, | 1011 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, |
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index f433b594388e..6d1f6ed3113f 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -208,6 +208,17 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
208 | if (nets[rnet->mport->id].active[destid]) | 208 | if (nets[rnet->mport->id].active[destid]) |
209 | rionet_queue_tx_msg(skb, ndev, | 209 | rionet_queue_tx_msg(skb, ndev, |
210 | nets[rnet->mport->id].active[destid]); | 210 | nets[rnet->mport->id].active[destid]); |
211 | else { | ||
212 | /* | ||
213 | * If the target device was removed from the list of | ||
214 | * active peers but we still have TX packets targeting | ||
215 | * it just report sending a packet to the target | ||
216 | * (without actual packet transfer). | ||
217 | */ | ||
218 | dev_kfree_skb_any(skb); | ||
219 | ndev->stats.tx_packets++; | ||
220 | ndev->stats.tx_bytes += skb->len; | ||
221 | } | ||
211 | } | 222 | } |
212 | 223 | ||
213 | spin_unlock_irqrestore(&rnet->tx_lock, flags); | 224 | spin_unlock_irqrestore(&rnet->tx_lock, flags); |
@@ -385,24 +396,28 @@ static int rionet_close(struct net_device *ndev) | |||
385 | return 0; | 396 | return 0; |
386 | } | 397 | } |
387 | 398 | ||
388 | static void rionet_remove(struct rio_dev *rdev) | 399 | static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif) |
389 | { | 400 | { |
390 | struct net_device *ndev = rio_get_drvdata(rdev); | 401 | struct rio_dev *rdev = to_rio_dev(dev); |
391 | unsigned char netid = rdev->net->hport->id; | 402 | unsigned char netid = rdev->net->hport->id; |
392 | struct rionet_peer *peer, *tmp; | 403 | struct rionet_peer *peer, *tmp; |
393 | 404 | ||
394 | unregister_netdev(ndev); | 405 | if (dev_rionet_capable(rdev)) { |
395 | 406 | list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { | |
396 | free_pages((unsigned long)nets[netid].active, get_order(sizeof(void *) * | 407 | if (peer->rdev == rdev) { |
397 | RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size))); | 408 | if (nets[netid].active[rdev->destid]) { |
398 | nets[netid].active = NULL; | 409 | nets[netid].active[rdev->destid] = NULL; |
410 | nets[netid].nact--; | ||
411 | } | ||
399 | 412 | ||
400 | list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { | 413 | list_del(&peer->node); |
401 | list_del(&peer->node); | 414 | kfree(peer); |
402 | kfree(peer); | 415 | break; |
416 | } | ||
417 | } | ||
403 | } | 418 | } |
404 | 419 | ||
405 | free_netdev(ndev); | 420 | return 0; |
406 | } | 421 | } |
407 | 422 | ||
408 | static void rionet_get_drvinfo(struct net_device *ndev, | 423 | static void rionet_get_drvinfo(struct net_device *ndev, |
@@ -503,12 +518,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) | |||
503 | 518 | ||
504 | static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1]; | 519 | static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1]; |
505 | 520 | ||
506 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | 521 | static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) |
507 | { | 522 | { |
508 | int rc = -ENODEV; | 523 | int rc = -ENODEV; |
509 | u32 lsrc_ops, ldst_ops; | 524 | u32 lsrc_ops, ldst_ops; |
510 | struct rionet_peer *peer; | 525 | struct rionet_peer *peer; |
511 | struct net_device *ndev = NULL; | 526 | struct net_device *ndev = NULL; |
527 | struct rio_dev *rdev = to_rio_dev(dev); | ||
512 | unsigned char netid = rdev->net->hport->id; | 528 | unsigned char netid = rdev->net->hport->id; |
513 | int oldnet; | 529 | int oldnet; |
514 | 530 | ||
@@ -518,8 +534,9 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
518 | oldnet = test_and_set_bit(netid, net_table); | 534 | oldnet = test_and_set_bit(netid, net_table); |
519 | 535 | ||
520 | /* | 536 | /* |
521 | * First time through, make sure local device is rionet | 537 | * If first time through this net, make sure local device is rionet |
522 | * capable, setup netdev (will be skipped on later probes) | 538 | * capable and setup netdev (this step will be skipped in later probes |
539 | * on the same net). | ||
523 | */ | 540 | */ |
524 | if (!oldnet) { | 541 | if (!oldnet) { |
525 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, | 542 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, |
@@ -541,6 +558,12 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
541 | } | 558 | } |
542 | nets[netid].ndev = ndev; | 559 | nets[netid].ndev = ndev; |
543 | rc = rionet_setup_netdev(rdev->net->hport, ndev); | 560 | rc = rionet_setup_netdev(rdev->net->hport, ndev); |
561 | if (rc) { | ||
562 | printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n", | ||
563 | DRV_NAME, rc); | ||
564 | goto out; | ||
565 | } | ||
566 | |||
544 | INIT_LIST_HEAD(&nets[netid].peers); | 567 | INIT_LIST_HEAD(&nets[netid].peers); |
545 | nets[netid].nact = 0; | 568 | nets[netid].nact = 0; |
546 | } else if (nets[netid].ndev == NULL) | 569 | } else if (nets[netid].ndev == NULL) |
@@ -559,31 +582,61 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
559 | list_add_tail(&peer->node, &nets[netid].peers); | 582 | list_add_tail(&peer->node, &nets[netid].peers); |
560 | } | 583 | } |
561 | 584 | ||
562 | rio_set_drvdata(rdev, nets[netid].ndev); | 585 | return 0; |
563 | 586 | out: | |
564 | out: | ||
565 | return rc; | 587 | return rc; |
566 | } | 588 | } |
567 | 589 | ||
590 | #ifdef MODULE | ||
568 | static struct rio_device_id rionet_id_table[] = { | 591 | static struct rio_device_id rionet_id_table[] = { |
569 | {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)} | 592 | {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}, |
593 | { 0, } /* terminate list */ | ||
570 | }; | 594 | }; |
571 | 595 | ||
572 | static struct rio_driver rionet_driver = { | 596 | MODULE_DEVICE_TABLE(rapidio, rionet_id_table); |
573 | .name = "rionet", | 597 | #endif |
574 | .id_table = rionet_id_table, | 598 | |
575 | .probe = rionet_probe, | 599 | static struct subsys_interface rionet_interface = { |
576 | .remove = rionet_remove, | 600 | .name = "rionet", |
601 | .subsys = &rio_bus_type, | ||
602 | .add_dev = rionet_add_dev, | ||
603 | .remove_dev = rionet_remove_dev, | ||
577 | }; | 604 | }; |
578 | 605 | ||
579 | static int __init rionet_init(void) | 606 | static int __init rionet_init(void) |
580 | { | 607 | { |
581 | return rio_register_driver(&rionet_driver); | 608 | return subsys_interface_register(&rionet_interface); |
582 | } | 609 | } |
583 | 610 | ||
584 | static void __exit rionet_exit(void) | 611 | static void __exit rionet_exit(void) |
585 | { | 612 | { |
586 | rio_unregister_driver(&rionet_driver); | 613 | struct rionet_private *rnet; |
614 | struct net_device *ndev; | ||
615 | struct rionet_peer *peer, *tmp; | ||
616 | int i; | ||
617 | |||
618 | for (i = 0; i < RIONET_MAX_NETS; i++) { | ||
619 | if (nets[i].ndev != NULL) { | ||
620 | ndev = nets[i].ndev; | ||
621 | rnet = netdev_priv(ndev); | ||
622 | unregister_netdev(ndev); | ||
623 | |||
624 | list_for_each_entry_safe(peer, | ||
625 | tmp, &nets[i].peers, node) { | ||
626 | list_del(&peer->node); | ||
627 | kfree(peer); | ||
628 | } | ||
629 | |||
630 | free_pages((unsigned long)nets[i].active, | ||
631 | get_order(sizeof(void *) * | ||
632 | RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size))); | ||
633 | nets[i].active = NULL; | ||
634 | |||
635 | free_netdev(ndev); | ||
636 | } | ||
637 | } | ||
638 | |||
639 | subsys_interface_unregister(&rionet_interface); | ||
587 | } | 640 | } |
588 | 641 | ||
589 | late_initcall(rionet_init); | 642 | late_initcall(rionet_init); |
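The rionet rework above stops claiming RapidIO devices through a rio_driver and instead watches the bus with a subsys_interface, whose add_dev/remove_dev callbacks fire for devices already present at registration time and for later additions and removals. A bare-bones sketch with placeholder names:

    #include <linux/device.h>
    #include <linux/rio.h>

    static int example_add_dev(struct device *dev, struct subsys_interface *sif)
    {
            /* inspect to_rio_dev(dev), build per-device state */
            return 0;
    }

    static int example_remove_dev(struct device *dev, struct subsys_interface *sif)
    {
            /* tear down per-device state */
            return 0;
    }

    static struct subsys_interface example_interface = {
            .name       = "example",
            .subsys     = &rio_bus_type,
            .add_dev    = example_add_dev,
            .remove_dev = example_remove_dev,
    };

    /* subsys_interface_register(&example_interface) in module init;
     * subsys_interface_unregister(&example_interface) on exit. */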
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 6125adb520a3..d0adbaf86186 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -1893,7 +1893,8 @@ static int airo_open(struct net_device *dev) { | |||
1893 | 1893 | ||
1894 | if (ai->wifidev != dev) { | 1894 | if (ai->wifidev != dev) { |
1895 | clear_bit(JOB_DIE, &ai->jobs); | 1895 | clear_bit(JOB_DIE, &ai->jobs); |
1896 | ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); | 1896 | ai->airo_thread_task = kthread_run(airo_thread, dev, "%s", |
1897 | dev->name); | ||
1897 | if (IS_ERR(ai->airo_thread_task)) | 1898 | if (IS_ERR(ai->airo_thread_task)) |
1898 | return (int)PTR_ERR(ai->airo_thread_task); | 1899 | return (int)PTR_ERR(ai->airo_thread_task); |
1899 | 1900 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index d96257b79a84..4ed5e45ca1e2 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -8256,7 +8256,7 @@ static int is_duplicate_packet(struct ipw_priv *priv, | |||
8256 | u8 *mac = header->addr2; | 8256 | u8 *mac = header->addr2; |
8257 | int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; | 8257 | int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; |
8258 | 8258 | ||
8259 | __list_for_each(p, &priv->ibss_mac_hash[index]) { | 8259 | list_for_each(p, &priv->ibss_mac_hash[index]) { |
8260 | entry = | 8260 | entry = |
8261 | list_entry(p, struct ipw_ibss_seq, list); | 8261 | list_entry(p, struct ipw_ibss_seq, list); |
8262 | if (!memcmp(entry->mac, mac, ETH_ALEN)) | 8262 | if (!memcmp(entry->mac, mac, ETH_ALEN)) |
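The ipw2200 change drops the __list_for_each() spelling in favour of the plain list_for_each() iterator. When the nodes are embedded in a larger structure, list_for_each_entry() is the usual shorthand; a small sketch with invented types:

    #include <linux/list.h>

    struct example_node {
            struct list_head list;
            int value;
    };

    /* Walk the list and report whether any node carries 'value'. */
    static bool example_contains(struct list_head *head, int value)
    {
            struct example_node *n;

            list_for_each_entry(n, head, list)
                    if (n->value == value)
                            return true;
            return false;
    }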
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 90dc14336980..c8b9ef0c21f8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c | |||
@@ -1321,7 +1321,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) | |||
1321 | * Initialize work. | 1321 | * Initialize work. |
1322 | */ | 1322 | */ |
1323 | rt2x00dev->workqueue = | 1323 | rt2x00dev->workqueue = |
1324 | alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0); | 1324 | alloc_ordered_workqueue("%s", 0, wiphy_name(rt2x00dev->hw->wiphy)); |
1325 | if (!rt2x00dev->workqueue) { | 1325 | if (!rt2x00dev->workqueue) { |
1326 | retval = -ENOMEM; | 1326 | retval = -ENOMEM; |
1327 | goto exit; | 1327 | goto exit; |
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index af59dd5718e1..a5f223145b0f 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
@@ -380,7 +380,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) | |||
380 | 380 | ||
381 | /* <2> work queue */ | 381 | /* <2> work queue */ |
382 | rtlpriv->works.hw = hw; | 382 | rtlpriv->works.hw = hw; |
383 | rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); | 383 | rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); |
384 | INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, | 384 | INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, |
385 | (void *)rtl_watchdog_wq_callback); | 385 | (void *)rtl_watchdog_wq_callback); |
386 | INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, | 386 | INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, |
diff --git a/drivers/parport/share.c b/drivers/parport/share.c index a848e02e6be3..6a83ee1e9178 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c | |||
@@ -282,14 +282,13 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, | |||
282 | int device; | 282 | int device; |
283 | char *name; | 283 | char *name; |
284 | 284 | ||
285 | tmp = kmalloc(sizeof(struct parport), GFP_KERNEL); | 285 | tmp = kzalloc(sizeof(struct parport), GFP_KERNEL); |
286 | if (!tmp) { | 286 | if (!tmp) { |
287 | printk(KERN_WARNING "parport: memory squeeze\n"); | 287 | printk(KERN_WARNING "parport: memory squeeze\n"); |
288 | return NULL; | 288 | return NULL; |
289 | } | 289 | } |
290 | 290 | ||
291 | /* Init our structure */ | 291 | /* Init our structure */ |
292 | memset(tmp, 0, sizeof(struct parport)); | ||
293 | tmp->base = base; | 292 | tmp->base = base; |
294 | tmp->irq = irq; | 293 | tmp->irq = irq; |
295 | tmp->dma = dma; | 294 | tmp->dma = dma; |
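The parport change collapses the kmalloc()+memset() pair into a single kzalloc(). Sketch, with an invented structure:

    #include <linux/slab.h>

    struct example_port {
            unsigned long base;
            int irq;
    };

    /* kzalloc() returns zero-filled memory, so no follow-up memset(). */
    static struct example_port *example_alloc_port(void)
    {
            return kzalloc(sizeof(struct example_port), GFP_KERNEL);
    }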
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 5127f3f41821..b2255736ac81 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -773,14 +773,12 @@ static void pcie_shutdown_notification(struct controller *ctrl) | |||
773 | static int pcie_init_slot(struct controller *ctrl) | 773 | static int pcie_init_slot(struct controller *ctrl) |
774 | { | 774 | { |
775 | struct slot *slot; | 775 | struct slot *slot; |
776 | char name[32]; | ||
777 | 776 | ||
778 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 777 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
779 | if (!slot) | 778 | if (!slot) |
780 | return -ENOMEM; | 779 | return -ENOMEM; |
781 | 780 | ||
782 | snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); | 781 | slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl)); |
783 | slot->wq = alloc_workqueue(name, 0, 0); | ||
784 | if (!slot->wq) | 782 | if (!slot->wq) |
785 | goto abort; | 783 | goto abort; |
786 | 784 | ||
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 3100c52c837c..d3f757df691c 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -128,8 +128,7 @@ static int init_slots(struct controller *ctrl) | |||
128 | slot->hpc_ops = ctrl->hpc_ops; | 128 | slot->hpc_ops = ctrl->hpc_ops; |
129 | slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); | 129 | slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); |
130 | 130 | ||
131 | snprintf(name, sizeof(name), "shpchp-%d", slot->number); | 131 | slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); |
132 | slot->wq = alloc_workqueue(name, 0, 0); | ||
133 | if (!slot->wq) { | 132 | if (!slot->wq) { |
134 | retval = -ENOMEM; | 133 | retval = -ENOMEM; |
135 | goto error_info; | 134 | goto error_info; |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index e4ac38aca580..b13344c59808 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -743,7 +743,7 @@ static int wmi_create_device(const struct guid_block *gblock, | |||
743 | wblock->dev.class = &wmi_class; | 743 | wblock->dev.class = &wmi_class; |
744 | 744 | ||
745 | wmi_gtoa(gblock->guid, guid_string); | 745 | wmi_gtoa(gblock->guid, guid_string); |
746 | dev_set_name(&wblock->dev, guid_string); | 746 | dev_set_name(&wblock->dev, "%s", guid_string); |
747 | 747 | ||
748 | dev_set_drvdata(&wblock->dev, wblock); | 748 | dev_set_drvdata(&wblock->dev, wblock); |
749 | 749 | ||
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index d3db26e46489..eae0eda9ff39 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c | |||
@@ -33,13 +33,17 @@ | |||
33 | #include <linux/pps-gpio.h> | 33 | #include <linux/pps-gpio.h> |
34 | #include <linux/gpio.h> | 34 | #include <linux/gpio.h> |
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/of_device.h> | ||
37 | #include <linux/of_gpio.h> | ||
36 | 38 | ||
37 | /* Info for each registered platform device */ | 39 | /* Info for each registered platform device */ |
38 | struct pps_gpio_device_data { | 40 | struct pps_gpio_device_data { |
39 | int irq; /* IRQ used as PPS source */ | 41 | int irq; /* IRQ used as PPS source */ |
40 | struct pps_device *pps; /* PPS source device */ | 42 | struct pps_device *pps; /* PPS source device */ |
41 | struct pps_source_info info; /* PPS source information */ | 43 | struct pps_source_info info; /* PPS source information */ |
42 | const struct pps_gpio_platform_data *pdata; | 44 | bool assert_falling_edge; |
45 | bool capture_clear; | ||
46 | unsigned int gpio_pin; | ||
43 | }; | 47 | }; |
44 | 48 | ||
45 | /* | 49 | /* |
@@ -57,46 +61,25 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data) | |||
57 | 61 | ||
58 | info = data; | 62 | info = data; |
59 | 63 | ||
60 | rising_edge = gpio_get_value(info->pdata->gpio_pin); | 64 | rising_edge = gpio_get_value(info->gpio_pin); |
61 | if ((rising_edge && !info->pdata->assert_falling_edge) || | 65 | if ((rising_edge && !info->assert_falling_edge) || |
62 | (!rising_edge && info->pdata->assert_falling_edge)) | 66 | (!rising_edge && info->assert_falling_edge)) |
63 | pps_event(info->pps, &ts, PPS_CAPTUREASSERT, NULL); | 67 | pps_event(info->pps, &ts, PPS_CAPTUREASSERT, NULL); |
64 | else if (info->pdata->capture_clear && | 68 | else if (info->capture_clear && |
65 | ((rising_edge && info->pdata->assert_falling_edge) || | 69 | ((rising_edge && info->assert_falling_edge) || |
66 | (!rising_edge && !info->pdata->assert_falling_edge))) | 70 | (!rising_edge && !info->assert_falling_edge))) |
67 | pps_event(info->pps, &ts, PPS_CAPTURECLEAR, NULL); | 71 | pps_event(info->pps, &ts, PPS_CAPTURECLEAR, NULL); |
68 | 72 | ||
69 | return IRQ_HANDLED; | 73 | return IRQ_HANDLED; |
70 | } | 74 | } |
71 | 75 | ||
72 | static int pps_gpio_setup(struct platform_device *pdev) | ||
73 | { | ||
74 | int ret; | ||
75 | const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data; | ||
76 | |||
77 | ret = gpio_request(pdata->gpio_pin, pdata->gpio_label); | ||
78 | if (ret) { | ||
79 | pr_warning("failed to request GPIO %u\n", pdata->gpio_pin); | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
83 | ret = gpio_direction_input(pdata->gpio_pin); | ||
84 | if (ret) { | ||
85 | pr_warning("failed to set pin direction\n"); | ||
86 | gpio_free(pdata->gpio_pin); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static unsigned long | 76 | static unsigned long |
94 | get_irqf_trigger_flags(const struct pps_gpio_platform_data *pdata) | 77 | get_irqf_trigger_flags(const struct pps_gpio_device_data *data) |
95 | { | 78 | { |
96 | unsigned long flags = pdata->assert_falling_edge ? | 79 | unsigned long flags = data->assert_falling_edge ? |
97 | IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; | 80 | IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; |
98 | 81 | ||
99 | if (pdata->capture_clear) { | 82 | if (data->capture_clear) { |
100 | flags |= ((flags & IRQF_TRIGGER_RISING) ? | 83 | flags |= ((flags & IRQF_TRIGGER_RISING) ? |
101 | IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING); | 84 | IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING); |
102 | } | 85 | } |
@@ -107,38 +90,63 @@ get_irqf_trigger_flags(const struct pps_gpio_platform_data *pdata) | |||
107 | static int pps_gpio_probe(struct platform_device *pdev) | 90 | static int pps_gpio_probe(struct platform_device *pdev) |
108 | { | 91 | { |
109 | struct pps_gpio_device_data *data; | 92 | struct pps_gpio_device_data *data; |
110 | int irq; | 93 | const char *gpio_label; |
111 | int ret; | 94 | int ret; |
112 | int err; | ||
113 | int pps_default_params; | 95 | int pps_default_params; |
114 | const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data; | 96 | const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data; |
97 | struct device_node *np = pdev->dev.of_node; | ||
115 | 98 | ||
99 | /* allocate space for device info */ | ||
100 | data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data), | ||
101 | GFP_KERNEL); | ||
102 | if (!data) | ||
103 | return -ENOMEM; | ||
104 | |||
105 | if (pdata) { | ||
106 | data->gpio_pin = pdata->gpio_pin; | ||
107 | gpio_label = pdata->gpio_label; | ||
108 | |||
109 | data->assert_falling_edge = pdata->assert_falling_edge; | ||
110 | data->capture_clear = pdata->capture_clear; | ||
111 | } else { | ||
112 | ret = of_get_gpio(np, 0); | ||
113 | if (ret < 0) { | ||
114 | dev_err(&pdev->dev, "failed to get GPIO from device tree\n"); | ||
115 | return ret; | ||
116 | } | ||
117 | data->gpio_pin = ret; | ||
118 | gpio_label = PPS_GPIO_NAME; | ||
119 | |||
120 | if (of_get_property(np, "assert-falling-edge", NULL)) | ||
121 | data->assert_falling_edge = true; | ||
122 | } | ||
116 | 123 | ||
117 | /* GPIO setup */ | 124 | /* GPIO setup */ |
118 | ret = pps_gpio_setup(pdev); | 125 | ret = devm_gpio_request(&pdev->dev, data->gpio_pin, gpio_label); |
119 | if (ret) | 126 | if (ret) { |
120 | return -EINVAL; | 127 | dev_err(&pdev->dev, "failed to request GPIO %u\n", |
128 | data->gpio_pin); | ||
129 | return ret; | ||
130 | } | ||
121 | 131 | ||
122 | /* IRQ setup */ | 132 | ret = gpio_direction_input(data->gpio_pin); |
123 | irq = gpio_to_irq(pdata->gpio_pin); | 133 | if (ret) { |
124 | if (irq < 0) { | 134 | dev_err(&pdev->dev, "failed to set pin direction\n"); |
125 | pr_err("failed to map GPIO to IRQ: %d\n", irq); | 135 | return -EINVAL; |
126 | err = -EINVAL; | ||
127 | goto return_error; | ||
128 | } | 136 | } |
129 | 137 | ||
130 | /* allocate space for device info */ | 138 | /* IRQ setup */ |
131 | data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data), | 139 | ret = gpio_to_irq(data->gpio_pin); |
132 | GFP_KERNEL); | 140 | if (ret < 0) { |
133 | if (data == NULL) { | 141 | dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret); |
134 | err = -ENOMEM; | 142 | return -EINVAL; |
135 | goto return_error; | ||
136 | } | 143 | } |
144 | data->irq = ret; | ||
137 | 145 | ||
138 | /* initialize PPS specific parts of the bookkeeping data structure. */ | 146 | /* initialize PPS specific parts of the bookkeeping data structure. */ |
139 | data->info.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | | 147 | data->info.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | |
140 | PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC; | 148 | PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC; |
141 | if (pdata->capture_clear) | 149 | if (data->capture_clear) |
142 | data->info.mode |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR | | 150 | data->info.mode |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR | |
143 | PPS_ECHOCLEAR; | 151 | PPS_ECHOCLEAR; |
144 | data->info.owner = THIS_MODULE; | 152 | data->info.owner = THIS_MODULE; |
@@ -147,77 +155,58 @@ static int pps_gpio_probe(struct platform_device *pdev) | |||
147 | 155 | ||
148 | /* register PPS source */ | 156 | /* register PPS source */ |
149 | pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT; | 157 | pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT; |
150 | if (pdata->capture_clear) | 158 | if (data->capture_clear) |
151 | pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; | 159 | pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; |
152 | data->pps = pps_register_source(&data->info, pps_default_params); | 160 | data->pps = pps_register_source(&data->info, pps_default_params); |
153 | if (data->pps == NULL) { | 161 | if (data->pps == NULL) { |
154 | pr_err("failed to register IRQ %d as PPS source\n", irq); | 162 | dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n", |
155 | err = -EINVAL; | 163 | data->irq); |
156 | goto return_error; | 164 | return -EINVAL; |
157 | } | 165 | } |
158 | 166 | ||
159 | data->irq = irq; | ||
160 | data->pdata = pdata; | ||
161 | |||
162 | /* register IRQ interrupt handler */ | 167 | /* register IRQ interrupt handler */ |
163 | ret = request_irq(irq, pps_gpio_irq_handler, | 168 | ret = devm_request_irq(&pdev->dev, data->irq, pps_gpio_irq_handler, |
164 | get_irqf_trigger_flags(pdata), data->info.name, data); | 169 | get_irqf_trigger_flags(data), data->info.name, data); |
165 | if (ret) { | 170 | if (ret) { |
166 | pps_unregister_source(data->pps); | 171 | pps_unregister_source(data->pps); |
167 | pr_err("failed to acquire IRQ %d\n", irq); | 172 | dev_err(&pdev->dev, "failed to acquire IRQ %d\n", data->irq); |
168 | err = -EINVAL; | 173 | return -EINVAL; |
169 | goto return_error; | ||
170 | } | 174 | } |
171 | 175 | ||
172 | platform_set_drvdata(pdev, data); | 176 | platform_set_drvdata(pdev, data); |
173 | dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n", irq); | 177 | dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n", |
178 | data->irq); | ||
174 | 179 | ||
175 | return 0; | 180 | return 0; |
176 | |||
177 | return_error: | ||
178 | gpio_free(pdata->gpio_pin); | ||
179 | return err; | ||
180 | } | 181 | } |
181 | 182 | ||
182 | static int pps_gpio_remove(struct platform_device *pdev) | 183 | static int pps_gpio_remove(struct platform_device *pdev) |
183 | { | 184 | { |
184 | struct pps_gpio_device_data *data = platform_get_drvdata(pdev); | 185 | struct pps_gpio_device_data *data = platform_get_drvdata(pdev); |
185 | const struct pps_gpio_platform_data *pdata = data->pdata; | ||
186 | 186 | ||
187 | platform_set_drvdata(pdev, NULL); | 187 | platform_set_drvdata(pdev, NULL); |
188 | free_irq(data->irq, data); | ||
189 | gpio_free(pdata->gpio_pin); | ||
190 | pps_unregister_source(data->pps); | 188 | pps_unregister_source(data->pps); |
191 | pr_info("removed IRQ %d as PPS source\n", data->irq); | 189 | dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq); |
192 | return 0; | 190 | return 0; |
193 | } | 191 | } |
194 | 192 | ||
193 | static const struct of_device_id pps_gpio_dt_ids[] = { | ||
194 | { .compatible = "pps-gpio", }, | ||
195 | { /* sentinel */ } | ||
196 | }; | ||
197 | MODULE_DEVICE_TABLE(of, pps_gpio_dt_ids); | ||
198 | |||
195 | static struct platform_driver pps_gpio_driver = { | 199 | static struct platform_driver pps_gpio_driver = { |
196 | .probe = pps_gpio_probe, | 200 | .probe = pps_gpio_probe, |
197 | .remove = pps_gpio_remove, | 201 | .remove = pps_gpio_remove, |
198 | .driver = { | 202 | .driver = { |
199 | .name = PPS_GPIO_NAME, | 203 | .name = PPS_GPIO_NAME, |
200 | .owner = THIS_MODULE | 204 | .owner = THIS_MODULE, |
205 | .of_match_table = of_match_ptr(pps_gpio_dt_ids), | ||
201 | }, | 206 | }, |
202 | }; | 207 | }; |
203 | 208 | ||
204 | static int __init pps_gpio_init(void) | 209 | module_platform_driver(pps_gpio_driver); |
205 | { | ||
206 | int ret = platform_driver_register(&pps_gpio_driver); | ||
207 | if (ret < 0) | ||
208 | pr_err("failed to register platform driver\n"); | ||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | static void __exit pps_gpio_exit(void) | ||
213 | { | ||
214 | platform_driver_unregister(&pps_gpio_driver); | ||
215 | pr_debug("unregistered platform driver\n"); | ||
216 | } | ||
217 | |||
218 | module_init(pps_gpio_init); | ||
219 | module_exit(pps_gpio_exit); | ||
220 | |||
221 | MODULE_AUTHOR("Ricardo Martins <rasm@fe.up.pt>"); | 210 | MODULE_AUTHOR("Ricardo Martins <rasm@fe.up.pt>"); |
222 | MODULE_AUTHOR("James Nuss <jamesnuss@nanometrics.ca>"); | 211 | MODULE_AUTHOR("James Nuss <jamesnuss@nanometrics.ca>"); |
223 | MODULE_DESCRIPTION("Use GPIO pin as PPS source"); | 212 | MODULE_DESCRIPTION("Use GPIO pin as PPS source"); |
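The pps-gpio rework above moves to managed (devm_*) resources and module_platform_driver(). A rough sketch of that style with placeholder names and a hard-coded GPIO number (a real driver would take it from platform data or the device tree): resources tied to the device with devm_* are released automatically when probe fails or the device goes away, so the error paths and remove() need no manual free_irq()/gpio_free().

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/gpio.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_irq(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
            unsigned int gpio = 42;    /* placeholder; from pdata or DT */
            int irq, ret;

            ret = devm_gpio_request(&pdev->dev, gpio, "example");
            if (ret)
                    return ret;

            ret = gpio_direction_input(gpio);
            if (ret)
                    return ret;

            irq = gpio_to_irq(gpio);
            if (irq < 0)
                    return irq;

            return devm_request_irq(&pdev->dev, irq, example_irq,
                                    IRQF_TRIGGER_RISING, "example", pdev);
    }

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .driver = {
                    .name  = "example",
                    .owner = THIS_MODULE,
            },
    };

    module_platform_driver(example_driver);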
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 5ab056494bbe..3e3be57e9a1a 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig | |||
@@ -67,4 +67,9 @@ config RAPIDIO_ENUM_BASIC | |||
67 | 67 | ||
68 | endchoice | 68 | endchoice |
69 | 69 | ||
70 | menu "RapidIO Switch drivers" | ||
71 | depends on RAPIDIO | ||
72 | |||
70 | source "drivers/rapidio/switches/Kconfig" | 73 | source "drivers/rapidio/switches/Kconfig" |
74 | |||
75 | endmenu | ||
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index 3036702ffe8b..6271ada6993f 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile | |||
@@ -1,7 +1,9 @@ | |||
1 | # | 1 | # |
2 | # Makefile for RapidIO interconnect services | 2 | # Makefile for RapidIO interconnect services |
3 | # | 3 | # |
4 | obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o | 4 | obj-$(CONFIG_RAPIDIO) += rapidio.o |
5 | rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o | ||
6 | |||
5 | obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o | 7 | obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o |
6 | 8 | ||
7 | obj-$(CONFIG_RAPIDIO) += switches/ | 9 | obj-$(CONFIG_RAPIDIO) += switches/ |
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig index 12a9d7f7040b..c4cb0877592b 100644 --- a/drivers/rapidio/devices/Kconfig +++ b/drivers/rapidio/devices/Kconfig | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config RAPIDIO_TSI721 | 5 | config RAPIDIO_TSI721 |
6 | bool "IDT Tsi721 PCI Express SRIO Controller support" | 6 | tristate "IDT Tsi721 PCI Express SRIO Controller support" |
7 | depends on RAPIDIO && PCIEPORTBUS | 7 | depends on RAPIDIO && PCIEPORTBUS |
8 | default "n" | 8 | default "n" |
9 | ---help--- | 9 | ---help--- |
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile index 7b62860f34f8..9432c494cf57 100644 --- a/drivers/rapidio/devices/Makefile +++ b/drivers/rapidio/devices/Makefile | |||
@@ -2,7 +2,6 @@ | |||
2 | # Makefile for RapidIO devices | 2 | # Makefile for RapidIO devices |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o | 5 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o |
6 | ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y) | 6 | tsi721_mport-y := tsi721.o |
7 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o | 7 | tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o |
8 | endif | ||
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index a8b2c23a7ef4..ff7cbf2d28e3 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -2515,9 +2515,8 @@ static int __init tsi721_init(void) | |||
2515 | return pci_register_driver(&tsi721_driver); | 2515 | return pci_register_driver(&tsi721_driver); |
2516 | } | 2516 | } |
2517 | 2517 | ||
2518 | static void __exit tsi721_exit(void) | ||
2519 | { | ||
2520 | pci_unregister_driver(&tsi721_driver); | ||
2521 | } | ||
2522 | |||
2523 | device_initcall(tsi721_init); | 2518 | device_initcall(tsi721_init); |
2519 | |||
2520 | MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver"); | ||
2521 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
2522 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index a0c875563d76..3e9b6a78ad18 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
@@ -199,6 +199,23 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv) | |||
199 | out:return 0; | 199 | out:return 0; |
200 | } | 200 | } |
201 | 201 | ||
202 | static int rio_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
203 | { | ||
204 | struct rio_dev *rdev; | ||
205 | |||
206 | if (!dev) | ||
207 | return -ENODEV; | ||
208 | |||
209 | rdev = to_rio_dev(dev); | ||
210 | if (!rdev) | ||
211 | return -ENODEV; | ||
212 | |||
213 | if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X", | ||
214 | rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did)) | ||
215 | return -ENOMEM; | ||
216 | return 0; | ||
217 | } | ||
218 | |||
202 | struct device rio_bus = { | 219 | struct device rio_bus = { |
203 | .init_name = "rapidio", | 220 | .init_name = "rapidio", |
204 | }; | 221 | }; |
@@ -210,6 +227,7 @@ struct bus_type rio_bus_type = { | |||
210 | .bus_attrs = rio_bus_attrs, | 227 | .bus_attrs = rio_bus_attrs, |
211 | .probe = rio_device_probe, | 228 | .probe = rio_device_probe, |
212 | .remove = rio_device_remove, | 229 | .remove = rio_device_remove, |
230 | .uevent = rio_uevent, | ||
213 | }; | 231 | }; |
214 | 232 | ||
215 | /** | 233 | /** |
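The new rio_uevent() hook above, together with the modalias attribute added in rio-sysfs.c further down, lets udev autoload RapidIO drivers that export a matching alias. A minimal sketch of a bus uevent callback (bus prefix and IDs are made up):

    #include <linux/device.h>

    static int example_bus_uevent(struct device *dev,
                                  struct kobj_uevent_env *env)
    {
            u16 vid = 0x0038, did = 0x0001;  /* normally read from the device */

            /* MODALIAS is matched against aliases exported by
             * MODULE_DEVICE_TABLE() entries in candidate modules. */
            if (add_uevent_var(env, "MODALIAS=examplebus:v%04Xd%04X", vid, did))
                    return -ENOMEM;
            return 0;
    }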
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 4c15dbf81087..d3a6539a77cc 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
@@ -406,6 +406,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
406 | rio_mport_write_config_32(port, destid, hopcount, | 406 | rio_mport_write_config_32(port, destid, hopcount, |
407 | RIO_COMPONENT_TAG_CSR, next_comptag); | 407 | RIO_COMPONENT_TAG_CSR, next_comptag); |
408 | rdev->comp_tag = next_comptag++; | 408 | rdev->comp_tag = next_comptag++; |
409 | rdev->do_enum = true; | ||
409 | } else { | 410 | } else { |
410 | rio_mport_read_config_32(port, destid, hopcount, | 411 | rio_mport_read_config_32(port, destid, hopcount, |
411 | RIO_COMPONENT_TAG_CSR, | 412 | RIO_COMPONENT_TAG_CSR, |
@@ -432,8 +433,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
432 | /* If a PE has both switch and other functions, show it as a switch */ | 433 | /* If a PE has both switch and other functions, show it as a switch */ |
433 | if (rio_is_switch(rdev)) { | 434 | if (rio_is_switch(rdev)) { |
434 | rswitch = rdev->rswitch; | 435 | rswitch = rdev->rswitch; |
435 | rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID; | ||
436 | rswitch->port_ok = 0; | 436 | rswitch->port_ok = 0; |
437 | spin_lock_init(&rswitch->lock); | ||
437 | rswitch->route_table = kzalloc(sizeof(u8)* | 438 | rswitch->route_table = kzalloc(sizeof(u8)* |
438 | RIO_MAX_ROUTE_ENTRIES(port->sys_size), | 439 | RIO_MAX_ROUTE_ENTRIES(port->sys_size), |
439 | GFP_KERNEL); | 440 | GFP_KERNEL); |
@@ -444,12 +445,10 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
444 | rdid++) | 445 | rdid++) |
445 | rswitch->route_table[rdid] = RIO_INVALID_ROUTE; | 446 | rswitch->route_table[rdid] = RIO_INVALID_ROUTE; |
446 | dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, | 447 | dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, |
447 | rswitch->switchid); | 448 | rdev->comp_tag & RIO_CTAG_UDEVID); |
448 | rio_switch_init(rdev, do_enum); | ||
449 | 449 | ||
450 | if (do_enum && rswitch->clr_table) | 450 | if (do_enum) |
451 | rswitch->clr_table(port, destid, hopcount, | 451 | rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); |
452 | RIO_GLOBAL_TABLE); | ||
453 | 452 | ||
454 | list_add_tail(&rswitch->node, &net->switches); | 453 | list_add_tail(&rswitch->node, &net->switches); |
455 | 454 | ||
@@ -459,7 +458,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
459 | rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); | 458 | rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); |
460 | 459 | ||
461 | dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, | 460 | dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, |
462 | rdev->destid); | 461 | rdev->comp_tag & RIO_CTAG_UDEVID); |
463 | } | 462 | } |
464 | 463 | ||
465 | rio_attach_device(rdev); | 464 | rio_attach_device(rdev); |
@@ -533,156 +532,6 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) | |||
533 | } | 532 | } |
534 | 533 | ||
535 | /** | 534 | /** |
536 | * rio_lock_device - Acquires host device lock for specified device | ||
537 | * @port: Master port to send transaction | ||
538 | * @destid: Destination ID for device/switch | ||
539 | * @hopcount: Hopcount to reach switch | ||
540 | * @wait_ms: Max wait time in msec (0 = no timeout) | ||
541 | * | ||
542 | * Attepts to acquire host device lock for specified device | ||
543 | * Returns 0 if device lock acquired or EINVAL if timeout expires. | ||
544 | */ | ||
545 | static int | ||
546 | rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms) | ||
547 | { | ||
548 | u32 result; | ||
549 | int tcnt = 0; | ||
550 | |||
551 | /* Attempt to acquire device lock */ | ||
552 | rio_mport_write_config_32(port, destid, hopcount, | ||
553 | RIO_HOST_DID_LOCK_CSR, port->host_deviceid); | ||
554 | rio_mport_read_config_32(port, destid, hopcount, | ||
555 | RIO_HOST_DID_LOCK_CSR, &result); | ||
556 | |||
557 | while (result != port->host_deviceid) { | ||
558 | if (wait_ms != 0 && tcnt == wait_ms) { | ||
559 | pr_debug("RIO: timeout when locking device %x:%x\n", | ||
560 | destid, hopcount); | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | |||
564 | /* Delay a bit */ | ||
565 | mdelay(1); | ||
566 | tcnt++; | ||
567 | /* Try to acquire device lock again */ | ||
568 | rio_mport_write_config_32(port, destid, | ||
569 | hopcount, | ||
570 | RIO_HOST_DID_LOCK_CSR, | ||
571 | port->host_deviceid); | ||
572 | rio_mport_read_config_32(port, destid, | ||
573 | hopcount, | ||
574 | RIO_HOST_DID_LOCK_CSR, &result); | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * rio_unlock_device - Releases host device lock for specified device | ||
582 | * @port: Master port to send transaction | ||
583 | * @destid: Destination ID for device/switch | ||
584 | * @hopcount: Hopcount to reach switch | ||
585 | * | ||
586 | * Returns 0 if device lock released or EINVAL if fails. | ||
587 | */ | ||
588 | static int | ||
589 | rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) | ||
590 | { | ||
591 | u32 result; | ||
592 | |||
593 | /* Release device lock */ | ||
594 | rio_mport_write_config_32(port, destid, | ||
595 | hopcount, | ||
596 | RIO_HOST_DID_LOCK_CSR, | ||
597 | port->host_deviceid); | ||
598 | rio_mport_read_config_32(port, destid, hopcount, | ||
599 | RIO_HOST_DID_LOCK_CSR, &result); | ||
600 | if ((result & 0xffff) != 0xffff) { | ||
601 | pr_debug("RIO: badness when releasing device lock %x:%x\n", | ||
602 | destid, hopcount); | ||
603 | return -EINVAL; | ||
604 | } | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | |||
609 | /** | ||
610 | * rio_route_add_entry- Add a route entry to a switch routing table | ||
611 | * @rdev: RIO device | ||
612 | * @table: Routing table ID | ||
613 | * @route_destid: Destination ID to be routed | ||
614 | * @route_port: Port number to be routed | ||
615 | * @lock: lock switch device flag | ||
616 | * | ||
617 | * Calls the switch specific add_entry() method to add a route entry | ||
618 | * on a switch. The route table can be specified using the @table | ||
619 | * argument if a switch has per port routing tables or the normal | ||
620 | * use is to specific all tables (or the global table) by passing | ||
621 | * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL | ||
622 | * on failure. | ||
623 | */ | ||
624 | static int | ||
625 | rio_route_add_entry(struct rio_dev *rdev, | ||
626 | u16 table, u16 route_destid, u8 route_port, int lock) | ||
627 | { | ||
628 | int rc; | ||
629 | |||
630 | if (lock) { | ||
631 | rc = rio_lock_device(rdev->net->hport, rdev->destid, | ||
632 | rdev->hopcount, 1000); | ||
633 | if (rc) | ||
634 | return rc; | ||
635 | } | ||
636 | |||
637 | rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid, | ||
638 | rdev->hopcount, table, | ||
639 | route_destid, route_port); | ||
640 | if (lock) | ||
641 | rio_unlock_device(rdev->net->hport, rdev->destid, | ||
642 | rdev->hopcount); | ||
643 | |||
644 | return rc; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * rio_route_get_entry- Read a route entry in a switch routing table | ||
649 | * @rdev: RIO device | ||
650 | * @table: Routing table ID | ||
651 | * @route_destid: Destination ID to be routed | ||
652 | * @route_port: Pointer to read port number into | ||
653 | * @lock: lock switch device flag | ||
654 | * | ||
655 | * Calls the switch specific get_entry() method to read a route entry | ||
656 | * in a switch. The route table can be specified using the @table | ||
657 | * argument if a switch has per port routing tables or the normal | ||
658 | * use is to specific all tables (or the global table) by passing | ||
659 | * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL | ||
660 | * on failure. | ||
661 | */ | ||
662 | static int | ||
663 | rio_route_get_entry(struct rio_dev *rdev, u16 table, | ||
664 | u16 route_destid, u8 *route_port, int lock) | ||
665 | { | ||
666 | int rc; | ||
667 | |||
668 | if (lock) { | ||
669 | rc = rio_lock_device(rdev->net->hport, rdev->destid, | ||
670 | rdev->hopcount, 1000); | ||
671 | if (rc) | ||
672 | return rc; | ||
673 | } | ||
674 | |||
675 | rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid, | ||
676 | rdev->hopcount, table, | ||
677 | route_destid, route_port); | ||
678 | if (lock) | ||
679 | rio_unlock_device(rdev->net->hport, rdev->destid, | ||
680 | rdev->hopcount); | ||
681 | |||
682 | return rc; | ||
683 | } | ||
684 | |||
685 | /** | ||
686 | * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device | 535 | * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device |
687 | * @port: Master port to send transaction | 536 | * @port: Master port to send transaction |
688 | * @hopcount: Number of hops to the device | 537 | * @hopcount: Number of hops to the device |
@@ -1094,12 +943,9 @@ static void rio_update_route_tables(struct rio_net *net) | |||
1094 | 943 | ||
1095 | sport = RIO_GET_PORT_NUM(swrdev->swpinfo); | 944 | sport = RIO_GET_PORT_NUM(swrdev->swpinfo); |
1096 | 945 | ||
1097 | if (rswitch->add_entry) { | 946 | rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE, |
1098 | rio_route_add_entry(swrdev, | 947 | destid, sport, 0); |
1099 | RIO_GLOBAL_TABLE, destid, | 948 | rswitch->route_table[destid] = sport; |
1100 | sport, 0); | ||
1101 | rswitch->route_table[destid] = sport; | ||
1102 | } | ||
1103 | } | 949 | } |
1104 | } | 950 | } |
1105 | } | 951 | } |
@@ -1115,8 +961,8 @@ static void rio_update_route_tables(struct rio_net *net) | |||
1115 | static void rio_init_em(struct rio_dev *rdev) | 961 | static void rio_init_em(struct rio_dev *rdev) |
1116 | { | 962 | { |
1117 | if (rio_is_switch(rdev) && (rdev->em_efptr) && | 963 | if (rio_is_switch(rdev) && (rdev->em_efptr) && |
1118 | (rdev->rswitch->em_init)) { | 964 | rdev->rswitch->ops && rdev->rswitch->ops->em_init) { |
1119 | rdev->rswitch->em_init(rdev); | 965 | rdev->rswitch->ops->em_init(rdev); |
1120 | } | 966 | } |
1121 | } | 967 | } |
1122 | 968 | ||
@@ -1141,7 +987,7 @@ static void rio_pw_enable(struct rio_mport *port, int enable) | |||
1141 | * link, then start recursive peer enumeration. Returns %0 if | 987 | * link, then start recursive peer enumeration. Returns %0 if |
1142 | * enumeration succeeds or %-EBUSY if enumeration fails. | 988 | * enumeration succeeds or %-EBUSY if enumeration fails. |
1143 | */ | 989 | */ |
1144 | int rio_enum_mport(struct rio_mport *mport, u32 flags) | 990 | static int rio_enum_mport(struct rio_mport *mport, u32 flags) |
1145 | { | 991 | { |
1146 | struct rio_net *net = NULL; | 992 | struct rio_net *net = NULL; |
1147 | int rc = 0; | 993 | int rc = 0; |
@@ -1256,7 +1102,7 @@ static void rio_build_route_tables(struct rio_net *net) | |||
1256 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY | 1102 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY |
1257 | * on failure. | 1103 | * on failure. |
1258 | */ | 1104 | */ |
1259 | int rio_disc_mport(struct rio_mport *mport, u32 flags) | 1105 | static int rio_disc_mport(struct rio_mport *mport, u32 flags) |
1260 | { | 1106 | { |
1261 | struct rio_net *net = NULL; | 1107 | struct rio_net *net = NULL; |
1262 | unsigned long to_end; | 1108 | unsigned long to_end; |
@@ -1315,6 +1161,7 @@ bail: | |||
1315 | } | 1161 | } |
1316 | 1162 | ||
1317 | static struct rio_scan rio_scan_ops = { | 1163 | static struct rio_scan rio_scan_ops = { |
1164 | .owner = THIS_MODULE, | ||
1318 | .enumerate = rio_enum_mport, | 1165 | .enumerate = rio_enum_mport, |
1319 | .discover = rio_disc_mport, | 1166 | .discover = rio_disc_mport, |
1320 | }; | 1167 | }; |
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 66d4acd5e18f..9331be646dc3 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c | |||
@@ -84,6 +84,15 @@ static ssize_t lnext_show(struct device *dev, | |||
84 | return str - buf; | 84 | return str - buf; |
85 | } | 85 | } |
86 | 86 | ||
87 | static ssize_t modalias_show(struct device *dev, | ||
88 | struct device_attribute *attr, char *buf) | ||
89 | { | ||
90 | struct rio_dev *rdev = to_rio_dev(dev); | ||
91 | |||
92 | return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n", | ||
93 | rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did); | ||
94 | } | ||
95 | |||
87 | struct device_attribute rio_dev_attrs[] = { | 96 | struct device_attribute rio_dev_attrs[] = { |
88 | __ATTR_RO(did), | 97 | __ATTR_RO(did), |
89 | __ATTR_RO(vid), | 98 | __ATTR_RO(vid), |
@@ -93,6 +102,7 @@ struct device_attribute rio_dev_attrs[] = { | |||
93 | __ATTR_RO(asm_rev), | 102 | __ATTR_RO(asm_rev), |
94 | __ATTR_RO(lprev), | 103 | __ATTR_RO(lprev), |
95 | __ATTR_RO(destid), | 104 | __ATTR_RO(destid), |
105 | __ATTR_RO(modalias), | ||
96 | __ATTR_NULL, | 106 | __ATTR_NULL, |
97 | }; | 107 | }; |
98 | 108 | ||
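The new modalias attribute exposes a uevent-style match string built from the device and assembly IDs so that udev/modprobe can autoload a matching driver. As an illustration only (the ID values below are invented), a switch with vid=0x0038, did=0x0375 and matching assembly IDs would read back as:

	rapidio:v0038d0375av0038ad0375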
@@ -257,8 +267,6 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev) | |||
257 | err |= device_create_file(&rdev->dev, &dev_attr_routes); | 267 | err |= device_create_file(&rdev->dev, &dev_attr_routes); |
258 | err |= device_create_file(&rdev->dev, &dev_attr_lnext); | 268 | err |= device_create_file(&rdev->dev, &dev_attr_lnext); |
259 | err |= device_create_file(&rdev->dev, &dev_attr_hopcount); | 269 | err |= device_create_file(&rdev->dev, &dev_attr_hopcount); |
260 | if (!err && rdev->rswitch->sw_sysfs) | ||
261 | err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE); | ||
262 | } | 270 | } |
263 | 271 | ||
264 | if (err) | 272 | if (err) |
@@ -281,8 +289,6 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) | |||
281 | device_remove_file(&rdev->dev, &dev_attr_routes); | 289 | device_remove_file(&rdev->dev, &dev_attr_routes); |
282 | device_remove_file(&rdev->dev, &dev_attr_lnext); | 290 | device_remove_file(&rdev->dev, &dev_attr_lnext); |
283 | device_remove_file(&rdev->dev, &dev_attr_hopcount); | 291 | device_remove_file(&rdev->dev, &dev_attr_hopcount); |
284 | if (rdev->rswitch->sw_sysfs) | ||
285 | rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); | ||
286 | } | 292 | } |
287 | } | 293 | } |
288 | 294 | ||
@@ -290,7 +296,6 @@ static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, | |||
290 | size_t count) | 296 | size_t count) |
291 | { | 297 | { |
292 | long val; | 298 | long val; |
293 | struct rio_mport *port = NULL; | ||
294 | int rc; | 299 | int rc; |
295 | 300 | ||
296 | if (kstrtol(buf, 0, &val) < 0) | 301 | if (kstrtol(buf, 0, &val) < 0) |
@@ -304,21 +309,7 @@ static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, | |||
304 | if (val < 0 || val >= RIO_MAX_MPORTS) | 309 | if (val < 0 || val >= RIO_MAX_MPORTS) |
305 | return -EINVAL; | 310 | return -EINVAL; |
306 | 311 | ||
307 | port = rio_find_mport((int)val); | 312 | rc = rio_mport_scan((int)val); |
308 | |||
309 | if (!port) { | ||
310 | pr_debug("RIO: %s: mport_%d not available\n", | ||
311 | __func__, (int)val); | ||
312 | return -EINVAL; | ||
313 | } | ||
314 | |||
315 | if (!port->nscan) | ||
316 | return -EINVAL; | ||
317 | |||
318 | if (port->host_deviceid >= 0) | ||
319 | rc = port->nscan->enumerate(port, 0); | ||
320 | else | ||
321 | rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); | ||
322 | exit: | 313 | exit: |
323 | if (!rc) | 314 | if (!rc) |
324 | rc = count; | 315 | rc = count; |
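The store handler is now a thin wrapper: it validates the mport number and defers to rio_mport_scan() (added in rio.c below), which performs the mport lookup, pins the enumerator module and dispatches to enumerate() or discover(). Assuming the bus attribute keeps its existing "scan" name, triggering enumeration of mport 0 from userspace is still just "echo 0 > /sys/bus/rapidio/scan".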
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index cb1c08996fbb..f4f30af2df68 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
@@ -5,9 +5,8 @@ | |||
5 | * Copyright 2005 MontaVista Software, Inc. | 5 | * Copyright 2005 MontaVista Software, Inc. |
6 | * Matt Porter <mporter@kernel.crashing.org> | 6 | * Matt Porter <mporter@kernel.crashing.org> |
7 | * | 7 | * |
8 | * Copyright 2009 Integrated Device Technology, Inc. | 8 | * Copyright 2009 - 2013 Integrated Device Technology, Inc. |
9 | * Alex Bounine <alexandre.bounine@idt.com> | 9 | * Alex Bounine <alexandre.bounine@idt.com> |
10 | * - Added Port-Write/Error Management initialization and handling | ||
11 | * | 10 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the | 12 | * under the terms of the GNU General Public License as published by the |
@@ -31,10 +30,22 @@ | |||
31 | 30 | ||
32 | #include "rio.h" | 31 | #include "rio.h" |
33 | 32 | ||
33 | MODULE_DESCRIPTION("RapidIO Subsystem Core"); | ||
34 | MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>"); | ||
35 | MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | static int hdid[RIO_MAX_MPORTS]; | ||
39 | static int ids_num; | ||
40 | module_param_array(hdid, int, &ids_num, 0); | ||
41 | MODULE_PARM_DESC(hdid, | ||
42 | "Destination ID assignment to local RapidIO controllers"); | ||
43 | |||
34 | static LIST_HEAD(rio_devices); | 44 | static LIST_HEAD(rio_devices); |
35 | static DEFINE_SPINLOCK(rio_global_list_lock); | 45 | static DEFINE_SPINLOCK(rio_global_list_lock); |
36 | 46 | ||
37 | static LIST_HEAD(rio_mports); | 47 | static LIST_HEAD(rio_mports); |
48 | static LIST_HEAD(rio_scans); | ||
38 | static DEFINE_MUTEX(rio_mport_list_lock); | 49 | static DEFINE_MUTEX(rio_mport_list_lock); |
39 | static unsigned char next_portid; | 50 | static unsigned char next_portid; |
40 | static DEFINE_SPINLOCK(rio_mmap_lock); | 51 | static DEFINE_SPINLOCK(rio_mmap_lock); |
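Host destination IDs now arrive through a standard module parameter array instead of the old riohdid= __setup hook removed further down: ids_num is filled with the number of values supplied, and rio_get_hdid() indexes hdid[] directly. In practice that means something like hdid=3,5 when the core is loaded as a module, or the same option prefixed with the core's module name on the kernel command line for a built-in core (e.g. rapidio.hdid=3,5, with the exact prefix depending on how the core objects are named in the Makefile).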
@@ -580,44 +591,6 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) | |||
580 | EXPORT_SYMBOL_GPL(rio_set_port_lockout); | 591 | EXPORT_SYMBOL_GPL(rio_set_port_lockout); |
581 | 592 | ||
582 | /** | 593 | /** |
583 | * rio_switch_init - Sets switch operations for a particular vendor switch | ||
584 | * @rdev: RIO device | ||
585 | * @do_enum: Enumeration/Discovery mode flag | ||
586 | * | ||
587 | * Searches the RIO switch ops table for known switch types. If the vid | ||
588 | * and did match a switch table entry, then call switch initialization | ||
589 | * routine to setup switch-specific routines. | ||
590 | */ | ||
591 | void rio_switch_init(struct rio_dev *rdev, int do_enum) | ||
592 | { | ||
593 | struct rio_switch_ops *cur = __start_rio_switch_ops; | ||
594 | struct rio_switch_ops *end = __end_rio_switch_ops; | ||
595 | |||
596 | while (cur < end) { | ||
597 | if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { | ||
598 | pr_debug("RIO: calling init routine for %s\n", | ||
599 | rio_name(rdev)); | ||
600 | cur->init_hook(rdev, do_enum); | ||
601 | break; | ||
602 | } | ||
603 | cur++; | ||
604 | } | ||
605 | |||
606 | if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { | ||
607 | pr_debug("RIO: adding STD routing ops for %s\n", | ||
608 | rio_name(rdev)); | ||
609 | rdev->rswitch->add_entry = rio_std_route_add_entry; | ||
610 | rdev->rswitch->get_entry = rio_std_route_get_entry; | ||
611 | rdev->rswitch->clr_table = rio_std_route_clr_table; | ||
612 | } | ||
613 | |||
614 | if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) | ||
615 | printk(KERN_ERR "RIO: missing routing ops for %s\n", | ||
616 | rio_name(rdev)); | ||
617 | } | ||
618 | EXPORT_SYMBOL_GPL(rio_switch_init); | ||
619 | |||
620 | /** | ||
621 | * rio_enable_rx_tx_port - enable input receiver and output transmitter of | 594 | * rio_enable_rx_tx_port - enable input receiver and output transmitter of |
622 | * given port | 595 | * given port |
623 | * @port: Master port associated with the RIO network | 596 | * @port: Master port associated with the RIO network |
@@ -970,8 +943,8 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) | |||
970 | /* | 943 | /* |
971 | * Process the port-write notification from switch | 944 | * Process the port-write notification from switch |
972 | */ | 945 | */ |
973 | if (rdev->rswitch->em_handle) | 946 | if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) |
974 | rdev->rswitch->em_handle(rdev, portnum); | 947 | rdev->rswitch->ops->em_handle(rdev, portnum); |
975 | 948 | ||
976 | rio_read_config_32(rdev, | 949 | rio_read_config_32(rdev, |
977 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), | 950 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), |
@@ -1207,8 +1180,9 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from) | |||
1207 | * @route_destid: destID entry in the RT | 1180 | * @route_destid: destID entry in the RT |
1208 | * @route_port: destination port for specified destID | 1181 | * @route_port: destination port for specified destID |
1209 | */ | 1182 | */ |
1210 | int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | 1183 | static int |
1211 | u16 table, u16 route_destid, u8 route_port) | 1184 | rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, |
1185 | u16 table, u16 route_destid, u8 route_port) | ||
1212 | { | 1186 | { |
1213 | if (table == RIO_GLOBAL_TABLE) { | 1187 | if (table == RIO_GLOBAL_TABLE) { |
1214 | rio_mport_write_config_32(mport, destid, hopcount, | 1188 | rio_mport_write_config_32(mport, destid, hopcount, |
@@ -1234,8 +1208,9 @@ int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
1234 | * @route_destid: destID entry in the RT | 1208 | * @route_destid: destID entry in the RT |
1235 | * @route_port: returned destination port for specified destID | 1209 | * @route_port: returned destination port for specified destID |
1236 | */ | 1210 | */ |
1237 | int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | 1211 | static int |
1238 | u16 table, u16 route_destid, u8 *route_port) | 1212 | rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, |
1213 | u16 table, u16 route_destid, u8 *route_port) | ||
1239 | { | 1214 | { |
1240 | u32 result; | 1215 | u32 result; |
1241 | 1216 | ||
@@ -1259,8 +1234,9 @@ int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
1259 | * @hopcount: Number of switch hops to the device | 1234 | * @hopcount: Number of switch hops to the device |
1260 | * @table: routing table ID (global or port-specific) | 1235 | * @table: routing table ID (global or port-specific) |
1261 | */ | 1236 | */ |
1262 | int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, | 1237 | static int |
1263 | u16 table) | 1238 | rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, |
1239 | u16 table) | ||
1264 | { | 1240 | { |
1265 | u32 max_destid = 0xff; | 1241 | u32 max_destid = 0xff; |
1266 | u32 i, pef, id_inc = 1, ext_cfg = 0; | 1242 | u32 i, pef, id_inc = 1, ext_cfg = 0; |
@@ -1301,6 +1277,234 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
1301 | return 0; | 1277 | return 0; |
1302 | } | 1278 | } |
1303 | 1279 | ||
1280 | /** | ||
1281 | * rio_lock_device - Acquires host device lock for specified device | ||
1282 | * @port: Master port to send transaction | ||
1283 | * @destid: Destination ID for device/switch | ||
1284 | * @hopcount: Hopcount to reach switch | ||
1285 | * @wait_ms: Max wait time in msec (0 = no timeout) | ||
1286 | * | ||
1287 | * Attempts to acquire host device lock for specified device. | ||
1288 | * Returns %0 if device lock acquired or %-EINVAL if timeout expires. | ||
1289 | */ | ||
1290 | int rio_lock_device(struct rio_mport *port, u16 destid, | ||
1291 | u8 hopcount, int wait_ms) | ||
1292 | { | ||
1293 | u32 result; | ||
1294 | int tcnt = 0; | ||
1295 | |||
1296 | /* Attempt to acquire device lock */ | ||
1297 | rio_mport_write_config_32(port, destid, hopcount, | ||
1298 | RIO_HOST_DID_LOCK_CSR, port->host_deviceid); | ||
1299 | rio_mport_read_config_32(port, destid, hopcount, | ||
1300 | RIO_HOST_DID_LOCK_CSR, &result); | ||
1301 | |||
1302 | while (result != port->host_deviceid) { | ||
1303 | if (wait_ms != 0 && tcnt == wait_ms) { | ||
1304 | pr_debug("RIO: timeout when locking device %x:%x\n", | ||
1305 | destid, hopcount); | ||
1306 | return -EINVAL; | ||
1307 | } | ||
1308 | |||
1309 | /* Delay a bit */ | ||
1310 | mdelay(1); | ||
1311 | tcnt++; | ||
1312 | /* Try to acquire device lock again */ | ||
1313 | rio_mport_write_config_32(port, destid, | ||
1314 | hopcount, | ||
1315 | RIO_HOST_DID_LOCK_CSR, | ||
1316 | port->host_deviceid); | ||
1317 | rio_mport_read_config_32(port, destid, | ||
1318 | hopcount, | ||
1319 | RIO_HOST_DID_LOCK_CSR, &result); | ||
1320 | } | ||
1321 | |||
1322 | return 0; | ||
1323 | } | ||
1324 | EXPORT_SYMBOL_GPL(rio_lock_device); | ||
1325 | |||
1326 | /** | ||
1327 | * rio_unlock_device - Releases host device lock for specified device | ||
1328 | * @port: Master port to send transaction | ||
1329 | * @destid: Destination ID for device/switch | ||
1330 | * @hopcount: Hopcount to reach switch | ||
1331 | * | ||
1332 | * Returns %0 if device lock released or %-EINVAL if it fails. | ||
1333 | */ | ||
1334 | int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) | ||
1335 | { | ||
1336 | u32 result; | ||
1337 | |||
1338 | /* Release device lock */ | ||
1339 | rio_mport_write_config_32(port, destid, | ||
1340 | hopcount, | ||
1341 | RIO_HOST_DID_LOCK_CSR, | ||
1342 | port->host_deviceid); | ||
1343 | rio_mport_read_config_32(port, destid, hopcount, | ||
1344 | RIO_HOST_DID_LOCK_CSR, &result); | ||
1345 | if ((result & 0xffff) != 0xffff) { | ||
1346 | pr_debug("RIO: badness when releasing device lock %x:%x\n", | ||
1347 | destid, hopcount); | ||
1348 | return -EINVAL; | ||
1349 | } | ||
1350 | |||
1351 | return 0; | ||
1352 | } | ||
1353 | EXPORT_SYMBOL_GPL(rio_unlock_device); | ||
1354 | |||
1355 | /** | ||
1356 | * rio_route_add_entry- Add a route entry to a switch routing table | ||
1357 | * @rdev: RIO device | ||
1358 | * @table: Routing table ID | ||
1359 | * @route_destid: Destination ID to be routed | ||
1360 | * @route_port: Port number to be routed | ||
1361 | * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) | ||
1362 | * | ||
1363 | * If available, calls the switch specific add_entry() method to add a route | ||
1364 | * entry into a switch routing table. Otherwise uses standard RT update method | ||
1365 | * as defined by RapidIO specification. A specific routing table can be selected | ||
1366 | * using the @table argument if a switch has per port routing tables or | ||
1367 | * the standard (or global) table may be used by passing | ||
1368 | * %RIO_GLOBAL_TABLE in @table. | ||
1369 | * | ||
1370 | * Returns %0 on success or %-EINVAL on failure. | ||
1371 | */ | ||
1372 | int rio_route_add_entry(struct rio_dev *rdev, | ||
1373 | u16 table, u16 route_destid, u8 route_port, int lock) | ||
1374 | { | ||
1375 | int rc = -EINVAL; | ||
1376 | struct rio_switch_ops *ops = rdev->rswitch->ops; | ||
1377 | |||
1378 | if (lock) { | ||
1379 | rc = rio_lock_device(rdev->net->hport, rdev->destid, | ||
1380 | rdev->hopcount, 1000); | ||
1381 | if (rc) | ||
1382 | return rc; | ||
1383 | } | ||
1384 | |||
1385 | spin_lock(&rdev->rswitch->lock); | ||
1386 | |||
1387 | if (ops == NULL || ops->add_entry == NULL) { | ||
1388 | rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid, | ||
1389 | rdev->hopcount, table, | ||
1390 | route_destid, route_port); | ||
1391 | } else if (try_module_get(ops->owner)) { | ||
1392 | rc = ops->add_entry(rdev->net->hport, rdev->destid, | ||
1393 | rdev->hopcount, table, route_destid, | ||
1394 | route_port); | ||
1395 | module_put(ops->owner); | ||
1396 | } | ||
1397 | |||
1398 | spin_unlock(&rdev->rswitch->lock); | ||
1399 | |||
1400 | if (lock) | ||
1401 | rio_unlock_device(rdev->net->hport, rdev->destid, | ||
1402 | rdev->hopcount); | ||
1403 | |||
1404 | return rc; | ||
1405 | } | ||
1406 | EXPORT_SYMBOL_GPL(rio_route_add_entry); | ||
1407 | |||
1408 | /** | ||
1409 | * rio_route_get_entry- Read an entry from a switch routing table | ||
1410 | * @rdev: RIO device | ||
1411 | * @table: Routing table ID | ||
1412 | * @route_destid: Destination ID to be routed | ||
1413 | * @route_port: Pointer to read port number into | ||
1414 | * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) | ||
1415 | * | ||
1416 | * If available, calls the switch specific get_entry() method to fetch a route | ||
1417 | * entry from a switch routing table. Otherwise uses standard RT read method | ||
1418 | * as defined by RapidIO specification. A specific routing table can be selected | ||
1419 | * using the @table argument if a switch has per port routing tables or | ||
1420 | * the standard (or global) table may be used by passing | ||
1421 | * %RIO_GLOBAL_TABLE in @table. | ||
1422 | * | ||
1423 | * Returns %0 on success or %-EINVAL on failure. | ||
1424 | */ | ||
1425 | int rio_route_get_entry(struct rio_dev *rdev, u16 table, | ||
1426 | u16 route_destid, u8 *route_port, int lock) | ||
1427 | { | ||
1428 | int rc = -EINVAL; | ||
1429 | struct rio_switch_ops *ops = rdev->rswitch->ops; | ||
1430 | |||
1431 | if (lock) { | ||
1432 | rc = rio_lock_device(rdev->net->hport, rdev->destid, | ||
1433 | rdev->hopcount, 1000); | ||
1434 | if (rc) | ||
1435 | return rc; | ||
1436 | } | ||
1437 | |||
1438 | spin_lock(&rdev->rswitch->lock); | ||
1439 | |||
1440 | if (ops == NULL || ops->get_entry == NULL) { | ||
1441 | rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid, | ||
1442 | rdev->hopcount, table, | ||
1443 | route_destid, route_port); | ||
1444 | } else if (try_module_get(ops->owner)) { | ||
1445 | rc = ops->get_entry(rdev->net->hport, rdev->destid, | ||
1446 | rdev->hopcount, table, route_destid, | ||
1447 | route_port); | ||
1448 | module_put(ops->owner); | ||
1449 | } | ||
1450 | |||
1451 | spin_unlock(&rdev->rswitch->lock); | ||
1452 | |||
1453 | if (lock) | ||
1454 | rio_unlock_device(rdev->net->hport, rdev->destid, | ||
1455 | rdev->hopcount); | ||
1456 | return rc; | ||
1457 | } | ||
1458 | EXPORT_SYMBOL_GPL(rio_route_get_entry); | ||
1459 | |||
1460 | /** | ||
1461 | * rio_route_clr_table - Clear a switch routing table | ||
1462 | * @rdev: RIO device | ||
1463 | * @table: Routing table ID | ||
1464 | * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) | ||
1465 | * | ||
1466 | * If available, calls the switch specific clr_table() method to clear a switch | ||
1467 | * routing table. Otherwise uses standard RT write method as defined by RapidIO | ||
1468 | * specification. A specific routing table can be selected using the @table | ||
1469 | * argument if a switch has per port routing tables or the standard (or global) | ||
1470 | * table may be used by passing %RIO_GLOBAL_TABLE in @table. | ||
1471 | * | ||
1472 | * Returns %0 on success or %-EINVAL on failure. | ||
1473 | */ | ||
1474 | int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) | ||
1475 | { | ||
1476 | int rc = -EINVAL; | ||
1477 | struct rio_switch_ops *ops = rdev->rswitch->ops; | ||
1478 | |||
1479 | if (lock) { | ||
1480 | rc = rio_lock_device(rdev->net->hport, rdev->destid, | ||
1481 | rdev->hopcount, 1000); | ||
1482 | if (rc) | ||
1483 | return rc; | ||
1484 | } | ||
1485 | |||
1486 | spin_lock(&rdev->rswitch->lock); | ||
1487 | |||
1488 | if (ops == NULL || ops->clr_table == NULL) { | ||
1489 | rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid, | ||
1490 | rdev->hopcount, table); | ||
1491 | } else if (try_module_get(ops->owner)) { | ||
1492 | rc = ops->clr_table(rdev->net->hport, rdev->destid, | ||
1493 | rdev->hopcount, table); | ||
1494 | |||
1495 | module_put(ops->owner); | ||
1496 | } | ||
1497 | |||
1498 | spin_unlock(&rdev->rswitch->lock); | ||
1499 | |||
1500 | if (lock) | ||
1501 | rio_unlock_device(rdev->net->hport, rdev->destid, | ||
1502 | rdev->hopcount); | ||
1503 | |||
1504 | return rc; | ||
1505 | } | ||
1506 | EXPORT_SYMBOL_GPL(rio_route_clr_table); | ||
1507 | |||
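All three wrappers above follow one pattern: optionally take the RapidIO host device lock, take the rswitch spinlock, call the switch driver's method if one is registered (holding a module reference across the call), and otherwise fall back to the standard routing-table registers. A caller that already holds the device lock passes lock=0. An illustrative sketch of enumeration-time use, with the helper name invented here:

static int setup_default_route(struct rio_dev *rdev, u16 my_destid, u8 port)
{
	int rc;

	/* wipe the global table, letting the wrapper take the HW lock */
	rc = rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 1);
	if (rc)
		return rc;

	/* route traffic for my_destid out of 'port', again with locking */
	return rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, my_destid, port, 1);
}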
1304 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | 1508 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
1305 | 1509 | ||
1306 | static bool rio_chan_filter(struct dma_chan *chan, void *arg) | 1510 | static bool rio_chan_filter(struct dma_chan *chan, void *arg) |
@@ -1410,34 +1614,73 @@ found: | |||
1410 | * rio_register_scan - enumeration/discovery method registration interface | 1614 | * rio_register_scan - enumeration/discovery method registration interface |
1411 | * @mport_id: mport device ID for which fabric scan routine has to be set | 1615 | * @mport_id: mport device ID for which fabric scan routine has to be set |
1412 | * (RIO_MPORT_ANY = set for all available mports) | 1616 | * (RIO_MPORT_ANY = set for all available mports) |
1413 | * @scan_ops: enumeration/discovery control structure | 1617 | * @scan_ops: enumeration/discovery operations structure |
1618 | * | ||
1619 | * Registers enumeration/discovery operations with RapidIO subsystem and | ||
1620 | * attaches it to the specified mport device (or all available mports | ||
1621 | * if RIO_MPORT_ANY is specified). | ||
1414 | * | 1622 | * |
1415 | * Assigns enumeration or discovery method to the specified mport device (or all | ||
1416 | * available mports if RIO_MPORT_ANY is specified). | ||
1417 | * Returns error if the mport already has an enumerator attached to it. | 1623 | * Returns error if the mport already has an enumerator attached to it. |
1418 | * In case of RIO_MPORT_ANY ignores ports with valid scan routines and returns | 1624 | * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error). |
1419 | * an error if was unable to find at least one available mport. | ||
1420 | */ | 1625 | */ |
1421 | int rio_register_scan(int mport_id, struct rio_scan *scan_ops) | 1626 | int rio_register_scan(int mport_id, struct rio_scan *scan_ops) |
1422 | { | 1627 | { |
1423 | struct rio_mport *port; | 1628 | struct rio_mport *port; |
1424 | int rc = -EBUSY; | 1629 | struct rio_scan_node *scan; |
1630 | int rc = 0; | ||
1425 | 1631 | ||
1426 | mutex_lock(&rio_mport_list_lock); | 1632 | pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); |
1427 | list_for_each_entry(port, &rio_mports, node) { | ||
1428 | if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { | ||
1429 | if (port->nscan && mport_id == RIO_MPORT_ANY) | ||
1430 | continue; | ||
1431 | else if (port->nscan) | ||
1432 | break; | ||
1433 | 1633 | ||
1434 | port->nscan = scan_ops; | 1634 | if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) || |
1435 | rc = 0; | 1635 | !scan_ops) |
1636 | return -EINVAL; | ||
1436 | 1637 | ||
1437 | if (mport_id != RIO_MPORT_ANY) | 1638 | mutex_lock(&rio_mport_list_lock); |
1438 | break; | 1639 | |
1640 | /* | ||
1641 | * Check if there is another enumerator already registered for | ||
1642 | * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators | ||
1643 | * for the same mport ID are not supported. | ||
1644 | */ | ||
1645 | list_for_each_entry(scan, &rio_scans, node) { | ||
1646 | if (scan->mport_id == mport_id) { | ||
1647 | rc = -EBUSY; | ||
1648 | goto err_out; | ||
1439 | } | 1649 | } |
1440 | } | 1650 | } |
1651 | |||
1652 | /* | ||
1653 | * Allocate and initialize new scan registration node. | ||
1654 | */ | ||
1655 | scan = kzalloc(sizeof(*scan), GFP_KERNEL); | ||
1656 | if (!scan) { | ||
1657 | rc = -ENOMEM; | ||
1658 | goto err_out; | ||
1659 | } | ||
1660 | |||
1661 | scan->mport_id = mport_id; | ||
1662 | scan->ops = scan_ops; | ||
1663 | |||
1664 | /* | ||
1665 | * Traverse the list of registered mports to attach this new scan. | ||
1666 | * | ||
1667 | * The new scan with matching mport ID overrides any previously attached | ||
1668 | * scan assuming that old scan (if any) is the default one (based on the | ||
1669 | * enumerator registration check above). | ||
1670 | * If the new scan is the global one, it will be attached only to mports | ||
1671 | * that do not have their own individual operations already attached. | ||
1672 | */ | ||
1673 | list_for_each_entry(port, &rio_mports, node) { | ||
1674 | if (port->id == mport_id) { | ||
1675 | port->nscan = scan_ops; | ||
1676 | break; | ||
1677 | } else if (mport_id == RIO_MPORT_ANY && !port->nscan) | ||
1678 | port->nscan = scan_ops; | ||
1679 | } | ||
1680 | |||
1681 | list_add_tail(&scan->node, &rio_scans); | ||
1682 | |||
1683 | err_out: | ||
1441 | mutex_unlock(&rio_mport_list_lock); | 1684 | mutex_unlock(&rio_mport_list_lock); |
1442 | 1685 | ||
1443 | return rc; | 1686 | return rc; |
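Registrations are now remembered on the rio_scans list so that mports added later (see rio_register_mport() below) can still pick up a matching enumerator. Based on how it is used here, the bookkeeping node looks roughly like the following; the real definition lives in the RapidIO headers and may differ in detail:

struct rio_scan_node {
	int mport_id;			/* mport ID or RIO_MPORT_ANY */
	struct list_head node;		/* link on the rio_scans list */
	struct rio_scan *ops;		/* enumeration/discovery operations */
};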
@@ -1447,30 +1690,81 @@ EXPORT_SYMBOL_GPL(rio_register_scan); | |||
1447 | /** | 1690 | /** |
1448 | * rio_unregister_scan - removes enumeration/discovery method from mport | 1691 | * rio_unregister_scan - removes enumeration/discovery method from mport |
1449 | * @mport_id: mport device ID for which fabric scan routine has to be | 1692 | * @mport_id: mport device ID for which fabric scan routine has to be |
1450 | * unregistered (RIO_MPORT_ANY = set for all available mports) | 1693 | * unregistered (RIO_MPORT_ANY = apply to all mports that use |
1694 | * the specified scan_ops) | ||
1695 | * @scan_ops: enumeration/discovery operations structure | ||
1451 | * | 1696 | * |
1452 | * Removes enumeration or discovery method assigned to the specified mport | 1697 | * Removes enumeration or discovery method assigned to the specified mport |
1453 | * device (or all available mports if RIO_MPORT_ANY is specified). | 1698 | * device. If RIO_MPORT_ANY is specified, removes the specified operations from |
1699 | * all mports that have them attached. | ||
1454 | */ | 1700 | */ |
1455 | int rio_unregister_scan(int mport_id) | 1701 | int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) |
1456 | { | 1702 | { |
1457 | struct rio_mport *port; | 1703 | struct rio_mport *port; |
1704 | struct rio_scan_node *scan; | ||
1705 | |||
1706 | pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); | ||
1707 | |||
1708 | if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) | ||
1709 | return -EINVAL; | ||
1458 | 1710 | ||
1459 | mutex_lock(&rio_mport_list_lock); | 1711 | mutex_lock(&rio_mport_list_lock); |
1460 | list_for_each_entry(port, &rio_mports, node) { | 1712 | |
1461 | if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { | 1713 | list_for_each_entry(port, &rio_mports, node) |
1462 | if (port->nscan) | 1714 | if (port->id == mport_id || |
1463 | port->nscan = NULL; | 1715 | (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) |
1464 | if (mport_id != RIO_MPORT_ANY) | 1716 | port->nscan = NULL; |
1465 | break; | 1717 | |
1718 | list_for_each_entry(scan, &rio_scans, node) | ||
1719 | if (scan->mport_id == mport_id) { | ||
1720 | list_del(&scan->node); | ||
1721 | kfree(scan); | ||
1466 | } | 1722 | } |
1467 | } | 1723 | |
1468 | mutex_unlock(&rio_mport_list_lock); | 1724 | mutex_unlock(&rio_mport_list_lock); |
1469 | 1725 | ||
1470 | return 0; | 1726 | return 0; |
1471 | } | 1727 | } |
1472 | EXPORT_SYMBOL_GPL(rio_unregister_scan); | 1728 | EXPORT_SYMBOL_GPL(rio_unregister_scan); |
1473 | 1729 | ||
1730 | /** | ||
1731 | * rio_mport_scan - execute enumeration/discovery on the specified mport | ||
1732 | * @mport_id: number (ID) of mport device | ||
1733 | */ | ||
1734 | int rio_mport_scan(int mport_id) | ||
1735 | { | ||
1736 | struct rio_mport *port = NULL; | ||
1737 | int rc; | ||
1738 | |||
1739 | mutex_lock(&rio_mport_list_lock); | ||
1740 | list_for_each_entry(port, &rio_mports, node) { | ||
1741 | if (port->id == mport_id) | ||
1742 | goto found; | ||
1743 | } | ||
1744 | mutex_unlock(&rio_mport_list_lock); | ||
1745 | return -ENODEV; | ||
1746 | found: | ||
1747 | if (!port->nscan) { | ||
1748 | mutex_unlock(&rio_mport_list_lock); | ||
1749 | return -EINVAL; | ||
1750 | } | ||
1751 | |||
1752 | if (!try_module_get(port->nscan->owner)) { | ||
1753 | mutex_unlock(&rio_mport_list_lock); | ||
1754 | return -ENODEV; | ||
1755 | } | ||
1756 | |||
1757 | mutex_unlock(&rio_mport_list_lock); | ||
1758 | |||
1759 | if (port->host_deviceid >= 0) | ||
1760 | rc = port->nscan->enumerate(port, 0); | ||
1761 | else | ||
1762 | rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); | ||
1763 | |||
1764 | module_put(port->nscan->owner); | ||
1765 | return rc; | ||
1766 | } | ||
1767 | |||
1474 | static void rio_fixup_device(struct rio_dev *dev) | 1768 | static void rio_fixup_device(struct rio_dev *dev) |
1475 | { | 1769 | { |
1476 | } | 1770 | } |
@@ -1499,7 +1793,10 @@ static void disc_work_handler(struct work_struct *_work) | |||
1499 | work = container_of(_work, struct rio_disc_work, work); | 1793 | work = container_of(_work, struct rio_disc_work, work); |
1500 | pr_debug("RIO: discovery work for mport %d %s\n", | 1794 | pr_debug("RIO: discovery work for mport %d %s\n", |
1501 | work->mport->id, work->mport->name); | 1795 | work->mport->id, work->mport->name); |
1502 | work->mport->nscan->discover(work->mport, 0); | 1796 | if (try_module_get(work->mport->nscan->owner)) { |
1797 | work->mport->nscan->discover(work->mport, 0); | ||
1798 | module_put(work->mport->nscan->owner); | ||
1799 | } | ||
1503 | } | 1800 | } |
1504 | 1801 | ||
1505 | int rio_init_mports(void) | 1802 | int rio_init_mports(void) |
@@ -1518,8 +1815,10 @@ int rio_init_mports(void) | |||
1518 | mutex_lock(&rio_mport_list_lock); | 1815 | mutex_lock(&rio_mport_list_lock); |
1519 | list_for_each_entry(port, &rio_mports, node) { | 1816 | list_for_each_entry(port, &rio_mports, node) { |
1520 | if (port->host_deviceid >= 0) { | 1817 | if (port->host_deviceid >= 0) { |
1521 | if (port->nscan) | 1818 | if (port->nscan && try_module_get(port->nscan->owner)) { |
1522 | port->nscan->enumerate(port, 0); | 1819 | port->nscan->enumerate(port, 0); |
1820 | module_put(port->nscan->owner); | ||
1821 | } | ||
1523 | } else | 1822 | } else |
1524 | n++; | 1823 | n++; |
1525 | } | 1824 | } |
@@ -1533,7 +1832,7 @@ int rio_init_mports(void) | |||
1533 | * for each of them. If the code below fails to allocate needed | 1832 | * for each of them. If the code below fails to allocate needed |
1534 | * resources, exit without error to keep results of enumeration | 1833 | * resources, exit without error to keep results of enumeration |
1535 | * process (if any). | 1834 | * process (if any). |
1536 | * TODO: Implement restart of dicovery process for all or | 1835 | * TODO: Implement restart of discovery process for all or |
1537 | * individual discovering mports. | 1836 | * individual discovering mports. |
1538 | */ | 1837 | */ |
1539 | rio_wq = alloc_workqueue("riodisc", 0, 0); | 1838 | rio_wq = alloc_workqueue("riodisc", 0, 0); |
@@ -1559,9 +1858,9 @@ int rio_init_mports(void) | |||
1559 | n++; | 1858 | n++; |
1560 | } | 1859 | } |
1561 | } | 1860 | } |
1562 | mutex_unlock(&rio_mport_list_lock); | ||
1563 | 1861 | ||
1564 | flush_workqueue(rio_wq); | 1862 | flush_workqueue(rio_wq); |
1863 | mutex_unlock(&rio_mport_list_lock); | ||
1565 | pr_debug("RIO: destroy discovery workqueue\n"); | 1864 | pr_debug("RIO: destroy discovery workqueue\n"); |
1566 | destroy_workqueue(rio_wq); | 1865 | destroy_workqueue(rio_wq); |
1567 | kfree(work); | 1866 | kfree(work); |
@@ -1572,26 +1871,18 @@ no_disc: | |||
1572 | return 0; | 1871 | return 0; |
1573 | } | 1872 | } |
1574 | 1873 | ||
1575 | static int hdids[RIO_MAX_MPORTS + 1]; | ||
1576 | |||
1577 | static int rio_get_hdid(int index) | 1874 | static int rio_get_hdid(int index) |
1578 | { | 1875 | { |
1579 | if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS) | 1876 | if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS) |
1580 | return -1; | 1877 | return -1; |
1581 | 1878 | ||
1582 | return hdids[index + 1]; | 1879 | return hdid[index]; |
1583 | } | ||
1584 | |||
1585 | static int rio_hdid_setup(char *str) | ||
1586 | { | ||
1587 | (void)get_options(str, ARRAY_SIZE(hdids), hdids); | ||
1588 | return 1; | ||
1589 | } | 1880 | } |
1590 | 1881 | ||
1591 | __setup("riohdid=", rio_hdid_setup); | ||
1592 | |||
1593 | int rio_register_mport(struct rio_mport *port) | 1882 | int rio_register_mport(struct rio_mport *port) |
1594 | { | 1883 | { |
1884 | struct rio_scan_node *scan = NULL; | ||
1885 | |||
1595 | if (next_portid >= RIO_MAX_MPORTS) { | 1886 | if (next_portid >= RIO_MAX_MPORTS) { |
1596 | pr_err("RIO: reached specified max number of mports\n"); | 1887 | pr_err("RIO: reached specified max number of mports\n"); |
1597 | return 1; | 1888 | return 1; |
@@ -1600,11 +1891,28 @@ int rio_register_mport(struct rio_mport *port) | |||
1600 | port->id = next_portid++; | 1891 | port->id = next_portid++; |
1601 | port->host_deviceid = rio_get_hdid(port->id); | 1892 | port->host_deviceid = rio_get_hdid(port->id); |
1602 | port->nscan = NULL; | 1893 | port->nscan = NULL; |
1894 | |||
1603 | mutex_lock(&rio_mport_list_lock); | 1895 | mutex_lock(&rio_mport_list_lock); |
1604 | list_add_tail(&port->node, &rio_mports); | 1896 | list_add_tail(&port->node, &rio_mports); |
1897 | |||
1898 | /* | ||
1899 | * Check if there are any registered enumeration/discovery operations | ||
1900 | * that have to be attached to the added mport. | ||
1901 | */ | ||
1902 | list_for_each_entry(scan, &rio_scans, node) { | ||
1903 | if (port->id == scan->mport_id || | ||
1904 | scan->mport_id == RIO_MPORT_ANY) { | ||
1905 | port->nscan = scan->ops; | ||
1906 | if (port->id == scan->mport_id) | ||
1907 | break; | ||
1908 | } | ||
1909 | } | ||
1605 | mutex_unlock(&rio_mport_list_lock); | 1910 | mutex_unlock(&rio_mport_list_lock); |
1911 | |||
1912 | pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id); | ||
1606 | return 0; | 1913 | return 0; |
1607 | } | 1914 | } |
1915 | EXPORT_SYMBOL_GPL(rio_register_mport); | ||
1608 | 1916 | ||
1609 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); | 1917 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); |
1610 | EXPORT_SYMBOL_GPL(rio_get_device); | 1918 | EXPORT_SYMBOL_GPL(rio_get_device); |
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index c14f864dea5c..085215cd8502 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h | |||
@@ -28,52 +28,28 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, | |||
28 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, | 28 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, |
29 | u8 hopcount); | 29 | u8 hopcount); |
30 | extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); | 30 | extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); |
31 | extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, | 31 | extern int rio_lock_device(struct rio_mport *port, u16 destid, |
32 | u8 hopcount, u16 table, u16 route_destid, | 32 | u8 hopcount, int wait_ms); |
33 | u8 route_port); | 33 | extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount); |
34 | extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, | 34 | extern int rio_route_add_entry(struct rio_dev *rdev, |
35 | u8 hopcount, u16 table, u16 route_destid, | 35 | u16 table, u16 route_destid, u8 route_port, int lock); |
36 | u8 *route_port); | 36 | extern int rio_route_get_entry(struct rio_dev *rdev, u16 table, |
37 | extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, | 37 | u16 route_destid, u8 *route_port, int lock); |
38 | u8 hopcount, u16 table); | 38 | extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock); |
39 | extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); | 39 | extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); |
40 | extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); | 40 | extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); |
41 | extern int rio_add_device(struct rio_dev *rdev); | 41 | extern int rio_add_device(struct rio_dev *rdev); |
42 | extern void rio_switch_init(struct rio_dev *rdev, int do_enum); | ||
43 | extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, | 42 | extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, |
44 | u8 hopcount, u8 port_num); | 43 | u8 hopcount, u8 port_num); |
45 | extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); | 44 | extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); |
46 | extern int rio_unregister_scan(int mport_id); | 45 | extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops); |
47 | extern void rio_attach_device(struct rio_dev *rdev); | 46 | extern void rio_attach_device(struct rio_dev *rdev); |
48 | extern struct rio_mport *rio_find_mport(int mport_id); | 47 | extern struct rio_mport *rio_find_mport(int mport_id); |
48 | extern int rio_mport_scan(int mport_id); | ||
49 | 49 | ||
50 | /* Structures internal to the RIO core code */ | 50 | /* Structures internal to the RIO core code */ |
51 | extern struct device_attribute rio_dev_attrs[]; | 51 | extern struct device_attribute rio_dev_attrs[]; |
52 | extern struct bus_attribute rio_bus_attrs[]; | 52 | extern struct bus_attribute rio_bus_attrs[]; |
53 | 53 | ||
54 | extern struct rio_switch_ops __start_rio_switch_ops[]; | ||
55 | extern struct rio_switch_ops __end_rio_switch_ops[]; | ||
56 | |||
57 | /* Helpers internal to the RIO core code */ | ||
58 | #define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \ | ||
59 | static const struct rio_switch_ops __rio_switch_##name __used \ | ||
60 | __section(section) = { vid, did, init_hook }; | ||
61 | |||
62 | /** | ||
63 | * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine | ||
64 | * @vid: RIO vendor ID | ||
65 | * @did: RIO device ID | ||
66 | * @init_hook: Callback that performs switch-specific initialization | ||
67 | * | ||
68 | * Manipulating switch route tables and error management in RIO | ||
69 | * is switch specific. This registers a switch by vendor and device ID with | ||
70 | * initialization callback for setting up switch operations and (if required) | ||
71 | * hardware initialization. A &struct rio_switch_ops is initialized with | ||
72 | * pointer to the init routine and placed into a RIO-specific kernel section. | ||
73 | */ | ||
74 | #define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \ | ||
75 | DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \ | ||
76 | vid, did, init_hook) | ||
77 | |||
78 | #define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) | 54 | #define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) |
79 | #define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) | 55 | #define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) |
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig index f47fee5d4563..345841562f95 100644 --- a/drivers/rapidio/switches/Kconfig +++ b/drivers/rapidio/switches/Kconfig | |||
@@ -2,34 +2,23 @@ | |||
2 | # RapidIO switches configuration | 2 | # RapidIO switches configuration |
3 | # | 3 | # |
4 | config RAPIDIO_TSI57X | 4 | config RAPIDIO_TSI57X |
5 | bool "IDT Tsi57x SRIO switches support" | 5 | tristate "IDT Tsi57x SRIO switches support" |
6 | depends on RAPIDIO | ||
7 | ---help--- | 6 | ---help--- |
8 | Includes support for IDT Tsi57x family of serial RapidIO switches. | 7 | Includes support for IDT Tsi57x family of serial RapidIO switches. |
9 | 8 | ||
10 | config RAPIDIO_CPS_XX | 9 | config RAPIDIO_CPS_XX |
11 | bool "IDT CPS-xx SRIO switches support" | 10 | tristate "IDT CPS-xx SRIO switches support" |
12 | depends on RAPIDIO | ||
13 | ---help--- | 11 | ---help--- |
14 | Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. | 12 | Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. |
15 | 13 | ||
16 | config RAPIDIO_TSI568 | 14 | config RAPIDIO_TSI568 |
17 | bool "Tsi568 SRIO switch support" | 15 | tristate "Tsi568 SRIO switch support" |
18 | depends on RAPIDIO | ||
19 | default n | 16 | default n |
20 | ---help--- | 17 | ---help--- |
21 | Includes support for IDT Tsi568 serial RapidIO switch. | 18 | Includes support for IDT Tsi568 serial RapidIO switch. |
22 | 19 | ||
23 | config RAPIDIO_CPS_GEN2 | 20 | config RAPIDIO_CPS_GEN2 |
24 | bool "IDT CPS Gen.2 SRIO switch support" | 21 | tristate "IDT CPS Gen.2 SRIO switch support" |
25 | depends on RAPIDIO | ||
26 | default n | 22 | default n |
27 | ---help--- | 23 | ---help--- |
28 | Includes support for IDT CPS Gen.2 serial RapidIO switches. | 24 | Includes support for IDT CPS Gen.2 serial RapidIO switches. |
29 | |||
30 | config RAPIDIO_TSI500 | ||
31 | bool "Tsi500 Parallel RapidIO switch support" | ||
32 | depends on RAPIDIO | ||
33 | default n | ||
34 | ---help--- | ||
35 | Includes support for IDT Tsi500 parallel RapidIO switch. | ||
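Promoting these entries from bool to tristate lets each switch driver be built as a loadable module, and the per-entry "depends on RAPIDIO" lines are dropped, presumably because the switches menu is only reachable when RAPIDIO itself is enabled. A typical configuration fragment after this change might look like:

	CONFIG_RAPIDIO=y
	CONFIG_RAPIDIO_TSI57X=m
	CONFIG_RAPIDIO_CPS_GEN2=m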
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index c4d3acc3c715..051cc6b38188 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile | |||
@@ -5,5 +5,4 @@ | |||
5 | obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o | 5 | obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o |
6 | obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o | 6 | obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o |
7 | obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o | 7 | obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o |
8 | obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o | ||
9 | obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o | 8 | obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 809b7a3336ba..00a71ebb5cac 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/stat.h> | 13 | #include <linux/stat.h> |
14 | #include <linux/module.h> | ||
14 | #include <linux/rio.h> | 15 | #include <linux/rio.h> |
15 | #include <linux/rio_drv.h> | 16 | #include <linux/rio_drv.h> |
16 | #include <linux/rio_ids.h> | 17 | #include <linux/rio_ids.h> |
@@ -387,12 +388,12 @@ idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) | |||
387 | 388 | ||
388 | static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); | 389 | static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); |
389 | 390 | ||
390 | static int idtg2_sysfs(struct rio_dev *rdev, int create) | 391 | static int idtg2_sysfs(struct rio_dev *rdev, bool create) |
391 | { | 392 | { |
392 | struct device *dev = &rdev->dev; | 393 | struct device *dev = &rdev->dev; |
393 | int err = 0; | 394 | int err = 0; |
394 | 395 | ||
395 | if (create == RIO_SW_SYSFS_CREATE) { | 396 | if (create) { |
396 | /* Initialize sysfs entries */ | 397 | /* Initialize sysfs entries */ |
397 | err = device_create_file(dev, &dev_attr_errlog); | 398 | err = device_create_file(dev, &dev_attr_errlog); |
398 | if (err) | 399 | if (err) |
@@ -403,29 +404,90 @@ static int idtg2_sysfs(struct rio_dev *rdev, int create) | |||
403 | return err; | 404 | return err; |
404 | } | 405 | } |
405 | 406 | ||
406 | static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) | 407 | static struct rio_switch_ops idtg2_switch_ops = { |
408 | .owner = THIS_MODULE, | ||
409 | .add_entry = idtg2_route_add_entry, | ||
410 | .get_entry = idtg2_route_get_entry, | ||
411 | .clr_table = idtg2_route_clr_table, | ||
412 | .set_domain = idtg2_set_domain, | ||
413 | .get_domain = idtg2_get_domain, | ||
414 | .em_init = idtg2_em_init, | ||
415 | .em_handle = idtg2_em_handler, | ||
416 | }; | ||
417 | |||
418 | static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) | ||
407 | { | 419 | { |
408 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | 420 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
409 | rdev->rswitch->add_entry = idtg2_route_add_entry; | 421 | |
410 | rdev->rswitch->get_entry = idtg2_route_get_entry; | 422 | spin_lock(&rdev->rswitch->lock); |
411 | rdev->rswitch->clr_table = idtg2_route_clr_table; | 423 | |
412 | rdev->rswitch->set_domain = idtg2_set_domain; | 424 | if (rdev->rswitch->ops) { |
413 | rdev->rswitch->get_domain = idtg2_get_domain; | 425 | spin_unlock(&rdev->rswitch->lock); |
414 | rdev->rswitch->em_init = idtg2_em_init; | 426 | return -EINVAL; |
415 | rdev->rswitch->em_handle = idtg2_em_handler; | 427 | } |
416 | rdev->rswitch->sw_sysfs = idtg2_sysfs; | 428 | |
417 | 429 | rdev->rswitch->ops = &idtg2_switch_ops; | |
418 | if (do_enum) { | 430 | |
431 | if (rdev->do_enum) { | ||
419 | /* Ensure that default routing is disabled on startup */ | 432 | /* Ensure that default routing is disabled on startup */ |
420 | rio_write_config_32(rdev, | 433 | rio_write_config_32(rdev, |
421 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); | 434 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); |
422 | } | 435 | } |
423 | 436 | ||
437 | /* Create device-specific sysfs attributes */ | ||
438 | idtg2_sysfs(rdev, true); | ||
439 | |||
440 | spin_unlock(&rdev->rswitch->lock); | ||
424 | return 0; | 441 | return 0; |
425 | } | 442 | } |
426 | 443 | ||
427 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); | 444 | static void idtg2_remove(struct rio_dev *rdev) |
428 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); | 445 | { |
429 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); | 446 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
430 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); | 447 | spin_lock(&rdev->rswitch->lock); |
431 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init); | 448 | if (rdev->rswitch->ops != &idtg2_switch_ops) { |
449 | spin_unlock(&rdev->rswitch->lock); | ||
450 | return; | ||
451 | } | ||
452 | rdev->rswitch->ops = NULL; | ||
453 | |||
454 | /* Remove device-specific sysfs attributes */ | ||
455 | idtg2_sysfs(rdev, false); | ||
456 | |||
457 | spin_unlock(&rdev->rswitch->lock); | ||
458 | } | ||
459 | |||
460 | static struct rio_device_id idtg2_id_table[] = { | ||
461 | {RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)}, | ||
462 | {RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)}, | ||
463 | {RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)}, | ||
464 | {RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)}, | ||
465 | {RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)}, | ||
466 | { 0, } /* terminate list */ | ||
467 | }; | ||
468 | |||
469 | static struct rio_driver idtg2_driver = { | ||
470 | .name = "idt_gen2", | ||
471 | .id_table = idtg2_id_table, | ||
472 | .probe = idtg2_probe, | ||
473 | .remove = idtg2_remove, | ||
474 | }; | ||
475 | |||
476 | static int __init idtg2_init(void) | ||
477 | { | ||
478 | return rio_register_driver(&idtg2_driver); | ||
479 | } | ||
480 | |||
481 | static void __exit idtg2_exit(void) | ||
482 | { | ||
483 | pr_debug("RIO: %s\n", __func__); | ||
484 | rio_unregister_driver(&idtg2_driver); | ||
485 | pr_debug("RIO: %s done\n", __func__); | ||
486 | } | ||
487 | |||
488 | device_initcall(idtg2_init); | ||
489 | module_exit(idtg2_exit); | ||
490 | |||
491 | MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver"); | ||
492 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
493 | MODULE_LICENSE("GPL"); | ||
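The same conversion template repeats for the remaining switch drivers below: a static rio_switch_ops table carrying .owner, a probe() that claims rswitch->ops under the spinlock and refuses the device if another driver got there first, a remove() that only clears ops it installed, and a rio_driver bound through a RIO_DEVICE() id table. Stripped to a skeleton with invented "xyz" names and made-up IDs:

#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

static struct rio_switch_ops xyz_switch_ops = {
	.owner = THIS_MODULE,
	/* real drivers fill in add_entry/get_entry/clr_table/em_* here */
};

static int xyz_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops) {		/* another driver already bound */
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}
	rdev->rswitch->ops = &xyz_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

static void xyz_remove(struct rio_dev *rdev)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops == &xyz_switch_ops)
		rdev->rswitch->ops = NULL;
	spin_unlock(&rdev->rswitch->lock);
}

static struct rio_device_id xyz_id_table[] = {
	{RIO_DEVICE(0x1234, 0x5678)},	/* did/vid values invented */
	{ 0, }				/* terminate list */
};

static struct rio_driver xyz_driver = {
	.name     = "xyz",
	.id_table = xyz_id_table,
	.probe    = xyz_probe,
	.remove   = xyz_remove,
};

static int __init xyz_init(void)
{
	return rio_register_driver(&xyz_driver);
}

static void __exit xyz_exit(void)
{
	rio_unregister_driver(&xyz_driver);
}

device_initcall(xyz_init);
module_exit(xyz_exit);
MODULE_LICENSE("GPL");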
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index d06ee2d44b44..7fbb60d31796 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/rio.h> | 13 | #include <linux/rio.h> |
14 | #include <linux/rio_drv.h> | 14 | #include <linux/rio_drv.h> |
15 | #include <linux/rio_ids.h> | 15 | #include <linux/rio_ids.h> |
16 | #include <linux/module.h> | ||
16 | #include "../rio.h" | 17 | #include "../rio.h" |
17 | 18 | ||
18 | #define CPS_DEFAULT_ROUTE 0xde | 19 | #define CPS_DEFAULT_ROUTE 0xde |
@@ -118,18 +119,31 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
118 | return 0; | 119 | return 0; |
119 | } | 120 | } |
120 | 121 | ||
121 | static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) | 122 | static struct rio_switch_ops idtcps_switch_ops = { |
123 | .owner = THIS_MODULE, | ||
124 | .add_entry = idtcps_route_add_entry, | ||
125 | .get_entry = idtcps_route_get_entry, | ||
126 | .clr_table = idtcps_route_clr_table, | ||
127 | .set_domain = idtcps_set_domain, | ||
128 | .get_domain = idtcps_get_domain, | ||
129 | .em_init = NULL, | ||
130 | .em_handle = NULL, | ||
131 | }; | ||
132 | |||
133 | static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id) | ||
122 | { | 134 | { |
123 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | 135 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
124 | rdev->rswitch->add_entry = idtcps_route_add_entry; | 136 | |
125 | rdev->rswitch->get_entry = idtcps_route_get_entry; | 137 | spin_lock(&rdev->rswitch->lock); |
126 | rdev->rswitch->clr_table = idtcps_route_clr_table; | 138 | |
127 | rdev->rswitch->set_domain = idtcps_set_domain; | 139 | if (rdev->rswitch->ops) { |
128 | rdev->rswitch->get_domain = idtcps_get_domain; | 140 | spin_unlock(&rdev->rswitch->lock); |
129 | rdev->rswitch->em_init = NULL; | 141 | return -EINVAL; |
130 | rdev->rswitch->em_handle = NULL; | 142 | } |
131 | 143 | ||
132 | if (do_enum) { | 144 | rdev->rswitch->ops = &idtcps_switch_ops; |
145 | |||
146 | if (rdev->do_enum) { | ||
133 | /* set TVAL = ~50us */ | 147 | /* set TVAL = ~50us */ |
134 | rio_write_config_32(rdev, | 148 | rio_write_config_32(rdev, |
135 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); | 149 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); |
@@ -138,12 +152,52 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) | |||
138 | RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); | 152 | RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); |
139 | } | 153 | } |
140 | 154 | ||
155 | spin_unlock(&rdev->rswitch->lock); | ||
141 | return 0; | 156 | return 0; |
142 | } | 157 | } |
143 | 158 | ||
144 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init); | 159 | static void idtcps_remove(struct rio_dev *rdev) |
145 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init); | 160 | { |
146 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init); | 161 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
147 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init); | 162 | spin_lock(&rdev->rswitch->lock); |
148 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init); | 163 | if (rdev->rswitch->ops != &idtcps_switch_ops) { |
149 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init); | 164 | spin_unlock(&rdev->rswitch->lock); |
165 | return; | ||
166 | } | ||
167 | rdev->rswitch->ops = NULL; | ||
168 | spin_unlock(&rdev->rswitch->lock); | ||
169 | } | ||
170 | |||
171 | static struct rio_device_id idtcps_id_table[] = { | ||
172 | {RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)}, | ||
173 | {RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)}, | ||
174 | {RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)}, | ||
175 | {RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)}, | ||
176 | {RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)}, | ||
177 | {RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)}, | ||
178 | { 0, } /* terminate list */ | ||
179 | }; | ||
180 | |||
181 | static struct rio_driver idtcps_driver = { | ||
182 | .name = "idtcps", | ||
183 | .id_table = idtcps_id_table, | ||
184 | .probe = idtcps_probe, | ||
185 | .remove = idtcps_remove, | ||
186 | }; | ||
187 | |||
188 | static int __init idtcps_init(void) | ||
189 | { | ||
190 | return rio_register_driver(&idtcps_driver); | ||
191 | } | ||
192 | |||
193 | static void __exit idtcps_exit(void) | ||
194 | { | ||
195 | rio_unregister_driver(&idtcps_driver); | ||
196 | } | ||
197 | |||
198 | device_initcall(idtcps_init); | ||
199 | module_exit(idtcps_exit); | ||
200 | |||
201 | MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver"); | ||
202 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
203 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c deleted file mode 100644 index 914eddd5aa42..000000000000 --- a/drivers/rapidio/switches/tsi500.c +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | /* | ||
2 | * RapidIO Tsi500 switch support | ||
3 | * | ||
4 | * Copyright 2009-2010 Integrated Device Technology, Inc. | ||
5 | * Alexandre Bounine <alexandre.bounine@idt.com> | ||
6 | * - Modified switch operations initialization. | ||
7 | * | ||
8 | * Copyright 2005 MontaVista Software, Inc. | ||
9 | * Matt Porter <mporter@kernel.crashing.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/rio.h> | ||
18 | #include <linux/rio_drv.h> | ||
19 | #include <linux/rio_ids.h> | ||
20 | #include "../rio.h" | ||
21 | |||
22 | static int | ||
23 | tsi500_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) | ||
24 | { | ||
25 | int i; | ||
26 | u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3); | ||
27 | u32 result; | ||
28 | |||
29 | if (table == 0xff) { | ||
30 | rio_mport_read_config_32(mport, destid, hopcount, offset, &result); | ||
31 | result &= ~(0xf << (4*(route_destid & 0x7))); | ||
32 | for (i=0;i<4;i++) | ||
33 | rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*i), result | (route_port << (4*(route_destid & 0x7)))); | ||
34 | } | ||
35 | else { | ||
36 | rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result); | ||
37 | result &= ~(0xf << (4*(route_destid & 0x7))); | ||
38 | rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*table), result | (route_port << (4*(route_destid & 0x7)))); | ||
39 | } | ||
40 | |||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | static int | ||
45 | tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) | ||
46 | { | ||
47 | int ret = 0; | ||
48 | u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3); | ||
49 | u32 result; | ||
50 | |||
51 | if (table == 0xff) | ||
52 | rio_mport_read_config_32(mport, destid, hopcount, offset, &result); | ||
53 | else | ||
54 | rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result); | ||
55 | |||
56 | result &= 0xf << (4*(route_destid & 0x7)); | ||
57 | *route_port = result >> (4*(route_destid & 0x7)); | ||
58 | if (*route_port > 3) | ||
59 | ret = -1; | ||
60 | |||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | static int tsi500_switch_init(struct rio_dev *rdev, int do_enum) | ||
65 | { | ||
66 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | ||
67 | rdev->rswitch->add_entry = tsi500_route_add_entry; | ||
68 | rdev->rswitch->get_entry = tsi500_route_get_entry; | ||
69 | rdev->rswitch->clr_table = NULL; | ||
70 | rdev->rswitch->set_domain = NULL; | ||
71 | rdev->rswitch->get_domain = NULL; | ||
72 | rdev->rswitch->em_init = NULL; | ||
73 | rdev->rswitch->em_handle = NULL; | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init); | ||
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c index 3994c00aa01f..8a43561b9d17 100644 --- a/drivers/rapidio/switches/tsi568.c +++ b/drivers/rapidio/switches/tsi568.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/rio_drv.h> | 19 | #include <linux/rio_drv.h> |
20 | #include <linux/rio_ids.h> | 20 | #include <linux/rio_ids.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/module.h> | ||
22 | #include "../rio.h" | 23 | #include "../rio.h" |
23 | 24 | ||
24 | /* Global (broadcast) route registers */ | 25 | /* Global (broadcast) route registers */ |
@@ -129,18 +130,70 @@ tsi568_em_init(struct rio_dev *rdev) | |||
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
131 | 132 | ||
132 | static int tsi568_switch_init(struct rio_dev *rdev, int do_enum) | 133 | static struct rio_switch_ops tsi568_switch_ops = { |
134 | .owner = THIS_MODULE, | ||
135 | .add_entry = tsi568_route_add_entry, | ||
136 | .get_entry = tsi568_route_get_entry, | ||
137 | .clr_table = tsi568_route_clr_table, | ||
138 | .set_domain = NULL, | ||
139 | .get_domain = NULL, | ||
140 | .em_init = tsi568_em_init, | ||
141 | .em_handle = NULL, | ||
142 | }; | ||
143 | |||
144 | static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id) | ||
133 | { | 145 | { |
134 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | 146 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
135 | rdev->rswitch->add_entry = tsi568_route_add_entry; | ||
136 | rdev->rswitch->get_entry = tsi568_route_get_entry; | ||
137 | rdev->rswitch->clr_table = tsi568_route_clr_table; | ||
138 | rdev->rswitch->set_domain = NULL; | ||
139 | rdev->rswitch->get_domain = NULL; | ||
140 | rdev->rswitch->em_init = tsi568_em_init; | ||
141 | rdev->rswitch->em_handle = NULL; | ||
142 | 147 | ||
148 | spin_lock(&rdev->rswitch->lock); | ||
149 | |||
150 | if (rdev->rswitch->ops) { | ||
151 | spin_unlock(&rdev->rswitch->lock); | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | |||
155 | rdev->rswitch->ops = &tsi568_switch_ops; | ||
156 | spin_unlock(&rdev->rswitch->lock); | ||
143 | return 0; | 157 | return 0; |
144 | } | 158 | } |
145 | 159 | ||
146 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init); | 160 | static void tsi568_remove(struct rio_dev *rdev) |
161 | { | ||
162 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | ||
163 | spin_lock(&rdev->rswitch->lock); | ||
164 | if (rdev->rswitch->ops != &tsi568_switch_ops) { | ||
165 | spin_unlock(&rdev->rswitch->lock); | ||
166 | return; | ||
167 | } | ||
168 | rdev->rswitch->ops = NULL; | ||
169 | spin_unlock(&rdev->rswitch->lock); | ||
170 | } | ||
171 | |||
172 | static struct rio_device_id tsi568_id_table[] = { | ||
173 | {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)}, | ||
174 | { 0, } /* terminate list */ | ||
175 | }; | ||
176 | |||
177 | static struct rio_driver tsi568_driver = { | ||
178 | .name = "tsi568", | ||
179 | .id_table = tsi568_id_table, | ||
180 | .probe = tsi568_probe, | ||
181 | .remove = tsi568_remove, | ||
182 | }; | ||
183 | |||
184 | static int __init tsi568_init(void) | ||
185 | { | ||
186 | return rio_register_driver(&tsi568_driver); | ||
187 | } | ||
188 | |||
189 | static void __exit tsi568_exit(void) | ||
190 | { | ||
191 | rio_unregister_driver(&tsi568_driver); | ||
192 | } | ||
193 | |||
194 | device_initcall(tsi568_init); | ||
195 | module_exit(tsi568_exit); | ||
196 | |||
197 | MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver"); | ||
198 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
199 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index db8b8028988d..42c8b014fe15 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/rio_drv.h> | 19 | #include <linux/rio_drv.h> |
20 | #include <linux/rio_ids.h> | 20 | #include <linux/rio_ids.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/module.h> | ||
22 | #include "../rio.h" | 23 | #include "../rio.h" |
23 | 24 | ||
24 | /* Global (broadcast) route registers */ | 25 | /* Global (broadcast) route registers */ |
@@ -292,27 +293,79 @@ exit_es: | |||
292 | return 0; | 293 | return 0; |
293 | } | 294 | } |
294 | 295 | ||
295 | static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) | 296 | static struct rio_switch_ops tsi57x_switch_ops = { |
297 | .owner = THIS_MODULE, | ||
298 | .add_entry = tsi57x_route_add_entry, | ||
299 | .get_entry = tsi57x_route_get_entry, | ||
300 | .clr_table = tsi57x_route_clr_table, | ||
301 | .set_domain = tsi57x_set_domain, | ||
302 | .get_domain = tsi57x_get_domain, | ||
303 | .em_init = tsi57x_em_init, | ||
304 | .em_handle = tsi57x_em_handler, | ||
305 | }; | ||
306 | |||
307 | static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id) | ||
296 | { | 308 | { |
297 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | 309 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
298 | rdev->rswitch->add_entry = tsi57x_route_add_entry; | 310 | |
299 | rdev->rswitch->get_entry = tsi57x_route_get_entry; | 311 | spin_lock(&rdev->rswitch->lock); |
300 | rdev->rswitch->clr_table = tsi57x_route_clr_table; | 312 | |
301 | rdev->rswitch->set_domain = tsi57x_set_domain; | 313 | if (rdev->rswitch->ops) { |
302 | rdev->rswitch->get_domain = tsi57x_get_domain; | 314 | spin_unlock(&rdev->rswitch->lock); |
303 | rdev->rswitch->em_init = tsi57x_em_init; | 315 | return -EINVAL; |
304 | rdev->rswitch->em_handle = tsi57x_em_handler; | 316 | } |
305 | 317 | rdev->rswitch->ops = &tsi57x_switch_ops; | |
306 | if (do_enum) { | 318 | |
319 | if (rdev->do_enum) { | ||
307 | /* Ensure that default routing is disabled on startup */ | 320 | /* Ensure that default routing is disabled on startup */ |
308 | rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, | 321 | rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, |
309 | RIO_INVALID_ROUTE); | 322 | RIO_INVALID_ROUTE); |
310 | } | 323 | } |
311 | 324 | ||
325 | spin_unlock(&rdev->rswitch->lock); | ||
312 | return 0; | 326 | return 0; |
313 | } | 327 | } |
314 | 328 | ||
315 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init); | 329 | static void tsi57x_remove(struct rio_dev *rdev) |
316 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init); | 330 | { |
317 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init); | 331 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); |
318 | DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init); | 332 | spin_lock(&rdev->rswitch->lock); |
333 | if (rdev->rswitch->ops != &tsi57x_switch_ops) { | ||
334 | spin_unlock(&rdev->rswitch->lock); | ||
335 | return; | ||
336 | } | ||
337 | rdev->rswitch->ops = NULL; | ||
338 | spin_unlock(&rdev->rswitch->lock); | ||
339 | } | ||
340 | |||
341 | static struct rio_device_id tsi57x_id_table[] = { | ||
342 | {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)}, | ||
343 | {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)}, | ||
344 | {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)}, | ||
345 | {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)}, | ||
346 | { 0, } /* terminate list */ | ||
347 | }; | ||
348 | |||
349 | static struct rio_driver tsi57x_driver = { | ||
350 | .name = "tsi57x", | ||
351 | .id_table = tsi57x_id_table, | ||
352 | .probe = tsi57x_probe, | ||
353 | .remove = tsi57x_remove, | ||
354 | }; | ||
355 | |||
356 | static int __init tsi57x_init(void) | ||
357 | { | ||
358 | return rio_register_driver(&tsi57x_driver); | ||
359 | } | ||
360 | |||
361 | static void __exit tsi57x_exit(void) | ||
362 | { | ||
363 | rio_unregister_driver(&tsi57x_driver); | ||
364 | } | ||
365 | |||
366 | device_initcall(tsi57x_init); | ||
367 | module_exit(tsi57x_exit); | ||
368 | |||
369 | MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver"); | ||
370 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
371 | MODULE_LICENSE("GPL"); | ||
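The tsi568 and tsi57x hunks above follow the same conversion: the per-switch callbacks move into a static struct rio_switch_ops, and the driver attaches them in probe() under rdev->rswitch->lock instead of relying on DECLARE_RIO_SWITCH_INIT(). A minimal sketch of that registration model, with hypothetical my_switch names and device IDs standing in for a real part (the routing/error-management callbacks are omitted for brevity):

#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/module.h>

static struct rio_switch_ops my_switch_ops = {
	.owner = THIS_MODULE,
	/* .add_entry, .get_entry, .clr_table, .em_init, ... would go here */
};

static int my_switch_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops) {		/* already claimed by another driver */
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}
	rdev->rswitch->ops = &my_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

static void my_switch_remove(struct rio_dev *rdev)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops == &my_switch_ops)
		rdev->rswitch->ops = NULL;	/* only drop ops we installed */
	spin_unlock(&rdev->rswitch->lock);
}

static struct rio_device_id my_switch_id_table[] = {
	{RIO_DEVICE(0x1234, 0x5678)},		/* hypothetical device/vendor IDs */
	{ 0, }					/* terminate list */
};

static struct rio_driver my_switch_driver = {
	.name     = "my_switch",
	.id_table = my_switch_id_table,
	.probe    = my_switch_probe,
	.remove   = my_switch_remove,
};

static int __init my_switch_init(void)
{
	return rio_register_driver(&my_switch_driver);
}
device_initcall(my_switch_init);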
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index b9838130a7b0..9e3498bf302b 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -313,6 +313,15 @@ config RTC_DRV_PALMAS | |||
313 | This driver can also be built as a module. If so, the module | 313 | This driver can also be built as a module. If so, the module |
314 | will be called rtc-palma. | 314 | will be called rtc-palma. |
315 | 315 | ||
316 | config RTC_DRV_PCF2127 | ||
317 | tristate "NXP PCF2127" | ||
318 | help | ||
319 | If you say yes here you get support for the NXP PCF2127/29 RTC | ||
320 | chips. | ||
321 | |||
322 | This driver can also be built as a module. If so, the module | ||
323 | will be called rtc-pcf2127. | ||
324 | |||
316 | config RTC_DRV_PCF8523 | 325 | config RTC_DRV_PCF8523 |
317 | tristate "NXP PCF8523" | 326 | tristate "NXP PCF8523" |
318 | help | 327 | help |
@@ -1233,6 +1242,13 @@ config RTC_DRV_SNVS | |||
1233 | This driver can also be built as a module, if so, the module | 1242 | This driver can also be built as a module, if so, the module |
1234 | will be called "rtc-snvs". | 1243 | will be called "rtc-snvs". |
1235 | 1244 | ||
1245 | config RTC_DRV_SIRFSOC | ||
1246 | tristate "SiRFSOC RTC" | ||
1247 | depends on ARCH_SIRF | ||
1248 | help | ||
1249 | Say "yes" here to support the real time clock on SiRF SOC chips. | ||
1250 | This driver can also be built as a module called rtc-sirfsoc. | ||
1251 | |||
1236 | comment "HID Sensor RTC drivers" | 1252 | comment "HID Sensor RTC drivers" |
1237 | 1253 | ||
1238 | config RTC_DRV_HID_SENSOR_TIME | 1254 | config RTC_DRV_HID_SENSOR_TIME |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index c33f86f1a69b..d3b4488f48f2 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -83,6 +83,7 @@ obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o | |||
83 | obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o | 83 | obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o |
84 | obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o | 84 | obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o |
85 | obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o | 85 | obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o |
86 | obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o | ||
86 | obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o | 87 | obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o |
87 | obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o | 88 | obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o |
88 | obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o | 89 | obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o |
@@ -128,3 +129,4 @@ obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o | |||
128 | obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o | 129 | obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o |
129 | obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o | 130 | obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o |
130 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o | 131 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o |
132 | obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o | ||
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 66385402d20e..02426812bebc 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
@@ -38,7 +38,7 @@ static void rtc_device_release(struct device *dev) | |||
38 | int rtc_hctosys_ret = -ENODEV; | 38 | int rtc_hctosys_ret = -ENODEV; |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #if defined(CONFIG_PM) && defined(CONFIG_RTC_HCTOSYS_DEVICE) | 41 | #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE) |
42 | /* | 42 | /* |
43 | * On suspend(), measure the delta between one RTC and the | 43 | * On suspend(), measure the delta between one RTC and the |
44 | * system's wall clock; restore it on resume(). | 44 | * system's wall clock; restore it on resume(). |
@@ -47,7 +47,7 @@ int rtc_hctosys_ret = -ENODEV; | |||
47 | static struct timespec old_rtc, old_system, old_delta; | 47 | static struct timespec old_rtc, old_system, old_delta; |
48 | 48 | ||
49 | 49 | ||
50 | static int rtc_suspend(struct device *dev, pm_message_t mesg) | 50 | static int rtc_suspend(struct device *dev) |
51 | { | 51 | { |
52 | struct rtc_device *rtc = to_rtc_device(dev); | 52 | struct rtc_device *rtc = to_rtc_device(dev); |
53 | struct rtc_time tm; | 53 | struct rtc_time tm; |
@@ -135,9 +135,10 @@ static int rtc_resume(struct device *dev) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume); | ||
139 | #define RTC_CLASS_DEV_PM_OPS (&rtc_class_dev_pm_ops) | ||
138 | #else | 140 | #else |
139 | #define rtc_suspend NULL | 141 | #define RTC_CLASS_DEV_PM_OPS NULL |
140 | #define rtc_resume NULL | ||
141 | #endif | 142 | #endif |
142 | 143 | ||
143 | 144 | ||
@@ -336,8 +337,7 @@ static int __init rtc_init(void) | |||
336 | pr_err("couldn't create class\n"); | 337 | pr_err("couldn't create class\n"); |
337 | return PTR_ERR(rtc_class); | 338 | return PTR_ERR(rtc_class); |
338 | } | 339 | } |
339 | rtc_class->suspend = rtc_suspend; | 340 | rtc_class->pm = RTC_CLASS_DEV_PM_OPS; |
340 | rtc_class->resume = rtc_resume; | ||
341 | rtc_dev_init(); | 341 | rtc_dev_init(); |
342 | rtc_sysfs_init(rtc_class); | 342 | rtc_sysfs_init(rtc_class); |
343 | return 0; | 343 | return 0; |
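The class.c change above drops the legacy class->suspend/resume function pointers in favour of a dev_pm_ops table gated on CONFIG_PM_SLEEP. A minimal sketch of the pattern (the foo_* names are hypothetical); SIMPLE_DEV_PM_OPS() fills in the system-sleep callbacks and leaves the runtime-PM ones unset:

#include <linux/device.h>
#include <linux/pm.h>

#if defined(CONFIG_PM_SLEEP)
static int foo_suspend(struct device *dev)
{
	/* save whatever state must survive system sleep */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore that state */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

/* later, when the class is created: foo_class->pm = FOO_PM_OPS; */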
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 42bd57da239d..72c5cdbe0791 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -109,9 +109,9 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) | |||
109 | err = rtc->ops->set_time(rtc->dev.parent, | 109 | err = rtc->ops->set_time(rtc->dev.parent, |
110 | &new); | 110 | &new); |
111 | } | 111 | } |
112 | } | 112 | } else { |
113 | else | ||
114 | err = -EINVAL; | 113 | err = -EINVAL; |
114 | } | ||
115 | 115 | ||
116 | mutex_unlock(&rtc->ops_lock); | 116 | mutex_unlock(&rtc->ops_lock); |
117 | /* A timer might have just expired */ | 117 | /* A timer might have just expired */ |
@@ -367,14 +367,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
367 | err = mutex_lock_interruptible(&rtc->ops_lock); | 367 | err = mutex_lock_interruptible(&rtc->ops_lock); |
368 | if (err) | 368 | if (err) |
369 | return err; | 369 | return err; |
370 | if (rtc->aie_timer.enabled) { | 370 | if (rtc->aie_timer.enabled) |
371 | rtc_timer_remove(rtc, &rtc->aie_timer); | 371 | rtc_timer_remove(rtc, &rtc->aie_timer); |
372 | } | 372 | |
373 | rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); | 373 | rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); |
374 | rtc->aie_timer.period = ktime_set(0, 0); | 374 | rtc->aie_timer.period = ktime_set(0, 0); |
375 | if (alarm->enabled) { | 375 | if (alarm->enabled) |
376 | err = rtc_timer_enqueue(rtc, &rtc->aie_timer); | 376 | err = rtc_timer_enqueue(rtc, &rtc->aie_timer); |
377 | } | 377 | |
378 | mutex_unlock(&rtc->ops_lock); | 378 | mutex_unlock(&rtc->ops_lock); |
379 | return err; | 379 | return err; |
380 | } | 380 | } |
@@ -698,9 +698,9 @@ retry: | |||
698 | spin_lock_irqsave(&rtc->irq_task_lock, flags); | 698 | spin_lock_irqsave(&rtc->irq_task_lock, flags); |
699 | if (rtc->irq_task != NULL && task == NULL) | 699 | if (rtc->irq_task != NULL && task == NULL) |
700 | err = -EBUSY; | 700 | err = -EBUSY; |
701 | if (rtc->irq_task != task) | 701 | else if (rtc->irq_task != task) |
702 | err = -EACCES; | 702 | err = -EACCES; |
703 | if (!err) { | 703 | else { |
704 | if (rtc_update_hrtimer(rtc, enabled) < 0) { | 704 | if (rtc_update_hrtimer(rtc, enabled) < 0) { |
705 | spin_unlock_irqrestore(&rtc->irq_task_lock, flags); | 705 | spin_unlock_irqrestore(&rtc->irq_task_lock, flags); |
706 | cpu_relax(); | 706 | cpu_relax(); |
@@ -734,9 +734,9 @@ retry: | |||
734 | spin_lock_irqsave(&rtc->irq_task_lock, flags); | 734 | spin_lock_irqsave(&rtc->irq_task_lock, flags); |
735 | if (rtc->irq_task != NULL && task == NULL) | 735 | if (rtc->irq_task != NULL && task == NULL) |
736 | err = -EBUSY; | 736 | err = -EBUSY; |
737 | if (rtc->irq_task != task) | 737 | else if (rtc->irq_task != task) |
738 | err = -EACCES; | 738 | err = -EACCES; |
739 | if (!err) { | 739 | else { |
740 | rtc->irq_freq = freq; | 740 | rtc->irq_freq = freq; |
741 | if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) { | 741 | if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) { |
742 | spin_unlock_irqrestore(&rtc->irq_task_lock, flags); | 742 | spin_unlock_irqrestore(&rtc->irq_task_lock, flags); |
@@ -891,7 +891,7 @@ again: | |||
891 | * | 891 | * |
892 | * Kernel interface to initializing an rtc_timer. | 892 | * Kernel interface to initializing an rtc_timer. |
893 | */ | 893 | */ |
894 | void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data) | 894 | void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data) |
895 | { | 895 | { |
896 | timerqueue_init(&timer->node); | 896 | timerqueue_init(&timer->node); |
897 | timer->enabled = 0; | 897 | timer->enabled = 0; |
@@ -907,7 +907,7 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data) | |||
907 | * | 907 | * |
908 | * Kernel interface to set an rtc_timer | 908 | * Kernel interface to set an rtc_timer |
909 | */ | 909 | */ |
910 | int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, | 910 | int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, |
911 | ktime_t expires, ktime_t period) | 911 | ktime_t expires, ktime_t period) |
912 | { | 912 | { |
913 | int ret = 0; | 913 | int ret = 0; |
@@ -930,7 +930,7 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, | |||
930 | * | 930 | * |
931 | * Kernel interface to cancel an rtc_timer | 931 | * Kernel interface to cancel an rtc_timer |
932 | */ | 932 | */ |
933 | int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer) | 933 | int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer) |
934 | { | 934 | { |
935 | int ret = 0; | 935 | int ret = 0; |
936 | mutex_lock(&rtc->ops_lock); | 936 | mutex_lock(&rtc->ops_lock); |
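The irq_set_state/irq_set_freq changes above appear to be more than a style cleanup: with two independent if statements, the task == NULL case first set err to -EBUSY, and the following rtc->irq_task != task test (also true in that case) immediately overwrote it with -EACCES. In sketch form:

	/* old form: both conditions can fire, so -EBUSY is lost */
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	if (rtc->irq_task != task)
		err = -EACCES;

	/* new form: the chain stops at the first matching case */
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	else if (rtc->irq_task != task)
		err = -EACCES;
	else {
		/* ownership checks passed; go on to program the timer */
	}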
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c index f3742f364eb8..354c937a5866 100644 --- a/drivers/rtc/rtc-88pm80x.c +++ b/drivers/rtc/rtc-88pm80x.c | |||
@@ -345,7 +345,6 @@ out: | |||
345 | static int pm80x_rtc_remove(struct platform_device *pdev) | 345 | static int pm80x_rtc_remove(struct platform_device *pdev) |
346 | { | 346 | { |
347 | struct pm80x_rtc_info *info = platform_get_drvdata(pdev); | 347 | struct pm80x_rtc_info *info = platform_get_drvdata(pdev); |
348 | platform_set_drvdata(pdev, NULL); | ||
349 | pm80x_free_irq(info->chip, info->irq, info); | 348 | pm80x_free_irq(info->chip, info->irq, info); |
350 | return 0; | 349 | return 0; |
351 | } | 350 | } |
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 0f2b91bfee37..4e30c85728e5 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c | |||
@@ -418,7 +418,6 @@ static int pm860x_rtc_remove(struct platform_device *pdev) | |||
418 | pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); | 418 | pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); |
419 | #endif /* VRTC_CALIBRATION */ | 419 | #endif /* VRTC_CALIBRATION */ |
420 | 420 | ||
421 | platform_set_drvdata(pdev, NULL); | ||
422 | return 0; | 421 | return 0; |
423 | } | 422 | } |
424 | 423 | ||
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c index 47a4f2c4d30e..ff435343ba9f 100644 --- a/drivers/rtc/rtc-ab3100.c +++ b/drivers/rtc/rtc-ab3100.c | |||
@@ -240,18 +240,11 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) | |||
240 | return 0; | 240 | return 0; |
241 | } | 241 | } |
242 | 242 | ||
243 | static int __exit ab3100_rtc_remove(struct platform_device *pdev) | ||
244 | { | ||
245 | platform_set_drvdata(pdev, NULL); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static struct platform_driver ab3100_rtc_driver = { | 243 | static struct platform_driver ab3100_rtc_driver = { |
250 | .driver = { | 244 | .driver = { |
251 | .name = "ab3100-rtc", | 245 | .name = "ab3100-rtc", |
252 | .owner = THIS_MODULE, | 246 | .owner = THIS_MODULE, |
253 | }, | 247 | }, |
254 | .remove = __exit_p(ab3100_rtc_remove), | ||
255 | }; | 248 | }; |
256 | 249 | ||
257 | module_platform_driver_probe(ab3100_rtc_driver, ab3100_rtc_probe); | 250 | module_platform_driver_probe(ab3100_rtc_driver, ab3100_rtc_probe); |
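Several of the RTC hunks in this series (ab3100 above, and bq32k, bq4802, da9052, da9055, dm355evm, ds1216, ds1286, ds1302 below) delete remove() callbacks whose only job was to clear driver data. A callback of the form sketched below adds nothing, since the driver core takes care of the drvdata lifetime when a device is unbound, so the whole function and the .remove = line pointing at it can simply go (foo_rtc_remove is a hypothetical name):

#include <linux/platform_device.h>

static int foo_rtc_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);	/* redundant: the core handles this */
	return 0;
}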
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c index 63cfa314a39f..727e2f5d14d9 100644 --- a/drivers/rtc/rtc-ab8500.c +++ b/drivers/rtc/rtc-ab8500.c | |||
@@ -35,6 +35,10 @@ | |||
35 | #define AB8500_RTC_FORCE_BKUP_REG 0x0D | 35 | #define AB8500_RTC_FORCE_BKUP_REG 0x0D |
36 | #define AB8500_RTC_CALIB_REG 0x0E | 36 | #define AB8500_RTC_CALIB_REG 0x0E |
37 | #define AB8500_RTC_SWITCH_STAT_REG 0x0F | 37 | #define AB8500_RTC_SWITCH_STAT_REG 0x0F |
38 | #define AB8540_RTC_ALRM_SEC 0x22 | ||
39 | #define AB8540_RTC_ALRM_MIN_LOW_REG 0x23 | ||
40 | #define AB8540_RTC_ALRM_MIN_MID_REG 0x24 | ||
41 | #define AB8540_RTC_ALRM_MIN_HI_REG 0x25 | ||
38 | 42 | ||
39 | /* RtcReadRequest bits */ | 43 | /* RtcReadRequest bits */ |
40 | #define RTC_READ_REQUEST 0x01 | 44 | #define RTC_READ_REQUEST 0x01 |
@@ -58,6 +62,11 @@ static const u8 ab8500_rtc_alarm_regs[] = { | |||
58 | AB8500_RTC_ALRM_MIN_LOW_REG | 62 | AB8500_RTC_ALRM_MIN_LOW_REG |
59 | }; | 63 | }; |
60 | 64 | ||
65 | static const u8 ab8540_rtc_alarm_regs[] = { | ||
66 | AB8540_RTC_ALRM_MIN_HI_REG, AB8540_RTC_ALRM_MIN_MID_REG, | ||
67 | AB8540_RTC_ALRM_MIN_LOW_REG, AB8540_RTC_ALRM_SEC | ||
68 | }; | ||
69 | |||
61 | /* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ | 70 | /* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ |
62 | static unsigned long get_elapsed_seconds(int year) | 71 | static unsigned long get_elapsed_seconds(int year) |
63 | { | 72 | { |
@@ -267,6 +276,42 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
267 | return ab8500_rtc_irq_enable(dev, alarm->enabled); | 276 | return ab8500_rtc_irq_enable(dev, alarm->enabled); |
268 | } | 277 | } |
269 | 278 | ||
279 | static int ab8540_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | ||
280 | { | ||
281 | int retval, i; | ||
282 | unsigned char buf[ARRAY_SIZE(ab8540_rtc_alarm_regs)]; | ||
283 | unsigned long mins, secs = 0; | ||
284 | |||
285 | if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { | ||
286 | dev_dbg(dev, "year should be equal to or greater than %d\n", | ||
287 | AB8500_RTC_EPOCH); | ||
288 | return -EINVAL; | ||
289 | } | ||
290 | |||
291 | /* Get the number of seconds since 1970 */ | ||
292 | rtc_tm_to_time(&alarm->time, &secs); | ||
293 | |||
294 | /* | ||
295 | * Convert it to the number of seconds since 01-01-2000 00:00:00 | ||
296 | */ | ||
297 | secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); | ||
298 | mins = secs / 60; | ||
299 | |||
300 | buf[3] = secs % 60; | ||
301 | buf[2] = mins & 0xFF; | ||
302 | buf[1] = (mins >> 8) & 0xFF; | ||
303 | buf[0] = (mins >> 16) & 0xFF; | ||
304 | |||
305 | /* Set the alarm time */ | ||
306 | for (i = 0; i < ARRAY_SIZE(ab8540_rtc_alarm_regs); i++) { | ||
307 | retval = abx500_set_register_interruptible(dev, AB8500_RTC, | ||
308 | ab8540_rtc_alarm_regs[i], buf[i]); | ||
309 | if (retval < 0) | ||
310 | return retval; | ||
311 | } | ||
312 | |||
313 | return ab8500_rtc_irq_enable(dev, alarm->enabled); | ||
314 | } | ||
270 | 315 | ||
271 | static int ab8500_rtc_set_calibration(struct device *dev, int calibration) | 316 | static int ab8500_rtc_set_calibration(struct device *dev, int calibration) |
272 | { | 317 | { |
@@ -389,8 +434,22 @@ static const struct rtc_class_ops ab8500_rtc_ops = { | |||
389 | .alarm_irq_enable = ab8500_rtc_irq_enable, | 434 | .alarm_irq_enable = ab8500_rtc_irq_enable, |
390 | }; | 435 | }; |
391 | 436 | ||
437 | static const struct rtc_class_ops ab8540_rtc_ops = { | ||
438 | .read_time = ab8500_rtc_read_time, | ||
439 | .set_time = ab8500_rtc_set_time, | ||
440 | .read_alarm = ab8500_rtc_read_alarm, | ||
441 | .set_alarm = ab8540_rtc_set_alarm, | ||
442 | .alarm_irq_enable = ab8500_rtc_irq_enable, | ||
443 | }; | ||
444 | |||
445 | static struct platform_device_id ab85xx_rtc_ids[] = { | ||
446 | { "ab8500-rtc", (kernel_ulong_t)&ab8500_rtc_ops, }, | ||
447 | { "ab8540-rtc", (kernel_ulong_t)&ab8540_rtc_ops, }, | ||
448 | }; | ||
449 | |||
392 | static int ab8500_rtc_probe(struct platform_device *pdev) | 450 | static int ab8500_rtc_probe(struct platform_device *pdev) |
393 | { | 451 | { |
452 | const struct platform_device_id *platid = platform_get_device_id(pdev); | ||
394 | int err; | 453 | int err; |
395 | struct rtc_device *rtc; | 454 | struct rtc_device *rtc; |
396 | u8 rtc_ctrl; | 455 | u8 rtc_ctrl; |
@@ -423,7 +482,8 @@ static int ab8500_rtc_probe(struct platform_device *pdev) | |||
423 | device_init_wakeup(&pdev->dev, true); | 482 | device_init_wakeup(&pdev->dev, true); |
424 | 483 | ||
425 | rtc = devm_rtc_device_register(&pdev->dev, "ab8500-rtc", | 484 | rtc = devm_rtc_device_register(&pdev->dev, "ab8500-rtc", |
426 | &ab8500_rtc_ops, THIS_MODULE); | 485 | (struct rtc_class_ops *)platid->driver_data, |
486 | THIS_MODULE); | ||
427 | if (IS_ERR(rtc)) { | 487 | if (IS_ERR(rtc)) { |
428 | dev_err(&pdev->dev, "Registration failed\n"); | 488 | dev_err(&pdev->dev, "Registration failed\n"); |
429 | err = PTR_ERR(rtc); | 489 | err = PTR_ERR(rtc); |
@@ -451,8 +511,6 @@ static int ab8500_rtc_remove(struct platform_device *pdev) | |||
451 | { | 511 | { |
452 | ab8500_sysfs_rtc_unregister(&pdev->dev); | 512 | ab8500_sysfs_rtc_unregister(&pdev->dev); |
453 | 513 | ||
454 | platform_set_drvdata(pdev, NULL); | ||
455 | |||
456 | return 0; | 514 | return 0; |
457 | } | 515 | } |
458 | 516 | ||
@@ -463,6 +521,7 @@ static struct platform_driver ab8500_rtc_driver = { | |||
463 | }, | 521 | }, |
464 | .probe = ab8500_rtc_probe, | 522 | .probe = ab8500_rtc_probe, |
465 | .remove = ab8500_rtc_remove, | 523 | .remove = ab8500_rtc_remove, |
524 | .id_table = ab85xx_rtc_ids, | ||
466 | }; | 525 | }; |
467 | 526 | ||
468 | module_platform_driver(ab8500_rtc_driver); | 527 | module_platform_driver(ab8500_rtc_driver); |
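The ab8500 change above is a common way to support chip variants from one platform driver: each id-table entry carries a pointer to its rtc_class_ops in driver_data, and probe() fetches the matched entry with platform_get_device_id(). A reduced sketch of the idea, using hypothetical foo names:

#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/err.h>
#include <linux/module.h>

static const struct rtc_class_ops foo_v1_ops = { /* .read_time, .set_time, ... */ };
static const struct rtc_class_ops foo_v2_ops = { /* variant-specific callbacks */ };

static struct platform_device_id foo_rtc_ids[] = {
	{ "foo-v1-rtc", (kernel_ulong_t)&foo_v1_ops, },
	{ "foo-v2-rtc", (kernel_ulong_t)&foo_v2_ops, },
	{ }
};

static int foo_rtc_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct rtc_device *rtc;

	/* register with whichever ops the matched id entry points at */
	rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
			(struct rtc_class_ops *)id->driver_data, THIS_MODULE);
	return IS_ERR(rtc) ? PTR_ERR(rtc) : 0;
}

static struct platform_driver foo_rtc_driver = {
	.driver   = { .name = "foo-rtc", .owner = THIS_MODULE },
	.probe    = foo_rtc_probe,
	.id_table = foo_rtc_ids,
};
module_platform_driver(foo_rtc_driver);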
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index f47fbb5eee8b..3161ab5263ed 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c | |||
@@ -141,7 +141,7 @@ static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
141 | 141 | ||
142 | spin_lock_irq(&rtc->lock); | 142 | spin_lock_irq(&rtc->lock); |
143 | 143 | ||
144 | if(enabled) { | 144 | if (enabled) { |
145 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { | 145 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { |
146 | ret = -EINVAL; | 146 | ret = -EINVAL; |
147 | goto out; | 147 | goto out; |
@@ -212,23 +212,20 @@ static int __init at32_rtc_probe(struct platform_device *pdev) | |||
212 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 212 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
213 | if (!regs) { | 213 | if (!regs) { |
214 | dev_dbg(&pdev->dev, "no mmio resource defined\n"); | 214 | dev_dbg(&pdev->dev, "no mmio resource defined\n"); |
215 | ret = -ENXIO; | 215 | return -ENXIO; |
216 | goto out; | ||
217 | } | 216 | } |
218 | 217 | ||
219 | irq = platform_get_irq(pdev, 0); | 218 | irq = platform_get_irq(pdev, 0); |
220 | if (irq <= 0) { | 219 | if (irq <= 0) { |
221 | dev_dbg(&pdev->dev, "could not get irq\n"); | 220 | dev_dbg(&pdev->dev, "could not get irq\n"); |
222 | ret = -ENXIO; | 221 | return -ENXIO; |
223 | goto out; | ||
224 | } | 222 | } |
225 | 223 | ||
226 | rtc->irq = irq; | 224 | rtc->irq = irq; |
227 | rtc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); | 225 | rtc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); |
228 | if (!rtc->regs) { | 226 | if (!rtc->regs) { |
229 | ret = -ENOMEM; | ||
230 | dev_dbg(&pdev->dev, "could not map I/O memory\n"); | 227 | dev_dbg(&pdev->dev, "could not map I/O memory\n"); |
231 | goto out; | 228 | return -ENOMEM; |
232 | } | 229 | } |
233 | spin_lock_init(&rtc->lock); | 230 | spin_lock_init(&rtc->lock); |
234 | 231 | ||
@@ -249,7 +246,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev) | |||
249 | "rtc", rtc); | 246 | "rtc", rtc); |
250 | if (ret) { | 247 | if (ret) { |
251 | dev_dbg(&pdev->dev, "could not request irq %d\n", irq); | 248 | dev_dbg(&pdev->dev, "could not request irq %d\n", irq); |
252 | goto out; | 249 | return ret; |
253 | } | 250 | } |
254 | 251 | ||
255 | platform_set_drvdata(pdev, rtc); | 252 | platform_set_drvdata(pdev, rtc); |
@@ -258,8 +255,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev) | |||
258 | &at32_rtc_ops, THIS_MODULE); | 255 | &at32_rtc_ops, THIS_MODULE); |
259 | if (IS_ERR(rtc->rtc)) { | 256 | if (IS_ERR(rtc->rtc)) { |
260 | dev_dbg(&pdev->dev, "could not register rtc device\n"); | 257 | dev_dbg(&pdev->dev, "could not register rtc device\n"); |
261 | ret = PTR_ERR(rtc->rtc); | 258 | return PTR_ERR(rtc->rtc); |
262 | goto out; | ||
263 | } | 259 | } |
264 | 260 | ||
265 | device_init_wakeup(&pdev->dev, 1); | 261 | device_init_wakeup(&pdev->dev, 1); |
@@ -268,18 +264,12 @@ static int __init at32_rtc_probe(struct platform_device *pdev) | |||
268 | (unsigned long)rtc->regs, rtc->irq); | 264 | (unsigned long)rtc->regs, rtc->irq); |
269 | 265 | ||
270 | return 0; | 266 | return 0; |
271 | |||
272 | out: | ||
273 | platform_set_drvdata(pdev, NULL); | ||
274 | return ret; | ||
275 | } | 267 | } |
276 | 268 | ||
277 | static int __exit at32_rtc_remove(struct platform_device *pdev) | 269 | static int __exit at32_rtc_remove(struct platform_device *pdev) |
278 | { | 270 | { |
279 | device_init_wakeup(&pdev->dev, 0); | 271 | device_init_wakeup(&pdev->dev, 0); |
280 | 272 | ||
281 | platform_set_drvdata(pdev, NULL); | ||
282 | |||
283 | return 0; | 273 | return 0; |
284 | } | 274 | } |
285 | 275 | ||
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index f296f3f7db9b..741892632ae0 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -31,8 +31,7 @@ | |||
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
33 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
34 | 34 | #include <linux/uaccess.h> | |
35 | #include <asm/uaccess.h> | ||
36 | 35 | ||
37 | #include "rtc-at91rm9200.h" | 36 | #include "rtc-at91rm9200.h" |
38 | 37 | ||
@@ -439,7 +438,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
439 | 438 | ||
440 | rtc_device_unregister(rtc); | 439 | rtc_device_unregister(rtc); |
441 | iounmap(at91_rtc_regs); | 440 | iounmap(at91_rtc_regs); |
442 | platform_set_drvdata(pdev, NULL); | ||
443 | 441 | ||
444 | return 0; | 442 | return 0; |
445 | } | 443 | } |
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index b60a34cb145a..309b8b342d9c 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -324,16 +324,14 @@ static int at91_rtc_probe(struct platform_device *pdev) | |||
324 | rtc->rtt = devm_ioremap(&pdev->dev, r->start, resource_size(r)); | 324 | rtc->rtt = devm_ioremap(&pdev->dev, r->start, resource_size(r)); |
325 | if (!rtc->rtt) { | 325 | if (!rtc->rtt) { |
326 | dev_err(&pdev->dev, "failed to map registers, aborting.\n"); | 326 | dev_err(&pdev->dev, "failed to map registers, aborting.\n"); |
327 | ret = -ENOMEM; | 327 | return -ENOMEM; |
328 | goto fail; | ||
329 | } | 328 | } |
330 | 329 | ||
331 | rtc->gpbr = devm_ioremap(&pdev->dev, r_gpbr->start, | 330 | rtc->gpbr = devm_ioremap(&pdev->dev, r_gpbr->start, |
332 | resource_size(r_gpbr)); | 331 | resource_size(r_gpbr)); |
333 | if (!rtc->gpbr) { | 332 | if (!rtc->gpbr) { |
334 | dev_err(&pdev->dev, "failed to map gpbr registers, aborting.\n"); | 333 | dev_err(&pdev->dev, "failed to map gpbr registers, aborting.\n"); |
335 | ret = -ENOMEM; | 334 | return -ENOMEM; |
336 | goto fail; | ||
337 | } | 335 | } |
338 | 336 | ||
339 | mr = rtt_readl(rtc, MR); | 337 | mr = rtt_readl(rtc, MR); |
@@ -350,17 +348,15 @@ static int at91_rtc_probe(struct platform_device *pdev) | |||
350 | 348 | ||
351 | rtc->rtcdev = devm_rtc_device_register(&pdev->dev, pdev->name, | 349 | rtc->rtcdev = devm_rtc_device_register(&pdev->dev, pdev->name, |
352 | &at91_rtc_ops, THIS_MODULE); | 350 | &at91_rtc_ops, THIS_MODULE); |
353 | if (IS_ERR(rtc->rtcdev)) { | 351 | if (IS_ERR(rtc->rtcdev)) |
354 | ret = PTR_ERR(rtc->rtcdev); | 352 | return PTR_ERR(rtc->rtcdev); |
355 | goto fail; | ||
356 | } | ||
357 | 353 | ||
358 | /* register irq handler after we know what name we'll use */ | 354 | /* register irq handler after we know what name we'll use */ |
359 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, | 355 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, |
360 | IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); | 356 | IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); |
361 | if (ret) { | 357 | if (ret) { |
362 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); | 358 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); |
363 | goto fail; | 359 | return ret; |
364 | } | 360 | } |
365 | 361 | ||
366 | /* NOTE: sam9260 rev A silicon has a ROM bug which resets the | 362 | /* NOTE: sam9260 rev A silicon has a ROM bug which resets the |
@@ -374,10 +370,6 @@ static int at91_rtc_probe(struct platform_device *pdev) | |||
374 | dev_name(&rtc->rtcdev->dev)); | 370 | dev_name(&rtc->rtcdev->dev)); |
375 | 371 | ||
376 | return 0; | 372 | return 0; |
377 | |||
378 | fail: | ||
379 | platform_set_drvdata(pdev, NULL); | ||
380 | return ret; | ||
381 | } | 373 | } |
382 | 374 | ||
383 | /* | 375 | /* |
@@ -391,7 +383,6 @@ static int at91_rtc_remove(struct platform_device *pdev) | |||
391 | /* disable all interrupts */ | 383 | /* disable all interrupts */ |
392 | rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); | 384 | rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); |
393 | 385 | ||
394 | platform_set_drvdata(pdev, NULL); | ||
395 | return 0; | 386 | return 0; |
396 | } | 387 | } |
397 | 388 | ||
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c index 7995abc391fc..ed526a192ce0 100644 --- a/drivers/rtc/rtc-au1xxx.c +++ b/drivers/rtc/rtc-au1xxx.c | |||
@@ -116,19 +116,11 @@ out_err: | |||
116 | return ret; | 116 | return ret; |
117 | } | 117 | } |
118 | 118 | ||
119 | static int au1xtoy_rtc_remove(struct platform_device *pdev) | ||
120 | { | ||
121 | platform_set_drvdata(pdev, NULL); | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static struct platform_driver au1xrtc_driver = { | 119 | static struct platform_driver au1xrtc_driver = { |
127 | .driver = { | 120 | .driver = { |
128 | .name = "rtc-au1xxx", | 121 | .name = "rtc-au1xxx", |
129 | .owner = THIS_MODULE, | 122 | .owner = THIS_MODULE, |
130 | }, | 123 | }, |
131 | .remove = au1xtoy_rtc_remove, | ||
132 | }; | 124 | }; |
133 | 125 | ||
134 | module_platform_driver_probe(au1xrtc_driver, au1xtoy_rtc_probe); | 126 | module_platform_driver_probe(au1xrtc_driver, au1xtoy_rtc_probe); |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index ad44ec5dc29a..0c53f452849d 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -391,7 +391,6 @@ static int bfin_rtc_remove(struct platform_device *pdev) | |||
391 | struct device *dev = &pdev->dev; | 391 | struct device *dev = &pdev->dev; |
392 | 392 | ||
393 | bfin_rtc_reset(dev, 0); | 393 | bfin_rtc_reset(dev, 0); |
394 | platform_set_drvdata(pdev, NULL); | ||
395 | 394 | ||
396 | return 0; | 395 | return 0; |
397 | } | 396 | } |
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index fea78bc713ca..c74bf0dc52cc 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c | |||
@@ -163,11 +163,6 @@ static int bq32k_probe(struct i2c_client *client, | |||
163 | return 0; | 163 | return 0; |
164 | } | 164 | } |
165 | 165 | ||
166 | static int bq32k_remove(struct i2c_client *client) | ||
167 | { | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static const struct i2c_device_id bq32k_id[] = { | 166 | static const struct i2c_device_id bq32k_id[] = { |
172 | { "bq32000", 0 }, | 167 | { "bq32000", 0 }, |
173 | { } | 168 | { } |
@@ -180,7 +175,6 @@ static struct i2c_driver bq32k_driver = { | |||
180 | .owner = THIS_MODULE, | 175 | .owner = THIS_MODULE, |
181 | }, | 176 | }, |
182 | .probe = bq32k_probe, | 177 | .probe = bq32k_probe, |
183 | .remove = bq32k_remove, | ||
184 | .id_table = bq32k_id, | 178 | .id_table = bq32k_id, |
185 | }; | 179 | }; |
186 | 180 | ||
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c index af2886784a7b..fc0ff87aa5df 100644 --- a/drivers/rtc/rtc-bq4802.c +++ b/drivers/rtc/rtc-bq4802.c | |||
@@ -186,13 +186,6 @@ out: | |||
186 | 186 | ||
187 | } | 187 | } |
188 | 188 | ||
189 | static int bq4802_remove(struct platform_device *pdev) | ||
190 | { | ||
191 | platform_set_drvdata(pdev, NULL); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | /* work with hotplug and coldplug */ | 189 | /* work with hotplug and coldplug */ |
197 | MODULE_ALIAS("platform:rtc-bq4802"); | 190 | MODULE_ALIAS("platform:rtc-bq4802"); |
198 | 191 | ||
@@ -202,7 +195,6 @@ static struct platform_driver bq4802_driver = { | |||
202 | .owner = THIS_MODULE, | 195 | .owner = THIS_MODULE, |
203 | }, | 196 | }, |
204 | .probe = bq4802_probe, | 197 | .probe = bq4802_probe, |
205 | .remove = bq4802_remove, | ||
206 | }; | 198 | }; |
207 | 199 | ||
208 | module_platform_driver(bq4802_driver); | 200 | module_platform_driver(bq4802_driver); |
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index f1cb706445c7..be06d7150de5 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -326,7 +326,7 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask) | |||
326 | static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) | 326 | static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) |
327 | { | 327 | { |
328 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 328 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
329 | unsigned char mon, mday, hrs, min, sec, rtc_control; | 329 | unsigned char mon, mday, hrs, min, sec, rtc_control; |
330 | 330 | ||
331 | if (!is_valid_irq(cmos->irq)) | 331 | if (!is_valid_irq(cmos->irq)) |
332 | return -EIO; | 332 | return -EIO; |
@@ -556,17 +556,24 @@ static irqreturn_t cmos_interrupt(int irq, void *p) | |||
556 | rtc_control = CMOS_READ(RTC_CONTROL); | 556 | rtc_control = CMOS_READ(RTC_CONTROL); |
557 | if (is_hpet_enabled()) | 557 | if (is_hpet_enabled()) |
558 | irqstat = (unsigned long)irq & 0xF0; | 558 | irqstat = (unsigned long)irq & 0xF0; |
559 | irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; | 559 | |
560 | /* If we were suspended, RTC_CONTROL may not be accurate since the | ||
561 | * bios may have cleared it. | ||
562 | */ | ||
563 | if (!cmos_rtc.suspend_ctrl) | ||
564 | irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; | ||
565 | else | ||
566 | irqstat &= (cmos_rtc.suspend_ctrl & RTC_IRQMASK) | RTC_IRQF; | ||
560 | 567 | ||
561 | /* All Linux RTC alarms should be treated as if they were oneshot. | 568 | /* All Linux RTC alarms should be treated as if they were oneshot. |
562 | * Similar code may be needed in system wakeup paths, in case the | 569 | * Similar code may be needed in system wakeup paths, in case the |
563 | * alarm woke the system. | 570 | * alarm woke the system. |
564 | */ | 571 | */ |
565 | if (irqstat & RTC_AIE) { | 572 | if (irqstat & RTC_AIE) { |
573 | cmos_rtc.suspend_ctrl &= ~RTC_AIE; | ||
566 | rtc_control &= ~RTC_AIE; | 574 | rtc_control &= ~RTC_AIE; |
567 | CMOS_WRITE(rtc_control, RTC_CONTROL); | 575 | CMOS_WRITE(rtc_control, RTC_CONTROL); |
568 | hpet_mask_rtc_irq_bit(RTC_AIE); | 576 | hpet_mask_rtc_irq_bit(RTC_AIE); |
569 | |||
570 | CMOS_READ(RTC_INTR_FLAGS); | 577 | CMOS_READ(RTC_INTR_FLAGS); |
571 | } | 578 | } |
572 | spin_unlock(&rtc_lock); | 579 | spin_unlock(&rtc_lock); |
@@ -691,7 +698,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) | |||
691 | /* FIXME: | 698 | /* FIXME: |
692 | * <asm-generic/rtc.h> doesn't know 12-hour mode either. | 699 | * <asm-generic/rtc.h> doesn't know 12-hour mode either. |
693 | */ | 700 | */ |
694 | if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) { | 701 | if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) { |
695 | dev_warn(dev, "only 24-hr supported\n"); | 702 | dev_warn(dev, "only 24-hr supported\n"); |
696 | retval = -ENXIO; | 703 | retval = -ENXIO; |
697 | goto cleanup1; | 704 | goto cleanup1; |
@@ -839,21 +846,23 @@ static inline int cmos_poweroff(struct device *dev) | |||
839 | static int cmos_resume(struct device *dev) | 846 | static int cmos_resume(struct device *dev) |
840 | { | 847 | { |
841 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 848 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
842 | unsigned char tmp = cmos->suspend_ctrl; | 849 | unsigned char tmp; |
850 | |||
851 | if (cmos->enabled_wake) { | ||
852 | if (cmos->wake_off) | ||
853 | cmos->wake_off(dev); | ||
854 | else | ||
855 | disable_irq_wake(cmos->irq); | ||
856 | cmos->enabled_wake = 0; | ||
857 | } | ||
843 | 858 | ||
859 | spin_lock_irq(&rtc_lock); | ||
860 | tmp = cmos->suspend_ctrl; | ||
861 | cmos->suspend_ctrl = 0; | ||
844 | /* re-enable any irqs previously active */ | 862 | /* re-enable any irqs previously active */ |
845 | if (tmp & RTC_IRQMASK) { | 863 | if (tmp & RTC_IRQMASK) { |
846 | unsigned char mask; | 864 | unsigned char mask; |
847 | 865 | ||
848 | if (cmos->enabled_wake) { | ||
849 | if (cmos->wake_off) | ||
850 | cmos->wake_off(dev); | ||
851 | else | ||
852 | disable_irq_wake(cmos->irq); | ||
853 | cmos->enabled_wake = 0; | ||
854 | } | ||
855 | |||
856 | spin_lock_irq(&rtc_lock); | ||
857 | if (device_may_wakeup(dev)) | 866 | if (device_may_wakeup(dev)) |
858 | hpet_rtc_timer_init(); | 867 | hpet_rtc_timer_init(); |
859 | 868 | ||
@@ -873,8 +882,8 @@ static int cmos_resume(struct device *dev) | |||
873 | tmp &= ~RTC_AIE; | 882 | tmp &= ~RTC_AIE; |
874 | hpet_mask_rtc_irq_bit(RTC_AIE); | 883 | hpet_mask_rtc_irq_bit(RTC_AIE); |
875 | } while (mask & RTC_AIE); | 884 | } while (mask & RTC_AIE); |
876 | spin_unlock_irq(&rtc_lock); | ||
877 | } | 885 | } |
886 | spin_unlock_irq(&rtc_lock); | ||
878 | 887 | ||
879 | dev_dbg(dev, "resume, ctrl %02x\n", tmp); | 888 | dev_dbg(dev, "resume, ctrl %02x\n", tmp); |
880 | 889 | ||
@@ -991,7 +1000,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) | |||
991 | { | 1000 | { |
992 | cmos_wake_setup(&pnp->dev); | 1001 | cmos_wake_setup(&pnp->dev); |
993 | 1002 | ||
994 | if (pnp_port_start(pnp,0) == 0x70 && !pnp_irq_valid(pnp,0)) | 1003 | if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) |
995 | /* Some machines contain a PNP entry for the RTC, but | 1004 | /* Some machines contain a PNP entry for the RTC, but |
996 | * don't define the IRQ. It should always be safe to | 1005 | * don't define the IRQ. It should always be safe to |
997 | * hardcode it in these cases | 1006 | * hardcode it in these cases |
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c index ad6863a76af9..73f157519dff 100644 --- a/drivers/rtc/rtc-coh901331.c +++ b/drivers/rtc/rtc-coh901331.c | |||
@@ -152,12 +152,10 @@ static struct rtc_class_ops coh901331_ops = { | |||
152 | 152 | ||
153 | static int __exit coh901331_remove(struct platform_device *pdev) | 153 | static int __exit coh901331_remove(struct platform_device *pdev) |
154 | { | 154 | { |
155 | struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev); | 155 | struct coh901331_port *rtap = platform_get_drvdata(pdev); |
156 | 156 | ||
157 | if (rtap) { | 157 | if (rtap) |
158 | clk_unprepare(rtap->clk); | 158 | clk_unprepare(rtap->clk); |
159 | platform_set_drvdata(pdev, NULL); | ||
160 | } | ||
161 | 159 | ||
162 | return 0; | 160 | return 0; |
163 | } | 161 | } |
@@ -220,7 +218,6 @@ static int __init coh901331_probe(struct platform_device *pdev) | |||
220 | return 0; | 218 | return 0; |
221 | 219 | ||
222 | out_no_rtc: | 220 | out_no_rtc: |
223 | platform_set_drvdata(pdev, NULL); | ||
224 | clk_unprepare(rtap->clk); | 221 | clk_unprepare(rtap->clk); |
225 | return ret; | 222 | return ret; |
226 | } | 223 | } |
@@ -267,7 +264,7 @@ static SIMPLE_DEV_PM_OPS(coh901331_pm_ops, coh901331_suspend, coh901331_resume); | |||
267 | 264 | ||
268 | static void coh901331_shutdown(struct platform_device *pdev) | 265 | static void coh901331_shutdown(struct platform_device *pdev) |
269 | { | 266 | { |
270 | struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev); | 267 | struct coh901331_port *rtap = platform_get_drvdata(pdev); |
271 | 268 | ||
272 | clk_enable(rtap->clk); | 269 | clk_enable(rtap->clk); |
273 | writel(0, rtap->virtbase + COH901331_IRQ_MASK); | 270 | writel(0, rtap->virtbase + COH901331_IRQ_MASK); |
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 7286b279cf2d..9c8c19441cc6 100644 --- a/drivers/rtc/rtc-da9052.c +++ b/drivers/rtc/rtc-da9052.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/rtc.h> | 17 | #include <linux/rtc.h> |
18 | #include <linux/err.h> | ||
18 | 19 | ||
19 | #include <linux/mfd/da9052/da9052.h> | 20 | #include <linux/mfd/da9052/da9052.h> |
20 | #include <linux/mfd/da9052/reg.h> | 21 | #include <linux/mfd/da9052/reg.h> |
@@ -249,22 +250,11 @@ static int da9052_rtc_probe(struct platform_device *pdev) | |||
249 | 250 | ||
250 | rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | 251 | rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
251 | &da9052_rtc_ops, THIS_MODULE); | 252 | &da9052_rtc_ops, THIS_MODULE); |
252 | if (IS_ERR(rtc->rtc)) | 253 | return PTR_RET(rtc->rtc); |
253 | return PTR_ERR(rtc->rtc); | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static int da9052_rtc_remove(struct platform_device *pdev) | ||
259 | { | ||
260 | platform_set_drvdata(pdev, NULL); | ||
261 | |||
262 | return 0; | ||
263 | } | 254 | } |
264 | 255 | ||
265 | static struct platform_driver da9052_rtc_driver = { | 256 | static struct platform_driver da9052_rtc_driver = { |
266 | .probe = da9052_rtc_probe, | 257 | .probe = da9052_rtc_probe, |
267 | .remove = da9052_rtc_remove, | ||
268 | .driver = { | 258 | .driver = { |
269 | .name = "da9052-rtc", | 259 | .name = "da9052-rtc", |
270 | .owner = THIS_MODULE, | 260 | .owner = THIS_MODULE, |
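PTR_RET(), used in the da9052 probe above, collapses the usual IS_ERR/PTR_ERR check into one call: it returns the encoded error if the pointer holds one, and 0 otherwise (later kernels rename it PTR_ERR_OR_ZERO). Roughly:

static inline int PTR_RET(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

/* so   return PTR_RET(rtc->rtc);
 * is equivalent to:
 *	if (IS_ERR(rtc->rtc))
 *		return PTR_ERR(rtc->rtc);
 *	return 0;
 */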
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c index 73858ca9709a..e00642b61076 100644 --- a/drivers/rtc/rtc-da9055.c +++ b/drivers/rtc/rtc-da9055.c | |||
@@ -315,13 +315,6 @@ err_rtc: | |||
315 | 315 | ||
316 | } | 316 | } |
317 | 317 | ||
318 | static int da9055_rtc_remove(struct platform_device *pdev) | ||
319 | { | ||
320 | platform_set_drvdata(pdev, NULL); | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | #ifdef CONFIG_PM | 318 | #ifdef CONFIG_PM |
326 | /* Turn off the alarm if it should not be a wake source. */ | 319 | /* Turn off the alarm if it should not be a wake source. */ |
327 | static int da9055_rtc_suspend(struct device *dev) | 320 | static int da9055_rtc_suspend(struct device *dev) |
@@ -394,7 +387,6 @@ static const struct dev_pm_ops da9055_rtc_pm_ops = { | |||
394 | 387 | ||
395 | static struct platform_driver da9055_rtc_driver = { | 388 | static struct platform_driver da9055_rtc_driver = { |
396 | .probe = da9055_rtc_probe, | 389 | .probe = da9055_rtc_probe, |
397 | .remove = da9055_rtc_remove, | ||
398 | .driver = { | 390 | .driver = { |
399 | .name = "da9055-rtc", | 391 | .name = "da9055-rtc", |
400 | .owner = THIS_MODULE, | 392 | .owner = THIS_MODULE, |
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index a55048c3e26f..24677ef8c39a 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c | |||
@@ -117,7 +117,7 @@ | |||
117 | static DEFINE_SPINLOCK(davinci_rtc_lock); | 117 | static DEFINE_SPINLOCK(davinci_rtc_lock); |
118 | 118 | ||
119 | struct davinci_rtc { | 119 | struct davinci_rtc { |
120 | struct rtc_device *rtc; | 120 | struct rtc_device *rtc; |
121 | void __iomem *base; | 121 | void __iomem *base; |
122 | resource_size_t pbase; | 122 | resource_size_t pbase; |
123 | size_t base_size; | 123 | size_t base_size; |
@@ -526,10 +526,9 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
526 | davinci_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | 526 | davinci_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
527 | &davinci_rtc_ops, THIS_MODULE); | 527 | &davinci_rtc_ops, THIS_MODULE); |
528 | if (IS_ERR(davinci_rtc->rtc)) { | 528 | if (IS_ERR(davinci_rtc->rtc)) { |
529 | ret = PTR_ERR(davinci_rtc->rtc); | ||
530 | dev_err(dev, "unable to register RTC device, err %d\n", | 529 | dev_err(dev, "unable to register RTC device, err %d\n", |
531 | ret); | 530 | ret); |
532 | goto fail1; | 531 | return PTR_ERR(davinci_rtc->rtc); |
533 | } | 532 | } |
534 | 533 | ||
535 | rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG); | 534 | rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG); |
@@ -543,7 +542,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
543 | 0, "davinci_rtc", davinci_rtc); | 542 | 0, "davinci_rtc", davinci_rtc); |
544 | if (ret < 0) { | 543 | if (ret < 0) { |
545 | dev_err(dev, "unable to register davinci RTC interrupt\n"); | 544 | dev_err(dev, "unable to register davinci RTC interrupt\n"); |
546 | goto fail1; | 545 | return ret; |
547 | } | 546 | } |
548 | 547 | ||
549 | /* Enable interrupts */ | 548 | /* Enable interrupts */ |
@@ -556,10 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
556 | device_init_wakeup(&pdev->dev, 0); | 555 | device_init_wakeup(&pdev->dev, 0); |
557 | 556 | ||
558 | return 0; | 557 | return 0; |
559 | |||
560 | fail1: | ||
561 | platform_set_drvdata(pdev, NULL); | ||
562 | return ret; | ||
563 | } | 558 | } |
564 | 559 | ||
565 | static int __exit davinci_rtc_remove(struct platform_device *pdev) | 560 | static int __exit davinci_rtc_remove(struct platform_device *pdev) |
@@ -570,8 +565,6 @@ static int __exit davinci_rtc_remove(struct platform_device *pdev) | |||
570 | 565 | ||
571 | rtcif_write(davinci_rtc, 0, PRTCIF_INTEN); | 566 | rtcif_write(davinci_rtc, 0, PRTCIF_INTEN); |
572 | 567 | ||
573 | platform_set_drvdata(pdev, NULL); | ||
574 | |||
575 | return 0; | 568 | return 0; |
576 | } | 569 | } |
577 | 570 | ||
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c index 1e1ca63d58a9..1aca08394c47 100644 --- a/drivers/rtc/rtc-dm355evm.c +++ b/drivers/rtc/rtc-dm355evm.c | |||
@@ -139,19 +139,12 @@ static int dm355evm_rtc_probe(struct platform_device *pdev) | |||
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int dm355evm_rtc_remove(struct platform_device *pdev) | ||
143 | { | ||
144 | platform_set_drvdata(pdev, NULL); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /* | 142 | /* |
149 | * I2C is used to talk to the MSP430, but this platform device is | 143 | * I2C is used to talk to the MSP430, but this platform device is |
150 | * exposed by an MFD driver that manages I2C communications. | 144 | * exposed by an MFD driver that manages I2C communications. |
151 | */ | 145 | */ |
152 | static struct platform_driver rtc_dm355evm_driver = { | 146 | static struct platform_driver rtc_dm355evm_driver = { |
153 | .probe = dm355evm_rtc_probe, | 147 | .probe = dm355evm_rtc_probe, |
154 | .remove = dm355evm_rtc_remove, | ||
155 | .driver = { | 148 | .driver = { |
156 | .owner = THIS_MODULE, | 149 | .owner = THIS_MODULE, |
157 | .name = "rtc-dm355evm", | 150 | .name = "rtc-dm355evm", |
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c index c7702b7269f7..9c04fd2bc209 100644 --- a/drivers/rtc/rtc-ds1216.c +++ b/drivers/rtc/rtc-ds1216.c | |||
@@ -167,34 +167,17 @@ static int __init ds1216_rtc_probe(struct platform_device *pdev) | |||
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
169 | 169 | ||
170 | static int __exit ds1216_rtc_remove(struct platform_device *pdev) | ||
171 | { | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static struct platform_driver ds1216_rtc_platform_driver = { | 170 | static struct platform_driver ds1216_rtc_platform_driver = { |
176 | .driver = { | 171 | .driver = { |
177 | .name = "rtc-ds1216", | 172 | .name = "rtc-ds1216", |
178 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
179 | }, | 174 | }, |
180 | .remove = __exit_p(ds1216_rtc_remove), | ||
181 | }; | 175 | }; |
182 | 176 | ||
183 | static int __init ds1216_rtc_init(void) | 177 | module_platform_driver_probe(ds1216_rtc_platform_driver, ds1216_rtc_probe); |
184 | { | ||
185 | return platform_driver_probe(&ds1216_rtc_platform_driver, ds1216_rtc_probe); | ||
186 | } | ||
187 | |||
188 | static void __exit ds1216_rtc_exit(void) | ||
189 | { | ||
190 | platform_driver_unregister(&ds1216_rtc_platform_driver); | ||
191 | } | ||
192 | 178 | ||
193 | MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); | 179 | MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); |
194 | MODULE_DESCRIPTION("DS1216 RTC driver"); | 180 | MODULE_DESCRIPTION("DS1216 RTC driver"); |
195 | MODULE_LICENSE("GPL"); | 181 | MODULE_LICENSE("GPL"); |
196 | MODULE_VERSION(DRV_VERSION); | 182 | MODULE_VERSION(DRV_VERSION); |
197 | MODULE_ALIAS("platform:rtc-ds1216"); | 183 | MODULE_ALIAS("platform:rtc-ds1216"); |
198 | |||
199 | module_init(ds1216_rtc_init); | ||
200 | module_exit(ds1216_rtc_exit); | ||
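module_platform_driver_probe(), which the ds1216 conversion switches to here (and which several of the other drivers in this series already use), removes exactly the init/exit boilerplate deleted above. For a driver __driver with probe function __probe it expands to roughly:

static int __init __driver_init(void)
{
	/* probe once at init time; devices appearing later are not bound */
	return platform_driver_probe(&__driver, __probe);
}
module_init(__driver_init);

static void __exit __driver_exit(void)
{
	platform_driver_unregister(&__driver);
}
module_exit(__driver_exit);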
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index 398c96a98fc4..50e109b78252 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c | |||
@@ -353,18 +353,12 @@ static int ds1286_probe(struct platform_device *pdev) | |||
353 | return 0; | 353 | return 0; |
354 | } | 354 | } |
355 | 355 | ||
356 | static int ds1286_remove(struct platform_device *pdev) | ||
357 | { | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static struct platform_driver ds1286_platform_driver = { | 356 | static struct platform_driver ds1286_platform_driver = { |
362 | .driver = { | 357 | .driver = { |
363 | .name = "rtc-ds1286", | 358 | .name = "rtc-ds1286", |
364 | .owner = THIS_MODULE, | 359 | .owner = THIS_MODULE, |
365 | }, | 360 | }, |
366 | .probe = ds1286_probe, | 361 | .probe = ds1286_probe, |
367 | .remove = ds1286_remove, | ||
368 | }; | 362 | }; |
369 | 363 | ||
370 | module_platform_driver(ds1286_platform_driver); | 364 | module_platform_driver(ds1286_platform_driver); |
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index d13954346286..07e8d79b4a09 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c | |||
@@ -23,8 +23,12 @@ | |||
23 | #define RTC_CMD_READ 0x81 /* Read command */ | 23 | #define RTC_CMD_READ 0x81 /* Read command */ |
24 | #define RTC_CMD_WRITE 0x80 /* Write command */ | 24 | #define RTC_CMD_WRITE 0x80 /* Write command */ |
25 | 25 | ||
26 | #define RTC_CMD_WRITE_ENABLE 0x00 /* Write enable */ | ||
27 | #define RTC_CMD_WRITE_DISABLE 0x80 /* Write disable */ | ||
28 | |||
26 | #define RTC_ADDR_RAM0 0x20 /* Address of RAM0 */ | 29 | #define RTC_ADDR_RAM0 0x20 /* Address of RAM0 */ |
27 | #define RTC_ADDR_TCR 0x08 /* Address of trickle charge register */ | 30 | #define RTC_ADDR_TCR 0x08 /* Address of trickle charge register */ |
31 | #define RTC_ADDR_CTRL 0x07 /* Address of control register */ | ||
28 | #define RTC_ADDR_YEAR 0x06 /* Address of year register */ | 32 | #define RTC_ADDR_YEAR 0x06 /* Address of year register */ |
29 | #define RTC_ADDR_DAY 0x05 /* Address of day of week register */ | 33 | #define RTC_ADDR_DAY 0x05 /* Address of day of week register */ |
30 | #define RTC_ADDR_MON 0x04 /* Address of month register */ | 34 | #define RTC_ADDR_MON 0x04 /* Address of month register */ |
@@ -161,6 +165,7 @@ static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
161 | 165 | ||
162 | static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm) | 166 | static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm) |
163 | { | 167 | { |
168 | ds1302_writebyte(RTC_ADDR_CTRL, RTC_CMD_WRITE_ENABLE); | ||
164 | /* Stop RTC */ | 169 | /* Stop RTC */ |
165 | ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80); | 170 | ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80); |
166 | 171 | ||
@@ -175,6 +180,8 @@ static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
175 | /* Start RTC */ | 180 | /* Start RTC */ |
176 | ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80); | 181 | ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80); |
177 | 182 | ||
183 | ds1302_writebyte(RTC_ADDR_CTRL, RTC_CMD_WRITE_DISABLE); | ||
184 | |||
178 | return 0; | 185 | return 0; |
179 | } | 186 | } |
180 | 187 | ||
@@ -234,19 +241,11 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev) | |||
234 | return 0; | 241 | return 0; |
235 | } | 242 | } |
236 | 243 | ||
237 | static int __exit ds1302_rtc_remove(struct platform_device *pdev) | ||
238 | { | ||
239 | platform_set_drvdata(pdev, NULL); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static struct platform_driver ds1302_platform_driver = { | 244 | static struct platform_driver ds1302_platform_driver = { |
245 | .driver = { | 245 | .driver = { |
246 | .name = DRV_NAME, | 246 | .name = DRV_NAME, |
247 | .owner = THIS_MODULE, | 247 | .owner = THIS_MODULE, |
248 | }, | 248 | }, |
249 | .remove = __exit_p(ds1302_rtc_remove), | ||
250 | }; | 249 | }; |
251 | 250 | ||
252 | module_platform_driver_probe(ds1302_platform_driver, ds1302_rtc_probe); | 251 | module_platform_driver_probe(ds1302_platform_driver, ds1302_rtc_probe); |
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index bb5f13f63630..dd6170acde95 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c | |||
@@ -158,7 +158,7 @@ static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
158 | goto done; | 158 | goto done; |
159 | buf[1] &= ~DS1305_AEI0; | 159 | buf[1] &= ~DS1305_AEI0; |
160 | } | 160 | } |
161 | err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); | 161 | err = spi_write_then_read(ds1305->spi, buf, sizeof(buf), NULL, 0); |
162 | if (err >= 0) | 162 | if (err >= 0) |
163 | ds1305->ctrl[0] = buf[1]; | 163 | ds1305->ctrl[0] = buf[1]; |
164 | done: | 164 | done: |
@@ -181,8 +181,8 @@ static int ds1305_get_time(struct device *dev, struct rtc_time *time) | |||
181 | /* Use write-then-read to get all the date/time registers | 181 | /* Use write-then-read to get all the date/time registers |
182 | * since dma from stack is nonportable | 182 | * since dma from stack is nonportable |
183 | */ | 183 | */ |
184 | status = spi_write_then_read(ds1305->spi, &addr, sizeof addr, | 184 | status = spi_write_then_read(ds1305->spi, &addr, sizeof(addr), |
185 | buf, sizeof buf); | 185 | buf, sizeof(buf)); |
186 | if (status < 0) | 186 | if (status < 0) |
187 | return status; | 187 | return status; |
188 | 188 | ||
@@ -237,7 +237,7 @@ static int ds1305_set_time(struct device *dev, struct rtc_time *time) | |||
237 | buf[4], buf[5], buf[6], buf[7]); | 237 | buf[4], buf[5], buf[6], buf[7]); |
238 | 238 | ||
239 | /* use write-then-read since dma from stack is nonportable */ | 239 | /* use write-then-read since dma from stack is nonportable */ |
240 | return spi_write_then_read(ds1305->spi, buf, sizeof buf, | 240 | return spi_write_then_read(ds1305->spi, buf, sizeof(buf), |
241 | NULL, 0); | 241 | NULL, 0); |
242 | } | 242 | } |
243 | 243 | ||
@@ -286,8 +286,8 @@ static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
286 | * of EFI status is at best fragile anyway (given IRQ handlers). | 286 | * of EFI status is at best fragile anyway (given IRQ handlers). |
287 | */ | 287 | */ |
288 | addr = DS1305_CONTROL; | 288 | addr = DS1305_CONTROL; |
289 | status = spi_write_then_read(spi, &addr, sizeof addr, | 289 | status = spi_write_then_read(spi, &addr, sizeof(addr), |
290 | ds1305->ctrl, sizeof ds1305->ctrl); | 290 | ds1305->ctrl, sizeof(ds1305->ctrl)); |
291 | if (status < 0) | 291 | if (status < 0) |
292 | return status; | 292 | return status; |
293 | 293 | ||
@@ -296,8 +296,8 @@ static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
296 | 296 | ||
297 | /* get and check ALM0 registers */ | 297 | /* get and check ALM0 registers */ |
298 | addr = DS1305_ALM0(DS1305_SEC); | 298 | addr = DS1305_ALM0(DS1305_SEC); |
299 | status = spi_write_then_read(spi, &addr, sizeof addr, | 299 | status = spi_write_then_read(spi, &addr, sizeof(addr), |
300 | buf, sizeof buf); | 300 | buf, sizeof(buf)); |
301 | if (status < 0) | 301 | if (status < 0) |
302 | return status; | 302 | return status; |
303 | 303 | ||
@@ -381,7 +381,7 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
381 | "alm0 write", buf[1 + DS1305_SEC], buf[1 + DS1305_MIN], | 381 | "alm0 write", buf[1 + DS1305_SEC], buf[1 + DS1305_MIN], |
382 | buf[1 + DS1305_HOUR], buf[1 + DS1305_WDAY]); | 382 | buf[1 + DS1305_HOUR], buf[1 + DS1305_WDAY]); |
383 | 383 | ||
384 | status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); | 384 | status = spi_write_then_read(spi, buf, sizeof(buf), NULL, 0); |
385 | if (status < 0) | 385 | if (status < 0) |
386 | return status; | 386 | return status; |
387 | 387 | ||
@@ -474,7 +474,7 @@ static void ds1305_work(struct work_struct *work) | |||
474 | buf[1] = ds1305->ctrl[0]; | 474 | buf[1] = ds1305->ctrl[0]; |
475 | buf[2] = 0; | 475 | buf[2] = 0; |
476 | 476 | ||
477 | status = spi_write_then_read(spi, buf, sizeof buf, | 477 | status = spi_write_then_read(spi, buf, sizeof(buf), |
478 | NULL, 0); | 478 | NULL, 0); |
479 | if (status < 0) | 479 | if (status < 0) |
480 | dev_dbg(&spi->dev, "clear irq --> %d\n", status); | 480 | dev_dbg(&spi->dev, "clear irq --> %d\n", status); |
@@ -627,8 +627,8 @@ static int ds1305_probe(struct spi_device *spi) | |||
627 | 627 | ||
628 | /* read and cache control registers */ | 628 | /* read and cache control registers */ |
629 | addr = DS1305_CONTROL; | 629 | addr = DS1305_CONTROL; |
630 | status = spi_write_then_read(spi, &addr, sizeof addr, | 630 | status = spi_write_then_read(spi, &addr, sizeof(addr), |
631 | ds1305->ctrl, sizeof ds1305->ctrl); | 631 | ds1305->ctrl, sizeof(ds1305->ctrl)); |
632 | if (status < 0) { | 632 | if (status < 0) { |
633 | dev_dbg(&spi->dev, "can't %s, %d\n", | 633 | dev_dbg(&spi->dev, "can't %s, %d\n", |
634 | "read", status); | 634 | "read", status); |
@@ -659,7 +659,7 @@ static int ds1305_probe(struct spi_device *spi) | |||
659 | 659 | ||
660 | buf[0] = DS1305_WRITE | DS1305_CONTROL; | 660 | buf[0] = DS1305_WRITE | DS1305_CONTROL; |
661 | buf[1] = ds1305->ctrl[0]; | 661 | buf[1] = ds1305->ctrl[0]; |
662 | status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); | 662 | status = spi_write_then_read(spi, buf, sizeof(buf), NULL, 0); |
663 | 663 | ||
664 | dev_dbg(&spi->dev, "clear WP --> %d\n", status); | 664 | dev_dbg(&spi->dev, "clear WP --> %d\n", status); |
665 | if (status < 0) | 665 | if (status < 0) |
@@ -713,7 +713,7 @@ static int ds1305_probe(struct spi_device *spi) | |||
713 | buf[1] = ds1305->ctrl[0]; | 713 | buf[1] = ds1305->ctrl[0]; |
714 | buf[2] = ds1305->ctrl[1]; | 714 | buf[2] = ds1305->ctrl[1]; |
715 | buf[3] = ds1305->ctrl[2]; | 715 | buf[3] = ds1305->ctrl[2]; |
716 | status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0); | 716 | status = spi_write_then_read(spi, buf, sizeof(buf), NULL, 0); |
717 | if (status < 0) { | 717 | if (status < 0) { |
718 | dev_dbg(&spi->dev, "can't %s, %d\n", | 718 | dev_dbg(&spi->dev, "can't %s, %d\n", |
719 | "write", status); | 719 | "write", status); |
@@ -725,8 +725,8 @@ static int ds1305_probe(struct spi_device *spi) | |||
725 | 725 | ||
726 | /* see if non-Linux software set up AM/PM mode */ | 726 | /* see if non-Linux software set up AM/PM mode */ |
727 | addr = DS1305_HOUR; | 727 | addr = DS1305_HOUR; |
728 | status = spi_write_then_read(spi, &addr, sizeof addr, | 728 | status = spi_write_then_read(spi, &addr, sizeof(addr), |
729 | &value, sizeof value); | 729 | &value, sizeof(value)); |
730 | if (status < 0) { | 730 | if (status < 0) { |
731 | dev_dbg(&spi->dev, "read HOUR --> %d\n", status); | 731 | dev_dbg(&spi->dev, "read HOUR --> %d\n", status); |
732 | return status; | 732 | return status; |
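The ds1305 changes are purely stylistic (checkpatch prefers the function-like sizeof(buf) spelling over sizeof buf), but the call they touch is worth spelling out: spi_write_then_read() is used throughout because it bounces data through its own DMA-safe buffers, which is what makes passing small stack variables legal despite the "dma from stack is nonportable" caveat in the comments. A rough sketch of the one-command-byte-out, N-bytes-back register read it implements (the helper name and locals are illustrative):

        /* Illustrative register-read sketch built on spi_write_then_read(). */
        static int ds1305_read_regs_sketch(struct spi_device *spi, u8 addr,
                                           u8 *buf, size_t len)
        {
                /* addr and buf may live on the caller's stack; the SPI core
                 * copies them into DMA-safe bounce buffers internally. */
                return spi_write_then_read(spi, &addr, sizeof(addr), buf, len);
        }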
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index b53992ab3090..ca18fd1433b3 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -683,7 +683,7 @@ static int ds1307_probe(struct i2c_client *client, | |||
683 | && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) | 683 | && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) |
684 | return -EIO; | 684 | return -EIO; |
685 | 685 | ||
686 | ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL); | 686 | ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL); |
687 | if (!ds1307) | 687 | if (!ds1307) |
688 | return -ENOMEM; | 688 | return -ENOMEM; |
689 | 689 | ||
@@ -715,7 +715,7 @@ static int ds1307_probe(struct i2c_client *client, | |||
715 | if (tmp != 2) { | 715 | if (tmp != 2) { |
716 | dev_dbg(&client->dev, "read error %d\n", tmp); | 716 | dev_dbg(&client->dev, "read error %d\n", tmp); |
717 | err = -EIO; | 717 | err = -EIO; |
718 | goto exit_free; | 718 | goto exit; |
719 | } | 719 | } |
720 | 720 | ||
721 | /* oscillator off? turn it on, so clock can tick. */ | 721 | /* oscillator off? turn it on, so clock can tick. */ |
@@ -754,7 +754,7 @@ static int ds1307_probe(struct i2c_client *client, | |||
754 | if (tmp != 2) { | 754 | if (tmp != 2) { |
755 | dev_dbg(&client->dev, "read error %d\n", tmp); | 755 | dev_dbg(&client->dev, "read error %d\n", tmp); |
756 | err = -EIO; | 756 | err = -EIO; |
757 | goto exit_free; | 757 | goto exit; |
758 | } | 758 | } |
759 | 759 | ||
760 | /* oscillator off? turn it on, so clock can tick. */ | 760 | /* oscillator off? turn it on, so clock can tick. */ |
@@ -798,7 +798,7 @@ static int ds1307_probe(struct i2c_client *client, | |||
798 | if (tmp != 2) { | 798 | if (tmp != 2) { |
799 | dev_dbg(&client->dev, "read error %d\n", tmp); | 799 | dev_dbg(&client->dev, "read error %d\n", tmp); |
800 | err = -EIO; | 800 | err = -EIO; |
801 | goto exit_free; | 801 | goto exit; |
802 | } | 802 | } |
803 | 803 | ||
804 | /* correct hour */ | 804 | /* correct hour */ |
@@ -826,7 +826,7 @@ read_rtc: | |||
826 | if (tmp != 8) { | 826 | if (tmp != 8) { |
827 | dev_dbg(&client->dev, "read error %d\n", tmp); | 827 | dev_dbg(&client->dev, "read error %d\n", tmp); |
828 | err = -EIO; | 828 | err = -EIO; |
829 | goto exit_free; | 829 | goto exit; |
830 | } | 830 | } |
831 | 831 | ||
832 | /* | 832 | /* |
@@ -868,7 +868,7 @@ read_rtc: | |||
868 | if (tmp < 0) { | 868 | if (tmp < 0) { |
869 | dev_dbg(&client->dev, "read error %d\n", tmp); | 869 | dev_dbg(&client->dev, "read error %d\n", tmp); |
870 | err = -EIO; | 870 | err = -EIO; |
871 | goto exit_free; | 871 | goto exit; |
872 | } | 872 | } |
873 | 873 | ||
874 | /* oscillator fault? clear flag, and warn */ | 874 | /* oscillator fault? clear flag, and warn */ |
@@ -927,13 +927,13 @@ read_rtc: | |||
927 | bin2bcd(tmp)); | 927 | bin2bcd(tmp)); |
928 | } | 928 | } |
929 | 929 | ||
930 | ds1307->rtc = rtc_device_register(client->name, &client->dev, | 930 | ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, |
931 | &ds13xx_rtc_ops, THIS_MODULE); | 931 | &ds13xx_rtc_ops, THIS_MODULE); |
932 | if (IS_ERR(ds1307->rtc)) { | 932 | if (IS_ERR(ds1307->rtc)) { |
933 | err = PTR_ERR(ds1307->rtc); | 933 | err = PTR_ERR(ds1307->rtc); |
934 | dev_err(&client->dev, | 934 | dev_err(&client->dev, |
935 | "unable to register the class device\n"); | 935 | "unable to register the class device\n"); |
936 | goto exit_free; | 936 | goto exit; |
937 | } | 937 | } |
938 | 938 | ||
939 | if (want_irq) { | 939 | if (want_irq) { |
@@ -942,7 +942,7 @@ read_rtc: | |||
942 | if (err) { | 942 | if (err) { |
943 | dev_err(&client->dev, | 943 | dev_err(&client->dev, |
944 | "unable to request IRQ!\n"); | 944 | "unable to request IRQ!\n"); |
945 | goto exit_irq; | 945 | goto exit; |
946 | } | 946 | } |
947 | 947 | ||
948 | device_set_wakeup_capable(&client->dev, 1); | 948 | device_set_wakeup_capable(&client->dev, 1); |
@@ -951,11 +951,12 @@ read_rtc: | |||
951 | } | 951 | } |
952 | 952 | ||
953 | if (chip->nvram_size) { | 953 | if (chip->nvram_size) { |
954 | ds1307->nvram = kzalloc(sizeof(struct bin_attribute), | 954 | ds1307->nvram = devm_kzalloc(&client->dev, |
955 | GFP_KERNEL); | 955 | sizeof(struct bin_attribute), |
956 | GFP_KERNEL); | ||
956 | if (!ds1307->nvram) { | 957 | if (!ds1307->nvram) { |
957 | err = -ENOMEM; | 958 | err = -ENOMEM; |
958 | goto exit_nvram; | 959 | goto exit; |
959 | } | 960 | } |
960 | ds1307->nvram->attr.name = "nvram"; | 961 | ds1307->nvram->attr.name = "nvram"; |
961 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; | 962 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; |
@@ -965,21 +966,15 @@ read_rtc: | |||
965 | ds1307->nvram->size = chip->nvram_size; | 966 | ds1307->nvram->size = chip->nvram_size; |
966 | ds1307->nvram_offset = chip->nvram_offset; | 967 | ds1307->nvram_offset = chip->nvram_offset; |
967 | err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram); | 968 | err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram); |
968 | if (err) { | 969 | if (err) |
969 | kfree(ds1307->nvram); | 970 | goto exit; |
970 | goto exit_nvram; | ||
971 | } | ||
972 | set_bit(HAS_NVRAM, &ds1307->flags); | 971 | set_bit(HAS_NVRAM, &ds1307->flags); |
973 | dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size); | 972 | dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size); |
974 | } | 973 | } |
975 | 974 | ||
976 | return 0; | 975 | return 0; |
977 | 976 | ||
978 | exit_nvram: | 977 | exit: |
979 | exit_irq: | ||
980 | rtc_device_unregister(ds1307->rtc); | ||
981 | exit_free: | ||
982 | kfree(ds1307); | ||
983 | return err; | 978 | return err; |
984 | } | 979 | } |
985 | 980 | ||
@@ -992,13 +987,9 @@ static int ds1307_remove(struct i2c_client *client) | |||
992 | cancel_work_sync(&ds1307->work); | 987 | cancel_work_sync(&ds1307->work); |
993 | } | 988 | } |
994 | 989 | ||
995 | if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) { | 990 | if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) |
996 | sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); | 991 | sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); |
997 | kfree(ds1307->nvram); | ||
998 | } | ||
999 | 992 | ||
1000 | rtc_device_unregister(ds1307->rtc); | ||
1001 | kfree(ds1307); | ||
1002 | return 0; | 993 | return 0; |
1003 | } | 994 | } |
1004 | 995 | ||
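The ds1307 conversion above is the template for most of this series: kzalloc() and rtc_device_register() become their devm_* equivalents, every failure path collapses into a single exit label, and remove() no longer frees anything by hand because device-managed resources are released automatically, in reverse order, on probe failure and on unbind. A condensed sketch of the resulting probe() shape (the struct, ops and function names here are illustrative, not the driver's own):

        static int example_rtc_probe(struct i2c_client *client)
        {
                struct example_rtc *chip;

                chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                chip->rtc = devm_rtc_device_register(&client->dev, client->name,
                                                     &example_rtc_ops, THIS_MODULE);
                if (IS_ERR(chip->rtc))
                        return PTR_ERR(chip->rtc);      /* nothing to unwind by hand */

                return 0;       /* remove() needs no matching kfree()/unregister */
        }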
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 94366e12f40f..9e6e14fb53d7 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c | |||
@@ -65,7 +65,7 @@ struct ds1374 { | |||
65 | static struct i2c_driver ds1374_driver; | 65 | static struct i2c_driver ds1374_driver; |
66 | 66 | ||
67 | static int ds1374_read_rtc(struct i2c_client *client, u32 *time, | 67 | static int ds1374_read_rtc(struct i2c_client *client, u32 *time, |
68 | int reg, int nbytes) | 68 | int reg, int nbytes) |
69 | { | 69 | { |
70 | u8 buf[4]; | 70 | u8 buf[4]; |
71 | int ret; | 71 | int ret; |
@@ -90,7 +90,7 @@ static int ds1374_read_rtc(struct i2c_client *client, u32 *time, | |||
90 | } | 90 | } |
91 | 91 | ||
92 | static int ds1374_write_rtc(struct i2c_client *client, u32 time, | 92 | static int ds1374_write_rtc(struct i2c_client *client, u32 time, |
93 | int reg, int nbytes) | 93 | int reg, int nbytes) |
94 | { | 94 | { |
95 | u8 buf[4]; | 95 | u8 buf[4]; |
96 | int i; | 96 | int i; |
@@ -119,8 +119,7 @@ static int ds1374_check_rtc_status(struct i2c_client *client) | |||
119 | 119 | ||
120 | if (stat & DS1374_REG_SR_OSF) | 120 | if (stat & DS1374_REG_SR_OSF) |
121 | dev_warn(&client->dev, | 121 | dev_warn(&client->dev, |
122 | "oscillator discontinuity flagged, " | 122 | "oscillator discontinuity flagged, time unreliable\n"); |
123 | "time unreliable\n"); | ||
124 | 123 | ||
125 | stat &= ~(DS1374_REG_SR_OSF | DS1374_REG_SR_AF); | 124 | stat &= ~(DS1374_REG_SR_OSF | DS1374_REG_SR_AF); |
126 | 125 | ||
@@ -363,7 +362,7 @@ static int ds1374_probe(struct i2c_client *client, | |||
363 | 362 | ||
364 | if (client->irq > 0) { | 363 | if (client->irq > 0) { |
365 | ret = devm_request_irq(&client->dev, client->irq, ds1374_irq, 0, | 364 | ret = devm_request_irq(&client->dev, client->irq, ds1374_irq, 0, |
366 | "ds1374", client); | 365 | "ds1374", client); |
367 | if (ret) { | 366 | if (ret) { |
368 | dev_err(&client->dev, "unable to request IRQ\n"); | 367 | dev_err(&client->dev, "unable to request IRQ\n"); |
369 | return ret; | 368 | return ret; |
@@ -373,7 +372,7 @@ static int ds1374_probe(struct i2c_client *client, | |||
373 | } | 372 | } |
374 | 373 | ||
375 | ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, | 374 | ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, |
376 | &ds1374_rtc_ops, THIS_MODULE); | 375 | &ds1374_rtc_ops, THIS_MODULE); |
377 | if (IS_ERR(ds1374->rtc)) { | 376 | if (IS_ERR(ds1374->rtc)) { |
378 | dev_err(&client->dev, "unable to register the class device\n"); | 377 | dev_err(&client->dev, "unable to register the class device\n"); |
379 | return PTR_ERR(ds1374->rtc); | 378 | return PTR_ERR(ds1374->rtc); |
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c index 289af419dff4..be9d8c0a7e3a 100644 --- a/drivers/rtc/rtc-ds1390.c +++ b/drivers/rtc/rtc-ds1390.c | |||
@@ -154,18 +154,12 @@ static int ds1390_probe(struct spi_device *spi) | |||
154 | return res; | 154 | return res; |
155 | } | 155 | } |
156 | 156 | ||
157 | static int ds1390_remove(struct spi_device *spi) | ||
158 | { | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static struct spi_driver ds1390_driver = { | 157 | static struct spi_driver ds1390_driver = { |
163 | .driver = { | 158 | .driver = { |
164 | .name = "rtc-ds1390", | 159 | .name = "rtc-ds1390", |
165 | .owner = THIS_MODULE, | 160 | .owner = THIS_MODULE, |
166 | }, | 161 | }, |
167 | .probe = ds1390_probe, | 162 | .probe = ds1390_probe, |
168 | .remove = ds1390_remove, | ||
169 | }; | 163 | }; |
170 | 164 | ||
171 | module_spi_driver(ds1390_driver); | 165 | module_spi_driver(ds1390_driver); |
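Dropping ds1390_remove() (and the identical empty stubs removed from several drivers below) is safe because the driver core simply skips the remove callback when it is NULL; an empty function adds nothing. The driver definition then only carries the members it actually uses, roughly (names illustrative):

        static struct spi_driver example_driver = {
                .driver = {
                        .name  = "example-rtc",
                        .owner = THIS_MODULE,
                },
                .probe = example_probe,
                /* no .remove: a NULL callback is treated as a no-op by the core */
        };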
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c index 6ce8a997cf51..308a8fefe76f 100644 --- a/drivers/rtc/rtc-ds1511.c +++ b/drivers/rtc/rtc-ds1511.c | |||
@@ -104,31 +104,31 @@ static DEFINE_SPINLOCK(ds1511_lock); | |||
104 | static __iomem char *ds1511_base; | 104 | static __iomem char *ds1511_base; |
105 | static u32 reg_spacing = 1; | 105 | static u32 reg_spacing = 1; |
106 | 106 | ||
107 | static noinline void | 107 | static noinline void |
108 | rtc_write(uint8_t val, uint32_t reg) | 108 | rtc_write(uint8_t val, uint32_t reg) |
109 | { | 109 | { |
110 | writeb(val, ds1511_base + (reg * reg_spacing)); | 110 | writeb(val, ds1511_base + (reg * reg_spacing)); |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline void | 113 | static inline void |
114 | rtc_write_alarm(uint8_t val, enum ds1511reg reg) | 114 | rtc_write_alarm(uint8_t val, enum ds1511reg reg) |
115 | { | 115 | { |
116 | rtc_write((val | 0x80), reg); | 116 | rtc_write((val | 0x80), reg); |
117 | } | 117 | } |
118 | 118 | ||
119 | static noinline uint8_t | 119 | static noinline uint8_t |
120 | rtc_read(enum ds1511reg reg) | 120 | rtc_read(enum ds1511reg reg) |
121 | { | 121 | { |
122 | return readb(ds1511_base + (reg * reg_spacing)); | 122 | return readb(ds1511_base + (reg * reg_spacing)); |
123 | } | 123 | } |
124 | 124 | ||
125 | static inline void | 125 | static inline void |
126 | rtc_disable_update(void) | 126 | rtc_disable_update(void) |
127 | { | 127 | { |
128 | rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD); | 128 | rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void | 131 | static void |
132 | rtc_enable_update(void) | 132 | rtc_enable_update(void) |
133 | { | 133 | { |
134 | rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD); | 134 | rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD); |
@@ -145,7 +145,7 @@ rtc_enable_update(void) | |||
145 | * just enough code to set the watchdog timer so that it | 145 | * just enough code to set the watchdog timer so that it |
146 | * will reboot the system | 146 | * will reboot the system |
147 | */ | 147 | */ |
148 | void | 148 | void |
149 | ds1511_wdog_set(unsigned long deciseconds) | 149 | ds1511_wdog_set(unsigned long deciseconds) |
150 | { | 150 | { |
151 | /* | 151 | /* |
@@ -163,7 +163,7 @@ ds1511_wdog_set(unsigned long deciseconds) | |||
163 | rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD); | 163 | rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD); |
164 | } | 164 | } |
165 | 165 | ||
166 | void | 166 | void |
167 | ds1511_wdog_disable(void) | 167 | ds1511_wdog_disable(void) |
168 | { | 168 | { |
169 | /* | 169 | /* |
@@ -191,13 +191,12 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) | |||
191 | /* | 191 | /* |
192 | * won't have to change this for a while | 192 | * won't have to change this for a while |
193 | */ | 193 | */ |
194 | if (rtc_tm->tm_year < 1900) { | 194 | if (rtc_tm->tm_year < 1900) |
195 | rtc_tm->tm_year += 1900; | 195 | rtc_tm->tm_year += 1900; |
196 | } | ||
197 | 196 | ||
198 | if (rtc_tm->tm_year < 1970) { | 197 | if (rtc_tm->tm_year < 1970) |
199 | return -EINVAL; | 198 | return -EINVAL; |
200 | } | 199 | |
201 | yrs = rtc_tm->tm_year % 100; | 200 | yrs = rtc_tm->tm_year % 100; |
202 | cen = rtc_tm->tm_year / 100; | 201 | cen = rtc_tm->tm_year / 100; |
203 | mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */ | 202 | mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */ |
@@ -207,17 +206,14 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) | |||
207 | min = rtc_tm->tm_min; | 206 | min = rtc_tm->tm_min; |
208 | sec = rtc_tm->tm_sec; | 207 | sec = rtc_tm->tm_sec; |
209 | 208 | ||
210 | if ((mon > 12) || (day == 0)) { | 209 | if ((mon > 12) || (day == 0)) |
211 | return -EINVAL; | 210 | return -EINVAL; |
212 | } | ||
213 | 211 | ||
214 | if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year)) { | 212 | if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year)) |
215 | return -EINVAL; | 213 | return -EINVAL; |
216 | } | ||
217 | 214 | ||
218 | if ((hrs >= 24) || (min >= 60) || (sec >= 60)) { | 215 | if ((hrs >= 24) || (min >= 60) || (sec >= 60)) |
219 | return -EINVAL; | 216 | return -EINVAL; |
220 | } | ||
221 | 217 | ||
222 | /* | 218 | /* |
223 | * each register is a different number of valid bits | 219 | * each register is a different number of valid bits |
@@ -299,7 +295,7 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm) | |||
299 | * date/hours/mins/secs matches. the ds1511 has many more | 295 | * date/hours/mins/secs matches. the ds1511 has many more |
300 | * permutations, but the kernel doesn't. | 296 | * permutations, but the kernel doesn't. |
301 | */ | 297 | */ |
302 | static void | 298 | static void |
303 | ds1511_rtc_update_alarm(struct rtc_plat_data *pdata) | 299 | ds1511_rtc_update_alarm(struct rtc_plat_data *pdata) |
304 | { | 300 | { |
305 | unsigned long flags; | 301 | unsigned long flags; |
@@ -322,7 +318,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata) | |||
322 | spin_unlock_irqrestore(&pdata->lock, flags); | 318 | spin_unlock_irqrestore(&pdata->lock, flags); |
323 | } | 319 | } |
324 | 320 | ||
325 | static int | 321 | static int |
326 | ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | 322 | ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) |
327 | { | 323 | { |
328 | struct platform_device *pdev = to_platform_device(dev); | 324 | struct platform_device *pdev = to_platform_device(dev); |
@@ -335,14 +331,14 @@ ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
335 | pdata->alrm_hour = alrm->time.tm_hour; | 331 | pdata->alrm_hour = alrm->time.tm_hour; |
336 | pdata->alrm_min = alrm->time.tm_min; | 332 | pdata->alrm_min = alrm->time.tm_min; |
337 | pdata->alrm_sec = alrm->time.tm_sec; | 333 | pdata->alrm_sec = alrm->time.tm_sec; |
338 | if (alrm->enabled) { | 334 | if (alrm->enabled) |
339 | pdata->irqen |= RTC_AF; | 335 | pdata->irqen |= RTC_AF; |
340 | } | 336 | |
341 | ds1511_rtc_update_alarm(pdata); | 337 | ds1511_rtc_update_alarm(pdata); |
342 | return 0; | 338 | return 0; |
343 | } | 339 | } |
344 | 340 | ||
345 | static int | 341 | static int |
346 | ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | 342 | ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) |
347 | { | 343 | { |
348 | struct platform_device *pdev = to_platform_device(dev); | 344 | struct platform_device *pdev = to_platform_device(dev); |
@@ -359,7 +355,7 @@ ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
359 | return 0; | 355 | return 0; |
360 | } | 356 | } |
361 | 357 | ||
362 | static irqreturn_t | 358 | static irqreturn_t |
363 | ds1511_interrupt(int irq, void *dev_id) | 359 | ds1511_interrupt(int irq, void *dev_id) |
364 | { | 360 | { |
365 | struct platform_device *pdev = dev_id; | 361 | struct platform_device *pdev = dev_id; |
@@ -406,7 +402,7 @@ static const struct rtc_class_ops ds1511_rtc_ops = { | |||
406 | .alarm_irq_enable = ds1511_rtc_alarm_irq_enable, | 402 | .alarm_irq_enable = ds1511_rtc_alarm_irq_enable, |
407 | }; | 403 | }; |
408 | 404 | ||
409 | static ssize_t | 405 | static ssize_t |
410 | ds1511_nvram_read(struct file *filp, struct kobject *kobj, | 406 | ds1511_nvram_read(struct file *filp, struct kobject *kobj, |
411 | struct bin_attribute *ba, | 407 | struct bin_attribute *ba, |
412 | char *buf, loff_t pos, size_t size) | 408 | char *buf, loff_t pos, size_t size) |
@@ -417,26 +413,26 @@ ds1511_nvram_read(struct file *filp, struct kobject *kobj, | |||
417 | * if count is more than one, turn on "burst" mode | 413 | * if count is more than one, turn on "burst" mode |
418 | * turn it off when you're done | 414 | * turn it off when you're done |
419 | */ | 415 | */ |
420 | if (size > 1) { | 416 | if (size > 1) |
421 | rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); | 417 | rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); |
422 | } | 418 | |
423 | if (pos > DS1511_RAM_MAX) { | 419 | if (pos > DS1511_RAM_MAX) |
424 | pos = DS1511_RAM_MAX; | 420 | pos = DS1511_RAM_MAX; |
425 | } | 421 | |
426 | if (size + pos > DS1511_RAM_MAX + 1) { | 422 | if (size + pos > DS1511_RAM_MAX + 1) |
427 | size = DS1511_RAM_MAX - pos + 1; | 423 | size = DS1511_RAM_MAX - pos + 1; |
428 | } | 424 | |
429 | rtc_write(pos, DS1511_RAMADDR_LSB); | 425 | rtc_write(pos, DS1511_RAMADDR_LSB); |
430 | for (count = 0; size > 0; count++, size--) { | 426 | for (count = 0; size > 0; count++, size--) |
431 | *buf++ = rtc_read(DS1511_RAMDATA); | 427 | *buf++ = rtc_read(DS1511_RAMDATA); |
432 | } | 428 | |
433 | if (count > 1) { | 429 | if (count > 1) |
434 | rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); | 430 | rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); |
435 | } | 431 | |
436 | return count; | 432 | return count; |
437 | } | 433 | } |
438 | 434 | ||
439 | static ssize_t | 435 | static ssize_t |
440 | ds1511_nvram_write(struct file *filp, struct kobject *kobj, | 436 | ds1511_nvram_write(struct file *filp, struct kobject *kobj, |
441 | struct bin_attribute *bin_attr, | 437 | struct bin_attribute *bin_attr, |
442 | char *buf, loff_t pos, size_t size) | 438 | char *buf, loff_t pos, size_t size) |
@@ -447,22 +443,22 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj, | |||
447 | * if count is more than one, turn on "burst" mode | 443 | * if count is more than one, turn on "burst" mode |
448 | * turn it off when you're done | 444 | * turn it off when you're done |
449 | */ | 445 | */ |
450 | if (size > 1) { | 446 | if (size > 1) |
451 | rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); | 447 | rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); |
452 | } | 448 | |
453 | if (pos > DS1511_RAM_MAX) { | 449 | if (pos > DS1511_RAM_MAX) |
454 | pos = DS1511_RAM_MAX; | 450 | pos = DS1511_RAM_MAX; |
455 | } | 451 | |
456 | if (size + pos > DS1511_RAM_MAX + 1) { | 452 | if (size + pos > DS1511_RAM_MAX + 1) |
457 | size = DS1511_RAM_MAX - pos + 1; | 453 | size = DS1511_RAM_MAX - pos + 1; |
458 | } | 454 | |
459 | rtc_write(pos, DS1511_RAMADDR_LSB); | 455 | rtc_write(pos, DS1511_RAMADDR_LSB); |
460 | for (count = 0; size > 0; count++, size--) { | 456 | for (count = 0; size > 0; count++, size--) |
461 | rtc_write(*buf++, DS1511_RAMDATA); | 457 | rtc_write(*buf++, DS1511_RAMDATA); |
462 | } | 458 | |
463 | if (count > 1) { | 459 | if (count > 1) |
464 | rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); | 460 | rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); |
465 | } | 461 | |
466 | return count; | 462 | return count; |
467 | } | 463 | } |
468 | 464 | ||
@@ -484,9 +480,9 @@ static int ds1511_rtc_probe(struct platform_device *pdev) | |||
484 | int ret = 0; | 480 | int ret = 0; |
485 | 481 | ||
486 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 482 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
487 | if (!res) { | 483 | if (!res) |
488 | return -ENODEV; | 484 | return -ENODEV; |
489 | } | 485 | |
490 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 486 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
491 | if (!pdata) | 487 | if (!pdata) |
492 | return -ENOMEM; | 488 | return -ENOMEM; |
@@ -518,9 +514,8 @@ static int ds1511_rtc_probe(struct platform_device *pdev) | |||
518 | /* | 514 | /* |
519 | * check for a dying bat-tree | 515 | * check for a dying bat-tree |
520 | */ | 516 | */ |
521 | if (rtc_read(RTC_CMD1) & DS1511_BLF1) { | 517 | if (rtc_read(RTC_CMD1) & DS1511_BLF1) |
522 | dev_warn(&pdev->dev, "voltage-low detected.\n"); | 518 | dev_warn(&pdev->dev, "voltage-low detected.\n"); |
523 | } | ||
524 | 519 | ||
525 | spin_lock_init(&pdata->lock); | 520 | spin_lock_init(&pdata->lock); |
526 | platform_set_drvdata(pdev, pdata); | 521 | platform_set_drvdata(pdev, pdata); |
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index 3fc2a4738027..18e2d8471472 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c | |||
@@ -153,11 +153,6 @@ static const struct rtc_class_ops ds1672_rtc_ops = { | |||
153 | .set_mmss = ds1672_rtc_set_mmss, | 153 | .set_mmss = ds1672_rtc_set_mmss, |
154 | }; | 154 | }; |
155 | 155 | ||
156 | static int ds1672_remove(struct i2c_client *client) | ||
157 | { | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int ds1672_probe(struct i2c_client *client, | 156 | static int ds1672_probe(struct i2c_client *client, |
162 | const struct i2c_device_id *id) | 157 | const struct i2c_device_id *id) |
163 | { | 158 | { |
@@ -210,7 +205,6 @@ static struct i2c_driver ds1672_driver = { | |||
210 | .name = "rtc-ds1672", | 205 | .name = "rtc-ds1672", |
211 | }, | 206 | }, |
212 | .probe = &ds1672_probe, | 207 | .probe = &ds1672_probe, |
213 | .remove = &ds1672_remove, | ||
214 | .id_table = ds1672_id, | 208 | .id_table = ds1672_id, |
215 | }; | 209 | }; |
216 | 210 | ||
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c index ba98c0e9580d..4c9ba5368464 100644 --- a/drivers/rtc/rtc-ds3234.c +++ b/drivers/rtc/rtc-ds3234.c | |||
@@ -73,7 +73,7 @@ static int ds3234_read_time(struct device *dev, struct rtc_time *dt) | |||
73 | dt->tm_wday = bcd2bin(buf[3]) - 1; /* 0 = Sun */ | 73 | dt->tm_wday = bcd2bin(buf[3]) - 1; /* 0 = Sun */ |
74 | dt->tm_mday = bcd2bin(buf[4]); | 74 | dt->tm_mday = bcd2bin(buf[4]); |
75 | dt->tm_mon = bcd2bin(buf[5] & 0x1f) - 1; /* 0 = Jan */ | 75 | dt->tm_mon = bcd2bin(buf[5] & 0x1f) - 1; /* 0 = Jan */ |
76 | dt->tm_year = bcd2bin(buf[6] & 0xff) + 100; /* Assume 20YY */ | 76 | dt->tm_year = bcd2bin(buf[6] & 0xff) + 100; /* Assume 20YY */ |
77 | 77 | ||
78 | return rtc_valid_tm(dt); | 78 | return rtc_valid_tm(dt); |
79 | } | 79 | } |
@@ -156,18 +156,12 @@ static int ds3234_probe(struct spi_device *spi) | |||
156 | return 0; | 156 | return 0; |
157 | } | 157 | } |
158 | 158 | ||
159 | static int ds3234_remove(struct spi_device *spi) | ||
160 | { | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static struct spi_driver ds3234_driver = { | 159 | static struct spi_driver ds3234_driver = { |
165 | .driver = { | 160 | .driver = { |
166 | .name = "ds3234", | 161 | .name = "ds3234", |
167 | .owner = THIS_MODULE, | 162 | .owner = THIS_MODULE, |
168 | }, | 163 | }, |
169 | .probe = ds3234_probe, | 164 | .probe = ds3234_probe, |
170 | .remove = ds3234_remove, | ||
171 | }; | 165 | }; |
172 | 166 | ||
173 | module_spi_driver(ds3234_driver); | 167 | module_spi_driver(ds3234_driver); |
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index b3c8c0b1709d..797aa0252ba9 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c | |||
@@ -201,17 +201,11 @@ static int __init efi_rtc_probe(struct platform_device *dev) | |||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | static int __exit efi_rtc_remove(struct platform_device *dev) | ||
205 | { | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static struct platform_driver efi_rtc_driver = { | 204 | static struct platform_driver efi_rtc_driver = { |
210 | .driver = { | 205 | .driver = { |
211 | .name = "rtc-efi", | 206 | .name = "rtc-efi", |
212 | .owner = THIS_MODULE, | 207 | .owner = THIS_MODULE, |
213 | }, | 208 | }, |
214 | .remove = __exit_p(efi_rtc_remove), | ||
215 | }; | 209 | }; |
216 | 210 | ||
217 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); | 211 | module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); |
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c index 3f9eb57d0486..fccf36699245 100644 --- a/drivers/rtc/rtc-em3027.c +++ b/drivers/rtc/rtc-em3027.c | |||
@@ -131,11 +131,6 @@ static int em3027_probe(struct i2c_client *client, | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | static int em3027_remove(struct i2c_client *client) | ||
135 | { | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static struct i2c_device_id em3027_id[] = { | 134 | static struct i2c_device_id em3027_id[] = { |
140 | { "em3027", 0 }, | 135 | { "em3027", 0 }, |
141 | { } | 136 | { } |
@@ -146,7 +141,6 @@ static struct i2c_driver em3027_driver = { | |||
146 | .name = "rtc-em3027", | 141 | .name = "rtc-em3027", |
147 | }, | 142 | }, |
148 | .probe = &em3027_probe, | 143 | .probe = &em3027_probe, |
149 | .remove = &em3027_remove, | ||
150 | .id_table = em3027_id, | 144 | .id_table = em3027_id, |
151 | }; | 145 | }; |
152 | 146 | ||
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 5807b77c444a..549b3c3792d2 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c | |||
@@ -167,7 +167,6 @@ static int ep93xx_rtc_probe(struct platform_device *pdev) | |||
167 | return 0; | 167 | return 0; |
168 | 168 | ||
169 | exit: | 169 | exit: |
170 | platform_set_drvdata(pdev, NULL); | ||
171 | pdev->dev.platform_data = NULL; | 170 | pdev->dev.platform_data = NULL; |
172 | return err; | 171 | return err; |
173 | } | 172 | } |
@@ -175,7 +174,6 @@ exit: | |||
175 | static int ep93xx_rtc_remove(struct platform_device *pdev) | 174 | static int ep93xx_rtc_remove(struct platform_device *pdev) |
176 | { | 175 | { |
177 | sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); | 176 | sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); |
178 | platform_set_drvdata(pdev, NULL); | ||
179 | pdev->dev.platform_data = NULL; | 177 | pdev->dev.platform_data = NULL; |
180 | 178 | ||
181 | return 0; | 179 | return 0; |
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c index 2835fb6c1965..83c3b3029fa7 100644 --- a/drivers/rtc/rtc-fm3130.c +++ b/drivers/rtc/rtc-fm3130.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | struct fm3130 { | 48 | struct fm3130 { |
49 | u8 reg_addr_time; | 49 | u8 reg_addr_time; |
50 | u8 reg_addr_alarm; | 50 | u8 reg_addr_alarm; |
51 | u8 regs[15]; | 51 | u8 regs[15]; |
52 | struct i2c_msg msg[4]; | 52 | struct i2c_msg msg[4]; |
53 | struct i2c_client *client; | 53 | struct i2c_client *client; |
@@ -520,18 +520,12 @@ exit_free: | |||
520 | return err; | 520 | return err; |
521 | } | 521 | } |
522 | 522 | ||
523 | static int fm3130_remove(struct i2c_client *client) | ||
524 | { | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | static struct i2c_driver fm3130_driver = { | 523 | static struct i2c_driver fm3130_driver = { |
529 | .driver = { | 524 | .driver = { |
530 | .name = "rtc-fm3130", | 525 | .name = "rtc-fm3130", |
531 | .owner = THIS_MODULE, | 526 | .owner = THIS_MODULE, |
532 | }, | 527 | }, |
533 | .probe = fm3130_probe, | 528 | .probe = fm3130_probe, |
534 | .remove = fm3130_remove, | ||
535 | .id_table = fm3130_id, | 529 | .id_table = fm3130_id, |
536 | }; | 530 | }; |
537 | 531 | ||
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c index 06279ce6bff2..9b6725ebbfb2 100644 --- a/drivers/rtc/rtc-generic.c +++ b/drivers/rtc/rtc-generic.c | |||
@@ -48,17 +48,11 @@ static int __init generic_rtc_probe(struct platform_device *dev) | |||
48 | return 0; | 48 | return 0; |
49 | } | 49 | } |
50 | 50 | ||
51 | static int __exit generic_rtc_remove(struct platform_device *dev) | ||
52 | { | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static struct platform_driver generic_rtc_driver = { | 51 | static struct platform_driver generic_rtc_driver = { |
57 | .driver = { | 52 | .driver = { |
58 | .name = "rtc-generic", | 53 | .name = "rtc-generic", |
59 | .owner = THIS_MODULE, | 54 | .owner = THIS_MODULE, |
60 | }, | 55 | }, |
61 | .remove = __exit_p(generic_rtc_remove), | ||
62 | }; | 56 | }; |
63 | 57 | ||
64 | module_platform_driver_probe(generic_rtc_driver, generic_rtc_probe); | 58 | module_platform_driver_probe(generic_rtc_driver, generic_rtc_probe); |
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c index 63024505dddc..7273b0139e5c 100644 --- a/drivers/rtc/rtc-hid-sensor-time.c +++ b/drivers/rtc/rtc-hid-sensor-time.c | |||
@@ -76,6 +76,20 @@ static int hid_time_proc_event(struct hid_sensor_hub_device *hsdev, | |||
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
79 | static u32 hid_time_value(size_t raw_len, char *raw_data) | ||
80 | { | ||
81 | switch (raw_len) { | ||
82 | case 1: | ||
83 | return *(u8 *)raw_data; | ||
84 | case 2: | ||
85 | return *(u16 *)raw_data; | ||
86 | case 4: | ||
87 | return *(u32 *)raw_data; | ||
88 | default: | ||
89 | return (u32)(~0U); /* 0xff... or -1 to denote an error */ | ||
90 | } | ||
91 | } | ||
92 | |||
79 | static int hid_time_capture_sample(struct hid_sensor_hub_device *hsdev, | 93 | static int hid_time_capture_sample(struct hid_sensor_hub_device *hsdev, |
80 | unsigned usage_id, size_t raw_len, | 94 | unsigned usage_id, size_t raw_len, |
81 | char *raw_data, void *priv) | 95 | char *raw_data, void *priv) |
@@ -85,26 +99,35 @@ static int hid_time_capture_sample(struct hid_sensor_hub_device *hsdev, | |||
85 | 99 | ||
86 | switch (usage_id) { | 100 | switch (usage_id) { |
87 | case HID_USAGE_SENSOR_TIME_YEAR: | 101 | case HID_USAGE_SENSOR_TIME_YEAR: |
88 | time_buf->tm_year = *(u8 *)raw_data; | 102 | /* |
89 | if (time_buf->tm_year < 70) | 103 | * The draft for HID-sensors (HUTRR39) currently doesn't define |
90 | /* assume we are in 1970...2069 */ | 104 | * the range for the year attribute. Therefore we support |
91 | time_buf->tm_year += 100; | 105 | * 8 bit (0-99) and 16 or 32 bits (full) as size for the year. |
106 | */ | ||
107 | if (raw_len == 1) { | ||
108 | time_buf->tm_year = *(u8 *)raw_data; | ||
109 | if (time_buf->tm_year < 70) | ||
110 | /* assume we are in 1970...2069 */ | ||
111 | time_buf->tm_year += 100; | ||
112 | } else | ||
113 | time_buf->tm_year = | ||
114 | (int)hid_time_value(raw_len, raw_data)-1900; | ||
92 | break; | 115 | break; |
93 | case HID_USAGE_SENSOR_TIME_MONTH: | 116 | case HID_USAGE_SENSOR_TIME_MONTH: |
94 | /* sensor sending the month as 1-12, we need 0-11 */ | 117 | /* sensors are sending the month as 1-12, we need 0-11 */ |
95 | time_buf->tm_mon = *(u8 *)raw_data-1; | 118 | time_buf->tm_mon = (int)hid_time_value(raw_len, raw_data)-1; |
96 | break; | 119 | break; |
97 | case HID_USAGE_SENSOR_TIME_DAY: | 120 | case HID_USAGE_SENSOR_TIME_DAY: |
98 | time_buf->tm_mday = *(u8 *)raw_data; | 121 | time_buf->tm_mday = (int)hid_time_value(raw_len, raw_data); |
99 | break; | 122 | break; |
100 | case HID_USAGE_SENSOR_TIME_HOUR: | 123 | case HID_USAGE_SENSOR_TIME_HOUR: |
101 | time_buf->tm_hour = *(u8 *)raw_data; | 124 | time_buf->tm_hour = (int)hid_time_value(raw_len, raw_data); |
102 | break; | 125 | break; |
103 | case HID_USAGE_SENSOR_TIME_MINUTE: | 126 | case HID_USAGE_SENSOR_TIME_MINUTE: |
104 | time_buf->tm_min = *(u8 *)raw_data; | 127 | time_buf->tm_min = (int)hid_time_value(raw_len, raw_data); |
105 | break; | 128 | break; |
106 | case HID_USAGE_SENSOR_TIME_SECOND: | 129 | case HID_USAGE_SENSOR_TIME_SECOND: |
107 | time_buf->tm_sec = *(u8 *)raw_data; | 130 | time_buf->tm_sec = (int)hid_time_value(raw_len, raw_data); |
108 | break; | 131 | break; |
109 | default: | 132 | default: |
110 | return -EINVAL; | 133 | return -EINVAL; |
@@ -150,9 +173,10 @@ static int hid_time_parse_report(struct platform_device *pdev, | |||
150 | "not all needed attributes inside the same report!\n"); | 173 | "not all needed attributes inside the same report!\n"); |
151 | return -EINVAL; | 174 | return -EINVAL; |
152 | } | 175 | } |
153 | if (time_state->info[i].size != 1) { | 176 | if (time_state->info[i].size == 3 || |
177 | time_state->info[i].size > 4) { | ||
154 | dev_err(&pdev->dev, | 178 | dev_err(&pdev->dev, |
155 | "attribute '%s' not 8 bits wide!\n", | 179 | "attribute '%s' not 8, 16 or 32 bits wide!\n", |
156 | hid_time_attrib_name( | 180 | hid_time_attrib_name( |
157 | time_state->info[i].attrib_id)); | 181 | time_state->info[i].attrib_id)); |
158 | return -EINVAL; | 182 | return -EINVAL; |
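With hid_time_value() in place, the year handling above accepts 8-, 16- and 32-bit report fields: a 1-byte value keeps the old 1970–2069 windowing, while anything wider is taken as the full year and rebased to 1900 as struct rtc_time expects. A worked example, assuming a little-endian host and made-up raw bytes:

        char raw[2] = { 0xdd, 0x07 };   /* 16-bit year attribute: 0x07dd == 2013 */
        int tm_year = (int)hid_time_value(sizeof(raw), raw) - 1900;    /* -> 113 */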
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c index a1bbbb8de029..5dbdc4405718 100644 --- a/drivers/rtc/rtc-isl12022.c +++ b/drivers/rtc/rtc-isl12022.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/rtc.h> | 16 | #include <linux/rtc.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/err.h> | ||
19 | 20 | ||
20 | #define DRV_VERSION "0.1" | 21 | #define DRV_VERSION "0.1" |
21 | 22 | ||
@@ -267,15 +268,7 @@ static int isl12022_probe(struct i2c_client *client, | |||
267 | isl12022->rtc = devm_rtc_device_register(&client->dev, | 268 | isl12022->rtc = devm_rtc_device_register(&client->dev, |
268 | isl12022_driver.driver.name, | 269 | isl12022_driver.driver.name, |
269 | &isl12022_rtc_ops, THIS_MODULE); | 270 | &isl12022_rtc_ops, THIS_MODULE); |
270 | if (IS_ERR(isl12022->rtc)) | 271 | return PTR_RET(isl12022->rtc); |
271 | return PTR_ERR(isl12022->rtc); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int isl12022_remove(struct i2c_client *client) | ||
277 | { | ||
278 | return 0; | ||
279 | } | 272 | } |
280 | 273 | ||
281 | static const struct i2c_device_id isl12022_id[] = { | 274 | static const struct i2c_device_id isl12022_id[] = { |
@@ -289,7 +282,6 @@ static struct i2c_driver isl12022_driver = { | |||
289 | .name = "rtc-isl12022", | 282 | .name = "rtc-isl12022", |
290 | }, | 283 | }, |
291 | .probe = isl12022_probe, | 284 | .probe = isl12022_probe, |
292 | .remove = isl12022_remove, | ||
293 | .id_table = isl12022_id, | 285 | .id_table = isl12022_id, |
294 | }; | 286 | }; |
295 | 287 | ||
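PTR_RET(), used above to shrink the tail of isl12022_probe(), is shorthand for the open-coded IS_ERR()/PTR_ERR() check it replaces (see include/linux/err.h; it was later renamed PTR_ERR_OR_ZERO()). Roughly:

        static inline int PTR_RET(const void *ptr)
        {
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);
                return 0;
        }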
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 1e48686ca6d2..1b126d2513de 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/io.h> | ||
17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
@@ -216,37 +217,34 @@ static int jz4740_rtc_probe(struct platform_device *pdev) | |||
216 | struct jz4740_rtc *rtc; | 217 | struct jz4740_rtc *rtc; |
217 | uint32_t scratchpad; | 218 | uint32_t scratchpad; |
218 | 219 | ||
219 | rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); | 220 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); |
220 | if (!rtc) | 221 | if (!rtc) |
221 | return -ENOMEM; | 222 | return -ENOMEM; |
222 | 223 | ||
223 | rtc->irq = platform_get_irq(pdev, 0); | 224 | rtc->irq = platform_get_irq(pdev, 0); |
224 | if (rtc->irq < 0) { | 225 | if (rtc->irq < 0) { |
225 | ret = -ENOENT; | ||
226 | dev_err(&pdev->dev, "Failed to get platform irq\n"); | 226 | dev_err(&pdev->dev, "Failed to get platform irq\n"); |
227 | goto err_free; | 227 | return -ENOENT; |
228 | } | 228 | } |
229 | 229 | ||
230 | rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 230 | rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
231 | if (!rtc->mem) { | 231 | if (!rtc->mem) { |
232 | ret = -ENOENT; | ||
233 | dev_err(&pdev->dev, "Failed to get platform mmio memory\n"); | 232 | dev_err(&pdev->dev, "Failed to get platform mmio memory\n"); |
234 | goto err_free; | 233 | return -ENOENT; |
235 | } | 234 | } |
236 | 235 | ||
237 | rtc->mem = request_mem_region(rtc->mem->start, resource_size(rtc->mem), | 236 | rtc->mem = devm_request_mem_region(&pdev->dev, rtc->mem->start, |
238 | pdev->name); | 237 | resource_size(rtc->mem), pdev->name); |
239 | if (!rtc->mem) { | 238 | if (!rtc->mem) { |
240 | ret = -EBUSY; | ||
241 | dev_err(&pdev->dev, "Failed to request mmio memory region\n"); | 239 | dev_err(&pdev->dev, "Failed to request mmio memory region\n"); |
242 | goto err_free; | 240 | return -EBUSY; |
243 | } | 241 | } |
244 | 242 | ||
245 | rtc->base = ioremap_nocache(rtc->mem->start, resource_size(rtc->mem)); | 243 | rtc->base = devm_ioremap_nocache(&pdev->dev, rtc->mem->start, |
244 | resource_size(rtc->mem)); | ||
246 | if (!rtc->base) { | 245 | if (!rtc->base) { |
247 | ret = -EBUSY; | ||
248 | dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); | 246 | dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); |
249 | goto err_release_mem_region; | 247 | return -EBUSY; |
250 | } | 248 | } |
251 | 249 | ||
252 | spin_lock_init(&rtc->lock); | 250 | spin_lock_init(&rtc->lock); |
@@ -255,19 +253,19 @@ static int jz4740_rtc_probe(struct platform_device *pdev) | |||
255 | 253 | ||
256 | device_init_wakeup(&pdev->dev, 1); | 254 | device_init_wakeup(&pdev->dev, 1); |
257 | 255 | ||
258 | rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops, | 256 | rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
259 | THIS_MODULE); | 257 | &jz4740_rtc_ops, THIS_MODULE); |
260 | if (IS_ERR(rtc->rtc)) { | 258 | if (IS_ERR(rtc->rtc)) { |
261 | ret = PTR_ERR(rtc->rtc); | 259 | ret = PTR_ERR(rtc->rtc); |
262 | dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret); | 260 | dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret); |
263 | goto err_iounmap; | 261 | return ret; |
264 | } | 262 | } |
265 | 263 | ||
266 | ret = request_irq(rtc->irq, jz4740_rtc_irq, 0, | 264 | ret = devm_request_irq(&pdev->dev, rtc->irq, jz4740_rtc_irq, 0, |
267 | pdev->name, rtc); | 265 | pdev->name, rtc); |
268 | if (ret) { | 266 | if (ret) { |
269 | dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret); | 267 | dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret); |
270 | goto err_unregister_rtc; | 268 | return ret; |
271 | } | 269 | } |
272 | 270 | ||
273 | scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD); | 271 | scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD); |
@@ -276,46 +274,13 @@ static int jz4740_rtc_probe(struct platform_device *pdev) | |||
276 | ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0); | 274 | ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0); |
277 | if (ret) { | 275 | if (ret) { |
278 | dev_err(&pdev->dev, "Could not write write to RTC registers\n"); | 276 | dev_err(&pdev->dev, "Could not write write to RTC registers\n"); |
279 | goto err_free_irq; | 277 | return ret; |
280 | } | 278 | } |
281 | } | 279 | } |
282 | 280 | ||
283 | return 0; | 281 | return 0; |
284 | |||
285 | err_free_irq: | ||
286 | free_irq(rtc->irq, rtc); | ||
287 | err_unregister_rtc: | ||
288 | rtc_device_unregister(rtc->rtc); | ||
289 | err_iounmap: | ||
290 | platform_set_drvdata(pdev, NULL); | ||
291 | iounmap(rtc->base); | ||
292 | err_release_mem_region: | ||
293 | release_mem_region(rtc->mem->start, resource_size(rtc->mem)); | ||
294 | err_free: | ||
295 | kfree(rtc); | ||
296 | |||
297 | return ret; | ||
298 | } | ||
299 | |||
300 | static int jz4740_rtc_remove(struct platform_device *pdev) | ||
301 | { | ||
302 | struct jz4740_rtc *rtc = platform_get_drvdata(pdev); | ||
303 | |||
304 | free_irq(rtc->irq, rtc); | ||
305 | |||
306 | rtc_device_unregister(rtc->rtc); | ||
307 | |||
308 | iounmap(rtc->base); | ||
309 | release_mem_region(rtc->mem->start, resource_size(rtc->mem)); | ||
310 | |||
311 | kfree(rtc); | ||
312 | |||
313 | platform_set_drvdata(pdev, NULL); | ||
314 | |||
315 | return 0; | ||
316 | } | 282 | } |
317 | 283 | ||
318 | |||
319 | #ifdef CONFIG_PM | 284 | #ifdef CONFIG_PM |
320 | static int jz4740_rtc_suspend(struct device *dev) | 285 | static int jz4740_rtc_suspend(struct device *dev) |
321 | { | 286 | { |
@@ -347,7 +312,6 @@ static const struct dev_pm_ops jz4740_pm_ops = { | |||
347 | 312 | ||
348 | static struct platform_driver jz4740_rtc_driver = { | 313 | static struct platform_driver jz4740_rtc_driver = { |
349 | .probe = jz4740_rtc_probe, | 314 | .probe = jz4740_rtc_probe, |
350 | .remove = jz4740_rtc_remove, | ||
351 | .driver = { | 315 | .driver = { |
352 | .name = "jz4740-rtc", | 316 | .name = "jz4740-rtc", |
353 | .owner = THIS_MODULE, | 317 | .owner = THIS_MODULE, |
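The jz4740 rewrite above shows the same devm_* idea applied to a memory-mapped platform device: the managed request_mem_region/ioremap/IRQ variants are torn down automatically in reverse order, which is what lets the whole err_free_irq / err_unregister_rtc / err_iounmap / err_release_mem_region / err_free label chain disappear. A skeleton of the pattern (all names illustrative):

        static int example_mmio_rtc_probe(struct platform_device *pdev)
        {
                struct resource *mem;
                void __iomem *base;
                int irq, ret;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return -ENOENT;

                mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!mem)
                        return -ENOENT;

                if (!devm_request_mem_region(&pdev->dev, mem->start,
                                             resource_size(mem), pdev->name))
                        return -EBUSY;

                base = devm_ioremap_nocache(&pdev->dev, mem->start,
                                            resource_size(mem));
                if (!base)
                        return -EBUSY;

                ret = devm_request_irq(&pdev->dev, irq, example_rtc_irq, 0,
                                       pdev->name, pdev);
                if (ret)
                        return ret;     /* everything above is released automatically */

                return 0;
        }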
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c index 9853ac15b296..4ff6c73253b3 100644 --- a/drivers/rtc/rtc-lp8788.c +++ b/drivers/rtc/rtc-lp8788.c | |||
@@ -312,16 +312,8 @@ static int lp8788_rtc_probe(struct platform_device *pdev) | |||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static int lp8788_rtc_remove(struct platform_device *pdev) | ||
316 | { | ||
317 | platform_set_drvdata(pdev, NULL); | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | static struct platform_driver lp8788_rtc_driver = { | 315 | static struct platform_driver lp8788_rtc_driver = { |
323 | .probe = lp8788_rtc_probe, | 316 | .probe = lp8788_rtc_probe, |
324 | .remove = lp8788_rtc_remove, | ||
325 | .driver = { | 317 | .driver = { |
326 | .name = LP8788_DEV_RTC, | 318 | .name = LP8788_DEV_RTC, |
327 | .owner = THIS_MODULE, | 319 | .owner = THIS_MODULE, |
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c index 787550d756e9..8276ae94a2a9 100644 --- a/drivers/rtc/rtc-lpc32xx.c +++ b/drivers/rtc/rtc-lpc32xx.c | |||
@@ -277,7 +277,6 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev) | |||
277 | &lpc32xx_rtc_ops, THIS_MODULE); | 277 | &lpc32xx_rtc_ops, THIS_MODULE); |
278 | if (IS_ERR(rtc->rtc)) { | 278 | if (IS_ERR(rtc->rtc)) { |
279 | dev_err(&pdev->dev, "Can't get RTC\n"); | 279 | dev_err(&pdev->dev, "Can't get RTC\n"); |
280 | platform_set_drvdata(pdev, NULL); | ||
281 | return PTR_ERR(rtc->rtc); | 280 | return PTR_ERR(rtc->rtc); |
282 | } | 281 | } |
283 | 282 | ||
@@ -306,8 +305,6 @@ static int lpc32xx_rtc_remove(struct platform_device *pdev) | |||
306 | if (rtc->irq >= 0) | 305 | if (rtc->irq >= 0) |
307 | device_init_wakeup(&pdev->dev, 0); | 306 | device_init_wakeup(&pdev->dev, 0); |
308 | 307 | ||
309 | platform_set_drvdata(pdev, NULL); | ||
310 | |||
311 | return 0; | 308 | return 0; |
312 | } | 309 | } |
313 | 310 | ||
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c index db82f91f4562..682ecb094839 100644 --- a/drivers/rtc/rtc-ls1x.c +++ b/drivers/rtc/rtc-ls1x.c | |||
@@ -185,19 +185,11 @@ err: | |||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
187 | 187 | ||
188 | static int ls1x_rtc_remove(struct platform_device *pdev) | ||
189 | { | ||
190 | platform_set_drvdata(pdev, NULL); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static struct platform_driver ls1x_rtc_driver = { | 188 | static struct platform_driver ls1x_rtc_driver = { |
196 | .driver = { | 189 | .driver = { |
197 | .name = "ls1x-rtc", | 190 | .name = "ls1x-rtc", |
198 | .owner = THIS_MODULE, | 191 | .owner = THIS_MODULE, |
199 | }, | 192 | }, |
200 | .remove = ls1x_rtc_remove, | ||
201 | .probe = ls1x_rtc_probe, | 193 | .probe = ls1x_rtc_probe, |
202 | }; | 194 | }; |
203 | 195 | ||
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 89674b5e6efd..a5248aa1abf1 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -168,7 +168,7 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
168 | buf[M41T80_REG_MIN] = | 168 | buf[M41T80_REG_MIN] = |
169 | bin2bcd(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f); | 169 | bin2bcd(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f); |
170 | buf[M41T80_REG_HOUR] = | 170 | buf[M41T80_REG_HOUR] = |
171 | bin2bcd(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ; | 171 | bin2bcd(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f); |
172 | buf[M41T80_REG_WDAY] = | 172 | buf[M41T80_REG_WDAY] = |
173 | (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07); | 173 | (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07); |
174 | buf[M41T80_REG_DAY] = | 174 | buf[M41T80_REG_DAY] = |
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c index 9707d36e8b15..4698c7e344e4 100644 --- a/drivers/rtc/rtc-m41t93.c +++ b/drivers/rtc/rtc-m41t93.c | |||
@@ -194,19 +194,12 @@ static int m41t93_probe(struct spi_device *spi) | |||
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | |||
198 | static int m41t93_remove(struct spi_device *spi) | ||
199 | { | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static struct spi_driver m41t93_driver = { | 197 | static struct spi_driver m41t93_driver = { |
204 | .driver = { | 198 | .driver = { |
205 | .name = "rtc-m41t93", | 199 | .name = "rtc-m41t93", |
206 | .owner = THIS_MODULE, | 200 | .owner = THIS_MODULE, |
207 | }, | 201 | }, |
208 | .probe = m41t93_probe, | 202 | .probe = m41t93_probe, |
209 | .remove = m41t93_remove, | ||
210 | }; | 203 | }; |
211 | 204 | ||
212 | module_spi_driver(m41t93_driver); | 205 | module_spi_driver(m41t93_driver); |
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c index 7454ef0a4cfa..8d800b1bf87b 100644 --- a/drivers/rtc/rtc-m41t94.c +++ b/drivers/rtc/rtc-m41t94.c | |||
@@ -134,18 +134,12 @@ static int m41t94_probe(struct spi_device *spi) | |||
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
137 | static int m41t94_remove(struct spi_device *spi) | ||
138 | { | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static struct spi_driver m41t94_driver = { | 137 | static struct spi_driver m41t94_driver = { |
143 | .driver = { | 138 | .driver = { |
144 | .name = "rtc-m41t94", | 139 | .name = "rtc-m41t94", |
145 | .owner = THIS_MODULE, | 140 | .owner = THIS_MODULE, |
146 | }, | 141 | }, |
147 | .probe = m41t94_probe, | 142 | .probe = m41t94_probe, |
148 | .remove = m41t94_remove, | ||
149 | }; | 143 | }; |
150 | 144 | ||
151 | module_spi_driver(m41t94_driver); | 145 | module_spi_driver(m41t94_driver); |
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c index 37444246e5e4..23c3779a5f2b 100644 --- a/drivers/rtc/rtc-m48t35.c +++ b/drivers/rtc/rtc-m48t35.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/bcd.h> | 21 | #include <linux/bcd.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/err.h> | ||
23 | 24 | ||
24 | #define DRV_VERSION "1.0" | 25 | #define DRV_VERSION "1.0" |
25 | 26 | ||
@@ -174,15 +175,7 @@ static int m48t35_probe(struct platform_device *pdev) | |||
174 | 175 | ||
175 | priv->rtc = devm_rtc_device_register(&pdev->dev, "m48t35", | 176 | priv->rtc = devm_rtc_device_register(&pdev->dev, "m48t35", |
176 | &m48t35_ops, THIS_MODULE); | 177 | &m48t35_ops, THIS_MODULE); |
177 | if (IS_ERR(priv->rtc)) | 178 | return PTR_RET(priv->rtc); |
178 | return PTR_ERR(priv->rtc); | ||
179 | |||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static int m48t35_remove(struct platform_device *pdev) | ||
184 | { | ||
185 | return 0; | ||
186 | } | 179 | } |
187 | 180 | ||
188 | static struct platform_driver m48t35_platform_driver = { | 181 | static struct platform_driver m48t35_platform_driver = { |
@@ -191,7 +184,6 @@ static struct platform_driver m48t35_platform_driver = { | |||
191 | .owner = THIS_MODULE, | 184 | .owner = THIS_MODULE, |
192 | }, | 185 | }, |
193 | .probe = m48t35_probe, | 186 | .probe = m48t35_probe, |
194 | .remove = m48t35_remove, | ||
195 | }; | 187 | }; |
196 | 188 | ||
197 | module_platform_driver(m48t35_platform_driver); | 189 | module_platform_driver(m48t35_platform_driver); |
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index 130f29af3869..fcb03291f145 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c | |||
@@ -409,7 +409,8 @@ static int m48t59_rtc_probe(struct platform_device *pdev) | |||
409 | } else if (res->flags & IORESOURCE_MEM) { | 409 | } else if (res->flags & IORESOURCE_MEM) { |
410 | /* we are memory-mapped */ | 410 | /* we are memory-mapped */ |
411 | if (!pdata) { | 411 | if (!pdata) { |
412 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 412 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), |
413 | GFP_KERNEL); | ||
413 | if (!pdata) | 414 | if (!pdata) |
414 | return -ENOMEM; | 415 | return -ENOMEM; |
415 | /* Ensure we only kmalloc platform data once */ | 416 | /* Ensure we only kmalloc platform data once */ |
@@ -425,7 +426,7 @@ static int m48t59_rtc_probe(struct platform_device *pdev) | |||
425 | pdata->read_byte = m48t59_mem_readb; | 426 | pdata->read_byte = m48t59_mem_readb; |
426 | } | 427 | } |
427 | 428 | ||
428 | m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL); | 429 | m48t59 = devm_kzalloc(&pdev->dev, sizeof(*m48t59), GFP_KERNEL); |
429 | if (!m48t59) | 430 | if (!m48t59) |
430 | return -ENOMEM; | 431 | return -ENOMEM; |
431 | 432 | ||
@@ -433,9 +434,10 @@ static int m48t59_rtc_probe(struct platform_device *pdev) | |||
433 | 434 | ||
434 | if (!m48t59->ioaddr) { | 435 | if (!m48t59->ioaddr) { |
435 | /* ioaddr not mapped externally */ | 436 | /* ioaddr not mapped externally */ |
436 | m48t59->ioaddr = ioremap(res->start, resource_size(res)); | 437 | m48t59->ioaddr = devm_ioremap(&pdev->dev, res->start, |
438 | resource_size(res)); | ||
437 | if (!m48t59->ioaddr) | 439 | if (!m48t59->ioaddr) |
438 | goto out; | 440 | return ret; |
439 | } | 441 | } |
440 | 442 | ||
441 | /* Try to get irq number. We also can work in | 443 | /* Try to get irq number. We also can work in |
@@ -446,10 +448,11 @@ static int m48t59_rtc_probe(struct platform_device *pdev) | |||
446 | m48t59->irq = NO_IRQ; | 448 | m48t59->irq = NO_IRQ; |
447 | 449 | ||
448 | if (m48t59->irq != NO_IRQ) { | 450 | if (m48t59->irq != NO_IRQ) { |
449 | ret = request_irq(m48t59->irq, m48t59_rtc_interrupt, | 451 | ret = devm_request_irq(&pdev->dev, m48t59->irq, |
450 | IRQF_SHARED, "rtc-m48t59", &pdev->dev); | 452 | m48t59_rtc_interrupt, IRQF_SHARED, |
453 | "rtc-m48t59", &pdev->dev); | ||
451 | if (ret) | 454 | if (ret) |
452 | goto out; | 455 | return ret; |
453 | } | 456 | } |
454 | switch (pdata->type) { | 457 | switch (pdata->type) { |
455 | case M48T59RTC_TYPE_M48T59: | 458 | case M48T59RTC_TYPE_M48T59: |
@@ -469,52 +472,29 @@ static int m48t59_rtc_probe(struct platform_device *pdev) | |||
469 | break; | 472 | break; |
470 | default: | 473 | default: |
471 | dev_err(&pdev->dev, "Unknown RTC type\n"); | 474 | dev_err(&pdev->dev, "Unknown RTC type\n"); |
472 | ret = -ENODEV; | 475 | return -ENODEV; |
473 | goto out; | ||
474 | } | 476 | } |
475 | 477 | ||
476 | spin_lock_init(&m48t59->lock); | 478 | spin_lock_init(&m48t59->lock); |
477 | platform_set_drvdata(pdev, m48t59); | 479 | platform_set_drvdata(pdev, m48t59); |
478 | 480 | ||
479 | m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE); | 481 | m48t59->rtc = devm_rtc_device_register(&pdev->dev, name, ops, |
480 | if (IS_ERR(m48t59->rtc)) { | 482 | THIS_MODULE); |
481 | ret = PTR_ERR(m48t59->rtc); | 483 | if (IS_ERR(m48t59->rtc)) |
482 | goto out; | 484 | return PTR_ERR(m48t59->rtc); |
483 | } | ||
484 | 485 | ||
485 | m48t59_nvram_attr.size = pdata->offset; | 486 | m48t59_nvram_attr.size = pdata->offset; |
486 | 487 | ||
487 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); | 488 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); |
488 | if (ret) { | 489 | if (ret) |
489 | rtc_device_unregister(m48t59->rtc); | 490 | return ret; |
490 | goto out; | ||
491 | } | ||
492 | 491 | ||
493 | return 0; | 492 | return 0; |
494 | |||
495 | out: | ||
496 | if (m48t59->irq != NO_IRQ) | ||
497 | free_irq(m48t59->irq, &pdev->dev); | ||
498 | if (m48t59->ioaddr) | ||
499 | iounmap(m48t59->ioaddr); | ||
500 | kfree(m48t59); | ||
501 | return ret; | ||
502 | } | 493 | } |
503 | 494 | ||
504 | static int m48t59_rtc_remove(struct platform_device *pdev) | 495 | static int m48t59_rtc_remove(struct platform_device *pdev) |
505 | { | 496 | { |
506 | struct m48t59_private *m48t59 = platform_get_drvdata(pdev); | ||
507 | struct m48t59_plat_data *pdata = pdev->dev.platform_data; | ||
508 | |||
509 | sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); | 497 | sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); |
510 | if (!IS_ERR(m48t59->rtc)) | ||
511 | rtc_device_unregister(m48t59->rtc); | ||
512 | if (m48t59->ioaddr && !pdata->ioaddr) | ||
513 | iounmap(m48t59->ioaddr); | ||
514 | if (m48t59->irq != NO_IRQ) | ||
515 | free_irq(m48t59->irq, &pdev->dev); | ||
516 | platform_set_drvdata(pdev, NULL); | ||
517 | kfree(m48t59); | ||
518 | return 0; | 498 | return 0; |
519 | } | 499 | } |
520 | 500 | ||
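The m48t59 conversion replaces the manual kzalloc()/ioremap()/request_irq()/rtc_device_register() calls and the out: unwind label with their devm_* counterparts, which the driver core releases automatically when probe fails or the device is unbound. A hedged sketch of the resulting probe shape, reusing the names from the hunk (error codes here are chosen for illustration and the flow is abbreviated from the full driver):

	m48t59 = devm_kzalloc(&pdev->dev, sizeof(*m48t59), GFP_KERNEL);
	if (!m48t59)
		return -ENOMEM;

	m48t59->ioaddr = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!m48t59->ioaddr)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, m48t59->irq, m48t59_rtc_interrupt,
			       IRQF_SHARED, "rtc-m48t59", &pdev->dev);
	if (ret)
		return ret;

	m48t59->rtc = devm_rtc_device_register(&pdev->dev, name, ops,
					       THIS_MODULE);
	if (IS_ERR(m48t59->rtc))
		return PTR_ERR(m48t59->rtc);

	/* no goto/unwind needed: managed resources are released in
	 * reverse order by the driver core on failure and on remove */
	return 0;

The remove() path shrinks accordingly, since only the sysfs nvram file still needs an explicit teardown.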
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c index 33a91c484533..2d30314fa07f 100644 --- a/drivers/rtc/rtc-m48t86.c +++ b/drivers/rtc/rtc-m48t86.c | |||
@@ -166,20 +166,12 @@ static int m48t86_rtc_probe(struct platform_device *dev) | |||
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | 168 | ||
169 | static int m48t86_rtc_remove(struct platform_device *dev) | ||
170 | { | ||
171 | platform_set_drvdata(dev, NULL); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static struct platform_driver m48t86_rtc_platform_driver = { | 169 | static struct platform_driver m48t86_rtc_platform_driver = { |
177 | .driver = { | 170 | .driver = { |
178 | .name = "rtc-m48t86", | 171 | .name = "rtc-m48t86", |
179 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
180 | }, | 173 | }, |
181 | .probe = m48t86_rtc_probe, | 174 | .probe = m48t86_rtc_probe, |
182 | .remove = m48t86_rtc_remove, | ||
183 | }; | 175 | }; |
184 | 176 | ||
185 | module_platform_driver(m48t86_rtc_platform_driver); | 177 | module_platform_driver(m48t86_rtc_platform_driver); |
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c index 8669d6d09a00..55969b1b771a 100644 --- a/drivers/rtc/rtc-max6900.c +++ b/drivers/rtc/rtc-max6900.c | |||
@@ -212,11 +212,6 @@ static int max6900_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
212 | return max6900_i2c_set_time(to_i2c_client(dev), tm); | 212 | return max6900_i2c_set_time(to_i2c_client(dev), tm); |
213 | } | 213 | } |
214 | 214 | ||
215 | static int max6900_remove(struct i2c_client *client) | ||
216 | { | ||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static const struct rtc_class_ops max6900_rtc_ops = { | 215 | static const struct rtc_class_ops max6900_rtc_ops = { |
221 | .read_time = max6900_rtc_read_time, | 216 | .read_time = max6900_rtc_read_time, |
222 | .set_time = max6900_rtc_set_time, | 217 | .set_time = max6900_rtc_set_time, |
@@ -252,7 +247,6 @@ static struct i2c_driver max6900_driver = { | |||
252 | .name = "rtc-max6900", | 247 | .name = "rtc-max6900", |
253 | }, | 248 | }, |
254 | .probe = max6900_probe, | 249 | .probe = max6900_probe, |
255 | .remove = max6900_remove, | ||
256 | .id_table = max6900_id, | 250 | .id_table = max6900_id, |
257 | }; | 251 | }; |
258 | 252 | ||
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index e3aea00c3145..ac3f4191864f 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c | |||
@@ -143,23 +143,17 @@ static int max6902_probe(struct spi_device *spi) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | static int max6902_remove(struct spi_device *spi) | ||
147 | { | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct spi_driver max6902_driver = { | 146 | static struct spi_driver max6902_driver = { |
152 | .driver = { | 147 | .driver = { |
153 | .name = "rtc-max6902", | 148 | .name = "rtc-max6902", |
154 | .owner = THIS_MODULE, | 149 | .owner = THIS_MODULE, |
155 | }, | 150 | }, |
156 | .probe = max6902_probe, | 151 | .probe = max6902_probe, |
157 | .remove = max6902_remove, | ||
158 | }; | 152 | }; |
159 | 153 | ||
160 | module_spi_driver(max6902_driver); | 154 | module_spi_driver(max6902_driver); |
161 | 155 | ||
162 | MODULE_DESCRIPTION ("max6902 spi RTC driver"); | 156 | MODULE_DESCRIPTION("max6902 spi RTC driver"); |
163 | MODULE_AUTHOR ("Raphael Assenat"); | 157 | MODULE_AUTHOR("Raphael Assenat"); |
164 | MODULE_LICENSE ("GPL"); | 158 | MODULE_LICENSE("GPL"); |
165 | MODULE_ALIAS("spi:rtc-max6902"); | 159 | MODULE_ALIAS("spi:rtc-max6902"); |
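The max6900 and max6902 hunks (and the max77686/max8907/max8925/max8997/max8998 ones that follow) delete remove() callbacks whose only statement is return 0;. The i2c, spi and platform cores all treat a missing .remove as a no-op, so both the stub function and the .remove = line can simply go. Illustrative sketch with hypothetical names (example_driver, example_probe and "rtc-example" are not from the patch):

	static struct spi_driver example_driver = {
		.driver = {
			.name	= "rtc-example",
			.owner	= THIS_MODULE,
		},
		.probe	= example_probe,
		/* no .remove: the bus core does nothing when the
		 * callback is left NULL */
	};
	module_spi_driver(example_driver);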
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c index 771812d62e6b..9915cb96014b 100644 --- a/drivers/rtc/rtc-max77686.c +++ b/drivers/rtc/rtc-max77686.c | |||
@@ -119,7 +119,7 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data) | |||
119 | data[RTC_WEEKDAY] = 1 << tm->tm_wday; | 119 | data[RTC_WEEKDAY] = 1 << tm->tm_wday; |
120 | data[RTC_DATE] = tm->tm_mday; | 120 | data[RTC_DATE] = tm->tm_mday; |
121 | data[RTC_MONTH] = tm->tm_mon + 1; | 121 | data[RTC_MONTH] = tm->tm_mon + 1; |
122 | data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0 ; | 122 | data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0; |
123 | 123 | ||
124 | if (tm->tm_year < 100) { | 124 | if (tm->tm_year < 100) { |
125 | pr_warn("%s: MAX77686 RTC cannot handle the year %d." | 125 | pr_warn("%s: MAX77686 RTC cannot handle the year %d." |
@@ -567,11 +567,6 @@ err_rtc: | |||
567 | return ret; | 567 | return ret; |
568 | } | 568 | } |
569 | 569 | ||
570 | static int max77686_rtc_remove(struct platform_device *pdev) | ||
571 | { | ||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | static void max77686_rtc_shutdown(struct platform_device *pdev) | 570 | static void max77686_rtc_shutdown(struct platform_device *pdev) |
576 | { | 571 | { |
577 | #ifdef MAX77686_RTC_WTSR_SMPL | 572 | #ifdef MAX77686_RTC_WTSR_SMPL |
@@ -610,7 +605,6 @@ static struct platform_driver max77686_rtc_driver = { | |||
610 | .owner = THIS_MODULE, | 605 | .owner = THIS_MODULE, |
611 | }, | 606 | }, |
612 | .probe = max77686_rtc_probe, | 607 | .probe = max77686_rtc_probe, |
613 | .remove = max77686_rtc_remove, | ||
614 | .shutdown = max77686_rtc_shutdown, | 608 | .shutdown = max77686_rtc_shutdown, |
615 | .id_table = rtc_id, | 609 | .id_table = rtc_id, |
616 | }; | 610 | }; |
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c index 86afb797125d..8e45b3c4aa2f 100644 --- a/drivers/rtc/rtc-max8907.c +++ b/drivers/rtc/rtc-max8907.c | |||
@@ -213,18 +213,12 @@ static int max8907_rtc_probe(struct platform_device *pdev) | |||
213 | return ret; | 213 | return ret; |
214 | } | 214 | } |
215 | 215 | ||
216 | static int max8907_rtc_remove(struct platform_device *pdev) | ||
217 | { | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static struct platform_driver max8907_rtc_driver = { | 216 | static struct platform_driver max8907_rtc_driver = { |
222 | .driver = { | 217 | .driver = { |
223 | .name = "max8907-rtc", | 218 | .name = "max8907-rtc", |
224 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
225 | }, | 220 | }, |
226 | .probe = max8907_rtc_probe, | 221 | .probe = max8907_rtc_probe, |
227 | .remove = max8907_rtc_remove, | ||
228 | }; | 222 | }; |
229 | module_platform_driver(max8907_rtc_driver); | 223 | module_platform_driver(max8907_rtc_driver); |
230 | 224 | ||
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 7c90f4e45e27..951d1a78e190 100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c | |||
@@ -268,7 +268,7 @@ static int max8925_rtc_probe(struct platform_device *pdev) | |||
268 | if (ret < 0) { | 268 | if (ret < 0) { |
269 | dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", | 269 | dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", |
270 | info->irq, ret); | 270 | info->irq, ret); |
271 | goto err; | 271 | return ret; |
272 | } | 272 | } |
273 | 273 | ||
274 | dev_set_drvdata(&pdev->dev, info); | 274 | dev_set_drvdata(&pdev->dev, info); |
@@ -282,18 +282,10 @@ static int max8925_rtc_probe(struct platform_device *pdev) | |||
282 | ret = PTR_ERR(info->rtc_dev); | 282 | ret = PTR_ERR(info->rtc_dev); |
283 | if (IS_ERR(info->rtc_dev)) { | 283 | if (IS_ERR(info->rtc_dev)) { |
284 | dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); | 284 | dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); |
285 | goto err; | 285 | return ret; |
286 | } | 286 | } |
287 | 287 | ||
288 | return 0; | 288 | return 0; |
289 | err: | ||
290 | platform_set_drvdata(pdev, NULL); | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | static int max8925_rtc_remove(struct platform_device *pdev) | ||
295 | { | ||
296 | return 0; | ||
297 | } | 289 | } |
298 | 290 | ||
299 | #ifdef CONFIG_PM_SLEEP | 291 | #ifdef CONFIG_PM_SLEEP |
@@ -326,7 +318,6 @@ static struct platform_driver max8925_rtc_driver = { | |||
326 | .pm = &max8925_rtc_pm_ops, | 318 | .pm = &max8925_rtc_pm_ops, |
327 | }, | 319 | }, |
328 | .probe = max8925_rtc_probe, | 320 | .probe = max8925_rtc_probe, |
329 | .remove = max8925_rtc_remove, | ||
330 | }; | 321 | }; |
331 | 322 | ||
332 | module_platform_driver(max8925_rtc_driver); | 323 | module_platform_driver(max8925_rtc_driver); |
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c index dacf48db7925..0777c01b58e0 100644 --- a/drivers/rtc/rtc-max8997.c +++ b/drivers/rtc/rtc-max8997.c | |||
@@ -104,7 +104,7 @@ static int max8997_rtc_tm_to_data(struct rtc_time *tm, u8 *data) | |||
104 | data[RTC_WEEKDAY] = 1 << tm->tm_wday; | 104 | data[RTC_WEEKDAY] = 1 << tm->tm_wday; |
105 | data[RTC_DATE] = tm->tm_mday; | 105 | data[RTC_DATE] = tm->tm_mday; |
106 | data[RTC_MONTH] = tm->tm_mon + 1; | 106 | data[RTC_MONTH] = tm->tm_mon + 1; |
107 | data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0 ; | 107 | data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0; |
108 | 108 | ||
109 | if (tm->tm_year < 100) { | 109 | if (tm->tm_year < 100) { |
110 | pr_warn("%s: MAX8997 RTC cannot handle the year %d." | 110 | pr_warn("%s: MAX8997 RTC cannot handle the year %d." |
@@ -507,11 +507,6 @@ err_out: | |||
507 | return ret; | 507 | return ret; |
508 | } | 508 | } |
509 | 509 | ||
510 | static int max8997_rtc_remove(struct platform_device *pdev) | ||
511 | { | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static void max8997_rtc_shutdown(struct platform_device *pdev) | 510 | static void max8997_rtc_shutdown(struct platform_device *pdev) |
516 | { | 511 | { |
517 | struct max8997_rtc_info *info = platform_get_drvdata(pdev); | 512 | struct max8997_rtc_info *info = platform_get_drvdata(pdev); |
@@ -531,7 +526,6 @@ static struct platform_driver max8997_rtc_driver = { | |||
531 | .owner = THIS_MODULE, | 526 | .owner = THIS_MODULE, |
532 | }, | 527 | }, |
533 | .probe = max8997_rtc_probe, | 528 | .probe = max8997_rtc_probe, |
534 | .remove = max8997_rtc_remove, | ||
535 | .shutdown = max8997_rtc_shutdown, | 529 | .shutdown = max8997_rtc_shutdown, |
536 | .id_table = rtc_id, | 530 | .id_table = rtc_id, |
537 | }; | 531 | }; |
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index d5af7baa48b5..5388336a2c4c 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c | |||
@@ -274,7 +274,7 @@ static int max8998_rtc_probe(struct platform_device *pdev) | |||
274 | if (IS_ERR(info->rtc_dev)) { | 274 | if (IS_ERR(info->rtc_dev)) { |
275 | ret = PTR_ERR(info->rtc_dev); | 275 | ret = PTR_ERR(info->rtc_dev); |
276 | dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); | 276 | dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); |
277 | goto out_rtc; | 277 | return ret; |
278 | } | 278 | } |
279 | 279 | ||
280 | ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, | 280 | ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, |
@@ -292,15 +292,6 @@ static int max8998_rtc_probe(struct platform_device *pdev) | |||
292 | } | 292 | } |
293 | 293 | ||
294 | return 0; | 294 | return 0; |
295 | |||
296 | out_rtc: | ||
297 | platform_set_drvdata(pdev, NULL); | ||
298 | return ret; | ||
299 | } | ||
300 | |||
301 | static int max8998_rtc_remove(struct platform_device *pdev) | ||
302 | { | ||
303 | return 0; | ||
304 | } | 295 | } |
305 | 296 | ||
306 | static const struct platform_device_id max8998_rtc_id[] = { | 297 | static const struct platform_device_id max8998_rtc_id[] = { |
@@ -315,7 +306,6 @@ static struct platform_driver max8998_rtc_driver = { | |||
315 | .owner = THIS_MODULE, | 306 | .owner = THIS_MODULE, |
316 | }, | 307 | }, |
317 | .probe = max8998_rtc_probe, | 308 | .probe = max8998_rtc_probe, |
318 | .remove = max8998_rtc_remove, | ||
319 | .id_table = max8998_rtc_id, | 309 | .id_table = max8998_rtc_id, |
320 | }; | 310 | }; |
321 | 311 | ||
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index 7a8ed27a5f2e..77ea9896b5ba 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
@@ -370,8 +370,6 @@ err_reset_irq_status: | |||
370 | err_reset_irq_request: | 370 | err_reset_irq_request: |
371 | 371 | ||
372 | mc13xxx_unlock(mc13xxx); | 372 | mc13xxx_unlock(mc13xxx); |
373 | |||
374 | platform_set_drvdata(pdev, NULL); | ||
375 | } | 373 | } |
376 | 374 | ||
377 | return ret; | 375 | return ret; |
@@ -389,8 +387,6 @@ static int __exit mc13xxx_rtc_remove(struct platform_device *pdev) | |||
389 | 387 | ||
390 | mc13xxx_unlock(priv->mc13xxx); | 388 | mc13xxx_unlock(priv->mc13xxx); |
391 | 389 | ||
392 | platform_set_drvdata(pdev, NULL); | ||
393 | |||
394 | return 0; | 390 | return 0; |
395 | } | 391 | } |
396 | 392 | ||
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c index bdcc60830aec..9c8f60903799 100644 --- a/drivers/rtc/rtc-mpc5121.c +++ b/drivers/rtc/rtc-mpc5121.c | |||
@@ -68,7 +68,7 @@ struct mpc5121_rtc_regs { | |||
68 | u32 target_time; /* RTC + 0x20 */ | 68 | u32 target_time; /* RTC + 0x20 */ |
69 | /* | 69 | /* |
70 | * actual_time: | 70 | * actual_time: |
71 | * readonly time since VBAT_RTC was last connected | 71 | * readonly time since VBAT_RTC was last connected |
72 | */ | 72 | */ |
73 | u32 actual_time; /* RTC + 0x24 */ | 73 | u32 actual_time; /* RTC + 0x24 */ |
74 | u32 keep_alive; /* RTC + 0x28 */ | 74 | u32 keep_alive; /* RTC + 0x28 */ |
@@ -312,20 +312,19 @@ static int mpc5121_rtc_probe(struct platform_device *op) | |||
312 | struct mpc5121_rtc_data *rtc; | 312 | struct mpc5121_rtc_data *rtc; |
313 | int err = 0; | 313 | int err = 0; |
314 | 314 | ||
315 | rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); | 315 | rtc = devm_kzalloc(&op->dev, sizeof(*rtc), GFP_KERNEL); |
316 | if (!rtc) | 316 | if (!rtc) |
317 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | 318 | ||
319 | rtc->regs = of_iomap(op->dev.of_node, 0); | 319 | rtc->regs = of_iomap(op->dev.of_node, 0); |
320 | if (!rtc->regs) { | 320 | if (!rtc->regs) { |
321 | dev_err(&op->dev, "%s: couldn't map io space\n", __func__); | 321 | dev_err(&op->dev, "%s: couldn't map io space\n", __func__); |
322 | err = -ENOSYS; | 322 | return -ENOSYS; |
323 | goto out_free; | ||
324 | } | 323 | } |
325 | 324 | ||
326 | device_init_wakeup(&op->dev, 1); | 325 | device_init_wakeup(&op->dev, 1); |
327 | 326 | ||
328 | dev_set_drvdata(&op->dev, rtc); | 327 | platform_set_drvdata(op, rtc); |
329 | 328 | ||
330 | rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1); | 329 | rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1); |
331 | err = request_irq(rtc->irq, mpc5121_rtc_handler, 0, | 330 | err = request_irq(rtc->irq, mpc5121_rtc_handler, 0, |
@@ -354,10 +353,10 @@ static int mpc5121_rtc_probe(struct platform_device *op) | |||
354 | out_be32(&rtc->regs->keep_alive, ka); | 353 | out_be32(&rtc->regs->keep_alive, ka); |
355 | } | 354 | } |
356 | 355 | ||
357 | rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev, | 356 | rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5121-rtc", |
358 | &mpc5121_rtc_ops, THIS_MODULE); | 357 | &mpc5121_rtc_ops, THIS_MODULE); |
359 | } else { | 358 | } else { |
360 | rtc->rtc = rtc_device_register("mpc5200-rtc", &op->dev, | 359 | rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5200-rtc", |
361 | &mpc5200_rtc_ops, THIS_MODULE); | 360 | &mpc5200_rtc_ops, THIS_MODULE); |
362 | } | 361 | } |
363 | 362 | ||
@@ -377,29 +376,24 @@ out_dispose2: | |||
377 | out_dispose: | 376 | out_dispose: |
378 | irq_dispose_mapping(rtc->irq); | 377 | irq_dispose_mapping(rtc->irq); |
379 | iounmap(rtc->regs); | 378 | iounmap(rtc->regs); |
380 | out_free: | ||
381 | kfree(rtc); | ||
382 | 379 | ||
383 | return err; | 380 | return err; |
384 | } | 381 | } |
385 | 382 | ||
386 | static int mpc5121_rtc_remove(struct platform_device *op) | 383 | static int mpc5121_rtc_remove(struct platform_device *op) |
387 | { | 384 | { |
388 | struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev); | 385 | struct mpc5121_rtc_data *rtc = platform_get_drvdata(op); |
389 | struct mpc5121_rtc_regs __iomem *regs = rtc->regs; | 386 | struct mpc5121_rtc_regs __iomem *regs = rtc->regs; |
390 | 387 | ||
391 | /* disable interrupt, so there are no nasty surprises */ | 388 | /* disable interrupt, so there are no nasty surprises */ |
392 | out_8(®s->alm_enable, 0); | 389 | out_8(®s->alm_enable, 0); |
393 | out_8(®s->int_enable, in_8(®s->int_enable) & ~0x1); | 390 | out_8(®s->int_enable, in_8(®s->int_enable) & ~0x1); |
394 | 391 | ||
395 | rtc_device_unregister(rtc->rtc); | ||
396 | iounmap(rtc->regs); | 392 | iounmap(rtc->regs); |
397 | free_irq(rtc->irq, &op->dev); | 393 | free_irq(rtc->irq, &op->dev); |
398 | free_irq(rtc->irq_periodic, &op->dev); | 394 | free_irq(rtc->irq_periodic, &op->dev); |
399 | irq_dispose_mapping(rtc->irq); | 395 | irq_dispose_mapping(rtc->irq); |
400 | irq_dispose_mapping(rtc->irq_periodic); | 396 | irq_dispose_mapping(rtc->irq_periodic); |
401 | dev_set_drvdata(&op->dev, NULL); | ||
402 | kfree(rtc); | ||
403 | 397 | ||
404 | return 0; | 398 | return 0; |
405 | } | 399 | } |
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 771f86a05d14..426cb5189daa 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c | |||
@@ -111,8 +111,8 @@ static void msm6242_lock(struct msm6242_priv *priv) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | if (!cnt) | 113 | if (!cnt) |
114 | pr_warning("msm6242: timed out waiting for RTC (0x%x)\n", | 114 | pr_warn("msm6242: timed out waiting for RTC (0x%x)\n", |
115 | msm6242_read(priv, MSM6242_CD)); | 115 | msm6242_read(priv, MSM6242_CD)); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void msm6242_unlock(struct msm6242_priv *priv) | 118 | static void msm6242_unlock(struct msm6242_priv *priv) |
@@ -199,7 +199,6 @@ static int __init msm6242_rtc_probe(struct platform_device *pdev) | |||
199 | struct resource *res; | 199 | struct resource *res; |
200 | struct msm6242_priv *priv; | 200 | struct msm6242_priv *priv; |
201 | struct rtc_device *rtc; | 201 | struct rtc_device *rtc; |
202 | int error; | ||
203 | 202 | ||
204 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 203 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
205 | if (!res) | 204 | if (!res) |
@@ -216,22 +215,11 @@ static int __init msm6242_rtc_probe(struct platform_device *pdev) | |||
216 | 215 | ||
217 | rtc = devm_rtc_device_register(&pdev->dev, "rtc-msm6242", | 216 | rtc = devm_rtc_device_register(&pdev->dev, "rtc-msm6242", |
218 | &msm6242_rtc_ops, THIS_MODULE); | 217 | &msm6242_rtc_ops, THIS_MODULE); |
219 | if (IS_ERR(rtc)) { | 218 | if (IS_ERR(rtc)) |
220 | error = PTR_ERR(rtc); | 219 | return PTR_ERR(rtc); |
221 | goto out_unmap; | ||
222 | } | ||
223 | 220 | ||
224 | priv->rtc = rtc; | 221 | priv->rtc = rtc; |
225 | return 0; | 222 | return 0; |
226 | |||
227 | out_unmap: | ||
228 | platform_set_drvdata(pdev, NULL); | ||
229 | return error; | ||
230 | } | ||
231 | |||
232 | static int __exit msm6242_rtc_remove(struct platform_device *pdev) | ||
233 | { | ||
234 | return 0; | ||
235 | } | 223 | } |
236 | 224 | ||
237 | static struct platform_driver msm6242_rtc_driver = { | 225 | static struct platform_driver msm6242_rtc_driver = { |
@@ -239,7 +227,6 @@ static struct platform_driver msm6242_rtc_driver = { | |||
239 | .name = "rtc-msm6242", | 227 | .name = "rtc-msm6242", |
240 | .owner = THIS_MODULE, | 228 | .owner = THIS_MODULE, |
241 | }, | 229 | }, |
242 | .remove = __exit_p(msm6242_rtc_remove), | ||
243 | }; | 230 | }; |
244 | 231 | ||
245 | module_platform_driver_probe(msm6242_rtc_driver, msm6242_rtc_probe); | 232 | module_platform_driver_probe(msm6242_rtc_driver, msm6242_rtc_probe); |
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 9a3895bc4f4d..ab87bacb8f88 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
@@ -436,22 +436,20 @@ static int mxc_rtc_probe(struct platform_device *pdev) | |||
436 | pdata->irq = -1; | 436 | pdata->irq = -1; |
437 | } | 437 | } |
438 | 438 | ||
439 | if (pdata->irq >=0) | 439 | if (pdata->irq >= 0) |
440 | device_init_wakeup(&pdev->dev, 1); | 440 | device_init_wakeup(&pdev->dev, 1); |
441 | 441 | ||
442 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &mxc_rtc_ops, | 442 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &mxc_rtc_ops, |
443 | THIS_MODULE); | 443 | THIS_MODULE); |
444 | if (IS_ERR(rtc)) { | 444 | if (IS_ERR(rtc)) { |
445 | ret = PTR_ERR(rtc); | 445 | ret = PTR_ERR(rtc); |
446 | goto exit_clr_drvdata; | 446 | goto exit_put_clk; |
447 | } | 447 | } |
448 | 448 | ||
449 | pdata->rtc = rtc; | 449 | pdata->rtc = rtc; |
450 | 450 | ||
451 | return 0; | 451 | return 0; |
452 | 452 | ||
453 | exit_clr_drvdata: | ||
454 | platform_set_drvdata(pdev, NULL); | ||
455 | exit_put_clk: | 453 | exit_put_clk: |
456 | clk_disable_unprepare(pdata->clk); | 454 | clk_disable_unprepare(pdata->clk); |
457 | 455 | ||
@@ -465,7 +463,6 @@ static int mxc_rtc_remove(struct platform_device *pdev) | |||
465 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); | 463 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); |
466 | 464 | ||
467 | clk_disable_unprepare(pdata->clk); | 465 | clk_disable_unprepare(pdata->clk); |
468 | platform_set_drvdata(pdev, NULL); | ||
469 | 466 | ||
470 | return 0; | 467 | return 0; |
471 | } | 468 | } |
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index d592e2fe43f7..22861c5e0c59 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c | |||
@@ -260,15 +260,7 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) | |||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
262 | 262 | ||
263 | static int __exit nuc900_rtc_remove(struct platform_device *pdev) | ||
264 | { | ||
265 | platform_set_drvdata(pdev, NULL); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static struct platform_driver nuc900_rtc_driver = { | 263 | static struct platform_driver nuc900_rtc_driver = { |
271 | .remove = __exit_p(nuc900_rtc_remove), | ||
272 | .driver = { | 264 | .driver = { |
273 | .name = "nuc900-rtc", | 265 | .name = "nuc900-rtc", |
274 | .owner = THIS_MODULE, | 266 | .owner = THIS_MODULE, |
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index b0ba3fc991ea..c6ffbaec32a4 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -23,9 +23,7 @@ | |||
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_device.h> | 24 | #include <linux/of_device.h> |
25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
26 | 26 | #include <linux/io.h> | |
27 | #include <asm/io.h> | ||
28 | |||
29 | 27 | ||
30 | /* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock | 28 | /* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock |
31 | * with century-range alarm matching, driven by the 32kHz clock. | 29 | * with century-range alarm matching, driven by the 32kHz clock. |
@@ -423,6 +421,8 @@ static int __init omap_rtc_probe(struct platform_device *pdev) | |||
423 | * is write-only, and always reads as zero...) | 421 | * is write-only, and always reads as zero...) |
424 | */ | 422 | */ |
425 | 423 | ||
424 | device_init_wakeup(&pdev->dev, true); | ||
425 | |||
426 | if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) | 426 | if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) |
427 | pr_info("%s: split power mode\n", pdev->name); | 427 | pr_info("%s: split power mode\n", pdev->name); |
428 | 428 | ||
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index 50204d474eb7..a1fecc8d97fc 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c | |||
@@ -265,6 +265,7 @@ static int palmas_rtc_probe(struct platform_device *pdev) | |||
265 | 265 | ||
266 | palmas_rtc->irq = platform_get_irq(pdev, 0); | 266 | palmas_rtc->irq = platform_get_irq(pdev, 0); |
267 | 267 | ||
268 | device_init_wakeup(&pdev->dev, 1); | ||
268 | palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | 269 | palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
269 | &palmas_rtc_ops, THIS_MODULE); | 270 | &palmas_rtc_ops, THIS_MODULE); |
270 | if (IS_ERR(palmas_rtc->rtc)) { | 271 | if (IS_ERR(palmas_rtc->rtc)) { |
@@ -283,7 +284,6 @@ static int palmas_rtc_probe(struct platform_device *pdev) | |||
283 | return ret; | 284 | return ret; |
284 | } | 285 | } |
285 | 286 | ||
286 | device_set_wakeup_capable(&pdev->dev, 1); | ||
287 | return 0; | 287 | return 0; |
288 | } | 288 | } |
289 | 289 | ||
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c index 539a90b98bc5..40b5c630bc7d 100644 --- a/drivers/rtc/rtc-pcap.c +++ b/drivers/rtc/rtc-pcap.c | |||
@@ -156,10 +156,8 @@ static int __init pcap_rtc_probe(struct platform_device *pdev) | |||
156 | 156 | ||
157 | pcap_rtc->rtc = devm_rtc_device_register(&pdev->dev, "pcap", | 157 | pcap_rtc->rtc = devm_rtc_device_register(&pdev->dev, "pcap", |
158 | &pcap_rtc_ops, THIS_MODULE); | 158 | &pcap_rtc_ops, THIS_MODULE); |
159 | if (IS_ERR(pcap_rtc->rtc)) { | 159 | if (IS_ERR(pcap_rtc->rtc)) |
160 | err = PTR_ERR(pcap_rtc->rtc); | 160 | return PTR_ERR(pcap_rtc->rtc); |
161 | goto fail; | ||
162 | } | ||
163 | 161 | ||
164 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); | 162 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); |
165 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); | 163 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); |
@@ -167,17 +165,14 @@ static int __init pcap_rtc_probe(struct platform_device *pdev) | |||
167 | err = devm_request_irq(&pdev->dev, timer_irq, pcap_rtc_irq, 0, | 165 | err = devm_request_irq(&pdev->dev, timer_irq, pcap_rtc_irq, 0, |
168 | "RTC Timer", pcap_rtc); | 166 | "RTC Timer", pcap_rtc); |
169 | if (err) | 167 | if (err) |
170 | goto fail; | 168 | return err; |
171 | 169 | ||
172 | err = devm_request_irq(&pdev->dev, alarm_irq, pcap_rtc_irq, 0, | 170 | err = devm_request_irq(&pdev->dev, alarm_irq, pcap_rtc_irq, 0, |
173 | "RTC Alarm", pcap_rtc); | 171 | "RTC Alarm", pcap_rtc); |
174 | if (err) | 172 | if (err) |
175 | goto fail; | 173 | return err; |
176 | 174 | ||
177 | return 0; | 175 | return 0; |
178 | fail: | ||
179 | platform_set_drvdata(pdev, NULL); | ||
180 | return err; | ||
181 | } | 176 | } |
182 | 177 | ||
183 | static int __exit pcap_rtc_remove(struct platform_device *pdev) | 178 | static int __exit pcap_rtc_remove(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 796a6c5067dd..1725b5090e33 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c | |||
@@ -18,11 +18,11 @@ | |||
18 | * should look something like: | 18 | * should look something like: |
19 | * | 19 | * |
20 | * static struct spi_board_info ek_spi_devices[] = { | 20 | * static struct spi_board_info ek_spi_devices[] = { |
21 | * ... | 21 | * ... |
22 | * { | 22 | * { |
23 | * .modalias = "rtc-pcf2123", | 23 | * .modalias = "rtc-pcf2123", |
24 | * .chip_select = 1, | 24 | * .chip_select = 1, |
25 | * .controller_data = (void *)AT91_PIN_PA10, | 25 | * .controller_data = (void *)AT91_PIN_PA10, |
26 | * .max_speed_hz = 1000 * 1000, | 26 | * .max_speed_hz = 1000 * 1000, |
27 | * .mode = SPI_CS_HIGH, | 27 | * .mode = SPI_CS_HIGH, |
28 | * .bus_num = 0, | 28 | * .bus_num = 0, |
@@ -94,8 +94,9 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr, | |||
94 | 94 | ||
95 | r = container_of(attr, struct pcf2123_sysfs_reg, attr); | 95 | r = container_of(attr, struct pcf2123_sysfs_reg, attr); |
96 | 96 | ||
97 | if (strict_strtoul(r->name, 16, ®)) | 97 | ret = kstrtoul(r->name, 16, ®); |
98 | return -EINVAL; | 98 | if (ret) |
99 | return ret; | ||
99 | 100 | ||
100 | txbuf[0] = PCF2123_READ | reg; | 101 | txbuf[0] = PCF2123_READ | reg; |
101 | ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1); | 102 | ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1); |
@@ -117,9 +118,13 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, | |||
117 | 118 | ||
118 | r = container_of(attr, struct pcf2123_sysfs_reg, attr); | 119 | r = container_of(attr, struct pcf2123_sysfs_reg, attr); |
119 | 120 | ||
120 | if (strict_strtoul(r->name, 16, ®) | 121 | ret = kstrtoul(r->name, 16, ®); |
121 | || strict_strtoul(buffer, 10, &val)) | 122 | if (ret) |
122 | return -EINVAL; | 123 | return ret; |
124 | |||
125 | ret = kstrtoul(buffer, 10, &val); | ||
126 | if (ret) | ||
127 | return ret; | ||
123 | 128 | ||
124 | txbuf[0] = PCF2123_WRITE | reg; | 129 | txbuf[0] = PCF2123_WRITE | reg; |
125 | txbuf[1] = val; | 130 | txbuf[1] = val; |
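The pcf2123 sysfs hunk swaps the deprecated strict_strtoul() for kstrtoul() and propagates the helper's return value instead of flattening every failure to -EINVAL. Minimal sketch of the pattern, assuming the same base-16 register-name parsing as the code above:

	unsigned long reg;
	int ret;

	ret = kstrtoul(r->name, 16, &reg);	/* parse the hex register number */
	if (ret)
		return ret;			/* e.g. -EINVAL or -ERANGE from kstrtoul() */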
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c new file mode 100644 index 000000000000..205b9f7da1b8 --- /dev/null +++ b/drivers/rtc/rtc-pcf2127.c | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * An I2C driver for the NXP PCF2127 RTC | ||
3 | * Copyright 2013 Til-Technologies | ||
4 | * | ||
5 | * Author: Renaud Cerrato <r.cerrato@til-technologies.fr> | ||
6 | * | ||
7 | * based on the other drivers in this same directory. | ||
8 | * | ||
9 | * http://www.nxp.com/documents/data_sheet/PCF2127AT.pdf | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/bcd.h> | ||
18 | #include <linux/rtc.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of.h> | ||
22 | |||
23 | #define DRV_VERSION "0.0.1" | ||
24 | |||
25 | #define PCF2127_REG_CTRL1 (0x00) /* Control Register 1 */ | ||
26 | #define PCF2127_REG_CTRL2 (0x01) /* Control Register 2 */ | ||
27 | #define PCF2127_REG_CTRL3 (0x02) /* Control Register 3 */ | ||
28 | #define PCF2127_REG_SC (0x03) /* datetime */ | ||
29 | #define PCF2127_REG_MN (0x04) | ||
30 | #define PCF2127_REG_HR (0x05) | ||
31 | #define PCF2127_REG_DM (0x06) | ||
32 | #define PCF2127_REG_DW (0x07) | ||
33 | #define PCF2127_REG_MO (0x08) | ||
34 | #define PCF2127_REG_YR (0x09) | ||
35 | |||
36 | static struct i2c_driver pcf2127_driver; | ||
37 | |||
38 | struct pcf2127 { | ||
39 | struct rtc_device *rtc; | ||
40 | int voltage_low; /* indicates if a low_voltage was detected */ | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * In the routines that deal directly with the pcf2127 hardware, we use | ||
45 | * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. | ||
46 | */ | ||
47 | static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
48 | { | ||
49 | struct pcf2127 *pcf2127 = i2c_get_clientdata(client); | ||
50 | unsigned char buf[10] = { PCF2127_REG_CTRL1 }; | ||
51 | |||
52 | /* read registers */ | ||
53 | if (i2c_master_send(client, buf, 1) != 1 || | ||
54 | i2c_master_recv(client, buf, sizeof(buf)) != sizeof(buf)) { | ||
55 | dev_err(&client->dev, "%s: read error\n", __func__); | ||
56 | return -EIO; | ||
57 | } | ||
58 | |||
59 | if (buf[PCF2127_REG_CTRL3] & 0x04) { | ||
60 | pcf2127->voltage_low = 1; | ||
61 | dev_info(&client->dev, | ||
62 | "low voltage detected, date/time is not reliable.\n"); | ||
63 | } | ||
64 | |||
65 | dev_dbg(&client->dev, | ||
66 | "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, " | ||
67 | "sec=%02x, min=%02x, hr=%02x, " | ||
68 | "mday=%02x, wday=%02x, mon=%02x, year=%02x\n", | ||
69 | __func__, | ||
70 | buf[0], buf[1], buf[2], | ||
71 | buf[3], buf[4], buf[5], | ||
72 | buf[6], buf[7], buf[8], buf[9]); | ||
73 | |||
74 | |||
75 | tm->tm_sec = bcd2bin(buf[PCF2127_REG_SC] & 0x7F); | ||
76 | tm->tm_min = bcd2bin(buf[PCF2127_REG_MN] & 0x7F); | ||
77 | tm->tm_hour = bcd2bin(buf[PCF2127_REG_HR] & 0x3F); /* rtc hr 0-23 */ | ||
78 | tm->tm_mday = bcd2bin(buf[PCF2127_REG_DM] & 0x3F); | ||
79 | tm->tm_wday = buf[PCF2127_REG_DW] & 0x07; | ||
80 | tm->tm_mon = bcd2bin(buf[PCF2127_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ | ||
81 | tm->tm_year = bcd2bin(buf[PCF2127_REG_YR]); | ||
82 | if (tm->tm_year < 70) | ||
83 | tm->tm_year += 100; /* assume we are in 1970...2069 */ | ||
84 | |||
85 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " | ||
86 | "mday=%d, mon=%d, year=%d, wday=%d\n", | ||
87 | __func__, | ||
88 | tm->tm_sec, tm->tm_min, tm->tm_hour, | ||
89 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); | ||
90 | |||
91 | /* the clock can give out invalid datetime, but we cannot return | ||
92 | * -EINVAL otherwise hwclock will refuse to set the time on bootup. | ||
93 | */ | ||
94 | if (rtc_valid_tm(tm) < 0) | ||
95 | dev_err(&client->dev, "retrieved date/time is not valid.\n"); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
101 | { | ||
102 | unsigned char buf[8]; | ||
103 | int i = 0, err; | ||
104 | |||
105 | dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " | ||
106 | "mday=%d, mon=%d, year=%d, wday=%d\n", | ||
107 | __func__, | ||
108 | tm->tm_sec, tm->tm_min, tm->tm_hour, | ||
109 | tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); | ||
110 | |||
111 | /* start register address */ | ||
112 | buf[i++] = PCF2127_REG_SC; | ||
113 | |||
114 | /* hours, minutes and seconds */ | ||
115 | buf[i++] = bin2bcd(tm->tm_sec); | ||
116 | buf[i++] = bin2bcd(tm->tm_min); | ||
117 | buf[i++] = bin2bcd(tm->tm_hour); | ||
118 | buf[i++] = bin2bcd(tm->tm_mday); | ||
119 | buf[i++] = tm->tm_wday & 0x07; | ||
120 | |||
121 | /* month, 1 - 12 */ | ||
122 | buf[i++] = bin2bcd(tm->tm_mon + 1); | ||
123 | |||
124 | /* year */ | ||
125 | buf[i++] = bin2bcd(tm->tm_year % 100); | ||
126 | |||
127 | /* write register's data */ | ||
128 | err = i2c_master_send(client, buf, i); | ||
129 | if (err != i) { | ||
130 | dev_err(&client->dev, | ||
131 | "%s: err=%d", __func__, err); | ||
132 | return -EIO; | ||
133 | } | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | #ifdef CONFIG_RTC_INTF_DEV | ||
139 | static int pcf2127_rtc_ioctl(struct device *dev, | ||
140 | unsigned int cmd, unsigned long arg) | ||
141 | { | ||
142 | struct pcf2127 *pcf2127 = i2c_get_clientdata(to_i2c_client(dev)); | ||
143 | |||
144 | switch (cmd) { | ||
145 | case RTC_VL_READ: | ||
146 | if (pcf2127->voltage_low) | ||
147 | dev_info(dev, "low voltage detected, date/time is not reliable.\n"); | ||
148 | |||
149 | if (copy_to_user((void __user *)arg, &pcf2127->voltage_low, | ||
150 | sizeof(int))) | ||
151 | return -EFAULT; | ||
152 | return 0; | ||
153 | default: | ||
154 | return -ENOIOCTLCMD; | ||
155 | } | ||
156 | } | ||
157 | #else | ||
158 | #define pcf2127_rtc_ioctl NULL | ||
159 | #endif | ||
160 | |||
161 | static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
162 | { | ||
163 | return pcf2127_get_datetime(to_i2c_client(dev), tm); | ||
164 | } | ||
165 | |||
166 | static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
167 | { | ||
168 | return pcf2127_set_datetime(to_i2c_client(dev), tm); | ||
169 | } | ||
170 | |||
171 | static const struct rtc_class_ops pcf2127_rtc_ops = { | ||
172 | .ioctl = pcf2127_rtc_ioctl, | ||
173 | .read_time = pcf2127_rtc_read_time, | ||
174 | .set_time = pcf2127_rtc_set_time, | ||
175 | }; | ||
176 | |||
177 | static int pcf2127_probe(struct i2c_client *client, | ||
178 | const struct i2c_device_id *id) | ||
179 | { | ||
180 | struct pcf2127 *pcf2127; | ||
181 | |||
182 | dev_dbg(&client->dev, "%s\n", __func__); | ||
183 | |||
184 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | ||
185 | return -ENODEV; | ||
186 | |||
187 | pcf2127 = devm_kzalloc(&client->dev, sizeof(struct pcf2127), | ||
188 | GFP_KERNEL); | ||
189 | if (!pcf2127) | ||
190 | return -ENOMEM; | ||
191 | |||
192 | dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); | ||
193 | |||
194 | i2c_set_clientdata(client, pcf2127); | ||
195 | |||
196 | pcf2127->rtc = devm_rtc_device_register(&client->dev, | ||
197 | pcf2127_driver.driver.name, | ||
198 | &pcf2127_rtc_ops, THIS_MODULE); | ||
199 | |||
200 | if (IS_ERR(pcf2127->rtc)) | ||
201 | return PTR_ERR(pcf2127->rtc); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int pcf2127_remove(struct i2c_client *client) | ||
207 | { | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static const struct i2c_device_id pcf2127_id[] = { | ||
212 | { "pcf2127", 0 }, | ||
213 | { } | ||
214 | }; | ||
215 | MODULE_DEVICE_TABLE(i2c, pcf2127_id); | ||
216 | |||
217 | #ifdef CONFIG_OF | ||
218 | static const struct of_device_id pcf2127_of_match[] = { | ||
219 | { .compatible = "nxp,pcf2127" }, | ||
220 | {} | ||
221 | }; | ||
222 | MODULE_DEVICE_TABLE(of, pcf2127_of_match); | ||
223 | #endif | ||
224 | |||
225 | static struct i2c_driver pcf2127_driver = { | ||
226 | .driver = { | ||
227 | .name = "rtc-pcf2127", | ||
228 | .owner = THIS_MODULE, | ||
229 | .of_match_table = of_match_ptr(pcf2127_of_match), | ||
230 | }, | ||
231 | .probe = pcf2127_probe, | ||
232 | .remove = pcf2127_remove, | ||
233 | .id_table = pcf2127_id, | ||
234 | }; | ||
235 | |||
236 | module_i2c_driver(pcf2127_driver); | ||
237 | |||
238 | MODULE_AUTHOR("Renaud Cerrato <r.cerrato@til-technologies.fr>"); | ||
239 | MODULE_DESCRIPTION("NXP PCF2127 RTC driver"); | ||
240 | MODULE_LICENSE("GPL"); | ||
241 | MODULE_VERSION(DRV_VERSION); | ||
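The new pcf2127 driver follows the usual pattern for BCD-register RTCs: each date/time field is stored on the chip as binary-coded decimal, so reads go through bcd2bin() and writes through bin2bcd() from <linux/bcd.h>. A small illustration using the register names above (the sample values are made up; the & 0x7F mask drops bit 7 of the seconds register, which this chip family uses as a status/oscillator-stop flag):

	#include <linux/bcd.h>

	/* reading: 0x47 in the seconds register means 47 seconds */
	tm->tm_sec = bcd2bin(buf[PCF2127_REG_SC] & 0x7F);

	/* writing: 23 hours is stored as 0x23 */
	buf[i++] = bin2bcd(tm->tm_hour);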
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 305c9515e5bb..5c8f8226c848 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c | |||
@@ -317,11 +317,6 @@ static int pcf8523_probe(struct i2c_client *client, | |||
317 | return 0; | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | static int pcf8523_remove(struct i2c_client *client) | ||
321 | { | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static const struct i2c_device_id pcf8523_id[] = { | 320 | static const struct i2c_device_id pcf8523_id[] = { |
326 | { "pcf8523", 0 }, | 321 | { "pcf8523", 0 }, |
327 | { } | 322 | { } |
@@ -343,7 +338,6 @@ static struct i2c_driver pcf8523_driver = { | |||
343 | .of_match_table = of_match_ptr(pcf8523_of_match), | 338 | .of_match_table = of_match_ptr(pcf8523_of_match), |
344 | }, | 339 | }, |
345 | .probe = pcf8523_probe, | 340 | .probe = pcf8523_probe, |
346 | .remove = pcf8523_remove, | ||
347 | .id_table = pcf8523_id, | 341 | .id_table = pcf8523_id, |
348 | }; | 342 | }; |
349 | module_i2c_driver(pcf8523_driver); | 343 | module_i2c_driver(pcf8523_driver); |
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 97b354a26a44..710c3a5aa6ff 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <linux/err.h> | ||
23 | 24 | ||
24 | #define DRV_VERSION "0.4.3" | 25 | #define DRV_VERSION "0.4.3" |
25 | 26 | ||
@@ -263,15 +264,7 @@ static int pcf8563_probe(struct i2c_client *client, | |||
263 | pcf8563_driver.driver.name, | 264 | pcf8563_driver.driver.name, |
264 | &pcf8563_rtc_ops, THIS_MODULE); | 265 | &pcf8563_rtc_ops, THIS_MODULE); |
265 | 266 | ||
266 | if (IS_ERR(pcf8563->rtc)) | 267 | return PTR_RET(pcf8563->rtc); |
267 | return PTR_ERR(pcf8563->rtc); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static int pcf8563_remove(struct i2c_client *client) | ||
273 | { | ||
274 | return 0; | ||
275 | } | 268 | } |
276 | 269 | ||
277 | static const struct i2c_device_id pcf8563_id[] = { | 270 | static const struct i2c_device_id pcf8563_id[] = { |
@@ -296,7 +289,6 @@ static struct i2c_driver pcf8563_driver = { | |||
296 | .of_match_table = of_match_ptr(pcf8563_of_match), | 289 | .of_match_table = of_match_ptr(pcf8563_of_match), |
297 | }, | 290 | }, |
298 | .probe = pcf8563_probe, | 291 | .probe = pcf8563_probe, |
299 | .remove = pcf8563_remove, | ||
300 | .id_table = pcf8563_id, | 292 | .id_table = pcf8563_id, |
301 | }; | 293 | }; |
302 | 294 | ||
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c index 95886dcf4a39..843a745c42f3 100644 --- a/drivers/rtc/rtc-pcf8583.c +++ b/drivers/rtc/rtc-pcf8583.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/rtc.h> | 18 | #include <linux/rtc.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/err.h> | ||
20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
21 | #include <linux/bcd.h> | 22 | #include <linux/bcd.h> |
22 | 23 | ||
@@ -188,7 +189,8 @@ static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
188 | dev_warn(dev, "resetting control %02x -> %02x\n", | 189 | dev_warn(dev, "resetting control %02x -> %02x\n", |
189 | ctrl, new_ctrl); | 190 | ctrl, new_ctrl); |
190 | 191 | ||
191 | if ((err = pcf8583_set_ctrl(client, &new_ctrl)) < 0) | 192 | err = pcf8583_set_ctrl(client, &new_ctrl); |
193 | if (err < 0) | ||
192 | return err; | 194 | return err; |
193 | } | 195 | } |
194 | 196 | ||
@@ -283,15 +285,7 @@ static int pcf8583_probe(struct i2c_client *client, | |||
283 | pcf8583_driver.driver.name, | 285 | pcf8583_driver.driver.name, |
284 | &pcf8583_rtc_ops, THIS_MODULE); | 286 | &pcf8583_rtc_ops, THIS_MODULE); |
285 | 287 | ||
286 | if (IS_ERR(pcf8583->rtc)) | 288 | return PTR_RET(pcf8583->rtc); |
287 | return PTR_ERR(pcf8583->rtc); | ||
288 | |||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | static int pcf8583_remove(struct i2c_client *client) | ||
293 | { | ||
294 | return 0; | ||
295 | } | 289 | } |
296 | 290 | ||
297 | static const struct i2c_device_id pcf8583_id[] = { | 291 | static const struct i2c_device_id pcf8583_id[] = { |
@@ -306,7 +300,6 @@ static struct i2c_driver pcf8583_driver = { | |||
306 | .owner = THIS_MODULE, | 300 | .owner = THIS_MODULE, |
307 | }, | 301 | }, |
308 | .probe = pcf8583_probe, | 302 | .probe = pcf8583_probe, |
309 | .remove = pcf8583_remove, | ||
310 | .id_table = pcf8583_id, | 303 | .id_table = pcf8583_id, |
311 | }; | 304 | }; |
312 | 305 | ||
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index f1a6557261f3..03f8f75d5af2 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c | |||
@@ -395,7 +395,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
395 | if (pdata != NULL) | 395 | if (pdata != NULL) |
396 | rtc_write_enable = pdata->rtc_write_enable; | 396 | rtc_write_enable = pdata->rtc_write_enable; |
397 | 397 | ||
398 | rtc_dd = kzalloc(sizeof(*rtc_dd), GFP_KERNEL); | 398 | rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL); |
399 | if (rtc_dd == NULL) { | 399 | if (rtc_dd == NULL) { |
400 | dev_err(&pdev->dev, "Unable to allocate memory!\n"); | 400 | dev_err(&pdev->dev, "Unable to allocate memory!\n"); |
401 | return -ENOMEM; | 401 | return -ENOMEM; |
@@ -407,16 +407,14 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
407 | rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0); | 407 | rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0); |
408 | if (rtc_dd->rtc_alarm_irq < 0) { | 408 | if (rtc_dd->rtc_alarm_irq < 0) { |
409 | dev_err(&pdev->dev, "Alarm IRQ resource absent!\n"); | 409 | dev_err(&pdev->dev, "Alarm IRQ resource absent!\n"); |
410 | rc = -ENXIO; | 410 | return -ENXIO; |
411 | goto fail_rtc_enable; | ||
412 | } | 411 | } |
413 | 412 | ||
414 | rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO, | 413 | rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO, |
415 | "pmic_rtc_base"); | 414 | "pmic_rtc_base"); |
416 | if (!(rtc_resource && rtc_resource->start)) { | 415 | if (!(rtc_resource && rtc_resource->start)) { |
417 | dev_err(&pdev->dev, "RTC IO resource absent!\n"); | 416 | dev_err(&pdev->dev, "RTC IO resource absent!\n"); |
418 | rc = -ENXIO; | 417 | return -ENXIO; |
419 | goto fail_rtc_enable; | ||
420 | } | 418 | } |
421 | 419 | ||
422 | rtc_dd->rtc_base = rtc_resource->start; | 420 | rtc_dd->rtc_base = rtc_resource->start; |
@@ -432,7 +430,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
432 | rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); | 430 | rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); |
433 | if (rc < 0) { | 431 | if (rc < 0) { |
434 | dev_err(&pdev->dev, "RTC control register read failed!\n"); | 432 | dev_err(&pdev->dev, "RTC control register read failed!\n"); |
435 | goto fail_rtc_enable; | 433 | return rc; |
436 | } | 434 | } |
437 | 435 | ||
438 | if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { | 436 | if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { |
@@ -442,7 +440,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
442 | if (rc < 0) { | 440 | if (rc < 0) { |
443 | dev_err(&pdev->dev, "Write to RTC control register " | 441 | dev_err(&pdev->dev, "Write to RTC control register " |
444 | "failed\n"); | 442 | "failed\n"); |
445 | goto fail_rtc_enable; | 443 | return rc; |
446 | } | 444 | } |
447 | } | 445 | } |
448 | 446 | ||
@@ -453,13 +451,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
453 | platform_set_drvdata(pdev, rtc_dd); | 451 | platform_set_drvdata(pdev, rtc_dd); |
454 | 452 | ||
455 | /* Register the RTC device */ | 453 | /* Register the RTC device */ |
456 | rtc_dd->rtc = rtc_device_register("pm8xxx_rtc", &pdev->dev, | 454 | rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc", |
457 | &pm8xxx_rtc_ops, THIS_MODULE); | 455 | &pm8xxx_rtc_ops, THIS_MODULE); |
458 | if (IS_ERR(rtc_dd->rtc)) { | 456 | if (IS_ERR(rtc_dd->rtc)) { |
459 | dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n", | 457 | dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n", |
460 | __func__, PTR_ERR(rtc_dd->rtc)); | 458 | __func__, PTR_ERR(rtc_dd->rtc)); |
461 | rc = PTR_ERR(rtc_dd->rtc); | 459 | return PTR_ERR(rtc_dd->rtc); |
462 | goto fail_rtc_enable; | ||
463 | } | 460 | } |
464 | 461 | ||
465 | /* Request the alarm IRQ */ | 462 | /* Request the alarm IRQ */ |
@@ -468,7 +465,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
468 | "pm8xxx_rtc_alarm", rtc_dd); | 465 | "pm8xxx_rtc_alarm", rtc_dd); |
469 | if (rc < 0) { | 466 | if (rc < 0) { |
470 | dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc); | 467 | dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc); |
471 | goto fail_req_irq; | 468 | return rc; |
472 | } | 469 | } |
473 | 470 | ||
474 | device_init_wakeup(&pdev->dev, 1); | 471 | device_init_wakeup(&pdev->dev, 1); |
@@ -476,13 +473,6 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
476 | dev_dbg(&pdev->dev, "Probe success !!\n"); | 473 | dev_dbg(&pdev->dev, "Probe success !!\n"); |
477 | 474 | ||
478 | return 0; | 475 | return 0; |
479 | |||
480 | fail_req_irq: | ||
481 | rtc_device_unregister(rtc_dd->rtc); | ||
482 | fail_rtc_enable: | ||
483 | platform_set_drvdata(pdev, NULL); | ||
484 | kfree(rtc_dd); | ||
485 | return rc; | ||
486 | } | 476 | } |
487 | 477 | ||
488 | static int pm8xxx_rtc_remove(struct platform_device *pdev) | 478 | static int pm8xxx_rtc_remove(struct platform_device *pdev) |
@@ -491,9 +481,6 @@ static int pm8xxx_rtc_remove(struct platform_device *pdev) | |||
491 | 481 | ||
492 | device_init_wakeup(&pdev->dev, 0); | 482 | device_init_wakeup(&pdev->dev, 0); |
493 | free_irq(rtc_dd->rtc_alarm_irq, rtc_dd); | 483 | free_irq(rtc_dd->rtc_alarm_irq, rtc_dd); |
494 | rtc_device_unregister(rtc_dd->rtc); | ||
495 | platform_set_drvdata(pdev, NULL); | ||
496 | kfree(rtc_dd); | ||
497 | 484 | ||
498 | return 0; | 485 | return 0; |
499 | } | 486 | } |
diff --git a/drivers/rtc/rtc-ps3.c b/drivers/rtc/rtc-ps3.c index 4bb825bb5804..554ada5e9b76 100644 --- a/drivers/rtc/rtc-ps3.c +++ b/drivers/rtc/rtc-ps3.c | |||
@@ -71,17 +71,11 @@ static int __init ps3_rtc_probe(struct platform_device *dev) | |||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | static int __exit ps3_rtc_remove(struct platform_device *dev) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static struct platform_driver ps3_rtc_driver = { | 74 | static struct platform_driver ps3_rtc_driver = { |
80 | .driver = { | 75 | .driver = { |
81 | .name = "rtc-ps3", | 76 | .name = "rtc-ps3", |
82 | .owner = THIS_MODULE, | 77 | .owner = THIS_MODULE, |
83 | }, | 78 | }, |
84 | .remove = __exit_p(ps3_rtc_remove), | ||
85 | }; | 79 | }; |
86 | 80 | ||
87 | module_platform_driver_probe(ps3_rtc_driver, ps3_rtc_probe); | 81 | module_platform_driver_probe(ps3_rtc_driver, ps3_rtc_probe); |
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c index 72f437170d2e..402732cfb32a 100644 --- a/drivers/rtc/rtc-puv3.c +++ b/drivers/rtc/rtc-puv3.c | |||
@@ -224,7 +224,6 @@ static int puv3_rtc_remove(struct platform_device *dev) | |||
224 | { | 224 | { |
225 | struct rtc_device *rtc = platform_get_drvdata(dev); | 225 | struct rtc_device *rtc = platform_get_drvdata(dev); |
226 | 226 | ||
227 | platform_set_drvdata(dev, NULL); | ||
228 | rtc_device_unregister(rtc); | 227 | rtc_device_unregister(rtc); |
229 | 228 | ||
230 | puv3_rtc_setpie(&dev->dev, 0); | 229 | puv3_rtc_setpie(&dev->dev, 0); |
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index ed037ae91c5f..a355f2b82bb8 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c | |||
@@ -324,37 +324,35 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) | |||
324 | int ret; | 324 | int ret; |
325 | u32 rttr; | 325 | u32 rttr; |
326 | 326 | ||
327 | pxa_rtc = kzalloc(sizeof(struct pxa_rtc), GFP_KERNEL); | 327 | pxa_rtc = devm_kzalloc(dev, sizeof(*pxa_rtc), GFP_KERNEL); |
328 | if (!pxa_rtc) | 328 | if (!pxa_rtc) |
329 | return -ENOMEM; | 329 | return -ENOMEM; |
330 | 330 | ||
331 | spin_lock_init(&pxa_rtc->lock); | 331 | spin_lock_init(&pxa_rtc->lock); |
332 | platform_set_drvdata(pdev, pxa_rtc); | 332 | platform_set_drvdata(pdev, pxa_rtc); |
333 | 333 | ||
334 | ret = -ENXIO; | ||
335 | pxa_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 334 | pxa_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
336 | if (!pxa_rtc->ress) { | 335 | if (!pxa_rtc->ress) { |
337 | dev_err(dev, "No I/O memory resource defined\n"); | 336 | dev_err(dev, "No I/O memory resource defined\n"); |
338 | goto err_ress; | 337 | return -ENXIO; |
339 | } | 338 | } |
340 | 339 | ||
341 | pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0); | 340 | pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0); |
342 | if (pxa_rtc->irq_1Hz < 0) { | 341 | if (pxa_rtc->irq_1Hz < 0) { |
343 | dev_err(dev, "No 1Hz IRQ resource defined\n"); | 342 | dev_err(dev, "No 1Hz IRQ resource defined\n"); |
344 | goto err_ress; | 343 | return -ENXIO; |
345 | } | 344 | } |
346 | pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1); | 345 | pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1); |
347 | if (pxa_rtc->irq_Alrm < 0) { | 346 | if (pxa_rtc->irq_Alrm < 0) { |
348 | dev_err(dev, "No alarm IRQ resource defined\n"); | 347 | dev_err(dev, "No alarm IRQ resource defined\n"); |
349 | goto err_ress; | 348 | return -ENXIO; |
350 | } | 349 | } |
351 | pxa_rtc_open(dev); | 350 | pxa_rtc_open(dev); |
352 | ret = -ENOMEM; | 351 | pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, |
353 | pxa_rtc->base = ioremap(pxa_rtc->ress->start, | ||
354 | resource_size(pxa_rtc->ress)); | 352 | resource_size(pxa_rtc->ress)); |
355 | if (!pxa_rtc->base) { | 353 | if (!pxa_rtc->base) { |
356 | dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n"); | 354 | dev_err(dev, "Unable to map pxa RTC I/O memory\n"); |
357 | goto err_map; | 355 | return -ENOMEM; |
358 | } | 356 | } |
359 | 357 | ||
360 | /* | 358 | /* |
@@ -370,41 +368,24 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) | |||
370 | 368 | ||
371 | rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE); | 369 | rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE); |
372 | 370 | ||
373 | pxa_rtc->rtc = rtc_device_register("pxa-rtc", &pdev->dev, &pxa_rtc_ops, | 371 | pxa_rtc->rtc = devm_rtc_device_register(&pdev->dev, "pxa-rtc", |
374 | THIS_MODULE); | 372 | &pxa_rtc_ops, THIS_MODULE); |
375 | ret = PTR_ERR(pxa_rtc->rtc); | ||
376 | if (IS_ERR(pxa_rtc->rtc)) { | 373 | if (IS_ERR(pxa_rtc->rtc)) { |
374 | ret = PTR_ERR(pxa_rtc->rtc); | ||
377 | dev_err(dev, "Failed to register RTC device -> %d\n", ret); | 375 | dev_err(dev, "Failed to register RTC device -> %d\n", ret); |
378 | goto err_rtc_reg; | 376 | return ret; |
379 | } | 377 | } |
380 | 378 | ||
381 | device_init_wakeup(dev, 1); | 379 | device_init_wakeup(dev, 1); |
382 | 380 | ||
383 | return 0; | 381 | return 0; |
384 | |||
385 | err_rtc_reg: | ||
386 | iounmap(pxa_rtc->base); | ||
387 | err_ress: | ||
388 | err_map: | ||
389 | kfree(pxa_rtc); | ||
390 | return ret; | ||
391 | } | 382 | } |
392 | 383 | ||
393 | static int __exit pxa_rtc_remove(struct platform_device *pdev) | 384 | static int __exit pxa_rtc_remove(struct platform_device *pdev) |
394 | { | 385 | { |
395 | struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); | ||
396 | |||
397 | struct device *dev = &pdev->dev; | 386 | struct device *dev = &pdev->dev; |
398 | pxa_rtc_release(dev); | ||
399 | |||
400 | rtc_device_unregister(pxa_rtc->rtc); | ||
401 | |||
402 | spin_lock_irq(&pxa_rtc->lock); | ||
403 | iounmap(pxa_rtc->base); | ||
404 | spin_unlock_irq(&pxa_rtc->lock); | ||
405 | |||
406 | kfree(pxa_rtc); | ||
407 | 387 | ||
388 | pxa_rtc_release(dev); | ||
408 | return 0; | 389 | return 0; |
409 | } | 390 | } |
410 | 391 | ||
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c index 8eabcf51b35a..e53e9b1c69b3 100644 --- a/drivers/rtc/rtc-rc5t583.c +++ b/drivers/rtc/rtc-rc5t583.c | |||
@@ -273,7 +273,7 @@ static int rc5t583_rtc_probe(struct platform_device *pdev) | |||
273 | */ | 273 | */ |
274 | static int rc5t583_rtc_remove(struct platform_device *pdev) | 274 | static int rc5t583_rtc_remove(struct platform_device *pdev) |
275 | { | 275 | { |
276 | struct rc5t583_rtc *rc5t583_rtc = dev_get_drvdata(&pdev->dev); | 276 | struct rc5t583_rtc *rc5t583_rtc = platform_get_drvdata(pdev); |
277 | 277 | ||
278 | rc5t583_rtc_alarm_irq_enable(&rc5t583_rtc->rtc->dev, 0); | 278 | rc5t583_rtc_alarm_irq_enable(&rc5t583_rtc->rtc->dev, 0); |
279 | return 0; | 279 | return 0; |
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 873c689f01c3..89d073679267 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c | |||
@@ -251,21 +251,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) | |||
251 | 251 | ||
252 | rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops, | 252 | rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops, |
253 | THIS_MODULE); | 253 | THIS_MODULE); |
254 | if (IS_ERR(rtc)) { | 254 | if (IS_ERR(rtc)) |
255 | error = PTR_ERR(rtc); | 255 | return PTR_ERR(rtc); |
256 | goto out; | ||
257 | } | ||
258 | priv->rtc = rtc; | 256 | priv->rtc = rtc; |
259 | 257 | ||
260 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); | 258 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); |
261 | if (error) | 259 | if (error) |
262 | goto out; | 260 | return error; |
263 | 261 | ||
264 | return 0; | 262 | return 0; |
265 | |||
266 | out: | ||
267 | platform_set_drvdata(dev, NULL); | ||
268 | return error; | ||
269 | } | 263 | } |
270 | 264 | ||
271 | static int __exit rp5c01_rtc_remove(struct platform_device *dev) | 265 | static int __exit rp5c01_rtc_remove(struct platform_device *dev) |
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c index 8089fc63e403..68f7856422f1 100644 --- a/drivers/rtc/rtc-rs5c313.c +++ b/drivers/rtc/rtc-rs5c313.c | |||
@@ -47,10 +47,10 @@ | |||
47 | #include <linux/platform_device.h> | 47 | #include <linux/platform_device.h> |
48 | #include <linux/bcd.h> | 48 | #include <linux/bcd.h> |
49 | #include <linux/delay.h> | 49 | #include <linux/delay.h> |
50 | #include <asm/io.h> | 50 | #include <linux/io.h> |
51 | 51 | ||
52 | #define DRV_NAME "rs5c313" | 52 | #define DRV_NAME "rs5c313" |
53 | #define DRV_VERSION "1.13" | 53 | #define DRV_VERSION "1.13" |
54 | 54 | ||
55 | #ifdef CONFIG_SH_LANDISK | 55 | #ifdef CONFIG_SH_LANDISK |
56 | /*****************************************************/ | 56 | /*****************************************************/ |
@@ -301,7 +301,7 @@ static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
301 | rs5c313_write_reg(RS5C313_ADDR_SEC10, (data >> 4)); | 301 | rs5c313_write_reg(RS5C313_ADDR_SEC10, (data >> 4)); |
302 | 302 | ||
303 | data = bin2bcd(tm->tm_min); | 303 | data = bin2bcd(tm->tm_min); |
304 | rs5c313_write_reg(RS5C313_ADDR_MIN, data ); | 304 | rs5c313_write_reg(RS5C313_ADDR_MIN, data); |
305 | rs5c313_write_reg(RS5C313_ADDR_MIN10, (data >> 4)); | 305 | rs5c313_write_reg(RS5C313_ADDR_MIN10, (data >> 4)); |
306 | 306 | ||
307 | data = bin2bcd(tm->tm_hour); | 307 | data = bin2bcd(tm->tm_hour); |
@@ -310,7 +310,7 @@ static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
310 | 310 | ||
311 | data = bin2bcd(tm->tm_mday); | 311 | data = bin2bcd(tm->tm_mday); |
312 | rs5c313_write_reg(RS5C313_ADDR_DAY, data); | 312 | rs5c313_write_reg(RS5C313_ADDR_DAY, data); |
313 | rs5c313_write_reg(RS5C313_ADDR_DAY10, (data>> 4)); | 313 | rs5c313_write_reg(RS5C313_ADDR_DAY10, (data >> 4)); |
314 | 314 | ||
315 | data = bin2bcd(tm->tm_mon + 1); | 315 | data = bin2bcd(tm->tm_mon + 1); |
316 | rs5c313_write_reg(RS5C313_ADDR_MON, data); | 316 | rs5c313_write_reg(RS5C313_ADDR_MON, data); |
@@ -349,9 +349,9 @@ static void rs5c313_check_xstp_bit(void) | |||
349 | } | 349 | } |
350 | 350 | ||
351 | memset(&tm, 0, sizeof(struct rtc_time)); | 351 | memset(&tm, 0, sizeof(struct rtc_time)); |
352 | tm.tm_mday = 1; | 352 | tm.tm_mday = 1; |
353 | tm.tm_mon = 1 - 1; | 353 | tm.tm_mon = 1 - 1; |
354 | tm.tm_year = 2000 - 1900; | 354 | tm.tm_year = 2000 - 1900; |
355 | 355 | ||
356 | rs5c313_rtc_set_time(NULL, &tm); | 356 | rs5c313_rtc_set_time(NULL, &tm); |
357 | pr_err("invalid value, resetting to 1 Jan 2000\n"); | 357 | pr_err("invalid value, resetting to 1 Jan 2000\n"); |
@@ -378,18 +378,12 @@ static int rs5c313_rtc_probe(struct platform_device *pdev) | |||
378 | return 0; | 378 | return 0; |
379 | } | 379 | } |
380 | 380 | ||
381 | static int rs5c313_rtc_remove(struct platform_device *pdev) | ||
382 | { | ||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static struct platform_driver rs5c313_rtc_platform_driver = { | 381 | static struct platform_driver rs5c313_rtc_platform_driver = { |
387 | .driver = { | 382 | .driver = { |
388 | .name = DRV_NAME, | 383 | .name = DRV_NAME, |
389 | .owner = THIS_MODULE, | 384 | .owner = THIS_MODULE, |
390 | }, | 385 | }, |
391 | .probe = rs5c313_rtc_probe, | 386 | .probe = rs5c313_rtc_probe, |
392 | .remove = rs5c313_rtc_remove, | ||
393 | }; | 387 | }; |
394 | 388 | ||
395 | static int __init rs5c313_rtc_init(void) | 389 | static int __init rs5c313_rtc_init(void) |
@@ -408,7 +402,7 @@ static int __init rs5c313_rtc_init(void) | |||
408 | 402 | ||
409 | static void __exit rs5c313_rtc_exit(void) | 403 | static void __exit rs5c313_rtc_exit(void) |
410 | { | 404 | { |
411 | platform_driver_unregister( &rs5c313_rtc_platform_driver ); | 405 | platform_driver_unregister(&rs5c313_rtc_platform_driver); |
412 | } | 406 | } |
413 | 407 | ||
414 | module_init(rs5c313_rtc_init); | 408 | module_init(rs5c313_rtc_init); |
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index 2c37df3586c7..f7a90a116a39 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c | |||
@@ -218,18 +218,12 @@ static int rs5c348_probe(struct spi_device *spi) | |||
218 | return ret; | 218 | return ret; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int rs5c348_remove(struct spi_device *spi) | ||
222 | { | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static struct spi_driver rs5c348_driver = { | 221 | static struct spi_driver rs5c348_driver = { |
227 | .driver = { | 222 | .driver = { |
228 | .name = "rtc-rs5c348", | 223 | .name = "rtc-rs5c348", |
229 | .owner = THIS_MODULE, | 224 | .owner = THIS_MODULE, |
230 | }, | 225 | }, |
231 | .probe = rs5c348_probe, | 226 | .probe = rs5c348_probe, |
232 | .remove = rs5c348_remove, | ||
233 | }; | 227 | }; |
234 | 228 | ||
235 | module_spi_driver(rs5c348_driver); | 229 | module_spi_driver(rs5c348_driver); |
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index 5032c24ec159..1a779a67ff66 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c | |||
@@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client, | |||
310 | dev_dbg(&client->dev, "alarm IRQ armed\n"); | 310 | dev_dbg(&client->dev, "alarm IRQ armed\n"); |
311 | } else { | 311 | } else { |
312 | /* disable AIE irq */ | 312 | /* disable AIE irq */ |
313 | ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1); | 313 | ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0); |
314 | if (ret) | 314 | if (ret) |
315 | return ret; | 315 | return ret; |
316 | 316 | ||
@@ -412,17 +412,11 @@ static int rv3029c2_probe(struct i2c_client *client, | |||
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
414 | 414 | ||
415 | static int rv3029c2_remove(struct i2c_client *client) | ||
416 | { | ||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static struct i2c_driver rv3029c2_driver = { | 415 | static struct i2c_driver rv3029c2_driver = { |
421 | .driver = { | 416 | .driver = { |
422 | .name = "rtc-rv3029c2", | 417 | .name = "rtc-rv3029c2", |
423 | }, | 418 | }, |
424 | .probe = rv3029c2_probe, | 419 | .probe = rv3029c2_probe, |
425 | .remove = rv3029c2_remove, | ||
426 | .id_table = rv3029c2_id, | 420 | .id_table = rv3029c2_id, |
427 | }; | 421 | }; |
428 | 422 | ||
diff --git a/drivers/rtc/rtc-rx4581.c b/drivers/rtc/rtc-rx4581.c index 84eb08d65d30..6889222f9ed6 100644 --- a/drivers/rtc/rtc-rx4581.c +++ b/drivers/rtc/rtc-rx4581.c | |||
@@ -282,11 +282,6 @@ static int rx4581_probe(struct spi_device *spi) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | static int rx4581_remove(struct spi_device *spi) | ||
286 | { | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static const struct spi_device_id rx4581_id[] = { | 285 | static const struct spi_device_id rx4581_id[] = { |
291 | { "rx4581", 0 }, | 286 | { "rx4581", 0 }, |
292 | { } | 287 | { } |
@@ -299,7 +294,6 @@ static struct spi_driver rx4581_driver = { | |||
299 | .owner = THIS_MODULE, | 294 | .owner = THIS_MODULE, |
300 | }, | 295 | }, |
301 | .probe = rx4581_probe, | 296 | .probe = rx4581_probe, |
302 | .remove = rx4581_remove, | ||
303 | .id_table = rx4581_id, | 297 | .id_table = rx4581_id, |
304 | }; | 298 | }; |
305 | 299 | ||
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 0722d36b9c9a..8fa23eabcb68 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c | |||
@@ -549,7 +549,7 @@ static int rx8025_probe(struct i2c_client *client, | |||
549 | goto errout; | 549 | goto errout; |
550 | } | 550 | } |
551 | 551 | ||
552 | rx8025 = kzalloc(sizeof(*rx8025), GFP_KERNEL); | 552 | rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL); |
553 | if (!rx8025) { | 553 | if (!rx8025) { |
554 | dev_err(&adapter->dev, "failed to alloc memory\n"); | 554 | dev_err(&adapter->dev, "failed to alloc memory\n"); |
555 | err = -ENOMEM; | 555 | err = -ENOMEM; |
@@ -562,7 +562,7 @@ static int rx8025_probe(struct i2c_client *client, | |||
562 | 562 | ||
563 | err = rx8025_init_client(client, &need_reset); | 563 | err = rx8025_init_client(client, &need_reset); |
564 | if (err) | 564 | if (err) |
565 | goto errout_free; | 565 | goto errout; |
566 | 566 | ||
567 | if (need_reset) { | 567 | if (need_reset) { |
568 | struct rtc_time tm; | 568 | struct rtc_time tm; |
@@ -572,12 +572,12 @@ static int rx8025_probe(struct i2c_client *client, | |||
572 | rx8025_set_time(&client->dev, &tm); | 572 | rx8025_set_time(&client->dev, &tm); |
573 | } | 573 | } |
574 | 574 | ||
575 | rx8025->rtc = rtc_device_register(client->name, &client->dev, | 575 | rx8025->rtc = devm_rtc_device_register(&client->dev, client->name, |
576 | &rx8025_rtc_ops, THIS_MODULE); | 576 | &rx8025_rtc_ops, THIS_MODULE); |
577 | if (IS_ERR(rx8025->rtc)) { | 577 | if (IS_ERR(rx8025->rtc)) { |
578 | err = PTR_ERR(rx8025->rtc); | 578 | err = PTR_ERR(rx8025->rtc); |
579 | dev_err(&client->dev, "unable to register the class device\n"); | 579 | dev_err(&client->dev, "unable to register the class device\n"); |
580 | goto errout_free; | 580 | goto errout; |
581 | } | 581 | } |
582 | 582 | ||
583 | if (client->irq > 0) { | 583 | if (client->irq > 0) { |
@@ -586,7 +586,7 @@ static int rx8025_probe(struct i2c_client *client, | |||
586 | 0, "rx8025", client); | 586 | 0, "rx8025", client); |
587 | if (err) { | 587 | if (err) { |
588 | dev_err(&client->dev, "unable to request IRQ\n"); | 588 | dev_err(&client->dev, "unable to request IRQ\n"); |
589 | goto errout_reg; | 589 | goto errout; |
590 | } | 590 | } |
591 | } | 591 | } |
592 | 592 | ||
@@ -603,12 +603,6 @@ errout_irq: | |||
603 | if (client->irq > 0) | 603 | if (client->irq > 0) |
604 | free_irq(client->irq, client); | 604 | free_irq(client->irq, client); |
605 | 605 | ||
606 | errout_reg: | ||
607 | rtc_device_unregister(rx8025->rtc); | ||
608 | |||
609 | errout_free: | ||
610 | kfree(rx8025); | ||
611 | |||
612 | errout: | 606 | errout: |
613 | dev_err(&adapter->dev, "probing for rx8025 failed\n"); | 607 | dev_err(&adapter->dev, "probing for rx8025 failed\n"); |
614 | return err; | 608 | return err; |
@@ -629,8 +623,6 @@ static int rx8025_remove(struct i2c_client *client) | |||
629 | } | 623 | } |
630 | 624 | ||
631 | rx8025_sysfs_unregister(&client->dev); | 625 | rx8025_sysfs_unregister(&client->dev); |
632 | rtc_device_unregister(rx8025->rtc); | ||
633 | kfree(rx8025); | ||
634 | return 0; | 626 | return 0; |
635 | } | 627 | } |
636 | 628 | ||
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c index 07f3037b18f4..00b0eb7fe166 100644 --- a/drivers/rtc/rtc-rx8581.c +++ b/drivers/rtc/rtc-rx8581.c | |||
@@ -251,11 +251,6 @@ static int rx8581_probe(struct i2c_client *client, | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | static int rx8581_remove(struct i2c_client *client) | ||
255 | { | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static const struct i2c_device_id rx8581_id[] = { | 254 | static const struct i2c_device_id rx8581_id[] = { |
260 | { "rx8581", 0 }, | 255 | { "rx8581", 0 }, |
261 | { } | 256 | { } |
@@ -268,7 +263,6 @@ static struct i2c_driver rx8581_driver = { | |||
268 | .owner = THIS_MODULE, | 263 | .owner = THIS_MODULE, |
269 | }, | 264 | }, |
270 | .probe = rx8581_probe, | 265 | .probe = rx8581_probe, |
271 | .remove = rx8581_remove, | ||
272 | .id_table = rx8581_id, | 266 | .id_table = rx8581_id, |
273 | }; | 267 | }; |
274 | 268 | ||
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 0b495e8b8e66..7afd373b9595 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -421,8 +421,6 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en) | |||
421 | 421 | ||
422 | static int s3c_rtc_remove(struct platform_device *dev) | 422 | static int s3c_rtc_remove(struct platform_device *dev) |
423 | { | 423 | { |
424 | platform_set_drvdata(dev, NULL); | ||
425 | |||
426 | s3c_rtc_setaie(&dev->dev, 0); | 424 | s3c_rtc_setaie(&dev->dev, 0); |
427 | 425 | ||
428 | clk_unprepare(rtc_clk); | 426 | clk_unprepare(rtc_clk); |
@@ -549,23 +547,20 @@ static int s3c_rtc_probe(struct platform_device *pdev) | |||
549 | 0, "s3c2410-rtc alarm", rtc); | 547 | 0, "s3c2410-rtc alarm", rtc); |
550 | if (ret) { | 548 | if (ret) { |
551 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); | 549 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); |
552 | goto err_alarm_irq; | 550 | goto err_nortc; |
553 | } | 551 | } |
554 | 552 | ||
555 | ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq, | 553 | ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq, |
556 | 0, "s3c2410-rtc tick", rtc); | 554 | 0, "s3c2410-rtc tick", rtc); |
557 | if (ret) { | 555 | if (ret) { |
558 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); | 556 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); |
559 | goto err_alarm_irq; | 557 | goto err_nortc; |
560 | } | 558 | } |
561 | 559 | ||
562 | clk_disable(rtc_clk); | 560 | clk_disable(rtc_clk); |
563 | 561 | ||
564 | return 0; | 562 | return 0; |
565 | 563 | ||
566 | err_alarm_irq: | ||
567 | platform_set_drvdata(pdev, NULL); | ||
568 | |||
569 | err_nortc: | 564 | err_nortc: |
570 | s3c_rtc_enable(pdev, 0); | 565 | s3c_rtc_enable(pdev, 0); |
571 | clk_disable_unprepare(rtc_clk); | 566 | clk_disable_unprepare(rtc_clk); |
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 00605601dbf7..0f7adeb1944a 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -249,7 +249,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev) | |||
249 | 249 | ||
250 | ret = clk_prepare_enable(info->clk); | 250 | ret = clk_prepare_enable(info->clk); |
251 | if (ret) | 251 | if (ret) |
252 | goto err_enable_clk; | 252 | return ret; |
253 | /* | 253 | /* |
254 | * According to the manual we should be able to let RTTR be zero | 254 | * According to the manual we should be able to let RTTR be zero |
255 | * and then a default diviser for a 32.768KHz clock is used. | 255 | * and then a default diviser for a 32.768KHz clock is used. |
@@ -303,8 +303,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev) | |||
303 | return 0; | 303 | return 0; |
304 | err_dev: | 304 | err_dev: |
305 | clk_disable_unprepare(info->clk); | 305 | clk_disable_unprepare(info->clk); |
306 | err_enable_clk: | ||
307 | platform_set_drvdata(pdev, NULL); | ||
308 | return ret; | 306 | return ret; |
309 | } | 307 | } |
310 | 308 | ||
@@ -312,10 +310,8 @@ static int sa1100_rtc_remove(struct platform_device *pdev) | |||
312 | { | 310 | { |
313 | struct sa1100_rtc *info = platform_get_drvdata(pdev); | 311 | struct sa1100_rtc *info = platform_get_drvdata(pdev); |
314 | 312 | ||
315 | if (info) { | 313 | if (info) |
316 | clk_disable_unprepare(info->clk); | 314 | clk_disable_unprepare(info->clk); |
317 | platform_set_drvdata(pdev, NULL); | ||
318 | } | ||
319 | 315 | ||
320 | return 0; | 316 | return 0; |
321 | } | 317 | } |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 8d5bd2e36776..6d87e26355a3 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -593,7 +593,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
593 | char clk_name[6]; | 593 | char clk_name[6]; |
594 | int clk_id, ret; | 594 | int clk_id, ret; |
595 | 595 | ||
596 | rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); | 596 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); |
597 | if (unlikely(!rtc)) | 597 | if (unlikely(!rtc)) |
598 | return -ENOMEM; | 598 | return -ENOMEM; |
599 | 599 | ||
@@ -602,9 +602,8 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
602 | /* get periodic/carry/alarm irqs */ | 602 | /* get periodic/carry/alarm irqs */ |
603 | ret = platform_get_irq(pdev, 0); | 603 | ret = platform_get_irq(pdev, 0); |
604 | if (unlikely(ret <= 0)) { | 604 | if (unlikely(ret <= 0)) { |
605 | ret = -ENOENT; | ||
606 | dev_err(&pdev->dev, "No IRQ resource\n"); | 605 | dev_err(&pdev->dev, "No IRQ resource\n"); |
607 | goto err_badres; | 606 | return -ENOENT; |
608 | } | 607 | } |
609 | 608 | ||
610 | rtc->periodic_irq = ret; | 609 | rtc->periodic_irq = ret; |
@@ -613,24 +612,21 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
613 | 612 | ||
614 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 613 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
615 | if (unlikely(res == NULL)) { | 614 | if (unlikely(res == NULL)) { |
616 | ret = -ENOENT; | ||
617 | dev_err(&pdev->dev, "No IO resource\n"); | 615 | dev_err(&pdev->dev, "No IO resource\n"); |
618 | goto err_badres; | 616 | return -ENOENT; |
619 | } | 617 | } |
620 | 618 | ||
621 | rtc->regsize = resource_size(res); | 619 | rtc->regsize = resource_size(res); |
622 | 620 | ||
623 | rtc->res = request_mem_region(res->start, rtc->regsize, pdev->name); | 621 | rtc->res = devm_request_mem_region(&pdev->dev, res->start, |
624 | if (unlikely(!rtc->res)) { | 622 | rtc->regsize, pdev->name); |
625 | ret = -EBUSY; | 623 | if (unlikely(!rtc->res)) |
626 | goto err_badres; | 624 | return -EBUSY; |
627 | } | ||
628 | 625 | ||
629 | rtc->regbase = ioremap_nocache(rtc->res->start, rtc->regsize); | 626 | rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start, |
630 | if (unlikely(!rtc->regbase)) { | 627 | rtc->regsize); |
631 | ret = -EINVAL; | 628 | if (unlikely(!rtc->regbase)) |
632 | goto err_badmap; | 629 | return -EINVAL; |
633 | } | ||
634 | 630 | ||
635 | clk_id = pdev->id; | 631 | clk_id = pdev->id; |
636 | /* With a single device, the clock id is still "rtc0" */ | 632 | /* With a single device, the clock id is still "rtc0" */ |
@@ -639,7 +635,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
639 | 635 | ||
640 | snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id); | 636 | snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id); |
641 | 637 | ||
642 | rtc->clk = clk_get(&pdev->dev, clk_name); | 638 | rtc->clk = devm_clk_get(&pdev->dev, clk_name); |
643 | if (IS_ERR(rtc->clk)) { | 639 | if (IS_ERR(rtc->clk)) { |
644 | /* | 640 | /* |
645 | * No error handling for rtc->clk intentionally, not all | 641 | * No error handling for rtc->clk intentionally, not all |
@@ -665,8 +661,8 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
665 | 661 | ||
666 | if (rtc->carry_irq <= 0) { | 662 | if (rtc->carry_irq <= 0) { |
667 | /* register shared periodic/carry/alarm irq */ | 663 | /* register shared periodic/carry/alarm irq */ |
668 | ret = request_irq(rtc->periodic_irq, sh_rtc_shared, | 664 | ret = devm_request_irq(&pdev->dev, rtc->periodic_irq, |
669 | 0, "sh-rtc", rtc); | 665 | sh_rtc_shared, 0, "sh-rtc", rtc); |
670 | if (unlikely(ret)) { | 666 | if (unlikely(ret)) { |
671 | dev_err(&pdev->dev, | 667 | dev_err(&pdev->dev, |
672 | "request IRQ failed with %d, IRQ %d\n", ret, | 668 | "request IRQ failed with %d, IRQ %d\n", ret, |
@@ -675,8 +671,8 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
675 | } | 671 | } |
676 | } else { | 672 | } else { |
677 | /* register periodic/carry/alarm irqs */ | 673 | /* register periodic/carry/alarm irqs */ |
678 | ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, | 674 | ret = devm_request_irq(&pdev->dev, rtc->periodic_irq, |
679 | 0, "sh-rtc period", rtc); | 675 | sh_rtc_periodic, 0, "sh-rtc period", rtc); |
680 | if (unlikely(ret)) { | 676 | if (unlikely(ret)) { |
681 | dev_err(&pdev->dev, | 677 | dev_err(&pdev->dev, |
682 | "request period IRQ failed with %d, IRQ %d\n", | 678 | "request period IRQ failed with %d, IRQ %d\n", |
@@ -684,24 +680,21 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
684 | goto err_unmap; | 680 | goto err_unmap; |
685 | } | 681 | } |
686 | 682 | ||
687 | ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, | 683 | ret = devm_request_irq(&pdev->dev, rtc->carry_irq, |
688 | 0, "sh-rtc carry", rtc); | 684 | sh_rtc_interrupt, 0, "sh-rtc carry", rtc); |
689 | if (unlikely(ret)) { | 685 | if (unlikely(ret)) { |
690 | dev_err(&pdev->dev, | 686 | dev_err(&pdev->dev, |
691 | "request carry IRQ failed with %d, IRQ %d\n", | 687 | "request carry IRQ failed with %d, IRQ %d\n", |
692 | ret, rtc->carry_irq); | 688 | ret, rtc->carry_irq); |
693 | free_irq(rtc->periodic_irq, rtc); | ||
694 | goto err_unmap; | 689 | goto err_unmap; |
695 | } | 690 | } |
696 | 691 | ||
697 | ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, | 692 | ret = devm_request_irq(&pdev->dev, rtc->alarm_irq, |
698 | 0, "sh-rtc alarm", rtc); | 693 | sh_rtc_alarm, 0, "sh-rtc alarm", rtc); |
699 | if (unlikely(ret)) { | 694 | if (unlikely(ret)) { |
700 | dev_err(&pdev->dev, | 695 | dev_err(&pdev->dev, |
701 | "request alarm IRQ failed with %d, IRQ %d\n", | 696 | "request alarm IRQ failed with %d, IRQ %d\n", |
702 | ret, rtc->alarm_irq); | 697 | ret, rtc->alarm_irq); |
703 | free_irq(rtc->carry_irq, rtc); | ||
704 | free_irq(rtc->periodic_irq, rtc); | ||
705 | goto err_unmap; | 698 | goto err_unmap; |
706 | } | 699 | } |
707 | } | 700 | } |
@@ -714,13 +707,10 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
714 | sh_rtc_setaie(&pdev->dev, 0); | 707 | sh_rtc_setaie(&pdev->dev, 0); |
715 | sh_rtc_setcie(&pdev->dev, 0); | 708 | sh_rtc_setcie(&pdev->dev, 0); |
716 | 709 | ||
717 | rtc->rtc_dev = rtc_device_register("sh", &pdev->dev, | 710 | rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "sh", |
718 | &sh_rtc_ops, THIS_MODULE); | 711 | &sh_rtc_ops, THIS_MODULE); |
719 | if (IS_ERR(rtc->rtc_dev)) { | 712 | if (IS_ERR(rtc->rtc_dev)) { |
720 | ret = PTR_ERR(rtc->rtc_dev); | 713 | ret = PTR_ERR(rtc->rtc_dev); |
721 | free_irq(rtc->periodic_irq, rtc); | ||
722 | free_irq(rtc->carry_irq, rtc); | ||
723 | free_irq(rtc->alarm_irq, rtc); | ||
724 | goto err_unmap; | 714 | goto err_unmap; |
725 | } | 715 | } |
726 | 716 | ||
@@ -737,12 +727,6 @@ static int __init sh_rtc_probe(struct platform_device *pdev) | |||
737 | 727 | ||
738 | err_unmap: | 728 | err_unmap: |
739 | clk_disable(rtc->clk); | 729 | clk_disable(rtc->clk); |
740 | clk_put(rtc->clk); | ||
741 | iounmap(rtc->regbase); | ||
742 | err_badmap: | ||
743 | release_mem_region(rtc->res->start, rtc->regsize); | ||
744 | err_badres: | ||
745 | kfree(rtc); | ||
746 | 730 | ||
747 | return ret; | 731 | return ret; |
748 | } | 732 | } |
@@ -751,28 +735,12 @@ static int __exit sh_rtc_remove(struct platform_device *pdev) | |||
751 | { | 735 | { |
752 | struct sh_rtc *rtc = platform_get_drvdata(pdev); | 736 | struct sh_rtc *rtc = platform_get_drvdata(pdev); |
753 | 737 | ||
754 | rtc_device_unregister(rtc->rtc_dev); | ||
755 | sh_rtc_irq_set_state(&pdev->dev, 0); | 738 | sh_rtc_irq_set_state(&pdev->dev, 0); |
756 | 739 | ||
757 | sh_rtc_setaie(&pdev->dev, 0); | 740 | sh_rtc_setaie(&pdev->dev, 0); |
758 | sh_rtc_setcie(&pdev->dev, 0); | 741 | sh_rtc_setcie(&pdev->dev, 0); |
759 | 742 | ||
760 | free_irq(rtc->periodic_irq, rtc); | ||
761 | |||
762 | if (rtc->carry_irq > 0) { | ||
763 | free_irq(rtc->carry_irq, rtc); | ||
764 | free_irq(rtc->alarm_irq, rtc); | ||
765 | } | ||
766 | |||
767 | iounmap(rtc->regbase); | ||
768 | release_mem_region(rtc->res->start, rtc->regsize); | ||
769 | |||
770 | clk_disable(rtc->clk); | 743 | clk_disable(rtc->clk); |
771 | clk_put(rtc->clk); | ||
772 | |||
773 | platform_set_drvdata(pdev, NULL); | ||
774 | |||
775 | kfree(rtc); | ||
776 | 744 | ||
777 | return 0; | 745 | return 0; |
778 | } | 746 | } |
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c new file mode 100644 index 000000000000..aa7ed4b5f7f0 --- /dev/null +++ b/drivers/rtc/rtc-sirfsoc.c | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * SiRFSoC Real Time Clock interface for Linux | ||
3 | * | ||
4 | * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company. | ||
5 | * | ||
6 | * Licensed under GPLv2 or later. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/err.h> | ||
11 | #include <linux/rtc.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/rtc/sirfsoc_rtciobrg.h> | ||
17 | |||
18 | |||
19 | #define RTC_CN 0x00 | ||
20 | #define RTC_ALARM0 0x04 | ||
21 | #define RTC_ALARM1 0x18 | ||
22 | #define RTC_STATUS 0x08 | ||
23 | #define RTC_SW_VALUE 0x40 | ||
24 | #define SIRFSOC_RTC_AL1E (1<<6) | ||
25 | #define SIRFSOC_RTC_AL1 (1<<4) | ||
26 | #define SIRFSOC_RTC_HZE (1<<3) | ||
27 | #define SIRFSOC_RTC_AL0E (1<<2) | ||
28 | #define SIRFSOC_RTC_HZ (1<<1) | ||
29 | #define SIRFSOC_RTC_AL0 (1<<0) | ||
30 | #define RTC_DIV 0x0c | ||
31 | #define RTC_DEEP_CTRL 0x14 | ||
32 | #define RTC_CLOCK_SWITCH 0x1c | ||
33 | #define SIRFSOC_RTC_CLK 0x03 /* others are reserved */ | ||
34 | |||
35 | /* Refer to RTC DIV switch */ | ||
36 | #define RTC_HZ 16 | ||
37 | |||
38 | /* This macro is also defined in arch/arm/plat-sirfsoc/cpu.c */ | ||
39 | #define RTC_SHIFT 4 | ||
40 | |||
41 | #define INTR_SYSRTC_CN 0x48 | ||
42 | |||
43 | struct sirfsoc_rtc_drv { | ||
44 | struct rtc_device *rtc; | ||
45 | u32 rtc_base; | ||
46 | u32 irq; | ||
47 | /* Overflow for every 8 years extra time */ | ||
48 | u32 overflow_rtc; | ||
49 | #ifdef CONFIG_PM | ||
50 | u32 saved_counter; | ||
51 | u32 saved_overflow_rtc; | ||
52 | #endif | ||
53 | }; | ||
54 | |||
55 | static int sirfsoc_rtc_read_alarm(struct device *dev, | ||
56 | struct rtc_wkalrm *alrm) | ||
57 | { | ||
58 | unsigned long rtc_alarm, rtc_count; | ||
59 | struct sirfsoc_rtc_drv *rtcdrv; | ||
60 | |||
61 | rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev); | ||
62 | |||
63 | local_irq_disable(); | ||
64 | |||
65 | rtc_count = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); | ||
66 | |||
67 | rtc_alarm = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_ALARM0); | ||
68 | memset(alrm, 0, sizeof(struct rtc_wkalrm)); | ||
69 | |||
70 | /* | ||
71 | * assume alarm interval not beyond one round counter overflow_rtc: | ||
72 | * 0->0xffffffff | ||
73 | */ | ||
74 | /* if alarm is in next overflow cycle */ | ||
75 | if (rtc_count > rtc_alarm) | ||
76 | rtc_time_to_tm((rtcdrv->overflow_rtc + 1) | ||
77 | << (BITS_PER_LONG - RTC_SHIFT) | ||
78 | | rtc_alarm >> RTC_SHIFT, &(alrm->time)); | ||
79 | else | ||
80 | rtc_time_to_tm(rtcdrv->overflow_rtc | ||
81 | << (BITS_PER_LONG - RTC_SHIFT) | ||
82 | | rtc_alarm >> RTC_SHIFT, &(alrm->time)); | ||
83 | if (sirfsoc_rtc_iobrg_readl( | ||
84 | rtcdrv->rtc_base + RTC_STATUS) & SIRFSOC_RTC_AL0E) | ||
85 | alrm->enabled = 1; | ||
86 | local_irq_enable(); | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int sirfsoc_rtc_set_alarm(struct device *dev, | ||
92 | struct rtc_wkalrm *alrm) | ||
93 | { | ||
94 | unsigned long rtc_status_reg, rtc_alarm; | ||
95 | struct sirfsoc_rtc_drv *rtcdrv; | ||
96 | rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev); | ||
97 | |||
98 | if (alrm->enabled) { | ||
99 | rtc_tm_to_time(&(alrm->time), &rtc_alarm); | ||
100 | |||
101 | local_irq_disable(); | ||
102 | |||
103 | rtc_status_reg = sirfsoc_rtc_iobrg_readl( | ||
104 | rtcdrv->rtc_base + RTC_STATUS); | ||
105 | if (rtc_status_reg & SIRFSOC_RTC_AL0E) { | ||
106 | /* | ||
107 | * An ongoing alarm is in progress - ignore it and do not | ||
108 | * return EBUSY | ||
109 | */ | ||
110 | dev_info(dev, "An old alarm was set, will be replaced by a new one\n"); | ||
111 | } | ||
112 | |||
113 | sirfsoc_rtc_iobrg_writel( | ||
114 | rtc_alarm << RTC_SHIFT, rtcdrv->rtc_base + RTC_ALARM0); | ||
115 | rtc_status_reg &= ~0x07; /* mask out the lower status bits */ | ||
116 | /* | ||
117 | * This bit RTC_AL sets it as a wake-up source for Sleep Mode | ||
118 | * Writing 1 into this bit will clear it | ||
119 | */ | ||
120 | rtc_status_reg |= SIRFSOC_RTC_AL0; | ||
121 | /* enable the RTC alarm interrupt */ | ||
122 | rtc_status_reg |= SIRFSOC_RTC_AL0E; | ||
123 | sirfsoc_rtc_iobrg_writel( | ||
124 | rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS); | ||
125 | local_irq_enable(); | ||
126 | } else { | ||
127 | /* | ||
128 | * if this function was called with enabled=0 | ||
129 | * then it could mean that the application is | ||
130 | * trying to cancel an ongoing alarm | ||
131 | */ | ||
132 | local_irq_disable(); | ||
133 | |||
134 | rtc_status_reg = sirfsoc_rtc_iobrg_readl( | ||
135 | rtcdrv->rtc_base + RTC_STATUS); | ||
136 | if (rtc_status_reg & SIRFSOC_RTC_AL0E) { | ||
137 | /* clear the RTC status register's alarm bit */ | ||
138 | rtc_status_reg &= ~0x07; | ||
139 | /* write 1 into SIRFSOC_RTC_AL0 to force a clear */ | ||
140 | rtc_status_reg |= (SIRFSOC_RTC_AL0); | ||
141 | /* Clear the Alarm enable bit */ | ||
142 | rtc_status_reg &= ~(SIRFSOC_RTC_AL0E); | ||
143 | |||
144 | sirfsoc_rtc_iobrg_writel(rtc_status_reg, | ||
145 | rtcdrv->rtc_base + RTC_STATUS); | ||
146 | } | ||
147 | |||
148 | local_irq_enable(); | ||
149 | } | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static int sirfsoc_rtc_read_time(struct device *dev, | ||
155 | struct rtc_time *tm) | ||
156 | { | ||
157 | unsigned long tmp_rtc = 0; | ||
158 | struct sirfsoc_rtc_drv *rtcdrv; | ||
159 | rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev); | ||
160 | /* | ||
161 | * This workaround is taken from WinCE - it still needs validation | ||
162 | * for correctness. To work around the sirfsoc RTC counter double | ||
163 | * sync logic failure, read several times until a stable value is read. | ||
164 | */ | ||
165 | do { | ||
166 | tmp_rtc = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); | ||
167 | cpu_relax(); | ||
168 | } while (tmp_rtc != sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN)); | ||
169 | |||
170 | rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) | | ||
171 | tmp_rtc >> RTC_SHIFT, tm); | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int sirfsoc_rtc_set_time(struct device *dev, | ||
176 | struct rtc_time *tm) | ||
177 | { | ||
178 | unsigned long rtc_time; | ||
179 | struct sirfsoc_rtc_drv *rtcdrv; | ||
180 | rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev); | ||
181 | |||
182 | rtc_tm_to_time(tm, &rtc_time); | ||
183 | |||
184 | rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT); | ||
185 | |||
186 | sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc, | ||
187 | rtcdrv->rtc_base + RTC_SW_VALUE); | ||
188 | sirfsoc_rtc_iobrg_writel( | ||
189 | rtc_time << RTC_SHIFT, rtcdrv->rtc_base + RTC_CN); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static int sirfsoc_rtc_ioctl(struct device *dev, unsigned int cmd, | ||
195 | unsigned long arg) | ||
196 | { | ||
197 | switch (cmd) { | ||
198 | case RTC_PIE_ON: | ||
199 | case RTC_PIE_OFF: | ||
200 | case RTC_UIE_ON: | ||
201 | case RTC_UIE_OFF: | ||
202 | case RTC_AIE_ON: | ||
203 | case RTC_AIE_OFF: | ||
204 | return 0; | ||
205 | |||
206 | default: | ||
207 | return -ENOIOCTLCMD; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | static const struct rtc_class_ops sirfsoc_rtc_ops = { | ||
212 | .read_time = sirfsoc_rtc_read_time, | ||
213 | .set_time = sirfsoc_rtc_set_time, | ||
214 | .read_alarm = sirfsoc_rtc_read_alarm, | ||
215 | .set_alarm = sirfsoc_rtc_set_alarm, | ||
216 | .ioctl = sirfsoc_rtc_ioctl | ||
217 | }; | ||
218 | |||
219 | static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata) | ||
220 | { | ||
221 | struct sirfsoc_rtc_drv *rtcdrv = pdata; | ||
222 | unsigned long rtc_status_reg = 0x0; | ||
223 | unsigned long events = 0x0; | ||
224 | |||
225 | rtc_status_reg = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_STATUS); | ||
226 | /* this bit will be set ONLY if an alarm was active | ||
227 | * and it expired NOW | ||
228 | * So this is being used as an ASSERT | ||
229 | */ | ||
230 | if (rtc_status_reg & SIRFSOC_RTC_AL0) { | ||
231 | /* | ||
232 | * clear the RTC status register's alarm bit | ||
233 | * mask out the lower status bits | ||
234 | */ | ||
235 | rtc_status_reg &= ~0x07; | ||
236 | /* write 1 into SIRFSOC_RTC_AL0 to ACK the alarm interrupt */ | ||
237 | rtc_status_reg |= (SIRFSOC_RTC_AL0); | ||
238 | /* Clear the Alarm enable bit */ | ||
239 | rtc_status_reg &= ~(SIRFSOC_RTC_AL0E); | ||
240 | } | ||
241 | sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS); | ||
242 | /* this should wake up any apps polling/waiting on the read | ||
243 | * after setting the alarm | ||
244 | */ | ||
245 | events |= RTC_IRQF | RTC_AF; | ||
246 | rtc_update_irq(rtcdrv->rtc, 1, events); | ||
247 | |||
248 | return IRQ_HANDLED; | ||
249 | } | ||
250 | |||
251 | static const struct of_device_id sirfsoc_rtc_of_match[] = { | ||
252 | { .compatible = "sirf,prima2-sysrtc"}, | ||
253 | {}, | ||
254 | }; | ||
255 | MODULE_DEVICE_TABLE(of, sirfsoc_rtc_of_match); | ||
256 | |||
257 | static int sirfsoc_rtc_probe(struct platform_device *pdev) | ||
258 | { | ||
259 | int err; | ||
260 | unsigned long rtc_div; | ||
261 | struct sirfsoc_rtc_drv *rtcdrv; | ||
262 | struct device_node *np = pdev->dev.of_node; | ||
263 | |||
264 | rtcdrv = devm_kzalloc(&pdev->dev, | ||
265 | sizeof(struct sirfsoc_rtc_drv), GFP_KERNEL); | ||
266 | if (rtcdrv == NULL) { | ||
267 | dev_err(&pdev->dev, | ||
268 | "%s: can't alloc mem for drv struct\n", | ||
269 | pdev->name); | ||
270 | return -ENOMEM; | ||
271 | } | ||
272 | |||
273 | err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base); | ||
274 | if (err) { | ||
275 | dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n"); | ||
276 | goto error; | ||
277 | } | ||
278 | |||
279 | platform_set_drvdata(pdev, rtcdrv); | ||
280 | |||
281 | /* Register rtc alarm as a wakeup source */ | ||
282 | device_init_wakeup(&pdev->dev, 1); | ||
283 | |||
284 | /* | ||
285 | * Set SYS_RTC counter in RTC_HZ HZ Units | ||
286 | * We are using a 32K RTC crystal: (32768 / RTC_HZ / 2) - 1 | ||
287 | * With RTC_HZ = 16, RTC_DIV = 1023; | ||
288 | */ | ||
289 | rtc_div = ((32768 / RTC_HZ) / 2) - 1; | ||
290 | sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV); | ||
291 | |||
292 | rtcdrv->rtc = rtc_device_register(pdev->name, &(pdev->dev), | ||
293 | &sirfsoc_rtc_ops, THIS_MODULE); | ||
294 | if (IS_ERR(rtcdrv->rtc)) { | ||
295 | err = PTR_ERR(rtcdrv->rtc); | ||
296 | dev_err(&pdev->dev, "can't register RTC device\n"); | ||
297 | return err; | ||
298 | } | ||
299 | |||
300 | /* 0x3 -> RTC_CLK */ | ||
301 | sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, | ||
302 | rtcdrv->rtc_base + RTC_CLOCK_SWITCH); | ||
303 | |||
304 | /* reset SYS RTC ALARM0 */ | ||
305 | sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0); | ||
306 | |||
307 | /* reset SYS RTC ALARM1 */ | ||
308 | sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1); | ||
309 | |||
310 | /* Restore RTC Overflow From Register After Command Reboot */ | ||
311 | rtcdrv->overflow_rtc = | ||
312 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); | ||
313 | |||
314 | rtcdrv->irq = platform_get_irq(pdev, 0); | ||
315 | err = devm_request_irq( | ||
316 | &pdev->dev, | ||
317 | rtcdrv->irq, | ||
318 | sirfsoc_rtc_irq_handler, | ||
319 | IRQF_SHARED, | ||
320 | pdev->name, | ||
321 | rtcdrv); | ||
322 | if (err) { | ||
323 | dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n"); | ||
324 | goto error; | ||
325 | } | ||
326 | |||
327 | return 0; | ||
328 | |||
329 | error: | ||
330 | if (rtcdrv->rtc) | ||
331 | rtc_device_unregister(rtcdrv->rtc); | ||
332 | |||
333 | return err; | ||
334 | } | ||
335 | |||
336 | static int sirfsoc_rtc_remove(struct platform_device *pdev) | ||
337 | { | ||
338 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
339 | |||
340 | device_init_wakeup(&pdev->dev, 0); | ||
341 | rtc_device_unregister(rtcdrv->rtc); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | #ifdef CONFIG_PM | ||
347 | |||
348 | static int sirfsoc_rtc_suspend(struct device *dev) | ||
349 | { | ||
350 | struct platform_device *pdev = to_platform_device(dev); | ||
351 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
352 | rtcdrv->overflow_rtc = | ||
353 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); | ||
354 | |||
355 | rtcdrv->saved_counter = | ||
356 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); | ||
357 | rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc; | ||
358 | if (device_may_wakeup(&pdev->dev)) | ||
359 | enable_irq_wake(rtcdrv->irq); | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | static int sirfsoc_rtc_freeze(struct device *dev) | ||
365 | { | ||
366 | sirfsoc_rtc_suspend(dev); | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int sirfsoc_rtc_thaw(struct device *dev) | ||
372 | { | ||
373 | u32 tmp; | ||
374 | struct sirfsoc_rtc_drv *rtcdrv; | ||
375 | rtcdrv = (struct sirfsoc_rtc_drv *)dev_get_drvdata(dev); | ||
376 | |||
377 | /* | ||
378 | * if resuming from a snapshot and the rtc power was lost, | ||
379 | * restore the rtc settings | ||
380 | */ | ||
381 | if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl( | ||
382 | rtcdrv->rtc_base + RTC_CLOCK_SWITCH)) { | ||
383 | u32 rtc_div; | ||
384 | /* 0x3 -> RTC_CLK */ | ||
385 | sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, | ||
386 | rtcdrv->rtc_base + RTC_CLOCK_SWITCH); | ||
387 | /* | ||
388 | * Set SYS_RTC counter in RTC_HZ HZ Units | ||
389 | * We are using a 32K RTC crystal: (32768 / RTC_HZ / 2) - 1 | ||
390 | * With RTC_HZ = 16, RTC_DIV = 1023; | ||
391 | */ | ||
392 | rtc_div = ((32768 / RTC_HZ) / 2) - 1; | ||
393 | |||
394 | sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV); | ||
395 | |||
396 | /* reset SYS RTC ALARM0 */ | ||
397 | sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0); | ||
398 | |||
399 | /* reset SYS RTC ALARM1 */ | ||
400 | sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1); | ||
401 | } | ||
402 | rtcdrv->overflow_rtc = rtcdrv->saved_overflow_rtc; | ||
403 | |||
404 | /* | ||
405 | * if the current counter is smaller than the previous one, | ||
406 | * the counter overflowed during sleep | ||
407 | */ | ||
408 | tmp = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); | ||
409 | if (tmp <= rtcdrv->saved_counter) | ||
410 | rtcdrv->overflow_rtc++; | ||
411 | /* | ||
412 | * The PWRC value may have changed across suspend; restore the | ||
413 | * overflow count kept in memory back to the register | ||
414 | */ | ||
415 | sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc, | ||
416 | rtcdrv->rtc_base + RTC_SW_VALUE); | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | static int sirfsoc_rtc_resume(struct device *dev) | ||
422 | { | ||
423 | struct platform_device *pdev = to_platform_device(dev); | ||
424 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
425 | sirfsoc_rtc_thaw(dev); | ||
426 | if (device_may_wakeup(&pdev->dev)) | ||
427 | disable_irq_wake(rtcdrv->irq); | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static int sirfsoc_rtc_restore(struct device *dev) | ||
433 | { | ||
434 | struct platform_device *pdev = to_platform_device(dev); | ||
435 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
436 | |||
437 | if (device_may_wakeup(&pdev->dev)) | ||
438 | disable_irq_wake(rtcdrv->irq); | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | #else | ||
443 | #define sirfsoc_rtc_suspend NULL | ||
444 | #define sirfsoc_rtc_resume NULL | ||
445 | #define sirfsoc_rtc_freeze NULL | ||
446 | #define sirfsoc_rtc_thaw NULL | ||
447 | #define sirfsoc_rtc_restore NULL | ||
448 | #endif | ||
449 | |||
450 | static const struct dev_pm_ops sirfsoc_rtc_pm_ops = { | ||
451 | .suspend = sirfsoc_rtc_suspend, | ||
452 | .resume = sirfsoc_rtc_resume, | ||
453 | .freeze = sirfsoc_rtc_freeze, | ||
454 | .thaw = sirfsoc_rtc_thaw, | ||
455 | .restore = sirfsoc_rtc_restore, | ||
456 | }; | ||
457 | |||
458 | static struct platform_driver sirfsoc_rtc_driver = { | ||
459 | .driver = { | ||
460 | .name = "sirfsoc-rtc", | ||
461 | .owner = THIS_MODULE, | ||
462 | #ifdef CONFIG_PM | ||
463 | .pm = &sirfsoc_rtc_pm_ops, | ||
464 | #endif | ||
465 | .of_match_table = of_match_ptr(sirfsoc_rtc_of_match), | ||
466 | }, | ||
467 | .probe = sirfsoc_rtc_probe, | ||
468 | .remove = sirfsoc_rtc_remove, | ||
469 | }; | ||
470 | module_platform_driver(sirfsoc_rtc_driver); | ||
471 | |||
472 | MODULE_DESCRIPTION("SiRF SoC rtc driver"); | ||
473 | MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>"); | ||
474 | MODULE_LICENSE("GPL v2"); | ||
475 | MODULE_ALIAS("platform:sirfsoc-rtc"); | ||
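The new rtc-sirfsoc.c driver keeps wall-clock time in a 32-bit hardware counter ticking at RTC_HZ = 16, so the low RTC_SHIFT = 4 bits are a sub-second fraction and the counter wraps every 2^28 seconds, roughly 8.5 years; the overflow count saved in RTC_SW_VALUE extends it. A small standalone illustration of that arithmetic (hypothetical helper names, not driver code):

#include <stdint.h>
#include <stdio.h>

#define RTC_SHIFT	4
#define RTC_HZ		16
#define BITS_PER_LONG	32	/* prima2 is a 32-bit SoC */

/* mirrors the value passed to rtc_time_to_tm() in sirfsoc_rtc_read_time() */
static uint64_t counter_to_seconds(uint32_t overflow, uint32_t counter)
{
	return ((uint64_t)overflow << (BITS_PER_LONG - RTC_SHIFT)) |
	       (counter >> RTC_SHIFT);
}

int main(void)
{
	/* divider programmed into RTC_DIV for the 32.768 kHz crystal */
	printf("RTC_DIV = %d\n", (32768 / RTC_HZ) / 2 - 1);	/* 1023 */

	/* one wrap of the 16 Hz counter: 2^28 s, about 8.5 years */
	printf("wrap period = %llu s\n",
	       (unsigned long long)1 << (BITS_PER_LONG - RTC_SHIFT));

	/* one recorded overflow plus a counter reading of 32 ticks (2 s) */
	printf("example = %llu s\n",
	       (unsigned long long)counter_to_seconds(1, 32));
	return 0;
}

This is also why thaw() compares the current counter with the one saved at suspend and bumps overflow_rtc when the counter has gone backwards.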
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index b04f09a1df2a..316a342115b2 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c | |||
@@ -294,11 +294,6 @@ static int snvs_rtc_probe(struct platform_device *pdev) | |||
294 | return 0; | 294 | return 0; |
295 | } | 295 | } |
296 | 296 | ||
297 | static int snvs_rtc_remove(struct platform_device *pdev) | ||
298 | { | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | #ifdef CONFIG_PM_SLEEP | 297 | #ifdef CONFIG_PM_SLEEP |
303 | static int snvs_rtc_suspend(struct device *dev) | 298 | static int snvs_rtc_suspend(struct device *dev) |
304 | { | 299 | { |
@@ -337,7 +332,6 @@ static struct platform_driver snvs_rtc_driver = { | |||
337 | .of_match_table = of_match_ptr(snvs_dt_ids), | 332 | .of_match_table = of_match_ptr(snvs_dt_ids), |
338 | }, | 333 | }, |
339 | .probe = snvs_rtc_probe, | 334 | .probe = snvs_rtc_probe, |
340 | .remove = snvs_rtc_remove, | ||
341 | }; | 335 | }; |
342 | module_platform_driver(snvs_rtc_driver); | 336 | module_platform_driver(snvs_rtc_driver); |
343 | 337 | ||
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c index 574359c48f65..c492cf0ab8cd 100644 --- a/drivers/rtc/rtc-spear.c +++ b/drivers/rtc/rtc-spear.c | |||
@@ -417,7 +417,6 @@ static int spear_rtc_probe(struct platform_device *pdev) | |||
417 | return 0; | 417 | return 0; |
418 | 418 | ||
419 | err_disable_clock: | 419 | err_disable_clock: |
420 | platform_set_drvdata(pdev, NULL); | ||
421 | clk_disable_unprepare(config->clk); | 420 | clk_disable_unprepare(config->clk); |
422 | 421 | ||
423 | return status; | 422 | return status; |
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c index 987b5ec0ae56..f7d8a6db8078 100644 --- a/drivers/rtc/rtc-starfire.c +++ b/drivers/rtc/rtc-starfire.c | |||
@@ -51,17 +51,11 @@ static int __init starfire_rtc_probe(struct platform_device *pdev) | |||
51 | return 0; | 51 | return 0; |
52 | } | 52 | } |
53 | 53 | ||
54 | static int __exit starfire_rtc_remove(struct platform_device *pdev) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | static struct platform_driver starfire_rtc_driver = { | 54 | static struct platform_driver starfire_rtc_driver = { |
60 | .driver = { | 55 | .driver = { |
61 | .name = "rtc-starfire", | 56 | .name = "rtc-starfire", |
62 | .owner = THIS_MODULE, | 57 | .owner = THIS_MODULE, |
63 | }, | 58 | }, |
64 | .remove = __exit_p(starfire_rtc_remove), | ||
65 | }; | 59 | }; |
66 | 60 | ||
67 | module_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe); | 61 | module_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe); |
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 483ce086990b..90a3e864b8fe 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c | |||
@@ -225,7 +225,6 @@ static int stmp3xxx_rtc_remove(struct platform_device *pdev) | |||
225 | 225 | ||
226 | writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, | 226 | writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN, |
227 | rtc_data->io + STMP3XXX_RTC_CTRL_CLR); | 227 | rtc_data->io + STMP3XXX_RTC_CTRL_CLR); |
228 | platform_set_drvdata(pdev, NULL); | ||
229 | 228 | ||
230 | return 0; | 229 | return 0; |
231 | } | 230 | } |
@@ -274,25 +273,19 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev) | |||
274 | 273 | ||
275 | rtc_data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | 274 | rtc_data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
276 | &stmp3xxx_rtc_ops, THIS_MODULE); | 275 | &stmp3xxx_rtc_ops, THIS_MODULE); |
277 | if (IS_ERR(rtc_data->rtc)) { | 276 | if (IS_ERR(rtc_data->rtc)) |
278 | err = PTR_ERR(rtc_data->rtc); | 277 | return PTR_ERR(rtc_data->rtc); |
279 | goto out; | ||
280 | } | ||
281 | 278 | ||
282 | err = devm_request_irq(&pdev->dev, rtc_data->irq_alarm, | 279 | err = devm_request_irq(&pdev->dev, rtc_data->irq_alarm, |
283 | stmp3xxx_rtc_interrupt, 0, "RTC alarm", &pdev->dev); | 280 | stmp3xxx_rtc_interrupt, 0, "RTC alarm", &pdev->dev); |
284 | if (err) { | 281 | if (err) { |
285 | dev_err(&pdev->dev, "Cannot claim IRQ%d\n", | 282 | dev_err(&pdev->dev, "Cannot claim IRQ%d\n", |
286 | rtc_data->irq_alarm); | 283 | rtc_data->irq_alarm); |
287 | goto out; | 284 | return err; |
288 | } | 285 | } |
289 | 286 | ||
290 | stmp3xxx_wdt_register(pdev); | 287 | stmp3xxx_wdt_register(pdev); |
291 | return 0; | 288 | return 0; |
292 | |||
293 | out: | ||
294 | platform_set_drvdata(pdev, NULL); | ||
295 | return err; | ||
296 | } | 289 | } |
297 | 290 | ||
298 | #ifdef CONFIG_PM_SLEEP | 291 | #ifdef CONFIG_PM_SLEEP |
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c index ce42e5fa9e09..bc97ff91341d 100644 --- a/drivers/rtc/rtc-sun4v.c +++ b/drivers/rtc/rtc-sun4v.c | |||
@@ -92,17 +92,11 @@ static int __init sun4v_rtc_probe(struct platform_device *pdev) | |||
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | static int __exit sun4v_rtc_remove(struct platform_device *pdev) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static struct platform_driver sun4v_rtc_driver = { | 95 | static struct platform_driver sun4v_rtc_driver = { |
101 | .driver = { | 96 | .driver = { |
102 | .name = "rtc-sun4v", | 97 | .name = "rtc-sun4v", |
103 | .owner = THIS_MODULE, | 98 | .owner = THIS_MODULE, |
104 | }, | 99 | }, |
105 | .remove = __exit_p(sun4v_rtc_remove), | ||
106 | }; | 100 | }; |
107 | 101 | ||
108 | module_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe); | 102 | module_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe); |
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index b70e2bb63645..4b26f8672b2d 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c | |||
@@ -164,6 +164,7 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr, | |||
164 | { | 164 | { |
165 | ssize_t retval; | 165 | ssize_t retval; |
166 | unsigned long now, alarm; | 166 | unsigned long now, alarm; |
167 | unsigned long push = 0; | ||
167 | struct rtc_wkalrm alm; | 168 | struct rtc_wkalrm alm; |
168 | struct rtc_device *rtc = to_rtc_device(dev); | 169 | struct rtc_device *rtc = to_rtc_device(dev); |
169 | char *buf_ptr; | 170 | char *buf_ptr; |
@@ -180,13 +181,17 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr, | |||
180 | buf_ptr = (char *)buf; | 181 | buf_ptr = (char *)buf; |
181 | if (*buf_ptr == '+') { | 182 | if (*buf_ptr == '+') { |
182 | buf_ptr++; | 183 | buf_ptr++; |
183 | adjust = 1; | 184 | if (*buf_ptr == '=') { |
185 | buf_ptr++; | ||
186 | push = 1; | ||
187 | } else | ||
188 | adjust = 1; | ||
184 | } | 189 | } |
185 | alarm = simple_strtoul(buf_ptr, NULL, 0); | 190 | alarm = simple_strtoul(buf_ptr, NULL, 0); |
186 | if (adjust) { | 191 | if (adjust) { |
187 | alarm += now; | 192 | alarm += now; |
188 | } | 193 | } |
189 | if (alarm > now) { | 194 | if (alarm > now || push) { |
190 | /* Avoid accidentally clobbering active alarms; we can't | 195 | /* Avoid accidentally clobbering active alarms; we can't |
191 | * entirely prevent that here, without even the minimal | 196 | * entirely prevent that here, without even the minimal |
192 | * locking from the /dev/rtcN api. | 197 | * locking from the /dev/rtcN api. |
@@ -194,9 +199,14 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr, | |||
194 | retval = rtc_read_alarm(rtc, &alm); | 199 | retval = rtc_read_alarm(rtc, &alm); |
195 | if (retval < 0) | 200 | if (retval < 0) |
196 | return retval; | 201 | return retval; |
197 | if (alm.enabled) | 202 | if (alm.enabled) { |
198 | return -EBUSY; | 203 | if (push) { |
199 | 204 | rtc_tm_to_time(&alm.time, &push); | |
205 | alarm += push; | ||
206 | } else | ||
207 | return -EBUSY; | ||
208 | } else if (push) | ||
209 | return -EINVAL; | ||
200 | alm.enabled = 1; | 210 | alm.enabled = 1; |
201 | } else { | 211 | } else { |
202 | alm.enabled = 0; | 212 | alm.enabled = 0; |
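The rtc-sysfs.c change teaches the wakealarm attribute a third input form: a bare number is still an absolute time, "+N" is still N seconds from now, and the new "+=N" pushes an already-enabled alarm N seconds past its current expiry; writing "+=N" with no alarm armed fails with -EINVAL. A condensed, standalone model of the resulting decision logic, not the sysfs handler itself:

#include <errno.h>
#include <stdbool.h>

struct alarm_state {
	bool enabled;
	unsigned long expires;	/* seconds since the epoch */
};

/*
 * value  -> absolute time      (adjust = push = false)
 * +N     -> now + N            (adjust = true)
 * +=N    -> current alarm + N  (push = true, only while an alarm is armed)
 */
static int wakealarm_apply(struct alarm_state *alm, unsigned long now,
			   unsigned long value, bool adjust, bool push)
{
	unsigned long alarm = value;

	if (adjust)
		alarm += now;

	if (push) {
		if (!alm->enabled)
			return -EINVAL;		/* nothing to push forward */
		alarm += alm->expires;
	} else if (alm->enabled && alarm > now) {
		return -EBUSY;			/* don't clobber an armed alarm */
	}

	if (alarm > now || push) {
		alm->enabled = true;
		alm->expires = alarm;
	} else {
		alm->enabled = false;		/* a time in the past disarms it */
	}
	return 0;
}

In practice, "echo +=600 > /sys/class/rtc/rtc0/wakealarm" moves a pending wakeup ten minutes further out without having to read the current alarm back first.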
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c index fc3dee95f166..ff9632eb79f2 100644 --- a/drivers/rtc/rtc-tile.c +++ b/drivers/rtc/rtc-tile.c | |||
@@ -91,23 +91,12 @@ static int tile_rtc_probe(struct platform_device *dev) | |||
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | /* | ||
95 | * Device cleanup routine. | ||
96 | */ | ||
97 | static int tile_rtc_remove(struct platform_device *dev) | ||
98 | { | ||
99 | platform_set_drvdata(dev, NULL); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static struct platform_driver tile_rtc_platform_driver = { | 94 | static struct platform_driver tile_rtc_platform_driver = { |
105 | .driver = { | 95 | .driver = { |
106 | .name = "rtc-tile", | 96 | .name = "rtc-tile", |
107 | .owner = THIS_MODULE, | 97 | .owner = THIS_MODULE, |
108 | }, | 98 | }, |
109 | .probe = tile_rtc_probe, | 99 | .probe = tile_rtc_probe, |
110 | .remove = tile_rtc_remove, | ||
111 | }; | 100 | }; |
112 | 101 | ||
113 | /* | 102 | /* |
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c index 72662eafb938..3e400dce2d06 100644 --- a/drivers/rtc/rtc-tps80031.c +++ b/drivers/rtc/rtc-tps80031.c | |||
@@ -298,11 +298,6 @@ static int tps80031_rtc_probe(struct platform_device *pdev) | |||
298 | return 0; | 298 | return 0; |
299 | } | 299 | } |
300 | 300 | ||
301 | static int tps80031_rtc_remove(struct platform_device *pdev) | ||
302 | { | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | #ifdef CONFIG_PM_SLEEP | 301 | #ifdef CONFIG_PM_SLEEP |
307 | static int tps80031_rtc_suspend(struct device *dev) | 302 | static int tps80031_rtc_suspend(struct device *dev) |
308 | { | 303 | { |
@@ -333,7 +328,6 @@ static struct platform_driver tps80031_rtc_driver = { | |||
333 | .pm = &tps80031_pm_ops, | 328 | .pm = &tps80031_pm_ops, |
334 | }, | 329 | }, |
335 | .probe = tps80031_rtc_probe, | 330 | .probe = tps80031_rtc_probe, |
336 | .remove = tps80031_rtc_remove, | ||
337 | }; | 331 | }; |
338 | 332 | ||
339 | module_platform_driver(tps80031_rtc_driver); | 333 | module_platform_driver(tps80031_rtc_driver); |
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index b2eab34f38d9..02faf3c4e0d5 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c | |||
@@ -213,12 +213,24 @@ static int mask_rtc_irq_bit(unsigned char bit) | |||
213 | 213 | ||
214 | static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) | 214 | static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) |
215 | { | 215 | { |
216 | struct platform_device *pdev = to_platform_device(dev); | ||
217 | int irq = platform_get_irq(pdev, 0); | ||
218 | static bool twl_rtc_wake_enabled; | ||
216 | int ret; | 219 | int ret; |
217 | 220 | ||
218 | if (enabled) | 221 | if (enabled) { |
219 | ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); | 222 | ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); |
220 | else | 223 | if (device_can_wakeup(dev) && !twl_rtc_wake_enabled) { |
224 | enable_irq_wake(irq); | ||
225 | twl_rtc_wake_enabled = true; | ||
226 | } | ||
227 | } else { | ||
221 | ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); | 228 | ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); |
229 | if (twl_rtc_wake_enabled) { | ||
230 | disable_irq_wake(irq); | ||
231 | twl_rtc_wake_enabled = false; | ||
232 | } | ||
233 | } | ||
222 | 234 | ||
223 | return ret; | 235 | return ret; |
224 | } | 236 | } |
@@ -469,6 +481,12 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
469 | if (irq <= 0) | 481 | if (irq <= 0) |
470 | goto out1; | 482 | goto out1; |
471 | 483 | ||
484 | /* Initialize the register map */ | ||
485 | if (twl_class_is_4030()) | ||
486 | rtc_reg_map = (u8 *)twl4030_rtc_reg_map; | ||
487 | else | ||
488 | rtc_reg_map = (u8 *)twl6030_rtc_reg_map; | ||
489 | |||
472 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); | 490 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); |
473 | if (ret < 0) | 491 | if (ret < 0) |
474 | goto out1; | 492 | goto out1; |
@@ -556,7 +574,6 @@ static int twl_rtc_remove(struct platform_device *pdev) | |||
556 | free_irq(irq, rtc); | 574 | free_irq(irq, rtc); |
557 | 575 | ||
558 | rtc_device_unregister(rtc); | 576 | rtc_device_unregister(rtc); |
559 | platform_set_drvdata(pdev, NULL); | ||
560 | return 0; | 577 | return 0; |
561 | } | 578 | } |
562 | 579 | ||
@@ -609,22 +626,7 @@ static struct platform_driver twl4030rtc_driver = { | |||
609 | }, | 626 | }, |
610 | }; | 627 | }; |
611 | 628 | ||
612 | static int __init twl_rtc_init(void) | 629 | module_platform_driver(twl4030rtc_driver); |
613 | { | ||
614 | if (twl_class_is_4030()) | ||
615 | rtc_reg_map = (u8 *) twl4030_rtc_reg_map; | ||
616 | else | ||
617 | rtc_reg_map = (u8 *) twl6030_rtc_reg_map; | ||
618 | |||
619 | return platform_driver_register(&twl4030rtc_driver); | ||
620 | } | ||
621 | module_init(twl_rtc_init); | ||
622 | |||
623 | static void __exit twl_rtc_exit(void) | ||
624 | { | ||
625 | platform_driver_unregister(&twl4030rtc_driver); | ||
626 | } | ||
627 | module_exit(twl_rtc_exit); | ||
628 | 630 | ||
629 | MODULE_AUTHOR("Texas Instruments, MontaVista Software"); | 631 | MODULE_AUTHOR("Texas Instruments, MontaVista Software"); |
630 | MODULE_LICENSE("GPL"); | 632 | MODULE_LICENSE("GPL"); |
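rtc-twl.c can drop its hand-written init/exit pair only because the one thing that code did besides registering the driver, choosing between the twl4030 and twl6030 register maps, now happens in probe; module_platform_driver() expands to nothing more than the register/unregister boilerplate, roughly:

/* approximate expansion of module_platform_driver(twl4030rtc_driver) */
static int __init twl4030rtc_driver_init(void)
{
	return platform_driver_register(&twl4030rtc_driver);
}
module_init(twl4030rtc_driver_init);

static void __exit twl4030rtc_driver_exit(void)
{
	platform_driver_unregister(&twl4030rtc_driver);
}
module_exit(twl4030rtc_driver_exit);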
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c index 6e0cba8f47d5..d07d89823020 100644 --- a/drivers/rtc/rtc-v3020.c +++ b/drivers/rtc/rtc-v3020.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * - Use the generic rtc class | 16 | * - Use the generic rtc class |
17 | * | 17 | * |
18 | * ??-???-2004: Someone at Compulab | 18 | * ??-???-2004: Someone at Compulab |
19 | * - Initial driver creation. | 19 | * - Initial driver creation. |
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
@@ -278,13 +278,13 @@ static int v3020_set_time(struct device *dev, struct rtc_time *dt) | |||
278 | dev_dbg(dev, "tm_year: %i\n", dt->tm_year); | 278 | dev_dbg(dev, "tm_year: %i\n", dt->tm_year); |
279 | 279 | ||
280 | /* Write all the values to ram... */ | 280 | /* Write all the values to ram... */ |
281 | v3020_set_reg(chip, V3020_SECONDS, bin2bcd(dt->tm_sec)); | 281 | v3020_set_reg(chip, V3020_SECONDS, bin2bcd(dt->tm_sec)); |
282 | v3020_set_reg(chip, V3020_MINUTES, bin2bcd(dt->tm_min)); | 282 | v3020_set_reg(chip, V3020_MINUTES, bin2bcd(dt->tm_min)); |
283 | v3020_set_reg(chip, V3020_HOURS, bin2bcd(dt->tm_hour)); | 283 | v3020_set_reg(chip, V3020_HOURS, bin2bcd(dt->tm_hour)); |
284 | v3020_set_reg(chip, V3020_MONTH_DAY, bin2bcd(dt->tm_mday)); | 284 | v3020_set_reg(chip, V3020_MONTH_DAY, bin2bcd(dt->tm_mday)); |
285 | v3020_set_reg(chip, V3020_MONTH, bin2bcd(dt->tm_mon + 1)); | 285 | v3020_set_reg(chip, V3020_MONTH, bin2bcd(dt->tm_mon + 1)); |
286 | v3020_set_reg(chip, V3020_WEEK_DAY, bin2bcd(dt->tm_wday)); | 286 | v3020_set_reg(chip, V3020_WEEK_DAY, bin2bcd(dt->tm_wday)); |
287 | v3020_set_reg(chip, V3020_YEAR, bin2bcd(dt->tm_year % 100)); | 287 | v3020_set_reg(chip, V3020_YEAR, bin2bcd(dt->tm_year % 100)); |
288 | 288 | ||
289 | /* ...and set the clock. */ | 289 | /* ...and set the clock. */ |
290 | v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0); | 290 | v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0); |
@@ -320,7 +320,7 @@ static int rtc_probe(struct platform_device *pdev) | |||
320 | 320 | ||
321 | retval = chip->ops->map_io(chip, pdev, pdata); | 321 | retval = chip->ops->map_io(chip, pdev, pdata); |
322 | if (retval) | 322 | if (retval) |
323 | goto err_chip; | 323 | return retval; |
324 | 324 | ||
325 | /* Make sure the v3020 expects a communication cycle | 325 | /* Make sure the v3020 expects a communication cycle |
326 | * by reading 8 times */ | 326 | * by reading 8 times */ |
@@ -364,7 +364,7 @@ static int rtc_probe(struct platform_device *pdev) | |||
364 | 364 | ||
365 | err_io: | 365 | err_io: |
366 | chip->ops->unmap_io(chip); | 366 | chip->ops->unmap_io(chip); |
367 | err_chip: | 367 | |
368 | return retval; | 368 | return retval; |
369 | } | 369 | } |
370 | 370 | ||
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index f91be04b9050..54e104e197e3 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -103,7 +103,7 @@ static inline unsigned long read_elapsed_second(void) | |||
103 | second_mid = rtc1_read(ETIMEMREG); | 103 | second_mid = rtc1_read(ETIMEMREG); |
104 | second_high = rtc1_read(ETIMEHREG); | 104 | second_high = rtc1_read(ETIMEHREG); |
105 | } while (first_low != second_low || first_mid != second_mid || | 105 | } while (first_low != second_low || first_mid != second_mid || |
106 | first_high != second_high); | 106 | first_high != second_high); |
107 | 107 | ||
108 | return (first_high << 17) | (first_mid << 1) | (first_low >> 15); | 108 | return (first_high << 17) | (first_mid << 1) | (first_low >> 15); |
109 | } | 109 | } |
@@ -154,7 +154,7 @@ static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time) | |||
154 | 154 | ||
155 | epoch_sec = mktime(epoch, 1, 1, 0, 0, 0); | 155 | epoch_sec = mktime(epoch, 1, 1, 0, 0, 0); |
156 | current_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, | 156 | current_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, |
157 | time->tm_hour, time->tm_min, time->tm_sec); | 157 | time->tm_hour, time->tm_min, time->tm_sec); |
158 | 158 | ||
159 | write_elapsed_second(current_sec - epoch_sec); | 159 | write_elapsed_second(current_sec - epoch_sec); |
160 | 160 | ||
@@ -186,7 +186,7 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) | |||
186 | struct rtc_time *time = &wkalrm->time; | 186 | struct rtc_time *time = &wkalrm->time; |
187 | 187 | ||
188 | alarm_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, | 188 | alarm_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, |
189 | time->tm_hour, time->tm_min, time->tm_sec); | 189 | time->tm_hour, time->tm_min, time->tm_sec); |
190 | 190 | ||
191 | spin_lock_irq(&rtc_lock); | 191 | spin_lock_irq(&rtc_lock); |
192 | 192 | ||
@@ -334,16 +334,18 @@ static int rtc_probe(struct platform_device *pdev) | |||
334 | } | 334 | } |
335 | 335 | ||
336 | retval = request_irq(aie_irq, elapsedtime_interrupt, 0, | 336 | retval = request_irq(aie_irq, elapsedtime_interrupt, 0, |
337 | "elapsed_time", pdev); | 337 | "elapsed_time", pdev); |
338 | if (retval < 0) | 338 | if (retval < 0) |
339 | goto err_device_unregister; | 339 | goto err_device_unregister; |
340 | 340 | ||
341 | pie_irq = platform_get_irq(pdev, 1); | 341 | pie_irq = platform_get_irq(pdev, 1); |
342 | if (pie_irq <= 0) | 342 | if (pie_irq <= 0) { |
343 | retval = -EBUSY; | ||
343 | goto err_free_irq; | 344 | goto err_free_irq; |
345 | } | ||
344 | 346 | ||
345 | retval = request_irq(pie_irq, rtclong1_interrupt, 0, | 347 | retval = request_irq(pie_irq, rtclong1_interrupt, 0, |
346 | "rtclong1", pdev); | 348 | "rtclong1", pdev); |
347 | if (retval < 0) | 349 | if (retval < 0) |
348 | goto err_free_irq; | 350 | goto err_free_irq; |
349 | 351 | ||
@@ -381,8 +383,6 @@ static int rtc_remove(struct platform_device *pdev) | |||
381 | if (rtc) | 383 | if (rtc) |
382 | rtc_device_unregister(rtc); | 384 | rtc_device_unregister(rtc); |
383 | 385 | ||
384 | platform_set_drvdata(pdev, NULL); | ||
385 | |||
386 | free_irq(aie_irq, pdev); | 386 | free_irq(aie_irq, pdev); |
387 | free_irq(pie_irq, pdev); | 387 | free_irq(pie_irq, pdev); |
388 | if (rtc1_base) | 388 | if (rtc1_base) |
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index d89efee6d29e..c2d6331fc712 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c | |||
@@ -282,8 +282,6 @@ static int vt8500_rtc_remove(struct platform_device *pdev) | |||
282 | /* Disable alarm matching */ | 282 | /* Disable alarm matching */ |
283 | writel(0, vt8500_rtc->regbase + VT8500_RTC_IS); | 283 | writel(0, vt8500_rtc->regbase + VT8500_RTC_IS); |
284 | 284 | ||
285 | platform_set_drvdata(pdev, NULL); | ||
286 | |||
287 | return 0; | 285 | return 0; |
288 | } | 286 | } |
289 | 287 | ||
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index 8d65b94e5a7e..75aea4c4d334 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c | |||
@@ -460,11 +460,6 @@ err: | |||
460 | return ret; | 460 | return ret; |
461 | } | 461 | } |
462 | 462 | ||
463 | static int wm831x_rtc_remove(struct platform_device *pdev) | ||
464 | { | ||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static const struct dev_pm_ops wm831x_rtc_pm_ops = { | 463 | static const struct dev_pm_ops wm831x_rtc_pm_ops = { |
469 | .suspend = wm831x_rtc_suspend, | 464 | .suspend = wm831x_rtc_suspend, |
470 | .resume = wm831x_rtc_resume, | 465 | .resume = wm831x_rtc_resume, |
@@ -478,7 +473,6 @@ static const struct dev_pm_ops wm831x_rtc_pm_ops = { | |||
478 | 473 | ||
479 | static struct platform_driver wm831x_rtc_driver = { | 474 | static struct platform_driver wm831x_rtc_driver = { |
480 | .probe = wm831x_rtc_probe, | 475 | .probe = wm831x_rtc_probe, |
481 | .remove = wm831x_rtc_remove, | ||
482 | .driver = { | 476 | .driver = { |
483 | .name = "wm831x-rtc", | 477 | .name = "wm831x-rtc", |
484 | .pm = &wm831x_rtc_pm_ops, | 478 | .pm = &wm831x_rtc_pm_ops, |
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index fa9b0679fb60..365dc6505148 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright 2005 Alessandro Zummo | 4 | * Copyright 2005 Alessandro Zummo |
5 | * | 5 | * |
6 | * please send all reports to: | 6 | * please send all reports to: |
7 | * Karen Spearel <kas111 at gmail dot com> | 7 | * Karen Spearel <kas111 at gmail dot com> |
8 | * Alessandro Zummo <a.zummo@towertech.it> | 8 | * Alessandro Zummo <a.zummo@towertech.it> |
9 | * | 9 | * |
10 | * based on a lot of other RTC drivers. | 10 | * based on a lot of other RTC drivers. |
@@ -215,12 +215,14 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, | |||
215 | buf[i] |= 0x80; | 215 | buf[i] |= 0x80; |
216 | 216 | ||
217 | /* this sequence is required to unlock the chip */ | 217 | /* this sequence is required to unlock the chip */ |
218 | if ((xfer = i2c_master_send(client, wel, 3)) != 3) { | 218 | xfer = i2c_master_send(client, wel, 3); |
219 | if (xfer != 3) { | ||
219 | dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer); | 220 | dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer); |
220 | return -EIO; | 221 | return -EIO; |
221 | } | 222 | } |
222 | 223 | ||
223 | if ((xfer = i2c_master_send(client, rwel, 3)) != 3) { | 224 | xfer = i2c_master_send(client, rwel, 3); |
225 | if (xfer != 3) { | ||
224 | dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer); | 226 | dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer); |
225 | return -EIO; | 227 | return -EIO; |
226 | } | 228 | } |
@@ -269,7 +271,8 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, | |||
269 | } | 271 | } |
270 | 272 | ||
271 | /* disable further writes */ | 273 | /* disable further writes */ |
272 | if ((xfer = i2c_master_send(client, diswe, 3)) != 3) { | 274 | xfer = i2c_master_send(client, diswe, 3); |
275 | if (xfer != 3) { | ||
273 | dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer); | 276 | dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer); |
274 | return -EIO; | 277 | return -EIO; |
275 | } | 278 | } |
@@ -375,8 +378,7 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim) | |||
375 | return 0; | 378 | return 0; |
376 | } | 379 | } |
377 | 380 | ||
378 | struct x1205_limit | 381 | struct x1205_limit { |
379 | { | ||
380 | unsigned char reg, mask, min, max; | 382 | unsigned char reg, mask, min, max; |
381 | }; | 383 | }; |
382 | 384 | ||
@@ -430,7 +432,8 @@ static int x1205_validate_client(struct i2c_client *client) | |||
430 | }, | 432 | }, |
431 | }; | 433 | }; |
432 | 434 | ||
433 | if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { | 435 | xfer = i2c_transfer(client->adapter, msgs, 2); |
436 | if (xfer != 2) { | ||
434 | dev_err(&client->dev, | 437 | dev_err(&client->dev, |
435 | "%s: could not read register %x\n", | 438 | "%s: could not read register %x\n", |
436 | __func__, probe_zero_pattern[i]); | 439 | __func__, probe_zero_pattern[i]); |
@@ -467,7 +470,8 @@ static int x1205_validate_client(struct i2c_client *client) | |||
467 | }, | 470 | }, |
468 | }; | 471 | }; |
469 | 472 | ||
470 | if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { | 473 | xfer = i2c_transfer(client->adapter, msgs, 2); |
474 | if (xfer != 2) { | ||
471 | dev_err(&client->dev, | 475 | dev_err(&client->dev, |
472 | "%s: could not read register %x\n", | 476 | "%s: could not read register %x\n", |
473 | __func__, probe_limits_pattern[i].reg); | 477 | __func__, probe_limits_pattern[i].reg); |
@@ -548,10 +552,12 @@ static int x1205_rtc_proc(struct device *dev, struct seq_file *seq) | |||
548 | { | 552 | { |
549 | int err, dtrim, atrim; | 553 | int err, dtrim, atrim; |
550 | 554 | ||
551 | if ((err = x1205_get_dtrim(to_i2c_client(dev), &dtrim)) == 0) | 555 | err = x1205_get_dtrim(to_i2c_client(dev), &dtrim); |
556 | if (!err) | ||
552 | seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim); | 557 | seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim); |
553 | 558 | ||
554 | if ((err = x1205_get_atrim(to_i2c_client(dev), &atrim)) == 0) | 559 | err = x1205_get_atrim(to_i2c_client(dev), &atrim); |
560 | if (!err) | ||
555 | seq_printf(seq, "analog_trim\t: %d.%02d pF\n", | 561 | seq_printf(seq, "analog_trim\t: %d.%02d pF\n", |
556 | atrim / 1000, atrim % 1000); | 562 | atrim / 1000, atrim % 1000); |
557 | return 0; | 563 | return 0; |
@@ -639,7 +645,8 @@ static int x1205_probe(struct i2c_client *client, | |||
639 | i2c_set_clientdata(client, rtc); | 645 | i2c_set_clientdata(client, rtc); |
640 | 646 | ||
641 | /* Check for power failures and eventually enable the osc */ | 647 | /* Check for power failures and eventually enable the osc */ |
642 | if ((err = x1205_get_status(client, &sr)) == 0) { | 648 | err = x1205_get_status(client, &sr); |
649 | if (!err) { | ||
643 | if (sr & X1205_SR_RTCF) { | 650 | if (sr & X1205_SR_RTCF) { |
644 | dev_err(&client->dev, | 651 | dev_err(&client->dev, |
645 | "power failure detected, " | 652 | "power failure detected, " |
@@ -647,9 +654,9 @@ static int x1205_probe(struct i2c_client *client, | |||
647 | udelay(50); | 654 | udelay(50); |
648 | x1205_fix_osc(client); | 655 | x1205_fix_osc(client); |
649 | } | 656 | } |
650 | } | 657 | } else { |
651 | else | ||
652 | dev_err(&client->dev, "couldn't read status\n"); | 658 | dev_err(&client->dev, "couldn't read status\n"); |
659 | } | ||
653 | 660 | ||
654 | err = x1205_sysfs_register(&client->dev); | 661 | err = x1205_sysfs_register(&client->dev); |
655 | if (err) | 662 | if (err) |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index e70af2406ff9..d1c8025b0b03 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -315,10 +315,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, | |||
315 | if (qeth_configure_cq(card, QETH_CQ_ENABLED)) | 315 | if (qeth_configure_cq(card, QETH_CQ_ENABLED)) |
316 | return -EPERM; | 316 | return -EPERM; |
317 | 317 | ||
318 | for (i = 0; i < 8; i++) | 318 | snprintf(card->options.hsuid, sizeof(card->options.hsuid), |
319 | card->options.hsuid[i] = ' '; | 319 | "%-8s", tmp); |
320 | card->options.hsuid[8] = '\0'; | ||
321 | strncpy(card->options.hsuid, tmp, strlen(tmp)); | ||
322 | ASCEBC(card->options.hsuid, 8); | 320 | ASCEBC(card->options.hsuid, 8); |
323 | if (card->dev) | 321 | if (card->dev) |
324 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); | 322 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); |
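The hsuid rework collapses a manual space-padding loop plus strncpy into one bounded snprintf: the "%-8s" conversion left-justifies the value and space-pads it to a minimum field width of eight characters, and snprintf always NUL-terminates within sizeof(card->options.hsuid). A small illustration with a made-up value:

    char hsuid[9];

    /* "abc" yields "abc     " plus the trailing NUL: left-justified,
     * space-padded to a minimum width of 8, never overrunning hsuid. */
    snprintf(hsuid, sizeof(hsuid), "%-8s", "abc");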
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 1ef041bc60c8..d85ac1a9d2c0 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -318,7 +318,8 @@ return_fib: | |||
318 | kthread_stop(dev->thread); | 318 | kthread_stop(dev->thread); |
319 | ssleep(1); | 319 | ssleep(1); |
320 | dev->aif_thread = 0; | 320 | dev->aif_thread = 0; |
321 | dev->thread = kthread_run(aac_command_thread, dev, dev->name); | 321 | dev->thread = kthread_run(aac_command_thread, dev, |
322 | "%s", dev->name); | ||
322 | ssleep(1); | 323 | ssleep(1); |
323 | } | 324 | } |
324 | if (f.wait) { | 325 | if (f.wait) { |
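Several conversions in this series share one pattern: kthread_run()/kthread_create(), dev_set_name(), device_create() and alloc_workqueue() all take a printf-style format, so handing them a variable string (a device or driver name) as the format argument misbehaves the moment that string contains a '%'. The fix is always the same: pass a constant "%s" format and supply the name as its argument. A minimal fragment with a hypothetical worker_fn:

    struct task_struct *task;

    /* Risky: dev->name is interpreted as a format string. */
    task = kthread_run(worker_fn, dev, dev->name);

    /* Safe: constant format, the name is just data. */
    task = kthread_run(worker_fn, dev, "%s", dev->name);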
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 1be0776a80c4..cab190af6345 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1336,7 +1336,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1336 | if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) | 1336 | if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) |
1337 | goto out; | 1337 | goto out; |
1338 | if (jafo) { | 1338 | if (jafo) { |
1339 | aac->thread = kthread_run(aac_command_thread, aac, aac->name); | 1339 | aac->thread = kthread_run(aac_command_thread, aac, "%s", |
1340 | aac->name); | ||
1340 | if (IS_ERR(aac->thread)) { | 1341 | if (IS_ERR(aac->thread)) { |
1341 | retval = PTR_ERR(aac->thread); | 1342 | retval = PTR_ERR(aac->thread); |
1342 | goto out; | 1343 | goto out; |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index d24a2867bc21..a1f5ac7a9806 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -4996,7 +4996,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, | |||
4996 | 4996 | ||
4997 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", | 4997 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", |
4998 | phba->shost->host_no); | 4998 | phba->shost->host_no); |
4999 | phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); | 4999 | phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); |
5000 | if (!phba->wq) { | 5000 | if (!phba->wq) { |
5001 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, | 5001 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, |
5002 | "BM_%d : beiscsi_dev_probe-" | 5002 | "BM_%d : beiscsi_dev_probe-" |
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 0fab6b5c7b82..9d86947d67fe 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c | |||
@@ -485,7 +485,7 @@ static int osd_probe(struct device *dev) | |||
485 | oud->class_dev.class = &osd_uld_class; | 485 | oud->class_dev.class = &osd_uld_class; |
486 | oud->class_dev.parent = dev; | 486 | oud->class_dev.parent = dev; |
487 | oud->class_dev.release = __remove; | 487 | oud->class_dev.release = __remove; |
488 | error = dev_set_name(&oud->class_dev, disk->disk_name); | 488 | error = dev_set_name(&oud->class_dev, "%s", disk->disk_name); |
489 | if (error) { | 489 | if (error) { |
490 | OSD_ERR("dev_set_name failed => %d\n", error); | 490 | OSD_ERR("dev_set_name failed => %d\n", error); |
491 | goto err_put_cdev; | 491 | goto err_put_cdev; |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 4d231c12463e..b246b3c26912 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -7060,8 +7060,8 @@ skip_retry_init: | |||
7060 | } | 7060 | } |
7061 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); | 7061 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); |
7062 | 7062 | ||
7063 | sprintf(buf, "qla4xxx_%lu_task", ha->host_no); | 7063 | ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, |
7064 | ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1); | 7064 | ha->host_no); |
7065 | if (!ha->task_wq) { | 7065 | if (!ha->task_wq) { |
7066 | ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); | 7066 | ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); |
7067 | ret = -ENODEV; | 7067 | ret = -ENODEV; |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index e106c276aa00..4628fd5e0688 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -435,7 +435,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, | |||
435 | 435 | ||
436 | snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), | 436 | snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), |
437 | "fc_wq_%d", shost->host_no); | 437 | "fc_wq_%d", shost->host_no); |
438 | fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0); | 438 | fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); |
439 | if (!fc_host->work_q) | 439 | if (!fc_host->work_q) |
440 | return -ENOMEM; | 440 | return -ENOMEM; |
441 | 441 | ||
@@ -443,8 +443,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, | |||
443 | snprintf(fc_host->devloss_work_q_name, | 443 | snprintf(fc_host->devloss_work_q_name, |
444 | sizeof(fc_host->devloss_work_q_name), | 444 | sizeof(fc_host->devloss_work_q_name), |
445 | "fc_dl_%d", shost->host_no); | 445 | "fc_dl_%d", shost->host_no); |
446 | fc_host->devloss_work_q = | 446 | fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, |
447 | alloc_workqueue(fc_host->devloss_work_q_name, 0, 0); | 447 | fc_host->devloss_work_q_name); |
448 | if (!fc_host->devloss_work_q) { | 448 | if (!fc_host->devloss_work_q) { |
449 | destroy_workqueue(fc_host->work_q); | 449 | destroy_workqueue(fc_host->work_q); |
450 | fc_host->work_q = NULL; | 450 | fc_host->work_q = NULL; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index c1c555242d0d..8fa3d0b73ad9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2931,7 +2931,7 @@ static int sd_probe(struct device *dev) | |||
2931 | device_initialize(&sdkp->dev); | 2931 | device_initialize(&sdkp->dev); |
2932 | sdkp->dev.parent = dev; | 2932 | sdkp->dev.parent = dev; |
2933 | sdkp->dev.class = &sd_disk_class; | 2933 | sdkp->dev.class = &sd_disk_class; |
2934 | dev_set_name(&sdkp->dev, dev_name(dev)); | 2934 | dev_set_name(&sdkp->dev, "%s", dev_name(dev)); |
2935 | 2935 | ||
2936 | if (device_add(&sdkp->dev)) | 2936 | if (device_add(&sdkp->dev)) |
2937 | goto out_free_index; | 2937 | goto out_free_index; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 095cfaded1c0..978dda2c5239 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -601,7 +601,7 @@ static int spi_init_queue(struct spi_master *master) | |||
601 | 601 | ||
602 | init_kthread_worker(&master->kworker); | 602 | init_kthread_worker(&master->kworker); |
603 | master->kworker_task = kthread_run(kthread_worker_fn, | 603 | master->kworker_task = kthread_run(kthread_worker_fn, |
604 | &master->kworker, | 604 | &master->kworker, "%s", |
605 | dev_name(&master->dev)); | 605 | dev_name(&master->dev)); |
606 | if (IS_ERR(master->kworker_task)) { | 606 | if (IS_ERR(master->kworker_task)) { |
607 | dev_err(&master->dev, "failed to create message pump task\n"); | 607 | dev_err(&master->dev, "failed to create message pump task\n"); |
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c index ec9e2ae2de0d..ee3a57f22832 100644 --- a/drivers/staging/android/timed_output.c +++ b/drivers/staging/android/timed_output.c | |||
@@ -78,7 +78,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev) | |||
78 | 78 | ||
79 | tdev->index = atomic_inc_return(&device_count); | 79 | tdev->index = atomic_inc_return(&device_count); |
80 | tdev->dev = device_create(timed_output_class, NULL, | 80 | tdev->dev = device_create(timed_output_class, NULL, |
81 | MKDEV(0, tdev->index), NULL, tdev->name); | 81 | MKDEV(0, tdev->index), NULL, "%s", tdev->name); |
82 | if (IS_ERR(tdev->dev)) | 82 | if (IS_ERR(tdev->dev)) |
83 | return PTR_ERR(tdev->dev); | 83 | return PTR_ERR(tdev->dev); |
84 | 84 | ||
diff --git a/drivers/staging/dgrp/dgrp_sysfs.c b/drivers/staging/dgrp/dgrp_sysfs.c index 7d1b36d1e75f..8cee9c8bc38b 100644 --- a/drivers/staging/dgrp/dgrp_sysfs.c +++ b/drivers/staging/dgrp/dgrp_sysfs.c | |||
@@ -273,7 +273,7 @@ void dgrp_create_node_class_sysfs_files(struct nd_struct *nd) | |||
273 | sprintf(name, "node%ld", nd->nd_major); | 273 | sprintf(name, "node%ld", nd->nd_major); |
274 | 274 | ||
275 | nd->nd_class_dev = device_create(dgrp_class, dgrp_class_nodes_dev, | 275 | nd->nd_class_dev = device_create(dgrp_class, dgrp_class_nodes_dev, |
276 | MKDEV(0, nd->nd_major), NULL, name); | 276 | MKDEV(0, nd->nd_major), NULL, "%s", name); |
277 | 277 | ||
278 | ret = sysfs_create_group(&nd->nd_class_dev->kobj, | 278 | ret = sysfs_create_group(&nd->nd_class_dev->kobj, |
279 | &dgrp_node_attribute_group); | 279 | &dgrp_node_attribute_group); |
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c index b65bf5e177a8..6e81ba0eaf1e 100644 --- a/drivers/staging/rtl8712/os_intfs.c +++ b/drivers/staging/rtl8712/os_intfs.c | |||
@@ -238,7 +238,7 @@ struct net_device *r8712_init_netdev(void) | |||
238 | 238 | ||
239 | static u32 start_drv_threads(struct _adapter *padapter) | 239 | static u32 start_drv_threads(struct _adapter *padapter) |
240 | { | 240 | { |
241 | padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, | 241 | padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, "%s", |
242 | padapter->pnetdev->name); | 242 | padapter->pnetdev->name); |
243 | if (IS_ERR(padapter->cmdThread) < 0) | 243 | if (IS_ERR(padapter->cmdThread) < 0) |
244 | return _FAIL; | 244 | return _FAIL; |
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 9c020562c846..6d04eb48bfbc 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c | |||
@@ -421,12 +421,11 @@ static int omap3_bridge_startup(struct platform_device *pdev) | |||
421 | drv_datap->tc_wordswapon = tc_wordswapon; | 421 | drv_datap->tc_wordswapon = tc_wordswapon; |
422 | 422 | ||
423 | if (base_img) { | 423 | if (base_img) { |
424 | drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL); | 424 | drv_datap->base_img = kstrdup(base_img, GFP_KERNEL); |
425 | if (!drv_datap->base_img) { | 425 | if (!drv_datap->base_img) { |
426 | err = -ENOMEM; | 426 | err = -ENOMEM; |
427 | goto err2; | 427 | goto err2; |
428 | } | 428 | } |
429 | strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1); | ||
430 | } | 429 | } |
431 | 430 | ||
432 | dev_set_drvdata(bridge, drv_datap); | 431 | dev_set_drvdata(bridge, drv_datap); |
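The tidspbridge hunk is the usual kmalloc-plus-strncpy-to-kstrdup conversion. kstrdup() allocates strlen(s)+1 bytes and copies the string including its NUL in one step, returning NULL on allocation failure; spelled out, it behaves roughly like the illustrative helper below (the driver now simply calls kstrdup(base_img, GFP_KERNEL)):

    static char *dup_string(const char *s, gfp_t gfp)
    {
            size_t len = strlen(s) + 1;
            char *p = kmalloc(len, gfp);

            if (p)
                    memcpy(p, s, len);
            return p;
    }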
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index b645c47501b4..3b96f18593b3 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -677,7 +677,7 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) | |||
677 | if (mi < 0) | 677 | if (mi < 0) |
678 | return -EINVAL; | 678 | return -EINVAL; |
679 | 679 | ||
680 | requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 680 | requested_pages = vma_pages(vma); |
681 | actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) | 681 | actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) |
682 | + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; | 682 | + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; |
683 | if (requested_pages > actual_pages) | 683 | if (requested_pages > actual_pages) |
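vma_pages() is a one-line helper for exactly the computation uio_mmap() used to open-code; using it states the intent (the number of pages the VMA spans) rather than the arithmetic. Its definition in <linux/mm.h> is essentially:

    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
            return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }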
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index d3527dd8b90c..5e0d33a7da58 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c | |||
@@ -1020,7 +1020,7 @@ static int usbatm_heavy_init(struct usbatm_data *instance) | |||
1020 | { | 1020 | { |
1021 | struct task_struct *t; | 1021 | struct task_struct *t; |
1022 | 1022 | ||
1023 | t = kthread_create(usbatm_do_heavy_init, instance, | 1023 | t = kthread_create(usbatm_do_heavy_init, instance, "%s", |
1024 | instance->driver->driver_name); | 1024 | instance->driver->driver_name); |
1025 | if (IS_ERR(t)) { | 1025 | if (IS_ERR(t)) { |
1026 | usb_err(instance, "%s: failed to create kernel_thread (%ld)!\n", | 1026 | usb_err(instance, "%s: failed to create kernel_thread (%ld)!\n", |
@@ -1076,7 +1076,8 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, | |||
1076 | /* public fields */ | 1076 | /* public fields */ |
1077 | 1077 | ||
1078 | instance->driver = driver; | 1078 | instance->driver = driver; |
1079 | snprintf(instance->driver_name, sizeof(instance->driver_name), driver->driver_name); | 1079 | strlcpy(instance->driver_name, driver->driver_name, |
1080 | sizeof(instance->driver_name)); | ||
1080 | 1081 | ||
1081 | instance->usb_dev = usb_dev; | 1082 | instance->usb_dev = usb_dev; |
1082 | instance->usb_intf = intf; | 1083 | instance->usb_intf = intf; |
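The usbatm hunk fixes the same class of bug for snprintf: the driver name was being passed as the format string. Since the intent here is only a bounded copy, strlcpy() is the clearer fix; it copies at most size-1 characters and always NUL-terminates. Compare (buf and driver_name stand for any destination buffer and caller-supplied string):

    /* Wrong tool: driver_name would be scanned for '%' conversions. */
    snprintf(buf, sizeof(buf), driver_name);

    /* Bounded copy, always NUL-terminated. */
    strlcpy(buf, driver_name, sizeof(buf));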
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c index 5241f1d0ef7a..9209eafc75b1 100644 --- a/drivers/uwb/lc-dev.c +++ b/drivers/uwb/lc-dev.c | |||
@@ -440,7 +440,7 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) | |||
440 | uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ | 440 | uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ |
441 | uwb_dev->mac_addr = *bce->mac_addr; | 441 | uwb_dev->mac_addr = *bce->mac_addr; |
442 | uwb_dev->dev_addr = bce->dev_addr; | 442 | uwb_dev->dev_addr = bce->dev_addr; |
443 | dev_set_name(&uwb_dev->dev, macbuf); | 443 | dev_set_name(&uwb_dev->dev, "%s", macbuf); |
444 | result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); | 444 | result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); |
445 | if (result < 0) { | 445 | if (result < 0) { |
446 | dev_err(dev, "new device %s: cannot instantiate device\n", | 446 | dev_err(dev, "new device %s: cannot instantiate device\n", |
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c index a60d6afca97c..0393d827dd44 100644 --- a/drivers/video/backlight/atmel-pwm-bl.c +++ b/drivers/video/backlight/atmel-pwm-bl.c | |||
@@ -195,7 +195,6 @@ static int __init atmel_pwm_bl_probe(struct platform_device *pdev) | |||
195 | return 0; | 195 | return 0; |
196 | 196 | ||
197 | err_free_bl_dev: | 197 | err_free_bl_dev: |
198 | platform_set_drvdata(pdev, NULL); | ||
199 | backlight_device_unregister(bldev); | 198 | backlight_device_unregister(bldev); |
200 | err_free_pwm: | 199 | err_free_pwm: |
201 | pwm_channel_free(&pwmbl->pwmc); | 200 | pwm_channel_free(&pwmbl->pwmc); |
@@ -212,7 +211,6 @@ static int __exit atmel_pwm_bl_remove(struct platform_device *pdev) | |||
212 | pwm_channel_disable(&pwmbl->pwmc); | 211 | pwm_channel_disable(&pwmbl->pwmc); |
213 | pwm_channel_free(&pwmbl->pwmc); | 212 | pwm_channel_free(&pwmbl->pwmc); |
214 | backlight_device_unregister(pwmbl->bldev); | 213 | backlight_device_unregister(pwmbl->bldev); |
215 | platform_set_drvdata(pdev, NULL); | ||
216 | 214 | ||
217 | return 0; | 215 | return 0; |
218 | } | 216 | } |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index c74e7aa46731..3fccb6d3c8c3 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -208,7 +208,8 @@ static ssize_t backlight_show_actual_brightness(struct device *dev, | |||
208 | 208 | ||
209 | static struct class *backlight_class; | 209 | static struct class *backlight_class; |
210 | 210 | ||
211 | static int backlight_suspend(struct device *dev, pm_message_t state) | 211 | #ifdef CONFIG_PM_SLEEP |
212 | static int backlight_suspend(struct device *dev) | ||
212 | { | 213 | { |
213 | struct backlight_device *bd = to_backlight_device(dev); | 214 | struct backlight_device *bd = to_backlight_device(dev); |
214 | 215 | ||
@@ -235,6 +236,10 @@ static int backlight_resume(struct device *dev) | |||
235 | 236 | ||
236 | return 0; | 237 | return 0; |
237 | } | 238 | } |
239 | #endif | ||
240 | |||
241 | static SIMPLE_DEV_PM_OPS(backlight_class_dev_pm_ops, backlight_suspend, | ||
242 | backlight_resume); | ||
238 | 243 | ||
239 | static void bl_device_release(struct device *dev) | 244 | static void bl_device_release(struct device *dev) |
240 | { | 245 | { |
@@ -304,7 +309,7 @@ struct backlight_device *backlight_device_register(const char *name, | |||
304 | new_bd->dev.class = backlight_class; | 309 | new_bd->dev.class = backlight_class; |
305 | new_bd->dev.parent = parent; | 310 | new_bd->dev.parent = parent; |
306 | new_bd->dev.release = bl_device_release; | 311 | new_bd->dev.release = bl_device_release; |
307 | dev_set_name(&new_bd->dev, name); | 312 | dev_set_name(&new_bd->dev, "%s", name); |
308 | dev_set_drvdata(&new_bd->dev, devdata); | 313 | dev_set_drvdata(&new_bd->dev, devdata); |
309 | 314 | ||
310 | /* Set default properties */ | 315 | /* Set default properties */ |
@@ -370,6 +375,81 @@ void backlight_device_unregister(struct backlight_device *bd) | |||
370 | } | 375 | } |
371 | EXPORT_SYMBOL(backlight_device_unregister); | 376 | EXPORT_SYMBOL(backlight_device_unregister); |
372 | 377 | ||
378 | static void devm_backlight_device_release(struct device *dev, void *res) | ||
379 | { | ||
380 | struct backlight_device *backlight = *(struct backlight_device **)res; | ||
381 | |||
382 | backlight_device_unregister(backlight); | ||
383 | } | ||
384 | |||
385 | static int devm_backlight_device_match(struct device *dev, void *res, | ||
386 | void *data) | ||
387 | { | ||
388 | struct backlight_device **r = res; | ||
389 | |||
390 | return *r == data; | ||
391 | } | ||
392 | |||
393 | /** | ||
394 | * devm_backlight_device_register - resource managed backlight_device_register() | ||
395 | * @dev: the device to register | ||
396 | * @name: the name of the device | ||
397 | * @parent: a pointer to the parent device | ||
398 | * @devdata: an optional pointer to be stored for private driver use | ||
399 | * @ops: the backlight operations structure | ||
400 | * @props: the backlight properties | ||
401 | * | ||
402 | * @return a struct backlight on success, or an ERR_PTR on error | ||
403 | * | ||
404 | * Managed backlight_device_register(). The backlight_device returned | ||
405 | * from this function are automatically freed on driver detach. | ||
406 | * See backlight_device_register() for more information. | ||
407 | */ | ||
408 | struct backlight_device *devm_backlight_device_register(struct device *dev, | ||
409 | const char *name, struct device *parent, void *devdata, | ||
410 | const struct backlight_ops *ops, | ||
411 | const struct backlight_properties *props) | ||
412 | { | ||
413 | struct backlight_device **ptr, *backlight; | ||
414 | |||
415 | ptr = devres_alloc(devm_backlight_device_release, sizeof(*ptr), | ||
416 | GFP_KERNEL); | ||
417 | if (!ptr) | ||
418 | return ERR_PTR(-ENOMEM); | ||
419 | |||
420 | backlight = backlight_device_register(name, parent, devdata, ops, | ||
421 | props); | ||
422 | if (!IS_ERR(backlight)) { | ||
423 | *ptr = backlight; | ||
424 | devres_add(dev, ptr); | ||
425 | } else { | ||
426 | devres_free(ptr); | ||
427 | } | ||
428 | |||
429 | return backlight; | ||
430 | } | ||
431 | EXPORT_SYMBOL(devm_backlight_device_register); | ||
432 | |||
433 | /** | ||
434 | * devm_backlight_device_unregister - resource managed backlight_device_unregister() | ||
435 | * @dev: the device to unregister | ||
436 | * @bd: the backlight device to unregister | ||
437 | * | ||
438 | * Deallocated a backlight allocated with devm_backlight_device_register(). | ||
439 | * Normally this function will not need to be called and the resource management | ||
440 | * code will ensure that the resource is freed. | ||
441 | */ | ||
442 | void devm_backlight_device_unregister(struct device *dev, | ||
443 | struct backlight_device *bd) | ||
444 | { | ||
445 | int rc; | ||
446 | |||
447 | rc = devres_release(dev, devm_backlight_device_release, | ||
448 | devm_backlight_device_match, bd); | ||
449 | WARN_ON(rc); | ||
450 | } | ||
451 | EXPORT_SYMBOL(devm_backlight_device_unregister); | ||
452 | |||
373 | #ifdef CONFIG_OF | 453 | #ifdef CONFIG_OF |
374 | static int of_parent_match(struct device *dev, const void *data) | 454 | static int of_parent_match(struct device *dev, const void *data) |
375 | { | 455 | { |
@@ -414,8 +494,7 @@ static int __init backlight_class_init(void) | |||
414 | } | 494 | } |
415 | 495 | ||
416 | backlight_class->dev_attrs = bl_device_attributes; | 496 | backlight_class->dev_attrs = bl_device_attributes; |
417 | backlight_class->suspend = backlight_suspend; | 497 | backlight_class->pm = &backlight_class_dev_pm_ops; |
418 | backlight_class->resume = backlight_resume; | ||
419 | return 0; | 498 | return 0; |
420 | } | 499 | } |
421 | 500 | ||
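The backlight class switches from the legacy class suspend/resume hooks to a dev_pm_ops published through class->pm. SIMPLE_DEV_PM_OPS() fills the system-sleep callbacks (suspend/resume and their freeze/thaw/poweroff/restore counterparts) from the two functions when CONFIG_PM_SLEEP is set, and the #ifdef keeps the otherwise-unreferenced callbacks from triggering "defined but not used" warnings when it is not. The shape, with hypothetical foo_* names:

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev)
    {
            /* quiesce the class device */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* restore its state */
            return 0;
    }
    #endif

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    /* at class creation time: */
    foo_class = class_create(THIS_MODULE, "foo");
    foo_class->pm = &foo_pm_ops;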
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 33455821dd31..018368ba4124 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c | |||
@@ -111,7 +111,6 @@ static int ep93xxbl_remove(struct platform_device *dev) | |||
111 | struct backlight_device *bl = platform_get_drvdata(dev); | 111 | struct backlight_device *bl = platform_get_drvdata(dev); |
112 | 112 | ||
113 | backlight_device_unregister(bl); | 113 | backlight_device_unregister(bl); |
114 | platform_set_drvdata(dev, NULL); | ||
115 | return 0; | 114 | return 0; |
116 | } | 115 | } |
117 | 116 | ||
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 34fb6bd798c8..41964a71a036 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c | |||
@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent, | |||
219 | new_ld->dev.class = lcd_class; | 219 | new_ld->dev.class = lcd_class; |
220 | new_ld->dev.parent = parent; | 220 | new_ld->dev.parent = parent; |
221 | new_ld->dev.release = lcd_device_release; | 221 | new_ld->dev.release = lcd_device_release; |
222 | dev_set_name(&new_ld->dev, name); | 222 | dev_set_name(&new_ld->dev, "%s", name); |
223 | dev_set_drvdata(&new_ld->dev, devdata); | 223 | dev_set_drvdata(&new_ld->dev, devdata); |
224 | 224 | ||
225 | rc = device_register(&new_ld->dev); | 225 | rc = device_register(&new_ld->dev); |
@@ -260,6 +260,76 @@ void lcd_device_unregister(struct lcd_device *ld) | |||
260 | } | 260 | } |
261 | EXPORT_SYMBOL(lcd_device_unregister); | 261 | EXPORT_SYMBOL(lcd_device_unregister); |
262 | 262 | ||
263 | static void devm_lcd_device_release(struct device *dev, void *res) | ||
264 | { | ||
265 | struct lcd_device *lcd = *(struct lcd_device **)res; | ||
266 | |||
267 | lcd_device_unregister(lcd); | ||
268 | } | ||
269 | |||
270 | static int devm_lcd_device_match(struct device *dev, void *res, void *data) | ||
271 | { | ||
272 | struct lcd_device **r = res; | ||
273 | |||
274 | return *r == data; | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * devm_lcd_device_register - resource managed lcd_device_register() | ||
279 | * @dev: the device to register | ||
280 | * @name: the name of the device | ||
281 | * @parent: a pointer to the parent device | ||
282 | * @devdata: an optional pointer to be stored for private driver use | ||
283 | * @ops: the lcd operations structure | ||
284 | * | ||
285 | * @return a struct lcd on success, or an ERR_PTR on error | ||
286 | * | ||
287 | * Managed lcd_device_register(). The lcd_device returned from this function | ||
288 | * are automatically freed on driver detach. See lcd_device_register() | ||
289 | * for more information. | ||
290 | */ | ||
291 | struct lcd_device *devm_lcd_device_register(struct device *dev, | ||
292 | const char *name, struct device *parent, | ||
293 | void *devdata, struct lcd_ops *ops) | ||
294 | { | ||
295 | struct lcd_device **ptr, *lcd; | ||
296 | |||
297 | ptr = devres_alloc(devm_lcd_device_release, sizeof(*ptr), GFP_KERNEL); | ||
298 | if (!ptr) | ||
299 | return ERR_PTR(-ENOMEM); | ||
300 | |||
301 | lcd = lcd_device_register(name, parent, devdata, ops); | ||
302 | if (!IS_ERR(lcd)) { | ||
303 | *ptr = lcd; | ||
304 | devres_add(dev, ptr); | ||
305 | } else { | ||
306 | devres_free(ptr); | ||
307 | } | ||
308 | |||
309 | return lcd; | ||
310 | } | ||
311 | EXPORT_SYMBOL(devm_lcd_device_register); | ||
312 | |||
313 | /** | ||
314 | * devm_lcd_device_unregister - resource managed lcd_device_unregister() | ||
315 | * @dev: the device to unregister | ||
316 | * @ld: the lcd device to unregister | ||
317 | * | ||
318 | * Deallocated a lcd allocated with devm_lcd_device_register(). Normally | ||
319 | * this function will not need to be called and the resource management | ||
320 | * code will ensure that the resource is freed. | ||
321 | */ | ||
322 | void devm_lcd_device_unregister(struct device *dev, struct lcd_device *ld) | ||
323 | { | ||
324 | int rc; | ||
325 | |||
326 | rc = devres_release(dev, devm_lcd_device_release, | ||
327 | devm_lcd_device_match, ld); | ||
328 | WARN_ON(rc); | ||
329 | } | ||
330 | EXPORT_SYMBOL(devm_lcd_device_unregister); | ||
331 | |||
332 | |||
263 | static void __exit lcd_class_exit(void) | 333 | static void __exit lcd_class_exit(void) |
264 | { | 334 | { |
265 | class_destroy(lcd_class); | 335 | class_destroy(lcd_class); |
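Both new devm_* registration helpers (devm_backlight_device_register() above and devm_lcd_device_register() here) follow the standard devres pattern: the registered object is stored in a devres node whose release callback unregisters it on driver detach, so callers can drop the matching unregister from their remove path. A sketch of how a driver might use the backlight variant (the foo_* names and ops table are hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            struct backlight_properties props = {
                    .type = BACKLIGHT_RAW,
                    .max_brightness = 255,
            };
            struct backlight_device *bl;

            bl = devm_backlight_device_register(&pdev->dev,
                            dev_name(&pdev->dev), &pdev->dev, NULL,
                            &foo_bl_ops, &props);
            if (IS_ERR(bl))
                    return PTR_ERR(bl);

            /* No backlight_device_unregister() in .remove(): devres
             * tears the registration down automatically. */
            return 0;
    }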
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c index 4bb8b4f140cf..980855ec9bb1 100644 --- a/drivers/video/backlight/lp8788_bl.c +++ b/drivers/video/backlight/lp8788_bl.c | |||
@@ -312,7 +312,6 @@ static int lp8788_backlight_remove(struct platform_device *pdev) | |||
312 | backlight_update_status(bl_dev); | 312 | backlight_update_status(bl_dev); |
313 | sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group); | 313 | sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group); |
314 | lp8788_backlight_unregister(bl); | 314 | lp8788_backlight_unregister(bl); |
315 | platform_set_drvdata(pdev, NULL); | ||
316 | 315 | ||
317 | return 0; | 316 | return 0; |
318 | } | 317 | } |
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c index e87c7a3394f3..6ed76be18f19 100644 --- a/drivers/video/backlight/pcf50633-backlight.c +++ b/drivers/video/backlight/pcf50633-backlight.c | |||
@@ -153,8 +153,6 @@ static int pcf50633_bl_remove(struct platform_device *pdev) | |||
153 | 153 | ||
154 | backlight_device_unregister(pcf_bl->bl); | 154 | backlight_device_unregister(pcf_bl->bl); |
155 | 155 | ||
156 | platform_set_drvdata(pdev, NULL); | ||
157 | |||
158 | return 0; | 156 | return 0; |
159 | } | 157 | } |
160 | 158 | ||
diff --git a/drivers/video/output.c b/drivers/video/output.c index 0d6f2cda9369..6285b9718451 100644 --- a/drivers/video/output.c +++ b/drivers/video/output.c | |||
@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name, | |||
97 | new_dev->props = op; | 97 | new_dev->props = op; |
98 | new_dev->dev.class = &video_output_class; | 98 | new_dev->dev.class = &video_output_class; |
99 | new_dev->dev.parent = dev; | 99 | new_dev->dev.parent = dev; |
100 | dev_set_name(&new_dev->dev, name); | 100 | dev_set_name(&new_dev->dev, "%s", name); |
101 | dev_set_drvdata(&new_dev->dev, devdata); | 101 | dev_set_drvdata(&new_dev->dev, devdata); |
102 | ret_code = device_register(&new_dev->dev); | 102 | ret_code = device_register(&new_dev->dev); |
103 | if (ret_code) { | 103 | if (ret_code) { |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index bd3ae324a1a2..0098810df69d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -148,7 +148,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) | |||
148 | } | 148 | } |
149 | set_page_pfns(vb->pfns + vb->num_pfns, page); | 149 | set_page_pfns(vb->pfns + vb->num_pfns, page); |
150 | vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; | 150 | vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; |
151 | totalram_pages--; | 151 | adjust_managed_page_count(page, -1); |
152 | } | 152 | } |
153 | 153 | ||
154 | /* Did we get any? */ | 154 | /* Did we get any? */ |
@@ -163,8 +163,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) | |||
163 | 163 | ||
164 | /* Find pfns pointing at start of each page, get pages and free them. */ | 164 | /* Find pfns pointing at start of each page, get pages and free them. */ |
165 | for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { | 165 | for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { |
166 | balloon_page_free(balloon_pfn_to_page(pfns[i])); | 166 | struct page *page = balloon_pfn_to_page(pfns[i]); |
167 | totalram_pages++; | 167 | balloon_page_free(page); |
168 | adjust_managed_page_count(page, 1); | ||
168 | } | 169 | } |
169 | } | 170 | } |
170 | 171 | ||
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index 91cc2cdf02c0..cb8a8e5d9573 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c | |||
@@ -302,7 +302,33 @@ error: | |||
302 | return -EIO; | 302 | return -EIO; |
303 | } | 303 | } |
304 | 304 | ||
305 | /* | ||
306 | * This is a special sequence we must do to ensure the P0 output is not stuck | ||
307 | * in test mode. This is described in rev 2 of the ds2408's datasheet | ||
308 | * (http://datasheets.maximintegrated.com/en/ds/DS2408.pdf) under | ||
309 | * "APPLICATION INFORMATION/Power-up timing". | ||
310 | */ | ||
311 | static int w1_f29_disable_test_mode(struct w1_slave *sl) | ||
312 | { | ||
313 | int res; | ||
314 | u8 magic[10] = {0x96, }; | ||
315 | u64 rn = le64_to_cpu(*((u64*)&sl->reg_num)); | ||
316 | |||
317 | memcpy(&magic[1], &rn, 8); | ||
318 | magic[9] = 0x3C; | ||
319 | |||
320 | mutex_lock(&sl->master->bus_mutex); | ||
305 | 321 | ||
322 | res = w1_reset_bus(sl->master); | ||
323 | if (res) | ||
324 | goto out; | ||
325 | w1_write_block(sl->master, magic, ARRAY_SIZE(magic)); | ||
326 | |||
327 | res = w1_reset_bus(sl->master); | ||
328 | out: | ||
329 | mutex_unlock(&sl->master->bus_mutex); | ||
330 | return res; | ||
331 | } | ||
306 | 332 | ||
307 | static struct bin_attribute w1_f29_sysfs_bin_files[] = { | 333 | static struct bin_attribute w1_f29_sysfs_bin_files[] = { |
308 | { | 334 | { |
@@ -363,6 +389,10 @@ static int w1_f29_add_slave(struct w1_slave *sl) | |||
363 | int err = 0; | 389 | int err = 0; |
364 | int i; | 390 | int i; |
365 | 391 | ||
392 | err = w1_f29_disable_test_mode(sl); | ||
393 | if (err) | ||
394 | return err; | ||
395 | |||
366 | for (i = 0; i < ARRAY_SIZE(w1_f29_sysfs_bin_files) && !err; ++i) | 396 | for (i = 0; i < ARRAY_SIZE(w1_f29_sysfs_bin_files) && !err; ++i) |
367 | err = sysfs_create_bin_file( | 397 | err = sysfs_create_bin_file( |
368 | &sl->dev.kobj, | 398 | &sl->dev.kobj, |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index e33615335aa0..2a2ef97697b2 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -91,14 +91,6 @@ EXPORT_SYMBOL_GPL(balloon_stats); | |||
91 | /* We increase/decrease in batches which fit in a page */ | 91 | /* We increase/decrease in batches which fit in a page */ |
92 | static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; | 92 | static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; |
93 | 93 | ||
94 | #ifdef CONFIG_HIGHMEM | ||
95 | #define inc_totalhigh_pages() (totalhigh_pages++) | ||
96 | #define dec_totalhigh_pages() (totalhigh_pages--) | ||
97 | #else | ||
98 | #define inc_totalhigh_pages() do {} while (0) | ||
99 | #define dec_totalhigh_pages() do {} while (0) | ||
100 | #endif | ||
101 | |||
102 | /* List of ballooned pages, threaded through the mem_map array. */ | 94 | /* List of ballooned pages, threaded through the mem_map array. */ |
103 | static LIST_HEAD(ballooned_pages); | 95 | static LIST_HEAD(ballooned_pages); |
104 | 96 | ||
@@ -134,9 +126,7 @@ static void __balloon_append(struct page *page) | |||
134 | static void balloon_append(struct page *page) | 126 | static void balloon_append(struct page *page) |
135 | { | 127 | { |
136 | __balloon_append(page); | 128 | __balloon_append(page); |
137 | if (PageHighMem(page)) | 129 | adjust_managed_page_count(page, -1); |
138 | dec_totalhigh_pages(); | ||
139 | totalram_pages--; | ||
140 | } | 130 | } |
141 | 131 | ||
142 | /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ | 132 | /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ |
@@ -153,13 +143,12 @@ static struct page *balloon_retrieve(bool prefer_highmem) | |||
153 | page = list_entry(ballooned_pages.next, struct page, lru); | 143 | page = list_entry(ballooned_pages.next, struct page, lru); |
154 | list_del(&page->lru); | 144 | list_del(&page->lru); |
155 | 145 | ||
156 | if (PageHighMem(page)) { | 146 | if (PageHighMem(page)) |
157 | balloon_stats.balloon_high--; | 147 | balloon_stats.balloon_high--; |
158 | inc_totalhigh_pages(); | 148 | else |
159 | } else | ||
160 | balloon_stats.balloon_low--; | 149 | balloon_stats.balloon_low--; |
161 | 150 | ||
162 | totalram_pages++; | 151 | adjust_managed_page_count(page, 1); |
163 | 152 | ||
164 | return page; | 153 | return page; |
165 | } | 154 | } |
@@ -374,9 +363,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) | |||
374 | #endif | 363 | #endif |
375 | 364 | ||
376 | /* Relinquish the page back to the allocator. */ | 365 | /* Relinquish the page back to the allocator. */ |
377 | ClearPageReserved(page); | 366 | __free_reserved_page(page); |
378 | init_page_count(page); | ||
379 | __free_page(page); | ||
380 | } | 367 | } |
381 | 368 | ||
382 | balloon_stats.current_pages += rc; | 369 | balloon_stats.current_pages += rc; |
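The two balloon drivers (virtio above, Xen here) stop adjusting totalram_pages and totalhigh_pages by hand. adjust_managed_page_count(page, n) updates the totals and the owning zone's managed-page count in one place and handles highmem pages itself, which is why the inc/dec_totalhigh_pages() macros can go; __free_reserved_page() likewise bundles the ClearPageReserved/init_page_count/__free_page sequence that increase_reservation() used to open-code. The calling pattern reduces to:

    /* Inflate: the page leaves the guest's usable memory. */
    adjust_managed_page_count(page, -1);

    /* Deflate: the page is usable again (and is handed back to the
     * allocator separately, e.g. via __free_reserved_page()). */
    adjust_managed_page_count(page, 1);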
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index cb1afc00c96d..38e92b770e91 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -449,7 +449,7 @@ int xenbus_probe_node(struct xen_bus_type *bus, | |||
449 | if (err) | 449 | if (err) |
450 | goto fail; | 450 | goto fail; |
451 | 451 | ||
452 | dev_set_name(&xendev->dev, devname); | 452 | dev_set_name(&xendev->dev, "%s", devname); |
453 | 453 | ||
454 | /* Register with generic device framework. */ | 454 | /* Register with generic device framework. */ |
455 | err = device_register(&xendev->dev); | 455 | err = device_register(&xendev->dev); |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -625,7 +625,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
625 | 625 | ||
626 | /* | 626 | /* |
627 | * Add a completion event to the ring buffer. Must be done holding | 627 | * Add a completion event to the ring buffer. Must be done holding |
628 | * ctx->ctx_lock to prevent other code from messing with the tail | 628 | * ctx->completion_lock to prevent other code from messing with the tail |
629 | * pointer since we might be called from irq context. | 629 | * pointer since we might be called from irq context. |
630 | */ | 630 | */ |
631 | spin_lock_irqsave(&ctx->completion_lock, flags); | 631 | spin_lock_irqsave(&ctx->completion_lock, flags); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 431b6a04ebfd..bb43ce081d6e 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1562,6 +1562,7 @@ static const struct address_space_operations def_blk_aops = { | |||
1562 | .writepages = generic_writepages, | 1562 | .writepages = generic_writepages, |
1563 | .releasepage = blkdev_releasepage, | 1563 | .releasepage = blkdev_releasepage, |
1564 | .direct_IO = blkdev_direct_IO, | 1564 | .direct_IO = blkdev_direct_IO, |
1565 | .is_dirty_writeback = buffer_check_dirty_writeback, | ||
1565 | }; | 1566 | }; |
1566 | 1567 | ||
1567 | const struct file_operations def_blk_fops = { | 1568 | const struct file_operations def_blk_fops = { |
diff --git a/fs/buffer.c b/fs/buffer.c index f93392e2df12..4d7433534f5c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -83,6 +83,40 @@ void unlock_buffer(struct buffer_head *bh) | |||
83 | EXPORT_SYMBOL(unlock_buffer); | 83 | EXPORT_SYMBOL(unlock_buffer); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * Returns if the page has dirty or writeback buffers. If all the buffers | ||
87 | * are unlocked and clean then the PageDirty information is stale. If | ||
88 | * any of the pages are locked, it is assumed they are locked for IO. | ||
89 | */ | ||
90 | void buffer_check_dirty_writeback(struct page *page, | ||
91 | bool *dirty, bool *writeback) | ||
92 | { | ||
93 | struct buffer_head *head, *bh; | ||
94 | *dirty = false; | ||
95 | *writeback = false; | ||
96 | |||
97 | BUG_ON(!PageLocked(page)); | ||
98 | |||
99 | if (!page_has_buffers(page)) | ||
100 | return; | ||
101 | |||
102 | if (PageWriteback(page)) | ||
103 | *writeback = true; | ||
104 | |||
105 | head = page_buffers(page); | ||
106 | bh = head; | ||
107 | do { | ||
108 | if (buffer_locked(bh)) | ||
109 | *writeback = true; | ||
110 | |||
111 | if (buffer_dirty(bh)) | ||
112 | *dirty = true; | ||
113 | |||
114 | bh = bh->b_this_page; | ||
115 | } while (bh != head); | ||
116 | } | ||
117 | EXPORT_SYMBOL(buffer_check_dirty_writeback); | ||
118 | |||
119 | /* | ||
86 | * Block until a buffer comes unlocked. This doesn't stop it | 120 | * Block until a buffer comes unlocked. This doesn't stop it |
87 | * from becoming locked again - you have to lock it yourself | 121 | * from becoming locked again - you have to lock it yourself |
88 | * if you want to preserve its state. | 122 | * if you want to preserve its state. |
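buffer_check_dirty_writeback() gives block-based filesystems a stock implementation of the new is_dirty_writeback address_space operation: reclaim can ask, without sleeping, whether the buffers backing a page are dirty or under I/O before deciding to stall on it. Hooking it up is a single line in the aops table, as the block-device hunk above shows; a hypothetical filesystem would do the same:

    static const struct address_space_operations foo_aops = {
            .readpage            = foo_readpage,
            .writepage           = foo_writepage,
            /* let reclaim query buffer_head dirty/writeback state */
            .is_dirty_writeback  = buffer_check_dirty_writeback,
    };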
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 317f9ee9c991..ebaff368120d 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/mount.h> | 12 | #include <linux/mount.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/file.h> | 14 | #include <linux/file.h> |
15 | #include <linux/swap.h> | ||
15 | #include "internal.h" | 16 | #include "internal.h" |
16 | 17 | ||
17 | /* | 18 | /* |
@@ -227,8 +228,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op) | |||
227 | */ | 228 | */ |
228 | static int cachefiles_read_backing_file_one(struct cachefiles_object *object, | 229 | static int cachefiles_read_backing_file_one(struct cachefiles_object *object, |
229 | struct fscache_retrieval *op, | 230 | struct fscache_retrieval *op, |
230 | struct page *netpage, | 231 | struct page *netpage) |
231 | struct pagevec *pagevec) | ||
232 | { | 232 | { |
233 | struct cachefiles_one_read *monitor; | 233 | struct cachefiles_one_read *monitor; |
234 | struct address_space *bmapping; | 234 | struct address_space *bmapping; |
@@ -237,8 +237,6 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, | |||
237 | 237 | ||
238 | _enter(""); | 238 | _enter(""); |
239 | 239 | ||
240 | pagevec_reinit(pagevec); | ||
241 | |||
242 | _debug("read back %p{%lu,%d}", | 240 | _debug("read back %p{%lu,%d}", |
243 | netpage, netpage->index, page_count(netpage)); | 241 | netpage, netpage->index, page_count(netpage)); |
244 | 242 | ||
@@ -283,9 +281,7 @@ installed_new_backing_page: | |||
283 | backpage = newpage; | 281 | backpage = newpage; |
284 | newpage = NULL; | 282 | newpage = NULL; |
285 | 283 | ||
286 | page_cache_get(backpage); | 284 | lru_cache_add_file(backpage); |
287 | pagevec_add(pagevec, backpage); | ||
288 | __pagevec_lru_add_file(pagevec); | ||
289 | 285 | ||
290 | read_backing_page: | 286 | read_backing_page: |
291 | ret = bmapping->a_ops->readpage(NULL, backpage); | 287 | ret = bmapping->a_ops->readpage(NULL, backpage); |
@@ -452,8 +448,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
452 | if (block) { | 448 | if (block) { |
453 | /* submit the apparently valid page to the backing fs to be | 449 | /* submit the apparently valid page to the backing fs to be |
454 | * read from disk */ | 450 | * read from disk */ |
455 | ret = cachefiles_read_backing_file_one(object, op, page, | 451 | ret = cachefiles_read_backing_file_one(object, op, page); |
456 | &pagevec); | ||
457 | } else if (cachefiles_has_space(cache, 0, 1) == 0) { | 452 | } else if (cachefiles_has_space(cache, 0, 1) == 0) { |
458 | /* there's space in the cache we can use */ | 453 | /* there's space in the cache we can use */ |
459 | fscache_mark_page_cached(op, page); | 454 | fscache_mark_page_cached(op, page); |
@@ -482,14 +477,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
482 | { | 477 | { |
483 | struct cachefiles_one_read *monitor = NULL; | 478 | struct cachefiles_one_read *monitor = NULL; |
484 | struct address_space *bmapping = object->backer->d_inode->i_mapping; | 479 | struct address_space *bmapping = object->backer->d_inode->i_mapping; |
485 | struct pagevec lru_pvec; | ||
486 | struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; | 480 | struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; |
487 | int ret = 0; | 481 | int ret = 0; |
488 | 482 | ||
489 | _enter(""); | 483 | _enter(""); |
490 | 484 | ||
491 | pagevec_init(&lru_pvec, 0); | ||
492 | |||
493 | list_for_each_entry_safe(netpage, _n, list, lru) { | 485 | list_for_each_entry_safe(netpage, _n, list, lru) { |
494 | list_del(&netpage->lru); | 486 | list_del(&netpage->lru); |
495 | 487 | ||
@@ -534,9 +526,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
534 | backpage = newpage; | 526 | backpage = newpage; |
535 | newpage = NULL; | 527 | newpage = NULL; |
536 | 528 | ||
537 | page_cache_get(backpage); | 529 | lru_cache_add_file(backpage); |
538 | if (!pagevec_add(&lru_pvec, backpage)) | ||
539 | __pagevec_lru_add_file(&lru_pvec); | ||
540 | 530 | ||
541 | reread_backing_page: | 531 | reread_backing_page: |
542 | ret = bmapping->a_ops->readpage(NULL, backpage); | 532 | ret = bmapping->a_ops->readpage(NULL, backpage); |
@@ -559,9 +549,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
559 | goto nomem; | 549 | goto nomem; |
560 | } | 550 | } |
561 | 551 | ||
562 | page_cache_get(netpage); | 552 | lru_cache_add_file(netpage); |
563 | if (!pagevec_add(&lru_pvec, netpage)) | ||
564 | __pagevec_lru_add_file(&lru_pvec); | ||
565 | 553 | ||
566 | /* install a monitor */ | 554 | /* install a monitor */ |
567 | page_cache_get(netpage); | 555 | page_cache_get(netpage); |
@@ -643,9 +631,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
643 | 631 | ||
644 | fscache_mark_page_cached(op, netpage); | 632 | fscache_mark_page_cached(op, netpage); |
645 | 633 | ||
646 | page_cache_get(netpage); | 634 | lru_cache_add_file(netpage); |
647 | if (!pagevec_add(&lru_pvec, netpage)) | ||
648 | __pagevec_lru_add_file(&lru_pvec); | ||
649 | 635 | ||
650 | /* the netpage is unlocked and marked up to date here */ | 636 | /* the netpage is unlocked and marked up to date here */ |
651 | fscache_end_io(op, netpage, 0); | 637 | fscache_end_io(op, netpage, 0); |
@@ -661,8 +647,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
661 | 647 | ||
662 | out: | 648 | out: |
663 | /* tidy up */ | 649 | /* tidy up */ |
664 | pagevec_lru_add_file(&lru_pvec); | ||
665 | |||
666 | if (newpage) | 650 | if (newpage) |
667 | page_cache_release(newpage); | 651 | page_cache_release(newpage); |
668 | if (netpage) | 652 | if (netpage) |
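The cachefiles conversion drops the caller-managed pagevec: rather than taking an extra page reference, queueing pages in a local pagevec and flushing it with __pagevec_lru_add_file(), each page is handed directly to lru_cache_add_file(), which takes its own reference and does the per-CPU batching internally. Per page, the pattern shrinks from:

    page_cache_get(backpage);
    if (!pagevec_add(&lru_pvec, backpage))
            __pagevec_lru_add_file(&lru_pvec);
    /* ... plus a final pagevec_lru_add_file(&lru_pvec) on the way out */

to:

    lru_cache_add_file(backpage);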
diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 2b6cb23dd14e..1d1c41f1014d 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c | |||
@@ -203,7 +203,7 @@ configfs_write_file(struct file *file, const char __user *buf, size_t count, lof | |||
203 | mutex_lock(&buffer->mutex); | 203 | mutex_lock(&buffer->mutex); |
204 | len = fill_write_buffer(buffer, buf, count); | 204 | len = fill_write_buffer(buffer, buf, count); |
205 | if (len > 0) | 205 | if (len > 0) |
206 | len = flush_write_buffer(file->f_path.dentry, buffer, count); | 206 | len = flush_write_buffer(file->f_path.dentry, buffer, len); |
207 | if (len > 0) | 207 | if (len > 0) |
208 | *ppos += len; | 208 | *ppos += len; |
209 | mutex_unlock(&buffer->mutex); | 209 | mutex_unlock(&buffer->mutex); |
diff --git a/fs/coredump.c b/fs/coredump.c index dafafbafa731..72f816d6cad9 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
@@ -45,69 +45,79 @@ | |||
45 | #include <trace/events/sched.h> | 45 | #include <trace/events/sched.h> |
46 | 46 | ||
47 | int core_uses_pid; | 47 | int core_uses_pid; |
48 | char core_pattern[CORENAME_MAX_SIZE] = "core"; | ||
49 | unsigned int core_pipe_limit; | 48 | unsigned int core_pipe_limit; |
49 | char core_pattern[CORENAME_MAX_SIZE] = "core"; | ||
50 | static int core_name_size = CORENAME_MAX_SIZE; | ||
50 | 51 | ||
51 | struct core_name { | 52 | struct core_name { |
52 | char *corename; | 53 | char *corename; |
53 | int used, size; | 54 | int used, size; |
54 | }; | 55 | }; |
55 | static atomic_t call_count = ATOMIC_INIT(1); | ||
56 | 56 | ||
57 | /* The maximal length of core_pattern is also specified in sysctl.c */ | 57 | /* The maximal length of core_pattern is also specified in sysctl.c */ |
58 | 58 | ||
59 | static int expand_corename(struct core_name *cn) | 59 | static int expand_corename(struct core_name *cn, int size) |
60 | { | 60 | { |
61 | char *old_corename = cn->corename; | 61 | char *corename = krealloc(cn->corename, size, GFP_KERNEL); |
62 | |||
63 | cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); | ||
64 | cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); | ||
65 | 62 | ||
66 | if (!cn->corename) { | 63 | if (!corename) |
67 | kfree(old_corename); | ||
68 | return -ENOMEM; | 64 | return -ENOMEM; |
69 | } | ||
70 | 65 | ||
66 | if (size > core_name_size) /* racy but harmless */ | ||
67 | core_name_size = size; | ||
68 | |||
69 | cn->size = ksize(corename); | ||
70 | cn->corename = corename; | ||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg) | ||
75 | { | ||
76 | int free, need; | ||
77 | |||
78 | again: | ||
79 | free = cn->size - cn->used; | ||
80 | need = vsnprintf(cn->corename + cn->used, free, fmt, arg); | ||
81 | if (need < free) { | ||
82 | cn->used += need; | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | if (!expand_corename(cn, cn->size + need - free + 1)) | ||
87 | goto again; | ||
88 | |||
89 | return -ENOMEM; | ||
90 | } | ||
91 | |||
74 | static int cn_printf(struct core_name *cn, const char *fmt, ...) | 92 | static int cn_printf(struct core_name *cn, const char *fmt, ...) |
75 | { | 93 | { |
76 | char *cur; | ||
77 | int need; | ||
78 | int ret; | ||
79 | va_list arg; | 94 | va_list arg; |
95 | int ret; | ||
80 | 96 | ||
81 | va_start(arg, fmt); | 97 | va_start(arg, fmt); |
82 | need = vsnprintf(NULL, 0, fmt, arg); | 98 | ret = cn_vprintf(cn, fmt, arg); |
83 | va_end(arg); | 99 | va_end(arg); |
84 | 100 | ||
85 | if (likely(need < cn->size - cn->used - 1)) | 101 | return ret; |
86 | goto out_printf; | 102 | } |
87 | 103 | ||
88 | ret = expand_corename(cn); | 104 | static int cn_esc_printf(struct core_name *cn, const char *fmt, ...) |
89 | if (ret) | 105 | { |
90 | goto expand_fail; | 106 | int cur = cn->used; |
107 | va_list arg; | ||
108 | int ret; | ||
91 | 109 | ||
92 | out_printf: | ||
93 | cur = cn->corename + cn->used; | ||
94 | va_start(arg, fmt); | 110 | va_start(arg, fmt); |
95 | vsnprintf(cur, need + 1, fmt, arg); | 111 | ret = cn_vprintf(cn, fmt, arg); |
96 | va_end(arg); | 112 | va_end(arg); |
97 | cn->used += need; | ||
98 | return 0; | ||
99 | 113 | ||
100 | expand_fail: | 114 | for (; cur < cn->used; ++cur) { |
115 | if (cn->corename[cur] == '/') | ||
116 | cn->corename[cur] = '!'; | ||
117 | } | ||
101 | return ret; | 118 | return ret; |
102 | } | 119 | } |
103 | 120 | ||
104 | static void cn_escape(char *str) | ||
105 | { | ||
106 | for (; *str; str++) | ||
107 | if (*str == '/') | ||
108 | *str = '!'; | ||
109 | } | ||
110 | |||
111 | static int cn_print_exe_file(struct core_name *cn) | 121 | static int cn_print_exe_file(struct core_name *cn) |
112 | { | 122 | { |
113 | struct file *exe_file; | 123 | struct file *exe_file; |
@@ -115,12 +125,8 @@ static int cn_print_exe_file(struct core_name *cn) | |||
115 | int ret; | 125 | int ret; |
116 | 126 | ||
117 | exe_file = get_mm_exe_file(current->mm); | 127 | exe_file = get_mm_exe_file(current->mm); |
118 | if (!exe_file) { | 128 | if (!exe_file) |
119 | char *commstart = cn->corename + cn->used; | 129 | return cn_esc_printf(cn, "%s (path unknown)", current->comm); |
120 | ret = cn_printf(cn, "%s (path unknown)", current->comm); | ||
121 | cn_escape(commstart); | ||
122 | return ret; | ||
123 | } | ||
124 | 130 | ||
125 | pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); | 131 | pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); |
126 | if (!pathbuf) { | 132 | if (!pathbuf) { |
@@ -134,9 +140,7 @@ static int cn_print_exe_file(struct core_name *cn) | |||
134 | goto free_buf; | 140 | goto free_buf; |
135 | } | 141 | } |
136 | 142 | ||
137 | cn_escape(path); | 143 | ret = cn_esc_printf(cn, "%s", path); |
138 | |||
139 | ret = cn_printf(cn, "%s", path); | ||
140 | 144 | ||
141 | free_buf: | 145 | free_buf: |
142 | kfree(pathbuf); | 146 | kfree(pathbuf); |
@@ -157,19 +161,19 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) | |||
157 | int pid_in_pattern = 0; | 161 | int pid_in_pattern = 0; |
158 | int err = 0; | 162 | int err = 0; |
159 | 163 | ||
160 | cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); | ||
161 | cn->corename = kmalloc(cn->size, GFP_KERNEL); | ||
162 | cn->used = 0; | 164 | cn->used = 0; |
163 | 165 | cn->corename = NULL; | |
164 | if (!cn->corename) | 166 | if (expand_corename(cn, core_name_size)) |
165 | return -ENOMEM; | 167 | return -ENOMEM; |
168 | cn->corename[0] = '\0'; | ||
169 | |||
170 | if (ispipe) | ||
171 | ++pat_ptr; | ||
166 | 172 | ||
167 | /* Repeat as long as we have more pattern to process and more output | 173 | /* Repeat as long as we have more pattern to process and more output |
168 | space */ | 174 | space */ |
169 | while (*pat_ptr) { | 175 | while (*pat_ptr) { |
170 | if (*pat_ptr != '%') { | 176 | if (*pat_ptr != '%') { |
171 | if (*pat_ptr == 0) | ||
172 | goto out; | ||
173 | err = cn_printf(cn, "%c", *pat_ptr++); | 177 | err = cn_printf(cn, "%c", *pat_ptr++); |
174 | } else { | 178 | } else { |
175 | switch (*++pat_ptr) { | 179 | switch (*++pat_ptr) { |
@@ -210,22 +214,16 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) | |||
210 | break; | 214 | break; |
211 | } | 215 | } |
212 | /* hostname */ | 216 | /* hostname */ |
213 | case 'h': { | 217 | case 'h': |
214 | char *namestart = cn->corename + cn->used; | ||
215 | down_read(&uts_sem); | 218 | down_read(&uts_sem); |
216 | err = cn_printf(cn, "%s", | 219 | err = cn_esc_printf(cn, "%s", |
217 | utsname()->nodename); | 220 | utsname()->nodename); |
218 | up_read(&uts_sem); | 221 | up_read(&uts_sem); |
219 | cn_escape(namestart); | ||
220 | break; | 222 | break; |
221 | } | ||
222 | /* executable */ | 223 | /* executable */ |
223 | case 'e': { | 224 | case 'e': |
224 | char *commstart = cn->corename + cn->used; | 225 | err = cn_esc_printf(cn, "%s", current->comm); |
225 | err = cn_printf(cn, "%s", current->comm); | ||
226 | cn_escape(commstart); | ||
227 | break; | 226 | break; |
228 | } | ||
229 | case 'E': | 227 | case 'E': |
230 | err = cn_print_exe_file(cn); | 228 | err = cn_print_exe_file(cn); |
231 | break; | 229 | break; |
@@ -244,6 +242,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) | |||
244 | return err; | 242 | return err; |
245 | } | 243 | } |
246 | 244 | ||
245 | out: | ||
247 | /* Backward compatibility with core_uses_pid: | 246 | /* Backward compatibility with core_uses_pid: |
248 | * | 247 | * |
249 | * If core_pattern does not include a %p (as is the default) | 248 | * If core_pattern does not include a %p (as is the default) |
@@ -254,7 +253,6 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) | |||
254 | if (err) | 253 | if (err) |
255 | return err; | 254 | return err; |
256 | } | 255 | } |
257 | out: | ||
258 | return ispipe; | 256 | return ispipe; |
259 | } | 257 | } |
260 | 258 | ||
@@ -549,7 +547,7 @@ void do_coredump(siginfo_t *siginfo) | |||
549 | if (ispipe < 0) { | 547 | if (ispipe < 0) { |
550 | printk(KERN_WARNING "format_corename failed\n"); | 548 | printk(KERN_WARNING "format_corename failed\n"); |
551 | printk(KERN_WARNING "Aborting core\n"); | 549 | printk(KERN_WARNING "Aborting core\n"); |
552 | goto fail_corename; | 550 | goto fail_unlock; |
553 | } | 551 | } |
554 | 552 | ||
555 | if (cprm.limit == 1) { | 553 | if (cprm.limit == 1) { |
@@ -584,7 +582,7 @@ void do_coredump(siginfo_t *siginfo) | |||
584 | goto fail_dropcount; | 582 | goto fail_dropcount; |
585 | } | 583 | } |
586 | 584 | ||
587 | helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL); | 585 | helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL); |
588 | if (!helper_argv) { | 586 | if (!helper_argv) { |
589 | printk(KERN_WARNING "%s failed to allocate memory\n", | 587 | printk(KERN_WARNING "%s failed to allocate memory\n", |
590 | __func__); | 588 | __func__); |
@@ -601,7 +599,7 @@ void do_coredump(siginfo_t *siginfo) | |||
601 | 599 | ||
602 | argv_free(helper_argv); | 600 | argv_free(helper_argv); |
603 | if (retval) { | 601 | if (retval) { |
604 | printk(KERN_INFO "Core dump to %s pipe failed\n", | 602 | printk(KERN_INFO "Core dump to |%s pipe failed\n", |
605 | cn.corename); | 603 | cn.corename); |
606 | goto close_fail; | 604 | goto close_fail; |
607 | } | 605 | } |
@@ -669,7 +667,6 @@ fail_dropcount: | |||
669 | atomic_dec(&core_dump_count); | 667 | atomic_dec(&core_dump_count); |
670 | fail_unlock: | 668 | fail_unlock: |
671 | kfree(cn.corename); | 669 | kfree(cn.corename); |
672 | fail_corename: | ||
673 | coredump_finish(mm, core_dumped); | 670 | coredump_finish(mm, core_dumped); |
674 | revert_creds(old_cred); | 671 | revert_creds(old_cred); |
675 | fail_creds: | 672 | fail_creds: |
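The new cn_vprintf() above is a grow-and-retry loop around vsnprintf(): if the formatted output does not fit, the buffer is enlarged by exactly the shortfall (plus the trailing NUL) and the print is repeated, and cn_esc_printf() then rewrites any '/' produced by the expansion as '!' so the result stays a single path component. A stand-alone user-space sketch of the same idea, using realloc in place of krealloc; the struct and function names here are mine, not the kernel's:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct name { char *buf; size_t used, size; };

    static int name_printf(struct name *n, const char *fmt, ...)
    {
            va_list ap;

            for (;;) {
                    size_t free_space = n->size - n->used;
                    char *dst = n->buf ? n->buf + n->used : NULL;
                    int need;

                    va_start(ap, fmt);
                    need = vsnprintf(dst, free_space, fmt, ap);
                    va_end(ap);
                    if (need < 0)
                            return -1;
                    if ((size_t)need < free_space) {   /* it fit */
                            n->used += need;
                            return 0;
                    }
                    /* grow by the shortfall plus the NUL, then retry */
                    char *p = realloc(n->buf, n->size + need - free_space + 1);
                    if (!p)
                            return -1;
                    n->buf = p;
                    n->size += need - free_space + 1;
            }
    }

    int main(void)
    {
            struct name n = { NULL, 0, 0 };

            name_printf(&n, "core.%s.%d", "some/comm/name", 1234);
            for (char *p = n.buf; p && *p; p++)   /* cn_esc_printf-style escape */
                    if (*p == '/')
                            *p = '!';
            puts(n.buf);
            free(n.buf);
            return 0;
    }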
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 0cff4434880d..9ad17b15b454 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -1977,8 +1977,8 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, | |||
1977 | return -EINVAL; | 1977 | return -EINVAL; |
1978 | if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) | 1978 | if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) |
1979 | return -EFAULT; | 1979 | return -EFAULT; |
1980 | sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | 1980 | sigsaved = current->blocked; |
1981 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); | 1981 | set_current_blocked(&ksigmask); |
1982 | } | 1982 | } |
1983 | 1983 | ||
1984 | error = sys_epoll_wait(epfd, events, maxevents, timeout); | 1984 | error = sys_epoll_wait(epfd, events, maxevents, timeout); |
@@ -1995,7 +1995,7 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, | |||
1995 | sizeof(sigsaved)); | 1995 | sizeof(sigsaved)); |
1996 | set_restore_sigmask(); | 1996 | set_restore_sigmask(); |
1997 | } else | 1997 | } else |
1998 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 1998 | set_current_blocked(&sigsaved); |
1999 | } | 1999 | } |
2000 | 2000 | ||
2001 | return error; | 2001 | return error; |
@@ -2022,8 +2022,8 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, | |||
2022 | if (copy_from_user(&csigmask, sigmask, sizeof(csigmask))) | 2022 | if (copy_from_user(&csigmask, sigmask, sizeof(csigmask))) |
2023 | return -EFAULT; | 2023 | return -EFAULT; |
2024 | sigset_from_compat(&ksigmask, &csigmask); | 2024 | sigset_from_compat(&ksigmask, &csigmask); |
2025 | sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2025 | sigsaved = current->blocked; |
2026 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); | 2026 | set_current_blocked(&ksigmask); |
2027 | } | 2027 | } |
2028 | 2028 | ||
2029 | err = sys_epoll_wait(epfd, events, maxevents, timeout); | 2029 | err = sys_epoll_wait(epfd, events, maxevents, timeout); |
@@ -2040,7 +2040,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, | |||
2040 | sizeof(sigsaved)); | 2040 | sizeof(sigsaved)); |
2041 | set_restore_sigmask(); | 2041 | set_restore_sigmask(); |
2042 | } else | 2042 | } else |
2043 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 2043 | set_current_blocked(&sigsaved); |
2044 | } | 2044 | } |
2045 | 2045 | ||
2046 | return err; | 2046 | return err; |
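The epoll hunks switch the kernel side from sigprocmask() to set_current_blocked() when applying and restoring the caller's temporary mask, and drop the explicit removal of SIGKILL/SIGSTOP (set_current_blocked() is expected to filter those itself). The user-visible contract is unchanged: the sigmask passed to epoll_pwait() is in effect only for the duration of the wait. A self-contained caller sketch, keeping SIGINT blocked except while waiting:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    static void on_int(int sig) { (void)sig; }

    int main(void)
    {
            sigset_t block_int, during_wait;
            struct sigaction sa;
            struct epoll_event ev, out[1];
            int epfd, n;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_int;
            sigaction(SIGINT, &sa, NULL);

            sigemptyset(&block_int);              /* normally keep SIGINT blocked */
            sigaddset(&block_int, SIGINT);
            sigprocmask(SIG_BLOCK, &block_int, NULL);

            sigemptyset(&during_wait);            /* ...but let it interrupt the wait */

            epfd = epoll_create1(0);
            ev.events = EPOLLIN;
            ev.data.fd = STDIN_FILENO;
            epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev);

            n = epoll_pwait(epfd, out, 1, 5000, &during_wait);
            printf("epoll_pwait returned %d\n", n);   /* -1/EINTR on Ctrl-C */
            return 0;
    }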
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -932,6 +932,7 @@ static int de_thread(struct task_struct *tsk) | |||
932 | * also take its birthdate (always earlier than our own). | 932 | * also take its birthdate (always earlier than our own). |
933 | */ | 933 | */ |
934 | tsk->start_time = leader->start_time; | 934 | tsk->start_time = leader->start_time; |
935 | tsk->real_start_time = leader->real_start_time; | ||
935 | 936 | ||
936 | BUG_ON(!same_thread_group(leader, tsk)); | 937 | BUG_ON(!same_thread_group(leader, tsk)); |
937 | BUG_ON(has_group_leader_pid(tsk)); | 938 | BUG_ON(has_group_leader_pid(tsk)); |
@@ -947,9 +948,8 @@ static int de_thread(struct task_struct *tsk) | |||
947 | * Note: The old leader also uses this pid until release_task | 948 | * Note: The old leader also uses this pid until release_task |
948 | * is called. Odd but simple and correct. | 949 | * is called. Odd but simple and correct. |
949 | */ | 950 | */ |
950 | detach_pid(tsk, PIDTYPE_PID); | ||
951 | tsk->pid = leader->pid; | 951 | tsk->pid = leader->pid; |
952 | attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); | 952 | change_pid(tsk, PIDTYPE_PID, task_pid(leader)); |
953 | transfer_pid(leader, tsk, PIDTYPE_PGID); | 953 | transfer_pid(leader, tsk, PIDTYPE_PGID); |
954 | transfer_pid(leader, tsk, PIDTYPE_SID); | 954 | transfer_pid(leader, tsk, PIDTYPE_SID); |
955 | 955 | ||
@@ -1465,7 +1465,6 @@ static int do_execve_common(const char *filename, | |||
1465 | struct files_struct *displaced; | 1465 | struct files_struct *displaced; |
1466 | bool clear_in_exec; | 1466 | bool clear_in_exec; |
1467 | int retval; | 1467 | int retval; |
1468 | const struct cred *cred = current_cred(); | ||
1469 | 1468 | ||
1470 | /* | 1469 | /* |
1471 | * We move the actual failure in case of RLIMIT_NPROC excess from | 1470 | * We move the actual failure in case of RLIMIT_NPROC excess from |
@@ -1474,7 +1473,7 @@ static int do_execve_common(const char *filename, | |||
1474 | * whether NPROC limit is still exceeded. | 1473 | * whether NPROC limit is still exceeded. |
1475 | */ | 1474 | */ |
1476 | if ((current->flags & PF_NPROC_EXCEEDED) && | 1475 | if ((current->flags & PF_NPROC_EXCEEDED) && |
1477 | atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) { | 1476 | atomic_read(¤t_user()->processes) > rlimit(RLIMIT_NPROC)) { |
1478 | retval = -EAGAIN; | 1477 | retval = -EAGAIN; |
1479 | goto out_ret; | 1478 | goto out_ret; |
1480 | } | 1479 | } |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f67668f724ba..2bd85486b879 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -1985,6 +1985,7 @@ static const struct address_space_operations ext3_ordered_aops = { | |||
1985 | .direct_IO = ext3_direct_IO, | 1985 | .direct_IO = ext3_direct_IO, |
1986 | .migratepage = buffer_migrate_page, | 1986 | .migratepage = buffer_migrate_page, |
1987 | .is_partially_uptodate = block_is_partially_uptodate, | 1987 | .is_partially_uptodate = block_is_partially_uptodate, |
1988 | .is_dirty_writeback = buffer_check_dirty_writeback, | ||
1988 | .error_remove_page = generic_error_remove_page, | 1989 | .error_remove_page = generic_error_remove_page, |
1989 | }; | 1990 | }; |
1990 | 1991 | ||
diff --git a/fs/fat/misc.c b/fs/fat/misc.c index 359d307b5507..628e22a5a543 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c | |||
@@ -30,7 +30,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...) | |||
30 | va_start(args, fmt); | 30 | va_start(args, fmt); |
31 | vaf.fmt = fmt; | 31 | vaf.fmt = fmt; |
32 | vaf.va = &args; | 32 | vaf.va = &args; |
33 | printk(KERN_ERR "FAT-fs (%s): error, %pV\n", sb->s_id, &vaf); | 33 | fat_msg(sb, KERN_ERR, "error, %pV", &vaf); |
34 | va_end(args); | 34 | va_end(args); |
35 | } | 35 | } |
36 | 36 | ||
@@ -38,8 +38,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...) | |||
38 | panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id); | 38 | panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id); |
39 | else if (opts->errors == FAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) { | 39 | else if (opts->errors == FAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) { |
40 | sb->s_flags |= MS_RDONLY; | 40 | sb->s_flags |= MS_RDONLY; |
41 | printk(KERN_ERR "FAT-fs (%s): Filesystem has been " | 41 | fat_msg(sb, KERN_ERR, "Filesystem has been set read-only"); |
42 | "set read-only\n", sb->s_id); | ||
43 | } | 42 | } |
44 | } | 43 | } |
45 | EXPORT_SYMBOL_GPL(__fat_fs_error); | 44 | EXPORT_SYMBOL_GPL(__fat_fs_error); |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 9a0cdde14a08..0b578598c6ac 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -785,7 +785,7 @@ static const struct super_operations fuse_super_operations = { | |||
785 | static void sanitize_global_limit(unsigned *limit) | 785 | static void sanitize_global_limit(unsigned *limit) |
786 | { | 786 | { |
787 | if (*limit == 0) | 787 | if (*limit == 0) |
788 | *limit = ((num_physpages << PAGE_SHIFT) >> 13) / | 788 | *limit = ((totalram_pages << PAGE_SHIFT) >> 13) / |
789 | sizeof(struct fuse_req); | 789 | sizeof(struct fuse_req); |
790 | 790 | ||
791 | if (*limit >= 1 << 16) | 791 | if (*limit >= 1 << 16) |
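num_physpages is on its way out, so the fuse default-limit heuristic now reads totalram_pages instead; the formula itself is unchanged: total RAM in bytes, divided by 8192, divided by sizeof(struct fuse_req), with very large results clamped by the check shown at the end of the hunk. A back-of-the-envelope calculation with hypothetical numbers (4 GiB of RAM, a 400-byte fuse_req):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long totalram_pages = 1ULL << 20;  /* 4 GiB in 4 KiB pages (hypothetical) */
            unsigned long long page_shift = 12;
            unsigned long long req_size = 400;               /* hypothetical sizeof(struct fuse_req) */

            unsigned long long limit = ((totalram_pages << page_shift) >> 13) / req_size;

            printf("default limit: %llu requests\n", limit); /* ~1310 here */
            return 0;
    }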
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index fc90ab11c340..4338ff32959d 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c | |||
@@ -69,7 +69,7 @@ static char *dentry_name(struct dentry *dentry, int extra) | |||
69 | struct dentry *parent; | 69 | struct dentry *parent; |
70 | char *root, *name; | 70 | char *root, *name; |
71 | const char *seg_name; | 71 | const char *seg_name; |
72 | int len, seg_len; | 72 | int len, seg_len, root_len; |
73 | 73 | ||
74 | len = 0; | 74 | len = 0; |
75 | parent = dentry; | 75 | parent = dentry; |
@@ -81,7 +81,8 @@ static char *dentry_name(struct dentry *dentry, int extra) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | root = "proc"; | 83 | root = "proc"; |
84 | len += strlen(root); | 84 | root_len = strlen(root); |
85 | len += root_len; | ||
85 | name = kmalloc(len + extra + 1, GFP_KERNEL); | 86 | name = kmalloc(len + extra + 1, GFP_KERNEL); |
86 | if (name == NULL) | 87 | if (name == NULL) |
87 | return NULL; | 88 | return NULL; |
@@ -91,7 +92,7 @@ static char *dentry_name(struct dentry *dentry, int extra) | |||
91 | while (parent->d_parent != parent) { | 92 | while (parent->d_parent != parent) { |
92 | if (is_pid(parent)) { | 93 | if (is_pid(parent)) { |
93 | seg_name = "pid"; | 94 | seg_name = "pid"; |
94 | seg_len = strlen("pid"); | 95 | seg_len = strlen(seg_name); |
95 | } | 96 | } |
96 | else { | 97 | else { |
97 | seg_name = parent->d_name.name; | 98 | seg_name = parent->d_name.name; |
@@ -100,10 +101,10 @@ static char *dentry_name(struct dentry *dentry, int extra) | |||
100 | 101 | ||
101 | len -= seg_len + 1; | 102 | len -= seg_len + 1; |
102 | name[len] = '/'; | 103 | name[len] = '/'; |
103 | strncpy(&name[len + 1], seg_name, seg_len); | 104 | memcpy(&name[len + 1], seg_name, seg_len); |
104 | parent = parent->d_parent; | 105 | parent = parent->d_parent; |
105 | } | 106 | } |
106 | strncpy(name, root, strlen(root)); | 107 | memcpy(name, root, root_len); |
107 | return name; | 108 | return name; |
108 | } | 109 | } |
109 | 110 | ||
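The hppfs change swaps strncpy() for memcpy() where the copy length is already exact: no NUL padding or termination is wanted for the interior segments, so strncpy() only obscures the intent. A small user-space sketch of the same backwards path assembly, with hypothetical segment names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char *root = "proc";
            const char *segs[] = { "1234", "pid" };   /* deepest component first (hypothetical) */
            size_t root_len = strlen(root);
            size_t len = root_len, i;

            for (i = 0; i < 2; i++)
                    len += strlen(segs[i]) + 1;       /* one '/' per segment */

            char *name = malloc(len + 1);
            if (!name)
                    return 1;
            name[len] = '\0';

            for (i = 0; i < 2; i++) {                 /* fill from the end, as dentry_name() does */
                    size_t seg_len = strlen(segs[i]);
                    len -= seg_len + 1;
                    name[len] = '/';
                    memcpy(&name[len + 1], segs[i], seg_len);   /* exact length, no NUL wanted */
            }
            memcpy(name, root, root_len);
            printf("%s\n", name);                     /* proc/pid/1234 */
            free(name);
            return 0;
    }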
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index a2aa97d45670..10d6c41aecad 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c | |||
@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv) | |||
305 | svc_sock_update_bufs(serv); | 305 | svc_sock_update_bufs(serv); |
306 | serv->sv_maxconn = nlm_max_connections; | 306 | serv->sv_maxconn = nlm_max_connections; |
307 | 307 | ||
308 | nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name); | 308 | nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name); |
309 | if (IS_ERR(nlmsvc_task)) { | 309 | if (IS_ERR(nlmsvc_task)) { |
310 | error = PTR_ERR(nlmsvc_task); | 310 | error = PTR_ERR(nlmsvc_task); |
311 | printk(KERN_WARNING | 311 | printk(KERN_WARNING |
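kthread_run() and kthread_create() take a printf-style name format, so passing a runtime string directly as the format is fragile: a stray '%' in the service name would be interpreted as a conversion. This hunk, and the matching NFS callback and NFSv4 state-manager hunks below, either add an explicit "%s" or move the formatting into the call itself. The two safe shapes, quoting the calls from the hunks and assuming the usual kthread_run(threadfn, data, namefmt, ...) signature:

    /* fragile: serv->sv_name becomes the format string */
    nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);

    /* safe: the name is format data, not the format */
    nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);

    /* safe: build the name in the call instead of a local buffer */
    cb_info->task = kthread_run(callback_svc, cb_info->rqst,
                                "nfsv4.%u-svc", minorversion);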
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index ee24df5af1f9..3c5dd55d284c 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c | |||
@@ -117,7 +117,7 @@ int ncp_mmap(struct file *file, struct vm_area_struct *vma) | |||
117 | return -EINVAL; | 117 | return -EINVAL; |
118 | /* we do not support files bigger than 4GB... We eventually | 118 | /* we do not support files bigger than 4GB... We eventually |
119 | supports just 4GB... */ | 119 | supports just 4GB... */ |
120 | if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff | 120 | if (vma_pages(vma) + vma->vm_pgoff |
121 | > (1U << (32 - PAGE_SHIFT))) | 121 | > (1U << (32 - PAGE_SHIFT))) |
122 | return -EFBIG; | 122 | return -EFBIG; |
123 | 123 | ||
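vma_pages() is the existing helper for "how many pages does this VMA span", so the open-coded shift above was redundant. For reference, the helper in <linux/mm.h> is essentially:

    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
            return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }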
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index cff089a412c7..da6a43d19aa3 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, | |||
211 | struct svc_rqst *rqstp; | 211 | struct svc_rqst *rqstp; |
212 | int (*callback_svc)(void *vrqstp); | 212 | int (*callback_svc)(void *vrqstp); |
213 | struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; | 213 | struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; |
214 | char svc_name[12]; | ||
215 | int ret; | 214 | int ret; |
216 | 215 | ||
217 | nfs_callback_bc_serv(minorversion, xprt, serv); | 216 | nfs_callback_bc_serv(minorversion, xprt, serv); |
@@ -235,10 +234,10 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, | |||
235 | 234 | ||
236 | svc_sock_update_bufs(serv); | 235 | svc_sock_update_bufs(serv); |
237 | 236 | ||
238 | sprintf(svc_name, "nfsv4.%u-svc", minorversion); | ||
239 | cb_info->serv = serv; | 237 | cb_info->serv = serv; |
240 | cb_info->rqst = rqstp; | 238 | cb_info->rqst = rqstp; |
241 | cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name); | 239 | cb_info->task = kthread_run(callback_svc, cb_info->rqst, |
240 | "nfsv4.%u-svc", minorversion); | ||
242 | if (IS_ERR(cb_info->task)) { | 241 | if (IS_ERR(cb_info->task)) { |
243 | ret = PTR_ERR(cb_info->task); | 242 | ret = PTR_ERR(cb_info->task); |
244 | svc_exit_thread(cb_info->rqst); | 243 | svc_exit_thread(cb_info->rqst); |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5d051419527b..d7ed697133f0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/pagevec.h> | 33 | #include <linux/pagevec.h> |
34 | #include <linux/namei.h> | 34 | #include <linux/namei.h> |
35 | #include <linux/mount.h> | 35 | #include <linux/mount.h> |
36 | #include <linux/swap.h> | ||
36 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
37 | #include <linux/kmemleak.h> | 38 | #include <linux/kmemleak.h> |
38 | #include <linux/xattr.h> | 39 | #include <linux/xattr.h> |
@@ -1758,7 +1759,6 @@ EXPORT_SYMBOL_GPL(nfs_unlink); | |||
1758 | */ | 1759 | */ |
1759 | int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | 1760 | int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) |
1760 | { | 1761 | { |
1761 | struct pagevec lru_pvec; | ||
1762 | struct page *page; | 1762 | struct page *page; |
1763 | char *kaddr; | 1763 | char *kaddr; |
1764 | struct iattr attr; | 1764 | struct iattr attr; |
@@ -1798,11 +1798,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
1798 | * No big deal if we can't add this page to the page cache here. | 1798 | * No big deal if we can't add this page to the page cache here. |
1799 | * READLINK will get the missing page from the server if needed. | 1799 | * READLINK will get the missing page from the server if needed. |
1800 | */ | 1800 | */ |
1801 | pagevec_init(&lru_pvec, 0); | 1801 | if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0, |
1802 | if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0, | ||
1803 | GFP_KERNEL)) { | 1802 | GFP_KERNEL)) { |
1804 | pagevec_add(&lru_pvec, page); | ||
1805 | pagevec_lru_add_file(&lru_pvec); | ||
1806 | SetPageUptodate(page); | 1803 | SetPageUptodate(page); |
1807 | unlock_page(page); | 1804 | unlock_page(page); |
1808 | } else | 1805 | } else |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 6b4a79f4ad1d..94e94bd11aae 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -495,6 +495,35 @@ static int nfs_release_page(struct page *page, gfp_t gfp) | |||
495 | return nfs_fscache_release_page(page, gfp); | 495 | return nfs_fscache_release_page(page, gfp); |
496 | } | 496 | } |
497 | 497 | ||
498 | static void nfs_check_dirty_writeback(struct page *page, | ||
499 | bool *dirty, bool *writeback) | ||
500 | { | ||
501 | struct nfs_inode *nfsi; | ||
502 | struct address_space *mapping = page_file_mapping(page); | ||
503 | |||
504 | if (!mapping || PageSwapCache(page)) | ||
505 | return; | ||
506 | |||
507 | /* | ||
508 | * Check if an unstable page is currently being committed and | ||
509 | * if so, have the VM treat it as if the page is under writeback | ||
510 | * so it will not block due to pages that will shortly be freeable. | ||
511 | */ | ||
512 | nfsi = NFS_I(mapping->host); | ||
513 | if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) { | ||
514 | *writeback = true; | ||
515 | return; | ||
516 | } | ||
517 | |||
518 | /* | ||
519 | * If PagePrivate() is set, then the page is not freeable and as the | ||
520 | * inode is not being committed, it's not going to be cleaned in the | ||
521 | * near future so treat it as dirty | ||
522 | */ | ||
523 | if (PagePrivate(page)) | ||
524 | *dirty = true; | ||
525 | } | ||
526 | |||
498 | /* | 527 | /* |
499 | * Attempt to clear the private state associated with a page when an error | 528 | * Attempt to clear the private state associated with a page when an error |
500 | * occurs that requires the cached contents of an inode to be written back or | 529 | * occurs that requires the cached contents of an inode to be written back or |
@@ -542,6 +571,7 @@ const struct address_space_operations nfs_file_aops = { | |||
542 | .direct_IO = nfs_direct_IO, | 571 | .direct_IO = nfs_direct_IO, |
543 | .migratepage = nfs_migrate_page, | 572 | .migratepage = nfs_migrate_page, |
544 | .launder_page = nfs_launder_page, | 573 | .launder_page = nfs_launder_page, |
574 | .is_dirty_writeback = nfs_check_dirty_writeback, | ||
545 | .error_remove_page = generic_error_remove_page, | 575 | .error_remove_page = generic_error_remove_page, |
546 | #ifdef CONFIG_NFS_SWAP | 576 | #ifdef CONFIG_NFS_SWAP |
547 | .swap_activate = nfs_swap_activate, | 577 | .swap_activate = nfs_swap_activate, |
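is_dirty_writeback is a new address_space operation (ext3 wires it to buffer_check_dirty_writeback in the hunk further up) that lets a filesystem tell page reclaim how to account a page it holds private data on: NFS reports pages belonging to an inode with an unstable commit in flight as "writeback", and other private pages as "dirty", so reclaim stalls on them the way it would for ordinary writeback instead of spinning. A sketch of the hook's shape and of a reclaim-side caller; the caller's name and exact ordering here are illustrative, not quoted from this patch:

    /* in struct address_space_operations, roughly: */
    void (*is_dirty_writeback)(struct page *page, bool *dirty, bool *writeback);

    /* sketch of how reclaim might consult it */
    static void check_dirty_writeback(struct page *page, bool *dirty, bool *writeback)
    {
            struct address_space *mapping;

            *dirty = PageDirty(page);
            *writeback = PageWriteback(page);

            if (!PagePrivate(page))
                    return;                   /* the generic flags are enough */

            mapping = page_mapping(page);
            if (mapping && mapping->a_ops->is_dirty_writeback)
                    mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
    }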
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ff10b4aa534c..55418811a55a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1194,7 +1194,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) | |||
1194 | snprintf(buf, sizeof(buf), "%s-manager", | 1194 | snprintf(buf, sizeof(buf), "%s-manager", |
1195 | rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); | 1195 | rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); |
1196 | rcu_read_unlock(); | 1196 | rcu_read_unlock(); |
1197 | task = kthread_run(nfs4_run_state_manager, clp, buf); | 1197 | task = kthread_run(nfs4_run_state_manager, clp, "%s", buf); |
1198 | if (IS_ERR(task)) { | 1198 | if (IS_ERR(task)) { |
1199 | printk(KERN_ERR "%s: kthread_run: %ld\n", | 1199 | printk(KERN_ERR "%s: kthread_run: %ld\n", |
1200 | __func__, PTR_ERR(task)); | 1200 | __func__, PTR_ERR(task)); |
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index eed4d7b26249..741fd02e0444 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
@@ -398,6 +398,69 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, | |||
398 | } | 398 | } |
399 | 399 | ||
400 | /** | 400 | /** |
401 | * nilfs_palloc_count_desc_blocks - count descriptor blocks number | ||
402 | * @inode: inode of metadata file using this allocator | ||
403 | * @desc_blocks: descriptor blocks number [out] | ||
404 | */ | ||
405 | static int nilfs_palloc_count_desc_blocks(struct inode *inode, | ||
406 | unsigned long *desc_blocks) | ||
407 | { | ||
408 | unsigned long blknum; | ||
409 | int ret; | ||
410 | |||
411 | ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum); | ||
412 | if (likely(!ret)) | ||
413 | *desc_blocks = DIV_ROUND_UP( | ||
414 | blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block); | ||
415 | return ret; | ||
416 | } | ||
417 | |||
418 | /** | ||
419 | * nilfs_palloc_mdt_file_can_grow - check potential opportunity for | ||
420 | * MDT file growing | ||
421 | * @inode: inode of metadata file using this allocator | ||
422 | * @desc_blocks: known current descriptor blocks count | ||
423 | */ | ||
424 | static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, | ||
425 | unsigned long desc_blocks) | ||
426 | { | ||
427 | return (nilfs_palloc_groups_per_desc_block(inode) * desc_blocks) < | ||
428 | nilfs_palloc_groups_count(inode); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * nilfs_palloc_count_max_entries - count max number of entries that can be | ||
433 | * described by descriptor blocks count | ||
434 | * @inode: inode of metadata file using this allocator | ||
435 | * @nused: current number of used entries | ||
436 | * @nmaxp: max number of entries [out] | ||
437 | */ | ||
438 | int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) | ||
439 | { | ||
440 | unsigned long desc_blocks = 0; | ||
441 | u64 entries_per_desc_block, nmax; | ||
442 | int err; | ||
443 | |||
444 | err = nilfs_palloc_count_desc_blocks(inode, &desc_blocks); | ||
445 | if (unlikely(err)) | ||
446 | return err; | ||
447 | |||
448 | entries_per_desc_block = (u64)nilfs_palloc_entries_per_group(inode) * | ||
449 | nilfs_palloc_groups_per_desc_block(inode); | ||
450 | nmax = entries_per_desc_block * desc_blocks; | ||
451 | |||
452 | if (nused == nmax && | ||
453 | nilfs_palloc_mdt_file_can_grow(inode, desc_blocks)) | ||
454 | nmax += entries_per_desc_block; | ||
455 | |||
456 | if (nused > nmax) | ||
457 | return -ERANGE; | ||
458 | |||
459 | *nmaxp = nmax; | ||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | /** | ||
401 | * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object | 464 | * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object |
402 | * @inode: inode of metadata file using this allocator | 465 | * @inode: inode of metadata file using this allocator |
403 | * @req: nilfs_palloc_req structure exchanged for the allocation | 466 | * @req: nilfs_palloc_req structure exchanged for the allocation |
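The new max-entries count follows directly from the allocator geometry: each descriptor block covers a fixed number of groups and each group a fixed number of entries, so nmax = entries_per_group * groups_per_desc_block * desc_blocks, plus one more descriptor block's worth if the file is exactly full but still allowed to grow. A worked example with hypothetical geometry values:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical geometry, not read from a real nilfs2 volume */
            unsigned long long entries_per_group = 32768;
            unsigned long long groups_per_desc_block = 1024;
            unsigned long long desc_blocks = 2;
            unsigned long long nused = 67108864;    /* happens to equal nmax below */

            unsigned long long per_desc = entries_per_group * groups_per_desc_block;
            unsigned long long nmax = per_desc * desc_blocks;

            /* full, and (per the real code) nilfs_palloc_mdt_file_can_grow() says yes */
            if (nused == nmax)
                    nmax += per_desc;

            printf("nmax = %llu, nfree = %llu\n", nmax, nmax - nused);
            return 0;
    }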
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index fb7238100548..4bd6451b5703 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h | |||
@@ -48,6 +48,8 @@ int nilfs_palloc_get_entry_block(struct inode *, __u64, int, | |||
48 | void *nilfs_palloc_block_get_entry(const struct inode *, __u64, | 48 | void *nilfs_palloc_block_get_entry(const struct inode *, __u64, |
49 | const struct buffer_head *, void *); | 49 | const struct buffer_head *, void *); |
50 | 50 | ||
51 | int nilfs_palloc_count_max_entries(struct inode *, u64, u64 *); | ||
52 | |||
51 | /** | 53 | /** |
52 | * nilfs_palloc_req - persistent allocator request and reply | 54 | * nilfs_palloc_req - persistent allocator request and reply |
53 | * @pr_entry_nr: entry number (vblocknr or inode number) | 55 | * @pr_entry_nr: entry number (vblocknr or inode number) |
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index d8e65bde083c..6548c7851b48 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c | |||
@@ -160,6 +160,28 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, | |||
160 | } | 160 | } |
161 | 161 | ||
162 | /** | 162 | /** |
163 | * nilfs_ifile_count_free_inodes - calculate free inodes count | ||
164 | * @ifile: ifile inode | ||
165 | * @nmaxinodes: current maximum of available inodes count [out] | ||
166 | * @nfreeinodes: free inodes count [out] | ||
167 | */ | ||
168 | int nilfs_ifile_count_free_inodes(struct inode *ifile, | ||
169 | u64 *nmaxinodes, u64 *nfreeinodes) | ||
170 | { | ||
171 | u64 nused; | ||
172 | int err; | ||
173 | |||
174 | *nmaxinodes = 0; | ||
175 | *nfreeinodes = 0; | ||
176 | |||
177 | nused = atomic64_read(&NILFS_I(ifile)->i_root->inodes_count); | ||
178 | err = nilfs_palloc_count_max_entries(ifile, nused, nmaxinodes); | ||
179 | if (likely(!err)) | ||
180 | *nfreeinodes = *nmaxinodes - nused; | ||
181 | return err; | ||
182 | } | ||
183 | |||
184 | /** | ||
163 | * nilfs_ifile_read - read or get ifile inode | 185 | * nilfs_ifile_read - read or get ifile inode |
164 | * @sb: super block instance | 186 | * @sb: super block instance |
165 | * @root: root object | 187 | * @root: root object |
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 59b6f2b51df6..679674d13372 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h | |||
@@ -49,6 +49,8 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **); | |||
49 | int nilfs_ifile_delete_inode(struct inode *, ino_t); | 49 | int nilfs_ifile_delete_inode(struct inode *, ino_t); |
50 | int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **); | 50 | int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **); |
51 | 51 | ||
52 | int nilfs_ifile_count_free_inodes(struct inode *, u64 *, u64 *); | ||
53 | |||
52 | int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root, | 54 | int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root, |
53 | size_t inode_size, struct nilfs_inode *raw_inode, | 55 | size_t inode_size, struct nilfs_inode *raw_inode, |
54 | struct inode **inodep); | 56 | struct inode **inodep); |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index bccfec8343c5..b1a5277cfd18 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -54,7 +54,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n) | |||
54 | 54 | ||
55 | inode_add_bytes(inode, (1 << inode->i_blkbits) * n); | 55 | inode_add_bytes(inode, (1 << inode->i_blkbits) * n); |
56 | if (root) | 56 | if (root) |
57 | atomic_add(n, &root->blocks_count); | 57 | atomic64_add(n, &root->blocks_count); |
58 | } | 58 | } |
59 | 59 | ||
60 | void nilfs_inode_sub_blocks(struct inode *inode, int n) | 60 | void nilfs_inode_sub_blocks(struct inode *inode, int n) |
@@ -63,7 +63,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) | |||
63 | 63 | ||
64 | inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); | 64 | inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); |
65 | if (root) | 65 | if (root) |
66 | atomic_sub(n, &root->blocks_count); | 66 | atomic64_sub(n, &root->blocks_count); |
67 | } | 67 | } |
68 | 68 | ||
69 | /** | 69 | /** |
@@ -369,7 +369,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) | |||
369 | goto failed_ifile_create_inode; | 369 | goto failed_ifile_create_inode; |
370 | /* reference count of i_bh inherits from nilfs_mdt_read_block() */ | 370 | /* reference count of i_bh inherits from nilfs_mdt_read_block() */ |
371 | 371 | ||
372 | atomic_inc(&root->inodes_count); | 372 | atomic64_inc(&root->inodes_count); |
373 | inode_init_owner(inode, dir, mode); | 373 | inode_init_owner(inode, dir, mode); |
374 | inode->i_ino = ino; | 374 | inode->i_ino = ino; |
375 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 375 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
@@ -801,7 +801,7 @@ void nilfs_evict_inode(struct inode *inode) | |||
801 | 801 | ||
802 | ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); | 802 | ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); |
803 | if (!ret) | 803 | if (!ret) |
804 | atomic_dec(&ii->i_root->inodes_count); | 804 | atomic64_dec(&ii->i_root->inodes_count); |
805 | 805 | ||
806 | nilfs_clear_inode(inode); | 806 | nilfs_clear_inode(inode); |
807 | 807 | ||
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index a5752a589932..bd88a7461063 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -835,9 +835,9 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) | |||
835 | raw_cp->cp_snapshot_list.ssl_next = 0; | 835 | raw_cp->cp_snapshot_list.ssl_next = 0; |
836 | raw_cp->cp_snapshot_list.ssl_prev = 0; | 836 | raw_cp->cp_snapshot_list.ssl_prev = 0; |
837 | raw_cp->cp_inodes_count = | 837 | raw_cp->cp_inodes_count = |
838 | cpu_to_le64(atomic_read(&sci->sc_root->inodes_count)); | 838 | cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count)); |
839 | raw_cp->cp_blocks_count = | 839 | raw_cp->cp_blocks_count = |
840 | cpu_to_le64(atomic_read(&sci->sc_root->blocks_count)); | 840 | cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count)); |
841 | raw_cp->cp_nblk_inc = | 841 | raw_cp->cp_nblk_inc = |
842 | cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); | 842 | cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); |
843 | raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); | 843 | raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index c7d1f9f18b09..1427de5ebf4d 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -554,8 +554,10 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, | |||
554 | if (err) | 554 | if (err) |
555 | goto failed_bh; | 555 | goto failed_bh; |
556 | 556 | ||
557 | atomic_set(&root->inodes_count, le64_to_cpu(raw_cp->cp_inodes_count)); | 557 | atomic64_set(&root->inodes_count, |
558 | atomic_set(&root->blocks_count, le64_to_cpu(raw_cp->cp_blocks_count)); | 558 | le64_to_cpu(raw_cp->cp_inodes_count)); |
559 | atomic64_set(&root->blocks_count, | ||
560 | le64_to_cpu(raw_cp->cp_blocks_count)); | ||
559 | 561 | ||
560 | nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); | 562 | nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); |
561 | 563 | ||
@@ -609,6 +611,7 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
609 | unsigned long overhead; | 611 | unsigned long overhead; |
610 | unsigned long nrsvblocks; | 612 | unsigned long nrsvblocks; |
611 | sector_t nfreeblocks; | 613 | sector_t nfreeblocks; |
614 | u64 nmaxinodes, nfreeinodes; | ||
612 | int err; | 615 | int err; |
613 | 616 | ||
614 | /* | 617 | /* |
@@ -633,14 +636,34 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
633 | if (unlikely(err)) | 636 | if (unlikely(err)) |
634 | return err; | 637 | return err; |
635 | 638 | ||
639 | err = nilfs_ifile_count_free_inodes(root->ifile, | ||
640 | &nmaxinodes, &nfreeinodes); | ||
641 | if (unlikely(err)) { | ||
642 | printk(KERN_WARNING | ||
643 | "NILFS warning: fail to count free inodes: err %d.\n", | ||
644 | err); | ||
645 | if (err == -ERANGE) { | ||
646 | /* | ||
647 | * If nilfs_palloc_count_max_entries() returns | ||
648 | * -ERANGE error code then we simply treat | ||
649 | * current inodes count as maximum possible and | ||
650 | * zero as free inodes value. | ||
651 | */ | ||
652 | nmaxinodes = atomic64_read(&root->inodes_count); | ||
653 | nfreeinodes = 0; | ||
654 | err = 0; | ||
655 | } else | ||
656 | return err; | ||
657 | } | ||
658 | |||
636 | buf->f_type = NILFS_SUPER_MAGIC; | 659 | buf->f_type = NILFS_SUPER_MAGIC; |
637 | buf->f_bsize = sb->s_blocksize; | 660 | buf->f_bsize = sb->s_blocksize; |
638 | buf->f_blocks = blocks - overhead; | 661 | buf->f_blocks = blocks - overhead; |
639 | buf->f_bfree = nfreeblocks; | 662 | buf->f_bfree = nfreeblocks; |
640 | buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? | 663 | buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? |
641 | (buf->f_bfree - nrsvblocks) : 0; | 664 | (buf->f_bfree - nrsvblocks) : 0; |
642 | buf->f_files = atomic_read(&root->inodes_count); | 665 | buf->f_files = nmaxinodes; |
643 | buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */ | 666 | buf->f_ffree = nfreeinodes; |
644 | buf->f_namelen = NILFS_NAME_LEN; | 667 | buf->f_namelen = NILFS_NAME_LEN; |
645 | buf->f_fsid.val[0] = (u32)id; | 668 | buf->f_fsid.val[0] = (u32)id; |
646 | buf->f_fsid.val[1] = (u32)(id >> 32); | 669 | buf->f_fsid.val[1] = (u32)(id >> 32); |
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 41e6a04a561f..94c451ce6d24 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -764,8 +764,8 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno) | |||
764 | new->ifile = NULL; | 764 | new->ifile = NULL; |
765 | new->nilfs = nilfs; | 765 | new->nilfs = nilfs; |
766 | atomic_set(&new->count, 1); | 766 | atomic_set(&new->count, 1); |
767 | atomic_set(&new->inodes_count, 0); | 767 | atomic64_set(&new->inodes_count, 0); |
768 | atomic_set(&new->blocks_count, 0); | 768 | atomic64_set(&new->blocks_count, 0); |
769 | 769 | ||
770 | rb_link_node(&new->rb_node, parent, p); | 770 | rb_link_node(&new->rb_node, parent, p); |
771 | rb_insert_color(&new->rb_node, &nilfs->ns_cptree); | 771 | rb_insert_color(&new->rb_node, &nilfs->ns_cptree); |
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index be1267a34cea..de8cc53b4a5c 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h | |||
@@ -241,8 +241,8 @@ struct nilfs_root { | |||
241 | struct the_nilfs *nilfs; | 241 | struct the_nilfs *nilfs; |
242 | struct inode *ifile; | 242 | struct inode *ifile; |
243 | 243 | ||
244 | atomic_t inodes_count; | 244 | atomic64_t inodes_count; |
245 | atomic_t blocks_count; | 245 | atomic64_t blocks_count; |
246 | }; | 246 | }; |
247 | 247 | ||
248 | /* Special checkpoint number */ | 248 | /* Special checkpoint number */ |
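Switching inodes_count and blocks_count from atomic_t to atomic64_t is an overflow fix: atomic_t is 32 bits even on 64-bit kernels, while the on-disk checkpoint fields (cp_inodes_count, cp_blocks_count) are 64-bit, so a volume with more than 2^32 blocks (16 TiB at a 4 KiB block size) would wrap the in-memory counter. A tiny illustration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t blocks = (1ULL << 32) + 5;   /* e.g. a volume just past 16 TiB of 4 KiB blocks */
            uint32_t c32 = (uint32_t)blocks;      /* what a 32-bit counter ends up holding */
            uint64_t c64 = blocks;                /* what the atomic64_t holds */

            printf("32-bit counter: %u blocks (wrapped)\n", c32);    /* 5 */
            printf("64-bit counter: %llu blocks\n", (unsigned long long)c64);
            return 0;
    }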
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index b8a9d87231b1..17e6bdde96c5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -5655,7 +5655,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
5655 | &ref_tree, NULL); | 5655 | &ref_tree, NULL); |
5656 | if (ret) { | 5656 | if (ret) { |
5657 | mlog_errno(ret); | 5657 | mlog_errno(ret); |
5658 | goto out; | 5658 | goto bail; |
5659 | } | 5659 | } |
5660 | 5660 | ||
5661 | ret = ocfs2_prepare_refcount_change_for_del(inode, | 5661 | ret = ocfs2_prepare_refcount_change_for_del(inode, |
@@ -5666,7 +5666,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
5666 | &extra_blocks); | 5666 | &extra_blocks); |
5667 | if (ret < 0) { | 5667 | if (ret < 0) { |
5668 | mlog_errno(ret); | 5668 | mlog_errno(ret); |
5669 | goto out; | 5669 | goto bail; |
5670 | } | 5670 | } |
5671 | } | 5671 | } |
5672 | 5672 | ||
@@ -5674,7 +5674,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
5674 | extra_blocks); | 5674 | extra_blocks); |
5675 | if (ret) { | 5675 | if (ret) { |
5676 | mlog_errno(ret); | 5676 | mlog_errno(ret); |
5677 | return ret; | 5677 | goto bail; |
5678 | } | 5678 | } |
5679 | 5679 | ||
5680 | mutex_lock(&tl_inode->i_mutex); | 5680 | mutex_lock(&tl_inode->i_mutex); |
@@ -5734,7 +5734,7 @@ out_commit: | |||
5734 | ocfs2_commit_trans(osb, handle); | 5734 | ocfs2_commit_trans(osb, handle); |
5735 | out: | 5735 | out: |
5736 | mutex_unlock(&tl_inode->i_mutex); | 5736 | mutex_unlock(&tl_inode->i_mutex); |
5737 | 5737 | bail: | |
5738 | if (meta_ac) | 5738 | if (meta_ac) |
5739 | ocfs2_free_alloc_context(meta_ac); | 5739 | ocfs2_free_alloc_context(meta_ac); |
5740 | 5740 | ||
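The ocfs2_remove_btree_range() changes are about matching each error path to the right exit label: the failures redirected to the new bail: label happen before tl_inode->i_mutex is taken, so jumping to out: would unlock a mutex that was never locked, and the bare return skipped freeing the allocation contexts altogether. A generic user-space sketch of the layered-label pattern, with hypothetical resources standing in for the lock and contexts:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_work(void)
    {
            int ret = 0;
            void *ctx = malloc(64);        /* stands in for meta_ac/data_ac */

            if (!ctx)
                    return -1;

            if (rand() % 2) {              /* failure before taking the lock */
                    ret = -1;
                    goto bail;             /* must NOT unlock */
            }

            pthread_mutex_lock(&lock);
            if (rand() % 2) {              /* failure while holding the lock */
                    ret = -1;
                    goto out;
            }
            /* ... locked work ... */
    out:
            pthread_mutex_unlock(&lock);
    bail:
            free(ctx);                     /* common to every exit path */
            return ret;
    }

    int main(void)
    {
            return do_work() ? 1 : 0;
    }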
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 42252bf64b51..5c1c864e81cc 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -176,7 +176,7 @@ static void o2hb_dead_threshold_set(unsigned int threshold) | |||
176 | } | 176 | } |
177 | } | 177 | } |
178 | 178 | ||
179 | static int o2hb_global_hearbeat_mode_set(unsigned int hb_mode) | 179 | static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode) |
180 | { | 180 | { |
181 | int ret = -1; | 181 | int ret = -1; |
182 | 182 | ||
@@ -500,7 +500,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg, | |||
500 | } | 500 | } |
501 | 501 | ||
502 | atomic_inc(&write_wc->wc_num_reqs); | 502 | atomic_inc(&write_wc->wc_num_reqs); |
503 | submit_bio(WRITE, bio); | 503 | submit_bio(WRITE_SYNC, bio); |
504 | 504 | ||
505 | status = 0; | 505 | status = 0; |
506 | bail: | 506 | bail: |
@@ -2271,7 +2271,7 @@ ssize_t o2hb_heartbeat_group_mode_store(struct o2hb_heartbeat_group *group, | |||
2271 | if (strnicmp(page, o2hb_heartbeat_mode_desc[i], len)) | 2271 | if (strnicmp(page, o2hb_heartbeat_mode_desc[i], len)) |
2272 | continue; | 2272 | continue; |
2273 | 2273 | ||
2274 | ret = o2hb_global_hearbeat_mode_set(i); | 2274 | ret = o2hb_global_heartbeat_mode_set(i); |
2275 | if (!ret) | 2275 | if (!ret) |
2276 | printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n", | 2276 | printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n", |
2277 | o2hb_heartbeat_mode_desc[i]); | 2277 | o2hb_heartbeat_mode_desc[i]); |
@@ -2304,7 +2304,7 @@ static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { | |||
2304 | NULL, | 2304 | NULL, |
2305 | }; | 2305 | }; |
2306 | 2306 | ||
2307 | static struct configfs_item_operations o2hb_hearbeat_group_item_ops = { | 2307 | static struct configfs_item_operations o2hb_heartbeat_group_item_ops = { |
2308 | .show_attribute = o2hb_heartbeat_group_show, | 2308 | .show_attribute = o2hb_heartbeat_group_show, |
2309 | .store_attribute = o2hb_heartbeat_group_store, | 2309 | .store_attribute = o2hb_heartbeat_group_store, |
2310 | }; | 2310 | }; |
@@ -2316,7 +2316,7 @@ static struct configfs_group_operations o2hb_heartbeat_group_group_ops = { | |||
2316 | 2316 | ||
2317 | static struct config_item_type o2hb_heartbeat_group_type = { | 2317 | static struct config_item_type o2hb_heartbeat_group_type = { |
2318 | .ct_group_ops = &o2hb_heartbeat_group_group_ops, | 2318 | .ct_group_ops = &o2hb_heartbeat_group_group_ops, |
2319 | .ct_item_ops = &o2hb_hearbeat_group_item_ops, | 2319 | .ct_item_ops = &o2hb_heartbeat_group_item_ops, |
2320 | .ct_attrs = o2hb_heartbeat_group_attrs, | 2320 | .ct_attrs = o2hb_heartbeat_group_attrs, |
2321 | .ct_owner = THIS_MODULE, | 2321 | .ct_owner = THIS_MODULE, |
2322 | }; | 2322 | }; |
@@ -2389,6 +2389,9 @@ static int o2hb_region_pin(const char *region_uuid) | |||
2389 | assert_spin_locked(&o2hb_live_lock); | 2389 | assert_spin_locked(&o2hb_live_lock); |
2390 | 2390 | ||
2391 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { | 2391 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2392 | if (reg->hr_item_dropped) | ||
2393 | continue; | ||
2394 | |||
2392 | uuid = config_item_name(®->hr_item); | 2395 | uuid = config_item_name(®->hr_item); |
2393 | 2396 | ||
2394 | /* local heartbeat */ | 2397 | /* local heartbeat */ |
@@ -2439,6 +2442,9 @@ static void o2hb_region_unpin(const char *region_uuid) | |||
2439 | assert_spin_locked(&o2hb_live_lock); | 2442 | assert_spin_locked(&o2hb_live_lock); |
2440 | 2443 | ||
2441 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { | 2444 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2445 | if (reg->hr_item_dropped) | ||
2446 | continue; | ||
2447 | |||
2442 | uuid = config_item_name(®->hr_item); | 2448 | uuid = config_item_name(®->hr_item); |
2443 | if (region_uuid) { | 2449 | if (region_uuid) { |
2444 | if (strcmp(region_uuid, uuid)) | 2450 | if (strcmp(region_uuid, uuid)) |
@@ -2654,6 +2660,9 @@ int o2hb_get_all_regions(char *region_uuids, u8 max_regions) | |||
2654 | 2660 | ||
2655 | p = region_uuids; | 2661 | p = region_uuids; |
2656 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { | 2662 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2663 | if (reg->hr_item_dropped) | ||
2664 | continue; | ||
2665 | |||
2657 | mlog(0, "Region: %s\n", config_item_name(®->hr_item)); | 2666 | mlog(0, "Region: %s\n", config_item_name(®->hr_item)); |
2658 | if (numregs < max_regions) { | 2667 | if (numregs < max_regions) { |
2659 | memcpy(p, config_item_name(®->hr_item), | 2668 | memcpy(p, config_item_name(®->hr_item), |
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index c19897d0fe14..1ec141e758d7 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c | |||
@@ -264,7 +264,7 @@ void o2quo_hb_still_up(u8 node) | |||
264 | /* This is analogous to hb_up. as a node's connection comes up we delay the | 264 | /* This is analogous to hb_up. as a node's connection comes up we delay the |
265 | * quorum decision until we see it heartbeating. the hold will be droped in | 265 | * quorum decision until we see it heartbeating. the hold will be droped in |
266 | * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if | 266 | * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if |
267 | * it's already heartbeating we we might be dropping a hold that conn_up got. | 267 | * it's already heartbeating we might be dropping a hold that conn_up got. |
268 | * */ | 268 | * */ |
269 | void o2quo_conn_up(u8 node) | 269 | void o2quo_conn_up(u8 node) |
270 | { | 270 | { |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index aa88bd8bcedc..d644dc611425 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -406,6 +406,9 @@ static void sc_kref_release(struct kref *kref) | |||
406 | sc->sc_node = NULL; | 406 | sc->sc_node = NULL; |
407 | 407 | ||
408 | o2net_debug_del_sc(sc); | 408 | o2net_debug_del_sc(sc); |
409 | |||
410 | if (sc->sc_page) | ||
411 | __free_page(sc->sc_page); | ||
409 | kfree(sc); | 412 | kfree(sc); |
410 | } | 413 | } |
411 | 414 | ||
@@ -630,19 +633,19 @@ static void o2net_state_change(struct sock *sk) | |||
630 | state_change = sc->sc_state_change; | 633 | state_change = sc->sc_state_change; |
631 | 634 | ||
632 | switch(sk->sk_state) { | 635 | switch(sk->sk_state) { |
633 | /* ignore connecting sockets as they make progress */ | 636 | /* ignore connecting sockets as they make progress */ |
634 | case TCP_SYN_SENT: | 637 | case TCP_SYN_SENT: |
635 | case TCP_SYN_RECV: | 638 | case TCP_SYN_RECV: |
636 | break; | 639 | break; |
637 | case TCP_ESTABLISHED: | 640 | case TCP_ESTABLISHED: |
638 | o2net_sc_queue_work(sc, &sc->sc_connect_work); | 641 | o2net_sc_queue_work(sc, &sc->sc_connect_work); |
639 | break; | 642 | break; |
640 | default: | 643 | default: |
641 | printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT | 644 | printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT |
642 | " shutdown, state %d\n", | 645 | " shutdown, state %d\n", |
643 | SC_NODEF_ARGS(sc), sk->sk_state); | 646 | SC_NODEF_ARGS(sc), sk->sk_state); |
644 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); | 647 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); |
645 | break; | 648 | break; |
646 | } | 649 | } |
647 | out: | 650 | out: |
648 | read_unlock(&sk->sk_callback_lock); | 651 | read_unlock(&sk->sk_callback_lock); |
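The sc_kref_release() hunk plugs a page leak: the per-connection receive page was allocated when the socket container was set up but never freed, and the kref release callback, which runs once the last reference is dropped, is the natural place to free it. A kernel-style sketch of the kref idiom, assuming the standard <linux/kref.h> API; the struct here is a simplified stand-in for o2net_sock_container:

    #include <linux/kref.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct conn {
            struct kref kref;
            struct page *page;              /* per-connection receive buffer */
    };

    static void conn_release(struct kref *kref)
    {
            struct conn *c = container_of(kref, struct conn, kref);

            if (c->page)
                    __free_page(c->page);   /* the fix: free it with its owner */
            kfree(c);
    }

    static void conn_put(struct conn *c)
    {
            kref_put(&c->kref, conn_release);   /* release runs when the count hits zero */
    }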
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 975810b98492..47e67c2d228f 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -178,6 +178,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | |||
178 | lock->ml.node); | 178 | lock->ml.node); |
179 | } | 179 | } |
180 | } else { | 180 | } else { |
181 | status = DLM_NORMAL; | ||
181 | dlm_lock_get(lock); | 182 | dlm_lock_get(lock); |
182 | list_add_tail(&lock->list, &res->blocked); | 183 | list_add_tail(&lock->list, &res->blocked); |
183 | kick_thread = 1; | 184 | kick_thread = 1; |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index e68588e6b1e8..773bd32bfd8c 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -55,9 +55,6 @@ | |||
55 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); | 55 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); |
56 | 56 | ||
57 | static int dlm_recovery_thread(void *data); | 57 | static int dlm_recovery_thread(void *data); |
58 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | ||
59 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | ||
60 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | ||
61 | static int dlm_do_recovery(struct dlm_ctxt *dlm); | 58 | static int dlm_do_recovery(struct dlm_ctxt *dlm); |
62 | 59 | ||
63 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); | 60 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); |
@@ -789,7 +786,7 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
789 | u8 dead_node) | 786 | u8 dead_node) |
790 | { | 787 | { |
791 | struct dlm_lock_request lr; | 788 | struct dlm_lock_request lr; |
792 | enum dlm_status ret; | 789 | int ret; |
793 | 790 | ||
794 | mlog(0, "\n"); | 791 | mlog(0, "\n"); |
795 | 792 | ||
@@ -802,7 +799,6 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
802 | lr.dead_node = dead_node; | 799 | lr.dead_node = dead_node; |
803 | 800 | ||
804 | // send message | 801 | // send message |
805 | ret = DLM_NOLOCKMGR; | ||
806 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, | 802 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, |
807 | &lr, sizeof(lr), request_from, NULL); | 803 | &lr, sizeof(lr), request_from, NULL); |
808 | 804 | ||
@@ -2696,6 +2692,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, | |||
2696 | dlm->name, br->node_idx, br->dead_node, | 2692 | dlm->name, br->node_idx, br->dead_node, |
2697 | dlm->reco.dead_node, dlm->reco.new_master); | 2693 | dlm->reco.dead_node, dlm->reco.new_master); |
2698 | spin_unlock(&dlm->spinlock); | 2694 | spin_unlock(&dlm->spinlock); |
2695 | dlm_put(dlm); | ||
2699 | return -EAGAIN; | 2696 | return -EAGAIN; |
2700 | } | 2697 | } |
2701 | spin_unlock(&dlm->spinlock); | 2698 | spin_unlock(&dlm->spinlock); |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index a3385b63ff5e..96f9ac237e86 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -200,7 +200,6 @@ void ocfs2_complete_quota_recovery(struct ocfs2_super *osb); | |||
200 | 200 | ||
201 | static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb) | 201 | static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb) |
202 | { | 202 | { |
203 | atomic_set(&osb->needs_checkpoint, 1); | ||
204 | wake_up(&osb->checkpoint_event); | 203 | wake_up(&osb->checkpoint_event); |
205 | } | 204 | } |
206 | 205 | ||
@@ -538,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, | |||
538 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); | 537 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); |
539 | 538 | ||
540 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + | 539 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + |
541 | ocfs2_quota_trans_credits(sb); | 540 | ocfs2_quota_trans_credits(sb) + bits_wanted; |
542 | } | 541 | } |
543 | 542 | ||
544 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) | 543 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index b4a5cdf9dbc5..be3f8676a438 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -522,7 +522,7 @@ static int __ocfs2_mknod_locked(struct inode *dir, | |||
522 | 522 | ||
523 | fe->i_last_eb_blk = 0; | 523 | fe->i_last_eb_blk = 0; |
524 | strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE); | 524 | strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE); |
525 | le32_add_cpu(&fe->i_flags, OCFS2_VALID_FL); | 525 | fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL); |
526 | fe->i_atime = fe->i_ctime = fe->i_mtime = | 526 | fe->i_atime = fe->i_ctime = fe->i_mtime = |
527 | cpu_to_le64(CURRENT_TIME.tv_sec); | 527 | cpu_to_le64(CURRENT_TIME.tv_sec); |
528 | fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec = | 528 | fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec = |
@@ -773,7 +773,7 @@ static int ocfs2_remote_dentry_delete(struct dentry *dentry) | |||
773 | return ret; | 773 | return ret; |
774 | } | 774 | } |
775 | 775 | ||
776 | static inline int inode_is_unlinkable(struct inode *inode) | 776 | static inline int ocfs2_inode_is_unlinkable(struct inode *inode) |
777 | { | 777 | { |
778 | if (S_ISDIR(inode->i_mode)) { | 778 | if (S_ISDIR(inode->i_mode)) { |
779 | if (inode->i_nlink == 2) | 779 | if (inode->i_nlink == 2) |
@@ -791,6 +791,7 @@ static int ocfs2_unlink(struct inode *dir, | |||
791 | { | 791 | { |
792 | int status; | 792 | int status; |
793 | int child_locked = 0; | 793 | int child_locked = 0; |
794 | bool is_unlinkable = false; | ||
794 | struct inode *inode = dentry->d_inode; | 795 | struct inode *inode = dentry->d_inode; |
795 | struct inode *orphan_dir = NULL; | 796 | struct inode *orphan_dir = NULL; |
796 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | 797 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
@@ -865,7 +866,7 @@ static int ocfs2_unlink(struct inode *dir, | |||
865 | goto leave; | 866 | goto leave; |
866 | } | 867 | } |
867 | 868 | ||
868 | if (inode_is_unlinkable(inode)) { | 869 | if (ocfs2_inode_is_unlinkable(inode)) { |
869 | status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, | 870 | status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, |
870 | OCFS2_I(inode)->ip_blkno, | 871 | OCFS2_I(inode)->ip_blkno, |
871 | orphan_name, &orphan_insert); | 872 | orphan_name, &orphan_insert); |
@@ -873,6 +874,7 @@ static int ocfs2_unlink(struct inode *dir, | |||
873 | mlog_errno(status); | 874 | mlog_errno(status); |
874 | goto leave; | 875 | goto leave; |
875 | } | 876 | } |
877 | is_unlinkable = true; | ||
876 | } | 878 | } |
877 | 879 | ||
878 | handle = ocfs2_start_trans(osb, ocfs2_unlink_credits(osb->sb)); | 880 | handle = ocfs2_start_trans(osb, ocfs2_unlink_credits(osb->sb)); |
@@ -892,15 +894,6 @@ static int ocfs2_unlink(struct inode *dir, | |||
892 | 894 | ||
893 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | 895 | fe = (struct ocfs2_dinode *) fe_bh->b_data; |
894 | 896 | ||
895 | if (inode_is_unlinkable(inode)) { | ||
896 | status = ocfs2_orphan_add(osb, handle, inode, fe_bh, orphan_name, | ||
897 | &orphan_insert, orphan_dir); | ||
898 | if (status < 0) { | ||
899 | mlog_errno(status); | ||
900 | goto leave; | ||
901 | } | ||
902 | } | ||
903 | |||
904 | /* delete the name from the parent dir */ | 897 | /* delete the name from the parent dir */ |
905 | status = ocfs2_delete_entry(handle, dir, &lookup); | 898 | status = ocfs2_delete_entry(handle, dir, &lookup); |
906 | if (status < 0) { | 899 | if (status < 0) { |
@@ -923,6 +916,14 @@ static int ocfs2_unlink(struct inode *dir, | |||
923 | mlog_errno(status); | 916 | mlog_errno(status); |
924 | if (S_ISDIR(inode->i_mode)) | 917 | if (S_ISDIR(inode->i_mode)) |
925 | inc_nlink(dir); | 918 | inc_nlink(dir); |
919 | goto leave; | ||
920 | } | ||
921 | |||
922 | if (is_unlinkable) { | ||
923 | status = ocfs2_orphan_add(osb, handle, inode, fe_bh, | ||
924 | orphan_name, &orphan_insert, orphan_dir); | ||
925 | if (status < 0) | ||
926 | mlog_errno(status); | ||
926 | } | 927 | } |
927 | 928 | ||
928 | leave: | 929 | leave: |
@@ -2012,6 +2013,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
2012 | goto leave; | 2013 | goto leave; |
2013 | } | 2014 | } |
2014 | 2015 | ||
2016 | /* | ||
2017 | * We're going to journal the change of i_flags and i_orphaned_slot. | ||
2018 | * It's safe anyway, though some callers may duplicate the journaling. | ||
2019 | * Journaling within the func just makes the logic look more | ||
2020 | * straightforward. | ||
2021 | */ | ||
2022 | status = ocfs2_journal_access_di(handle, | ||
2023 | INODE_CACHE(inode), | ||
2024 | fe_bh, | ||
2025 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
2026 | if (status < 0) { | ||
2027 | mlog_errno(status); | ||
2028 | goto leave; | ||
2029 | } | ||
2030 | |||
2015 | /* we're a cluster, and nlink can change on disk from | 2031 | /* we're a cluster, and nlink can change on disk from |
2016 | * underneath us... */ | 2032 | * underneath us... */ |
2017 | orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data; | 2033 | orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data; |
@@ -2026,25 +2042,10 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
2026 | orphan_dir_bh, lookup); | 2042 | orphan_dir_bh, lookup); |
2027 | if (status < 0) { | 2043 | if (status < 0) { |
2028 | mlog_errno(status); | 2044 | mlog_errno(status); |
2029 | goto leave; | 2045 | goto rollback; |
2030 | } | ||
2031 | |||
2032 | /* | ||
2033 | * We're going to journal the change of i_flags and i_orphaned_slot. | ||
2034 | * It's safe anyway, though some callers may duplicate the journaling. | ||
2035 | * Journaling within the func just make the logic look more | ||
2036 | * straightforward. | ||
2037 | */ | ||
2038 | status = ocfs2_journal_access_di(handle, | ||
2039 | INODE_CACHE(inode), | ||
2040 | fe_bh, | ||
2041 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
2042 | if (status < 0) { | ||
2043 | mlog_errno(status); | ||
2044 | goto leave; | ||
2045 | } | 2046 | } |
2046 | 2047 | ||
2047 | le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL); | 2048 | fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL); |
2048 | OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR; | 2049 | OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR; |
2049 | 2050 | ||
2050 | /* Record which orphan dir our inode now resides | 2051 | /* Record which orphan dir our inode now resides |
@@ -2057,11 +2058,16 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
2057 | trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno, | 2058 | trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno, |
2058 | osb->slot_num); | 2059 | osb->slot_num); |
2059 | 2060 | ||
2061 | rollback: | ||
2062 | if (status < 0) { | ||
2063 | if (S_ISDIR(inode->i_mode)) | ||
2064 | ocfs2_add_links_count(orphan_fe, -1); | ||
2065 | set_nlink(orphan_dir_inode, ocfs2_read_links_count(orphan_fe)); | ||
2066 | } | ||
2067 | |||
2060 | leave: | 2068 | leave: |
2061 | brelse(orphan_dir_bh); | 2069 | brelse(orphan_dir_bh); |
2062 | 2070 | ||
2063 | if (status) | ||
2064 | mlog_errno(status); | ||
2065 | return status; | 2071 | return status; |
2066 | } | 2072 | } |
2067 | 2073 | ||
@@ -2434,7 +2440,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
2434 | } | 2440 | } |
2435 | 2441 | ||
2436 | di = (struct ocfs2_dinode *)di_bh->b_data; | 2442 | di = (struct ocfs2_dinode *)di_bh->b_data; |
2437 | le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); | 2443 | di->i_flags &= ~cpu_to_le32(OCFS2_ORPHANED_FL); |
2438 | di->i_orphaned_slot = 0; | 2444 | di->i_orphaned_slot = 0; |
2439 | set_nlink(inode, 1); | 2445 | set_nlink(inode, 1); |
2440 | ocfs2_set_links_count(di, inode->i_nlink); | 2446 | ocfs2_set_links_count(di, inode->i_nlink); |
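Both namei.c hunks above move on-disk flag updates from le32_add_cpu() arithmetic to bitwise OR (and AND-NOT when clearing), which stays correct even if the flag is already set or already clear. A small userspace sketch of the difference, using plain uint32_t and illustrative flag values rather than the real __le32 helpers and OCFS2_*_FL constants:

/* Why bitwise set/clear is preferred over adding/subtracting a flag
 * value: addition is not idempotent, so "adding" a flag that is already
 * set corrupts neighbouring bits. */
#include <stdio.h>
#include <stdint.h>

#define VALID_FL    0x00000001u		/* illustrative values only */
#define ORPHANED_FL 0x00000002u

int main(void)
{
	uint32_t flags = VALID_FL | ORPHANED_FL;

	/* arithmetic "set": wrong once the flag is already present */
	uint32_t by_add = flags + ORPHANED_FL;	/* 0x3 + 0x2 = 0x5: the flag bit is lost, a stray bit appears */

	/* bitwise set/clear: safe to repeat any number of times */
	uint32_t by_or   = flags | ORPHANED_FL;		/* still 0x3 */
	uint32_t cleared = flags & ~ORPHANED_FL;	/* 0x1 */

	printf("add: %#x  or: %#x  clear: %#x\n", by_add, by_or, cleared);
	return 0;
}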
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d355e6e36b36..3a903470c794 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -347,7 +347,6 @@ struct ocfs2_super | |||
347 | struct task_struct *recovery_thread_task; | 347 | struct task_struct *recovery_thread_task; |
348 | int disable_recovery; | 348 | int disable_recovery; |
349 | wait_queue_head_t checkpoint_event; | 349 | wait_queue_head_t checkpoint_event; |
350 | atomic_t needs_checkpoint; | ||
351 | struct ocfs2_journal *journal; | 350 | struct ocfs2_journal *journal; |
352 | unsigned long osb_commit_interval; | 351 | unsigned long osb_commit_interval; |
353 | 352 | ||
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index b7e74b580c0f..5397c07ce608 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -1422,7 +1422,7 @@ static int ocfs2_relink_block_group(handle_t *handle, | |||
1422 | int status; | 1422 | int status; |
1423 | /* there is a really tiny chance the journal calls could fail, | 1423 | /* there is a really tiny chance the journal calls could fail, |
1424 | * but we wouldn't want inconsistent blocks in *any* case. */ | 1424 | * but we wouldn't want inconsistent blocks in *any* case. */ |
1425 | u64 fe_ptr, bg_ptr, prev_bg_ptr; | 1425 | u64 bg_ptr, prev_bg_ptr; |
1426 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; | 1426 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; |
1427 | struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; | 1427 | struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; |
1428 | struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data; | 1428 | struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data; |
@@ -1437,51 +1437,44 @@ static int ocfs2_relink_block_group(handle_t *handle, | |||
1437 | (unsigned long long)le64_to_cpu(bg->bg_blkno), | 1437 | (unsigned long long)le64_to_cpu(bg->bg_blkno), |
1438 | (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); | 1438 | (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); |
1439 | 1439 | ||
1440 | fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); | ||
1441 | bg_ptr = le64_to_cpu(bg->bg_next_group); | 1440 | bg_ptr = le64_to_cpu(bg->bg_next_group); |
1442 | prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group); | 1441 | prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group); |
1443 | 1442 | ||
1444 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), | 1443 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), |
1445 | prev_bg_bh, | 1444 | prev_bg_bh, |
1446 | OCFS2_JOURNAL_ACCESS_WRITE); | 1445 | OCFS2_JOURNAL_ACCESS_WRITE); |
1447 | if (status < 0) { | 1446 | if (status < 0) |
1448 | mlog_errno(status); | 1447 | goto out; |
1449 | goto out_rollback; | ||
1450 | } | ||
1451 | 1448 | ||
1452 | prev_bg->bg_next_group = bg->bg_next_group; | 1449 | prev_bg->bg_next_group = bg->bg_next_group; |
1453 | ocfs2_journal_dirty(handle, prev_bg_bh); | 1450 | ocfs2_journal_dirty(handle, prev_bg_bh); |
1454 | 1451 | ||
1455 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), | 1452 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), |
1456 | bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); | 1453 | bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); |
1457 | if (status < 0) { | 1454 | if (status < 0) |
1458 | mlog_errno(status); | 1455 | goto out_rollback_prev_bg; |
1459 | goto out_rollback; | ||
1460 | } | ||
1461 | 1456 | ||
1462 | bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno; | 1457 | bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno; |
1463 | ocfs2_journal_dirty(handle, bg_bh); | 1458 | ocfs2_journal_dirty(handle, bg_bh); |
1464 | 1459 | ||
1465 | status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode), | 1460 | status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode), |
1466 | fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); | 1461 | fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); |
1467 | if (status < 0) { | 1462 | if (status < 0) |
1468 | mlog_errno(status); | 1463 | goto out_rollback_bg; |
1469 | goto out_rollback; | ||
1470 | } | ||
1471 | 1464 | ||
1472 | fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno; | 1465 | fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno; |
1473 | ocfs2_journal_dirty(handle, fe_bh); | 1466 | ocfs2_journal_dirty(handle, fe_bh); |
1474 | 1467 | ||
1475 | out_rollback: | 1468 | out: |
1476 | if (status < 0) { | 1469 | if (status < 0) |
1477 | fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr); | ||
1478 | bg->bg_next_group = cpu_to_le64(bg_ptr); | ||
1479 | prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); | ||
1480 | } | ||
1481 | |||
1482 | if (status) | ||
1483 | mlog_errno(status); | 1470 | mlog_errno(status); |
1484 | return status; | 1471 | return status; |
1472 | |||
1473 | out_rollback_bg: | ||
1474 | bg->bg_next_group = cpu_to_le64(bg_ptr); | ||
1475 | out_rollback_prev_bg: | ||
1476 | prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); | ||
1477 | goto out; | ||
1485 | } | 1478 | } |
1486 | 1479 | ||
1487 | static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg, | 1480 | static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg, |
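The relink hunk above replaces a single rollback-everything label with staged labels, so each failure undoes only the steps that have already been applied. A compact sketch of that unwind style with placeholder step/undo functions (not ocfs2 calls):

/* Staged goto-unwind: a later failure falls through the rollback labels
 * for every earlier step, while an early failure skips them. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int do_transaction(void)
{
	int status;

	status = step_a();
	if (status < 0)
		goto out;		/* nothing to undo yet */

	status = step_b();
	if (status < 0)
		goto out_rollback_a;

	status = step_c();
	if (status < 0)
		goto out_rollback_b;

	return 0;

out_rollback_b:
	undo_b();
out_rollback_a:
	undo_a();
out:
	return status;
}

int main(void)
{
	printf("status: %d\n", do_transaction());
	return 0;
}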
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 01b85165552b..854d80955bf8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -286,10 +286,9 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) | |||
286 | spin_unlock(&osb->osb_lock); | 286 | spin_unlock(&osb->osb_lock); |
287 | 287 | ||
288 | out += snprintf(buf + out, len - out, | 288 | out += snprintf(buf + out, len - out, |
289 | "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit", | 289 | "%10s => Pid: %d Interval: %lu\n", "Commit", |
290 | (osb->commit_task ? task_pid_nr(osb->commit_task) : -1), | 290 | (osb->commit_task ? task_pid_nr(osb->commit_task) : -1), |
291 | osb->osb_commit_interval, | 291 | osb->osb_commit_interval); |
292 | atomic_read(&osb->needs_checkpoint)); | ||
293 | 292 | ||
294 | out += snprintf(buf + out, len - out, | 293 | out += snprintf(buf + out, len - out, |
295 | "%10s => State: %d TxnId: %lu NumTxns: %d\n", | 294 | "%10s => State: %d TxnId: %lu NumTxns: %d\n", |
@@ -2154,7 +2153,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2154 | } | 2153 | } |
2155 | 2154 | ||
2156 | init_waitqueue_head(&osb->checkpoint_event); | 2155 | init_waitqueue_head(&osb->checkpoint_event); |
2157 | atomic_set(&osb->needs_checkpoint, 0); | ||
2158 | 2156 | ||
2159 | osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; | 2157 | osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; |
2160 | 2158 | ||
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2e3ea308c144..317ef0abccbb 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -2751,7 +2751,6 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, | |||
2751 | { | 2751 | { |
2752 | int ret; | 2752 | int ret; |
2753 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 2753 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
2754 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; | ||
2755 | struct ocfs2_xa_loc loc; | 2754 | struct ocfs2_xa_loc loc; |
2756 | 2755 | ||
2757 | if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) | 2756 | if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) |
@@ -2759,13 +2758,6 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, | |||
2759 | 2758 | ||
2760 | down_write(&oi->ip_alloc_sem); | 2759 | down_write(&oi->ip_alloc_sem); |
2761 | if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { | 2760 | if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { |
2762 | if (!ocfs2_xattr_has_space_inline(inode, di)) { | ||
2763 | ret = -ENOSPC; | ||
2764 | goto out; | ||
2765 | } | ||
2766 | } | ||
2767 | |||
2768 | if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { | ||
2769 | ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); | 2761 | ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); |
2770 | if (ret) { | 2762 | if (ret) { |
2771 | if (ret != -ENOSPC) | 2763 | if (ret != -ENOSPC) |
@@ -6499,6 +6491,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) | |||
6499 | } | 6491 | } |
6500 | 6492 | ||
6501 | new_oi = OCFS2_I(args->new_inode); | 6493 | new_oi = OCFS2_I(args->new_inode); |
6494 | /* | ||
6495 | * Adjust extent record count to reserve space for extended attribute. | ||
6496 | * Inline data count had been adjusted in ocfs2_duplicate_inline_data(). | ||
6497 | */ | ||
6498 | if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) && | ||
6499 | !(ocfs2_inode_is_fast_symlink(args->new_inode))) { | ||
6500 | struct ocfs2_extent_list *el = &new_di->id2.i_list; | ||
6501 | le16_add_cpu(&el->l_count, -(inline_size / | ||
6502 | sizeof(struct ocfs2_extent_rec))); | ||
6503 | } | ||
6502 | spin_lock(&new_oi->ip_lock); | 6504 | spin_lock(&new_oi->ip_lock); |
6503 | new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; | 6505 | new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; |
6504 | new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); | 6506 | new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 0a22194e5d58..06ea155e1a59 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -408,7 +408,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) | |||
408 | prpsinfo.pr_zomb = 0; | 408 | prpsinfo.pr_zomb = 0; |
409 | 409 | ||
410 | strcpy(prpsinfo.pr_fname, "vmlinux"); | 410 | strcpy(prpsinfo.pr_fname, "vmlinux"); |
411 | strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ); | 411 | strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs)); |
412 | 412 | ||
413 | nhdr->p_filesz += notesize(¬es[1]); | 413 | nhdr->p_filesz += notesize(¬es[1]); |
414 | bufp = storenote(¬es[1], bufp); | 414 | bufp = storenote(¬es[1], bufp); |
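The kcore.c change swaps strncpy() for strlcpy() because strncpy() leaves the destination unterminated whenever the source fills the buffer. A standalone illustration follows; since strlcpy() is not available in every libc, a small equivalent bounded_copy() is sketched in its place.

/* strncpy() does not write a terminator when the source is at least as
 * long as the buffer; a strlcpy-style copy always terminates. */
#include <stdio.h>
#include <string.h>

/* minimal strlcpy-style copy: always terminates, returns strlen(src) */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];
	const char *cmdline = "root=/dev/sda1 ro quiet splash";

	strncpy(a, cmdline, sizeof(a));		/* a is NOT NUL-terminated here */
	bounded_copy(b, cmdline, sizeof(b));	/* b always is */

	printf("terminated copy: \"%s\"\n", b);
	printf("last byte after strncpy: %c (no terminator was written)\n",
	       a[sizeof(a) - 1]);
	return 0;
}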
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3e636d864d56..dbf61f6174f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/rmap.h> | 11 | #include <linux/rmap.h> |
12 | #include <linux/swap.h> | 12 | #include <linux/swap.h> |
13 | #include <linux/swapops.h> | 13 | #include <linux/swapops.h> |
14 | #include <linux/mmu_notifier.h> | ||
14 | 15 | ||
15 | #include <asm/elf.h> | 16 | #include <asm/elf.h> |
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
@@ -688,10 +689,58 @@ const struct file_operations proc_tid_smaps_operations = { | |||
688 | .release = seq_release_private, | 689 | .release = seq_release_private, |
689 | }; | 690 | }; |
690 | 691 | ||
692 | /* | ||
693 | * We do not want to have constant page-shift bits sitting in | ||
694 | * pagemap entries and are about to reuse them some time soon. | ||
695 | * | ||
696 | * Here's the "migration strategy": | ||
697 | * 1. when the system boots these bits remain what they are, | ||
698 | * but a warning about future change is printed in log; | ||
699 | * 2. once anyone clears soft-dirty bits via clear_refs file, | ||
700 | * these flag is set to denote, that user is aware of the | ||
701 | * new API and those page-shift bits change their meaning. | ||
702 | * The respective warning is printed in dmesg; | ||
703 | * 3. In a couple of releases we will remove all the mentions | ||
704 | * of page-shift in pagemap entries. | ||
705 | */ | ||
706 | |||
707 | static bool soft_dirty_cleared __read_mostly; | ||
708 | |||
709 | enum clear_refs_types { | ||
710 | CLEAR_REFS_ALL = 1, | ||
711 | CLEAR_REFS_ANON, | ||
712 | CLEAR_REFS_MAPPED, | ||
713 | CLEAR_REFS_SOFT_DIRTY, | ||
714 | CLEAR_REFS_LAST, | ||
715 | }; | ||
716 | |||
717 | struct clear_refs_private { | ||
718 | struct vm_area_struct *vma; | ||
719 | enum clear_refs_types type; | ||
720 | }; | ||
721 | |||
722 | static inline void clear_soft_dirty(struct vm_area_struct *vma, | ||
723 | unsigned long addr, pte_t *pte) | ||
724 | { | ||
725 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
726 | /* | ||
727 | * The soft-dirty tracker uses #PF-s to catch writes | ||
728 | * to pages, so write-protect the pte as well. See the | ||
729 | * Documentation/vm/soft-dirty.txt for full description | ||
730 | * of how soft-dirty works. | ||
731 | */ | ||
732 | pte_t ptent = *pte; | ||
733 | ptent = pte_wrprotect(ptent); | ||
734 | ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); | ||
735 | set_pte_at(vma->vm_mm, addr, pte, ptent); | ||
736 | #endif | ||
737 | } | ||
738 | |||
691 | static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | 739 | static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, |
692 | unsigned long end, struct mm_walk *walk) | 740 | unsigned long end, struct mm_walk *walk) |
693 | { | 741 | { |
694 | struct vm_area_struct *vma = walk->private; | 742 | struct clear_refs_private *cp = walk->private; |
743 | struct vm_area_struct *vma = cp->vma; | ||
695 | pte_t *pte, ptent; | 744 | pte_t *pte, ptent; |
696 | spinlock_t *ptl; | 745 | spinlock_t *ptl; |
697 | struct page *page; | 746 | struct page *page; |
@@ -706,6 +755,11 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
706 | if (!pte_present(ptent)) | 755 | if (!pte_present(ptent)) |
707 | continue; | 756 | continue; |
708 | 757 | ||
758 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { | ||
759 | clear_soft_dirty(vma, addr, pte); | ||
760 | continue; | ||
761 | } | ||
762 | |||
709 | page = vm_normal_page(vma, addr, ptent); | 763 | page = vm_normal_page(vma, addr, ptent); |
710 | if (!page) | 764 | if (!page) |
711 | continue; | 765 | continue; |
@@ -719,10 +773,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
719 | return 0; | 773 | return 0; |
720 | } | 774 | } |
721 | 775 | ||
722 | #define CLEAR_REFS_ALL 1 | ||
723 | #define CLEAR_REFS_ANON 2 | ||
724 | #define CLEAR_REFS_MAPPED 3 | ||
725 | |||
726 | static ssize_t clear_refs_write(struct file *file, const char __user *buf, | 776 | static ssize_t clear_refs_write(struct file *file, const char __user *buf, |
727 | size_t count, loff_t *ppos) | 777 | size_t count, loff_t *ppos) |
728 | { | 778 | { |
@@ -730,7 +780,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
730 | char buffer[PROC_NUMBUF]; | 780 | char buffer[PROC_NUMBUF]; |
731 | struct mm_struct *mm; | 781 | struct mm_struct *mm; |
732 | struct vm_area_struct *vma; | 782 | struct vm_area_struct *vma; |
733 | int type; | 783 | enum clear_refs_types type; |
784 | int itype; | ||
734 | int rv; | 785 | int rv; |
735 | 786 | ||
736 | memset(buffer, 0, sizeof(buffer)); | 787 | memset(buffer, 0, sizeof(buffer)); |
@@ -738,23 +789,37 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
738 | count = sizeof(buffer) - 1; | 789 | count = sizeof(buffer) - 1; |
739 | if (copy_from_user(buffer, buf, count)) | 790 | if (copy_from_user(buffer, buf, count)) |
740 | return -EFAULT; | 791 | return -EFAULT; |
741 | rv = kstrtoint(strstrip(buffer), 10, &type); | 792 | rv = kstrtoint(strstrip(buffer), 10, &itype); |
742 | if (rv < 0) | 793 | if (rv < 0) |
743 | return rv; | 794 | return rv; |
744 | if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) | 795 | type = (enum clear_refs_types)itype; |
796 | if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) | ||
745 | return -EINVAL; | 797 | return -EINVAL; |
798 | |||
799 | if (type == CLEAR_REFS_SOFT_DIRTY) { | ||
800 | soft_dirty_cleared = true; | ||
801 | pr_warn_once("The pagemap bits 55-60 has changed their meaning! " | ||
802 | "See the linux/Documentation/vm/pagemap.txt for details.\n"); | ||
803 | } | ||
804 | |||
746 | task = get_proc_task(file_inode(file)); | 805 | task = get_proc_task(file_inode(file)); |
747 | if (!task) | 806 | if (!task) |
748 | return -ESRCH; | 807 | return -ESRCH; |
749 | mm = get_task_mm(task); | 808 | mm = get_task_mm(task); |
750 | if (mm) { | 809 | if (mm) { |
810 | struct clear_refs_private cp = { | ||
811 | .type = type, | ||
812 | }; | ||
751 | struct mm_walk clear_refs_walk = { | 813 | struct mm_walk clear_refs_walk = { |
752 | .pmd_entry = clear_refs_pte_range, | 814 | .pmd_entry = clear_refs_pte_range, |
753 | .mm = mm, | 815 | .mm = mm, |
816 | .private = &cp, | ||
754 | }; | 817 | }; |
755 | down_read(&mm->mmap_sem); | 818 | down_read(&mm->mmap_sem); |
819 | if (type == CLEAR_REFS_SOFT_DIRTY) | ||
820 | mmu_notifier_invalidate_range_start(mm, 0, -1); | ||
756 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 821 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
757 | clear_refs_walk.private = vma; | 822 | cp.vma = vma; |
758 | if (is_vm_hugetlb_page(vma)) | 823 | if (is_vm_hugetlb_page(vma)) |
759 | continue; | 824 | continue; |
760 | /* | 825 | /* |
@@ -773,6 +838,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
773 | walk_page_range(vma->vm_start, vma->vm_end, | 838 | walk_page_range(vma->vm_start, vma->vm_end, |
774 | &clear_refs_walk); | 839 | &clear_refs_walk); |
775 | } | 840 | } |
841 | if (type == CLEAR_REFS_SOFT_DIRTY) | ||
842 | mmu_notifier_invalidate_range_end(mm, 0, -1); | ||
776 | flush_tlb_mm(mm); | 843 | flush_tlb_mm(mm); |
777 | up_read(&mm->mmap_sem); | 844 | up_read(&mm->mmap_sem); |
778 | mmput(mm); | 845 | mmput(mm); |
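From userspace, the new CLEAR_REFS_SOFT_DIRTY mode is reached by writing "4" (its value in the enum above) to /proc/PID/clear_refs; the walk then write-protects the PTEs and clears their soft-dirty bits so later writes can be observed through pagemap. A minimal sketch, assuming a kernel built with CONFIG_MEM_SOFT_DIRTY:

/* Clear the soft-dirty bits of the calling process so subsequent writes
 * show up as soft-dirty in /proc/self/pagemap. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int clear_soft_dirty_self(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "4", 1) != 1) {	/* 4 == CLEAR_REFS_SOFT_DIRTY */
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	if (clear_soft_dirty_self())
		perror("clear_refs");
	else
		puts("soft-dirty bits cleared for this process");
	return 0;
}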
@@ -794,6 +861,7 @@ typedef struct { | |||
794 | struct pagemapread { | 861 | struct pagemapread { |
795 | int pos, len; | 862 | int pos, len; |
796 | pagemap_entry_t *buffer; | 863 | pagemap_entry_t *buffer; |
864 | bool v2; | ||
797 | }; | 865 | }; |
798 | 866 | ||
799 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | 867 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
@@ -807,14 +875,17 @@ struct pagemapread { | |||
807 | #define PM_PSHIFT_BITS 6 | 875 | #define PM_PSHIFT_BITS 6 |
808 | #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) | 876 | #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) |
809 | #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) | 877 | #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) |
810 | #define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) | 878 | #define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) |
811 | #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) | 879 | #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) |
812 | #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) | 880 | #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) |
881 | /* in "new" pagemap pshift bits are occupied with more status bits */ | ||
882 | #define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT)) | ||
813 | 883 | ||
884 | #define __PM_SOFT_DIRTY (1LL) | ||
814 | #define PM_PRESENT PM_STATUS(4LL) | 885 | #define PM_PRESENT PM_STATUS(4LL) |
815 | #define PM_SWAP PM_STATUS(2LL) | 886 | #define PM_SWAP PM_STATUS(2LL) |
816 | #define PM_FILE PM_STATUS(1LL) | 887 | #define PM_FILE PM_STATUS(1LL) |
817 | #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) | 888 | #define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0) |
818 | #define PM_END_OF_BUFFER 1 | 889 | #define PM_END_OF_BUFFER 1 |
819 | 890 | ||
820 | static inline pagemap_entry_t make_pme(u64 val) | 891 | static inline pagemap_entry_t make_pme(u64 val) |
@@ -837,7 +908,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
837 | struct pagemapread *pm = walk->private; | 908 | struct pagemapread *pm = walk->private; |
838 | unsigned long addr; | 909 | unsigned long addr; |
839 | int err = 0; | 910 | int err = 0; |
840 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); | 911 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
841 | 912 | ||
842 | for (addr = start; addr < end; addr += PAGE_SIZE) { | 913 | for (addr = start; addr < end; addr += PAGE_SIZE) { |
843 | err = add_to_pagemap(addr, &pme, pm); | 914 | err = add_to_pagemap(addr, &pme, pm); |
@@ -847,11 +918,12 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
847 | return err; | 918 | return err; |
848 | } | 919 | } |
849 | 920 | ||
850 | static void pte_to_pagemap_entry(pagemap_entry_t *pme, | 921 | static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
851 | struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 922 | struct vm_area_struct *vma, unsigned long addr, pte_t pte) |
852 | { | 923 | { |
853 | u64 frame, flags; | 924 | u64 frame, flags; |
854 | struct page *page = NULL; | 925 | struct page *page = NULL; |
926 | int flags2 = 0; | ||
855 | 927 | ||
856 | if (pte_present(pte)) { | 928 | if (pte_present(pte)) { |
857 | frame = pte_pfn(pte); | 929 | frame = pte_pfn(pte); |
@@ -866,19 +938,21 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, | |||
866 | if (is_migration_entry(entry)) | 938 | if (is_migration_entry(entry)) |
867 | page = migration_entry_to_page(entry); | 939 | page = migration_entry_to_page(entry); |
868 | } else { | 940 | } else { |
869 | *pme = make_pme(PM_NOT_PRESENT); | 941 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
870 | return; | 942 | return; |
871 | } | 943 | } |
872 | 944 | ||
873 | if (page && !PageAnon(page)) | 945 | if (page && !PageAnon(page)) |
874 | flags |= PM_FILE; | 946 | flags |= PM_FILE; |
947 | if (pte_soft_dirty(pte)) | ||
948 | flags2 |= __PM_SOFT_DIRTY; | ||
875 | 949 | ||
876 | *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags); | 950 | *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); |
877 | } | 951 | } |
878 | 952 | ||
879 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 953 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
880 | static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, | 954 | static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
881 | pmd_t pmd, int offset) | 955 | pmd_t pmd, int offset, int pmd_flags2) |
882 | { | 956 | { |
883 | /* | 957 | /* |
884 | * Currently pmd for thp is always present because thp can not be | 958 | * Currently pmd for thp is always present because thp can not be |
@@ -887,13 +961,13 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, | |||
887 | */ | 961 | */ |
888 | if (pmd_present(pmd)) | 962 | if (pmd_present(pmd)) |
889 | *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) | 963 | *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) |
890 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); | 964 | | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); |
891 | else | 965 | else |
892 | *pme = make_pme(PM_NOT_PRESENT); | 966 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
893 | } | 967 | } |
894 | #else | 968 | #else |
895 | static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, | 969 | static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
896 | pmd_t pmd, int offset) | 970 | pmd_t pmd, int offset, int pmd_flags2) |
897 | { | 971 | { |
898 | } | 972 | } |
899 | #endif | 973 | #endif |
@@ -905,17 +979,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
905 | struct pagemapread *pm = walk->private; | 979 | struct pagemapread *pm = walk->private; |
906 | pte_t *pte; | 980 | pte_t *pte; |
907 | int err = 0; | 981 | int err = 0; |
908 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); | 982 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
909 | 983 | ||
910 | /* find the first VMA at or above 'addr' */ | 984 | /* find the first VMA at or above 'addr' */ |
911 | vma = find_vma(walk->mm, addr); | 985 | vma = find_vma(walk->mm, addr); |
912 | if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { | 986 | if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { |
987 | int pmd_flags2; | ||
988 | |||
989 | pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0); | ||
913 | for (; addr != end; addr += PAGE_SIZE) { | 990 | for (; addr != end; addr += PAGE_SIZE) { |
914 | unsigned long offset; | 991 | unsigned long offset; |
915 | 992 | ||
916 | offset = (addr & ~PAGEMAP_WALK_MASK) >> | 993 | offset = (addr & ~PAGEMAP_WALK_MASK) >> |
917 | PAGE_SHIFT; | 994 | PAGE_SHIFT; |
918 | thp_pmd_to_pagemap_entry(&pme, *pmd, offset); | 995 | thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2); |
919 | err = add_to_pagemap(addr, &pme, pm); | 996 | err = add_to_pagemap(addr, &pme, pm); |
920 | if (err) | 997 | if (err) |
921 | break; | 998 | break; |
@@ -932,7 +1009,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
932 | * and need a new, higher one */ | 1009 | * and need a new, higher one */ |
933 | if (vma && (addr >= vma->vm_end)) { | 1010 | if (vma && (addr >= vma->vm_end)) { |
934 | vma = find_vma(walk->mm, addr); | 1011 | vma = find_vma(walk->mm, addr); |
935 | pme = make_pme(PM_NOT_PRESENT); | 1012 | pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
936 | } | 1013 | } |
937 | 1014 | ||
938 | /* check that 'vma' actually covers this address, | 1015 | /* check that 'vma' actually covers this address, |
@@ -940,7 +1017,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
940 | if (vma && (vma->vm_start <= addr) && | 1017 | if (vma && (vma->vm_start <= addr) && |
941 | !is_vm_hugetlb_page(vma)) { | 1018 | !is_vm_hugetlb_page(vma)) { |
942 | pte = pte_offset_map(pmd, addr); | 1019 | pte = pte_offset_map(pmd, addr); |
943 | pte_to_pagemap_entry(&pme, vma, addr, *pte); | 1020 | pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); |
944 | /* unmap before userspace copy */ | 1021 | /* unmap before userspace copy */ |
945 | pte_unmap(pte); | 1022 | pte_unmap(pte); |
946 | } | 1023 | } |
@@ -955,14 +1032,14 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
955 | } | 1032 | } |
956 | 1033 | ||
957 | #ifdef CONFIG_HUGETLB_PAGE | 1034 | #ifdef CONFIG_HUGETLB_PAGE |
958 | static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, | 1035 | static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
959 | pte_t pte, int offset) | 1036 | pte_t pte, int offset) |
960 | { | 1037 | { |
961 | if (pte_present(pte)) | 1038 | if (pte_present(pte)) |
962 | *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | 1039 | *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
963 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); | 1040 | | PM_STATUS2(pm->v2, 0) | PM_PRESENT); |
964 | else | 1041 | else |
965 | *pme = make_pme(PM_NOT_PRESENT); | 1042 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); |
966 | } | 1043 | } |
967 | 1044 | ||
968 | /* This function walks within one hugetlb entry in the single call */ | 1045 | /* This function walks within one hugetlb entry in the single call */ |
@@ -976,7 +1053,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
976 | 1053 | ||
977 | for (; addr != end; addr += PAGE_SIZE) { | 1054 | for (; addr != end; addr += PAGE_SIZE) { |
978 | int offset = (addr & ~hmask) >> PAGE_SHIFT; | 1055 | int offset = (addr & ~hmask) >> PAGE_SHIFT; |
979 | huge_pte_to_pagemap_entry(&pme, *pte, offset); | 1056 | huge_pte_to_pagemap_entry(&pme, pm, *pte, offset); |
980 | err = add_to_pagemap(addr, &pme, pm); | 1057 | err = add_to_pagemap(addr, &pme, pm); |
981 | if (err) | 1058 | if (err) |
982 | return err; | 1059 | return err; |
@@ -1038,6 +1115,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
1038 | if (!count) | 1115 | if (!count) |
1039 | goto out_task; | 1116 | goto out_task; |
1040 | 1117 | ||
1118 | pm.v2 = soft_dirty_cleared; | ||
1041 | pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); | 1119 | pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); |
1042 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); | 1120 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); |
1043 | ret = -ENOMEM; | 1121 | ret = -ENOMEM; |
@@ -1110,9 +1188,18 @@ out: | |||
1110 | return ret; | 1188 | return ret; |
1111 | } | 1189 | } |
1112 | 1190 | ||
1191 | static int pagemap_open(struct inode *inode, struct file *file) | ||
1192 | { | ||
1193 | pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " | ||
1194 | "to stop being page-shift some time soon. See the " | ||
1195 | "linux/Documentation/vm/pagemap.txt for details.\n"); | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1113 | const struct file_operations proc_pagemap_operations = { | 1199 | const struct file_operations proc_pagemap_operations = { |
1114 | .llseek = mem_lseek, /* borrow this */ | 1200 | .llseek = mem_lseek, /* borrow this */ |
1115 | .read = pagemap_read, | 1201 | .read = pagemap_read, |
1202 | .open = pagemap_open, | ||
1116 | }; | 1203 | }; |
1117 | #endif /* CONFIG_PROC_PAGE_MONITOR */ | 1204 | #endif /* CONFIG_PROC_PAGE_MONITOR */ |
1118 | 1205 | ||
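On the read side, the former page-shift field of a pagemap entry (bits 55-60, per the warnings added above) now carries status bits, with the soft-dirty flag in bit 55 once the v2 layout is active. A small reader sketch; the exact bit positions follow those warnings and the long-standing bit-63 "present" flag rather than being spelled out in full in this hunk:

/* Read the pagemap entry for one virtual address and test the present
 * and soft-dirty bits. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#define PM_PRESENT    (1ULL << 63)
#define PM_SOFT_DIRTY (1ULL << 55)	/* __PM_SOFT_DIRTY in the old pshift field */

static int read_pagemap_entry(void *addr, uint64_t *entry)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);
	long pagesize = sysconf(_SC_PAGESIZE);
	off_t off = ((uintptr_t)addr / pagesize) * sizeof(uint64_t);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, entry, sizeof(*entry), off);
	close(fd);
	return n == (ssize_t)sizeof(*entry) ? 0 : -1;
}

int main(void)
{
	uint64_t e;
	char *page = malloc(4096);

	if (!page)
		return 1;
	page[0] = 1;	/* make sure the page is mapped and written */
	if (read_pagemap_entry(page, &e))
		return 1;
	printf("present: %d  soft-dirty: %d\n",
	       !!(e & PM_PRESENT), !!(e & PM_SOFT_DIRTY));
	free(page);
	return 0;
}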
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 9610ac772d7e..061894625903 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c | |||
@@ -20,8 +20,7 @@ static int uptime_proc_show(struct seq_file *m, void *v) | |||
20 | for_each_possible_cpu(i) | 20 | for_each_possible_cpu(i) |
21 | idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; | 21 | idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; |
22 | 22 | ||
23 | do_posix_clock_monotonic_gettime(&uptime); | 23 | get_monotonic_boottime(&uptime); |
24 | monotonic_to_bootbased(&uptime); | ||
25 | nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; | 24 | nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; |
26 | idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); | 25 | idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); |
27 | idle.tv_nsec = rem; | 26 | idle.tv_nsec = rem; |
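The uptime.c hunk folds the monotonic-time-plus-boot-offset pair into a single get_monotonic_boottime() call. The userspace analogue of that distinction is CLOCK_MONOTONIC versus CLOCK_BOOTTIME (the latter keeps counting across suspend); a tiny comparison, defining the clock id locally in case older libc headers lack it:

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* Linux clock id, for older libc headers */
#endif

int main(void)
{
	struct timespec mono, boot;

	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_gettime(CLOCK_BOOTTIME, &boot);
	printf("monotonic: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
	printf("boottime:  %ld.%09ld\n", (long)boot.tv_sec, boot.tv_nsec);
	return 0;
}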
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 17f7e080d7ff..28503172f2e4 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/crash_dump.h> | 21 | #include <linux/crash_dump.h> |
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/vmalloc.h> | ||
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include "internal.h" | 26 | #include "internal.h" |
@@ -32,6 +33,10 @@ static LIST_HEAD(vmcore_list); | |||
32 | /* Stores the pointer to the buffer containing kernel elf core headers. */ | 33 | /* Stores the pointer to the buffer containing kernel elf core headers. */ |
33 | static char *elfcorebuf; | 34 | static char *elfcorebuf; |
34 | static size_t elfcorebuf_sz; | 35 | static size_t elfcorebuf_sz; |
36 | static size_t elfcorebuf_sz_orig; | ||
37 | |||
38 | static char *elfnotes_buf; | ||
39 | static size_t elfnotes_sz; | ||
35 | 40 | ||
36 | /* Total size of vmcore file. */ | 41 | /* Total size of vmcore file. */ |
37 | static u64 vmcore_size; | 42 | static u64 vmcore_size; |
@@ -118,27 +123,6 @@ static ssize_t read_from_oldmem(char *buf, size_t count, | |||
118 | return read; | 123 | return read; |
119 | } | 124 | } |
120 | 125 | ||
121 | /* Maps vmcore file offset to respective physical address in memroy. */ | ||
122 | static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list, | ||
123 | struct vmcore **m_ptr) | ||
124 | { | ||
125 | struct vmcore *m; | ||
126 | u64 paddr; | ||
127 | |||
128 | list_for_each_entry(m, vc_list, list) { | ||
129 | u64 start, end; | ||
130 | start = m->offset; | ||
131 | end = m->offset + m->size - 1; | ||
132 | if (offset >= start && offset <= end) { | ||
133 | paddr = m->paddr + offset - start; | ||
134 | *m_ptr = m; | ||
135 | return paddr; | ||
136 | } | ||
137 | } | ||
138 | *m_ptr = NULL; | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /* Read from the ELF header and then the crash dump. On error, negative value is | 126 | /* Read from the ELF header and then the crash dump. On error, negative value is |
143 | * returned otherwise number of bytes read are returned. | 127 | * returned otherwise number of bytes read are returned. |
144 | */ | 128 | */ |
@@ -147,8 +131,8 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
147 | { | 131 | { |
148 | ssize_t acc = 0, tmp; | 132 | ssize_t acc = 0, tmp; |
149 | size_t tsz; | 133 | size_t tsz; |
150 | u64 start, nr_bytes; | 134 | u64 start; |
151 | struct vmcore *curr_m = NULL; | 135 | struct vmcore *m = NULL; |
152 | 136 | ||
153 | if (buflen == 0 || *fpos >= vmcore_size) | 137 | if (buflen == 0 || *fpos >= vmcore_size) |
154 | return 0; | 138 | return 0; |
@@ -159,9 +143,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
159 | 143 | ||
160 | /* Read ELF core header */ | 144 | /* Read ELF core header */ |
161 | if (*fpos < elfcorebuf_sz) { | 145 | if (*fpos < elfcorebuf_sz) { |
162 | tsz = elfcorebuf_sz - *fpos; | 146 | tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); |
163 | if (buflen < tsz) | ||
164 | tsz = buflen; | ||
165 | if (copy_to_user(buffer, elfcorebuf + *fpos, tsz)) | 147 | if (copy_to_user(buffer, elfcorebuf + *fpos, tsz)) |
166 | return -EFAULT; | 148 | return -EFAULT; |
167 | buflen -= tsz; | 149 | buflen -= tsz; |
@@ -174,39 +156,161 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
174 | return acc; | 156 | return acc; |
175 | } | 157 | } |
176 | 158 | ||
177 | start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m); | 159 | /* Read Elf note segment */ |
178 | if (!curr_m) | 160 | if (*fpos < elfcorebuf_sz + elfnotes_sz) { |
179 | return -EINVAL; | 161 | void *kaddr; |
180 | |||
181 | while (buflen) { | ||
182 | tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK)); | ||
183 | 162 | ||
184 | /* Calculate left bytes in current memory segment. */ | 163 | tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); |
185 | nr_bytes = (curr_m->size - (start - curr_m->paddr)); | 164 | kaddr = elfnotes_buf + *fpos - elfcorebuf_sz; |
186 | if (tsz > nr_bytes) | 165 | if (copy_to_user(buffer, kaddr, tsz)) |
187 | tsz = nr_bytes; | 166 | return -EFAULT; |
188 | |||
189 | tmp = read_from_oldmem(buffer, tsz, &start, 1); | ||
190 | if (tmp < 0) | ||
191 | return tmp; | ||
192 | buflen -= tsz; | 167 | buflen -= tsz; |
193 | *fpos += tsz; | 168 | *fpos += tsz; |
194 | buffer += tsz; | 169 | buffer += tsz; |
195 | acc += tsz; | 170 | acc += tsz; |
196 | if (start >= (curr_m->paddr + curr_m->size)) { | 171 | |
197 | if (curr_m->list.next == &vmcore_list) | 172 | /* leave now if filled buffer already */ |
198 | return acc; /*EOF*/ | 173 | if (buflen == 0) |
199 | curr_m = list_entry(curr_m->list.next, | 174 | return acc; |
200 | struct vmcore, list); | 175 | } |
201 | start = curr_m->paddr; | 176 | |
177 | list_for_each_entry(m, &vmcore_list, list) { | ||
178 | if (*fpos < m->offset + m->size) { | ||
179 | tsz = min_t(size_t, m->offset + m->size - *fpos, buflen); | ||
180 | start = m->paddr + *fpos - m->offset; | ||
181 | tmp = read_from_oldmem(buffer, tsz, &start, 1); | ||
182 | if (tmp < 0) | ||
183 | return tmp; | ||
184 | buflen -= tsz; | ||
185 | *fpos += tsz; | ||
186 | buffer += tsz; | ||
187 | acc += tsz; | ||
188 | |||
189 | /* leave now if filled buffer already */ | ||
190 | if (buflen == 0) | ||
191 | return acc; | ||
202 | } | 192 | } |
203 | } | 193 | } |
194 | |||
204 | return acc; | 195 | return acc; |
205 | } | 196 | } |
206 | 197 | ||
198 | /** | ||
199 | * alloc_elfnotes_buf - allocate buffer for ELF note segment in | ||
200 | * vmalloc memory | ||
201 | * | ||
202 | * @notes_sz: size of buffer | ||
203 | * | ||
204 | * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap | ||
205 | * the buffer to user-space by means of remap_vmalloc_range(). | ||
206 | * | ||
207 | * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is | ||
208 | * disabled and there's no need to allow users to mmap the buffer. | ||
209 | */ | ||
210 | static inline char *alloc_elfnotes_buf(size_t notes_sz) | ||
211 | { | ||
212 | #ifdef CONFIG_MMU | ||
213 | return vmalloc_user(notes_sz); | ||
214 | #else | ||
215 | return vzalloc(notes_sz); | ||
216 | #endif | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is | ||
221 | * essential for mmap_vmcore() in order to map physically | ||
222 | * non-contiguous objects (ELF header, ELF note segment and memory | ||
223 | * regions in the 1st kernel pointed to by PT_LOAD entries) into | ||
224 | * virtually contiguous user-space in ELF layout. | ||
225 | */ | ||
226 | #ifdef CONFIG_MMU | ||
227 | static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | ||
228 | { | ||
229 | size_t size = vma->vm_end - vma->vm_start; | ||
230 | u64 start, end, len, tsz; | ||
231 | struct vmcore *m; | ||
232 | |||
233 | start = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
234 | end = start + size; | ||
235 | |||
236 | if (size > vmcore_size || end > vmcore_size) | ||
237 | return -EINVAL; | ||
238 | |||
239 | if (vma->vm_flags & (VM_WRITE | VM_EXEC)) | ||
240 | return -EPERM; | ||
241 | |||
242 | vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); | ||
243 | vma->vm_flags |= VM_MIXEDMAP; | ||
244 | |||
245 | len = 0; | ||
246 | |||
247 | if (start < elfcorebuf_sz) { | ||
248 | u64 pfn; | ||
249 | |||
250 | tsz = min(elfcorebuf_sz - (size_t)start, size); | ||
251 | pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT; | ||
252 | if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, | ||
253 | vma->vm_page_prot)) | ||
254 | return -EAGAIN; | ||
255 | size -= tsz; | ||
256 | start += tsz; | ||
257 | len += tsz; | ||
258 | |||
259 | if (size == 0) | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | if (start < elfcorebuf_sz + elfnotes_sz) { | ||
264 | void *kaddr; | ||
265 | |||
266 | tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); | ||
267 | kaddr = elfnotes_buf + start - elfcorebuf_sz; | ||
268 | if (remap_vmalloc_range_partial(vma, vma->vm_start + len, | ||
269 | kaddr, tsz)) | ||
270 | goto fail; | ||
271 | size -= tsz; | ||
272 | start += tsz; | ||
273 | len += tsz; | ||
274 | |||
275 | if (size == 0) | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | list_for_each_entry(m, &vmcore_list, list) { | ||
280 | if (start < m->offset + m->size) { | ||
281 | u64 paddr = 0; | ||
282 | |||
283 | tsz = min_t(size_t, m->offset + m->size - start, size); | ||
284 | paddr = m->paddr + start - m->offset; | ||
285 | if (remap_pfn_range(vma, vma->vm_start + len, | ||
286 | paddr >> PAGE_SHIFT, tsz, | ||
287 | vma->vm_page_prot)) | ||
288 | goto fail; | ||
289 | size -= tsz; | ||
290 | start += tsz; | ||
291 | len += tsz; | ||
292 | |||
293 | if (size == 0) | ||
294 | return 0; | ||
295 | } | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | fail: | ||
300 | do_munmap(vma->vm_mm, vma->vm_start, len); | ||
301 | return -EAGAIN; | ||
302 | } | ||
303 | #else | ||
304 | static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | ||
305 | { | ||
306 | return -ENOSYS; | ||
307 | } | ||
308 | #endif | ||
309 | |||
207 | static const struct file_operations proc_vmcore_operations = { | 310 | static const struct file_operations proc_vmcore_operations = { |
208 | .read = read_vmcore, | 311 | .read = read_vmcore, |
209 | .llseek = default_llseek, | 312 | .llseek = default_llseek, |
313 | .mmap = mmap_vmcore, | ||
210 | }; | 314 | }; |
211 | 315 | ||
212 | static struct vmcore* __init get_new_element(void) | 316 | static struct vmcore* __init get_new_element(void) |
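With the mmap_vmcore() handler added above, a dump filter can map /proc/vmcore instead of looping over read(); the file is laid out as page-aligned ELF headers, then the merged note segment, then the PT_LOAD regions tracked in vmcore_list. A minimal consumer sketch, only meaningful inside a kdump kernel where /proc/vmcore exists:

/* Map the first page of /proc/vmcore read-only and check the ELF magic. */
#include <stdio.h>
#include <string.h>
#include <elf.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/vmcore", O_RDONLY);
	void *p;

	if (fd < 0) {
		perror("open /proc/vmcore");
		return 1;
	}
	/* must be read-only: the handler rejects VM_WRITE/VM_EXEC mappings */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("ELF magic ok: %d\n", memcmp(p, ELFMAG, SELFMAG) == 0);
	munmap(p, 4096);
	close(fd);
	return 0;
}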
@@ -214,61 +318,40 @@ static struct vmcore* __init get_new_element(void) | |||
214 | return kzalloc(sizeof(struct vmcore), GFP_KERNEL); | 318 | return kzalloc(sizeof(struct vmcore), GFP_KERNEL); |
215 | } | 319 | } |
216 | 320 | ||
217 | static u64 __init get_vmcore_size_elf64(char *elfptr) | 321 | static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz, |
322 | struct list_head *vc_list) | ||
218 | { | 323 | { |
219 | int i; | ||
220 | u64 size; | ||
221 | Elf64_Ehdr *ehdr_ptr; | ||
222 | Elf64_Phdr *phdr_ptr; | ||
223 | |||
224 | ehdr_ptr = (Elf64_Ehdr *)elfptr; | ||
225 | phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); | ||
226 | size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr)); | ||
227 | for (i = 0; i < ehdr_ptr->e_phnum; i++) { | ||
228 | size += phdr_ptr->p_memsz; | ||
229 | phdr_ptr++; | ||
230 | } | ||
231 | return size; | ||
232 | } | ||
233 | |||
234 | static u64 __init get_vmcore_size_elf32(char *elfptr) | ||
235 | { | ||
236 | int i; | ||
237 | u64 size; | 324 | u64 size; |
238 | Elf32_Ehdr *ehdr_ptr; | 325 | struct vmcore *m; |
239 | Elf32_Phdr *phdr_ptr; | ||
240 | 326 | ||
241 | ehdr_ptr = (Elf32_Ehdr *)elfptr; | 327 | size = elfsz + elfnotesegsz; |
242 | phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); | 328 | list_for_each_entry(m, vc_list, list) { |
243 | size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr)); | 329 | size += m->size; |
244 | for (i = 0; i < ehdr_ptr->e_phnum; i++) { | ||
245 | size += phdr_ptr->p_memsz; | ||
246 | phdr_ptr++; | ||
247 | } | 330 | } |
248 | return size; | 331 | return size; |
249 | } | 332 | } |
250 | 333 | ||
251 | /* Merges all the PT_NOTE headers into one. */ | 334 | /** |
252 | static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | 335 | * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry |
253 | struct list_head *vc_list) | 336 | * |
337 | * @ehdr_ptr: ELF header | ||
338 | * | ||
339 | * This function updates p_memsz member of each PT_NOTE entry in the | ||
340 | * program header table pointed to by @ehdr_ptr to real size of ELF | ||
341 | * note segment. | ||
342 | */ | ||
343 | static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr) | ||
254 | { | 344 | { |
255 | int i, nr_ptnote=0, rc=0; | 345 | int i, rc=0; |
256 | char *tmp; | 346 | Elf64_Phdr *phdr_ptr; |
257 | Elf64_Ehdr *ehdr_ptr; | ||
258 | Elf64_Phdr phdr, *phdr_ptr; | ||
259 | Elf64_Nhdr *nhdr_ptr; | 347 | Elf64_Nhdr *nhdr_ptr; |
260 | u64 phdr_sz = 0, note_off; | ||
261 | 348 | ||
262 | ehdr_ptr = (Elf64_Ehdr *)elfptr; | 349 | phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); |
263 | phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); | ||
264 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | 350 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { |
265 | int j; | ||
266 | void *notes_section; | 351 | void *notes_section; |
267 | struct vmcore *new; | ||
268 | u64 offset, max_sz, sz, real_sz = 0; | 352 | u64 offset, max_sz, sz, real_sz = 0; |
269 | if (phdr_ptr->p_type != PT_NOTE) | 353 | if (phdr_ptr->p_type != PT_NOTE) |
270 | continue; | 354 | continue; |
271 | nr_ptnote++; | ||
272 | max_sz = phdr_ptr->p_memsz; | 355 | max_sz = phdr_ptr->p_memsz; |
273 | offset = phdr_ptr->p_offset; | 356 | offset = phdr_ptr->p_offset; |
274 | notes_section = kmalloc(max_sz, GFP_KERNEL); | 357 | notes_section = kmalloc(max_sz, GFP_KERNEL); |
@@ -280,7 +363,7 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | |||
280 | return rc; | 363 | return rc; |
281 | } | 364 | } |
282 | nhdr_ptr = notes_section; | 365 | nhdr_ptr = notes_section; |
283 | for (j = 0; j < max_sz; j += sz) { | 366 | while (real_sz < max_sz) { |
284 | if (nhdr_ptr->n_namesz == 0) | 367 | if (nhdr_ptr->n_namesz == 0) |
285 | break; | 368 | break; |
286 | sz = sizeof(Elf64_Nhdr) + | 369 | sz = sizeof(Elf64_Nhdr) + |
@@ -289,26 +372,122 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | |||
289 | real_sz += sz; | 372 | real_sz += sz; |
290 | nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz); | 373 | nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz); |
291 | } | 374 | } |
292 | |||
293 | /* Add this contiguous chunk of notes section to vmcore list.*/ | ||
294 | new = get_new_element(); | ||
295 | if (!new) { | ||
296 | kfree(notes_section); | ||
297 | return -ENOMEM; | ||
298 | } | ||
299 | new->paddr = phdr_ptr->p_offset; | ||
300 | new->size = real_sz; | ||
301 | list_add_tail(&new->list, vc_list); | ||
302 | phdr_sz += real_sz; | ||
303 | kfree(notes_section); | 375 | kfree(notes_section); |
376 | phdr_ptr->p_memsz = real_sz; | ||
377 | } | ||
378 | |||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | /** | ||
383 | * get_note_number_and_size_elf64 - get the number of PT_NOTE program | ||
384 | * headers and sum of real size of their ELF note segment headers and | ||
385 | * data. | ||
386 | * | ||
387 | * @ehdr_ptr: ELF header | ||
388 | * @nr_ptnote: buffer for the number of PT_NOTE program headers | ||
389 | * @sz_ptnote: buffer for size of unique PT_NOTE program header | ||
390 | * | ||
391 | * This function is used to merge multiple PT_NOTE program headers | ||
392 | * into a unique single one. The resulting unique entry will have | ||
393 | * @sz_ptnote in its phdr->p_memsz. | ||
394 | * | ||
395 | * It is assumed that program headers with PT_NOTE type pointed to by | ||
396 | * @ehdr_ptr has already been updated by update_note_header_size_elf64 | ||
397 | * and each of PT_NOTE program headers has actual ELF note segment | ||
398 | * size in its p_memsz member. | ||
399 | */ | ||
400 | static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr, | ||
401 | int *nr_ptnote, u64 *sz_ptnote) | ||
402 | { | ||
403 | int i; | ||
404 | Elf64_Phdr *phdr_ptr; | ||
405 | |||
406 | *nr_ptnote = *sz_ptnote = 0; | ||
407 | |||
408 | phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); | ||
409 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | ||
410 | if (phdr_ptr->p_type != PT_NOTE) | ||
411 | continue; | ||
412 | *nr_ptnote += 1; | ||
413 | *sz_ptnote += phdr_ptr->p_memsz; | ||
414 | } | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | /** | ||
420 | * copy_notes_elf64 - copy ELF note segments in a given buffer | ||
421 | * | ||
422 | * @ehdr_ptr: ELF header | ||
423 | * @notes_buf: buffer into which ELF note segments are copied | ||
424 | * | ||
425 | * This function is used to copy ELF note segment in the 1st kernel | ||
426 | * into the buffer @notes_buf in the 2nd kernel. It is assumed that | ||
427 | * size of the buffer @notes_buf is equal to or larger than sum of the | ||
428 | * real ELF note segment headers and data. | ||
429 | * | ||
430 | * It is assumed that program headers with PT_NOTE type pointed to by | ||
431 | * @ehdr_ptr has already been updated by update_note_header_size_elf64 | ||
432 | * and each of PT_NOTE program headers has actual ELF note segment | ||
433 | * size in its p_memsz member. | ||
434 | */ | ||
435 | static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf) | ||
436 | { | ||
437 | int i, rc=0; | ||
438 | Elf64_Phdr *phdr_ptr; | ||
439 | |||
440 | phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1); | ||
441 | |||
442 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | ||
443 | u64 offset; | ||
444 | if (phdr_ptr->p_type != PT_NOTE) | ||
445 | continue; | ||
446 | offset = phdr_ptr->p_offset; | ||
447 | rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); | ||
448 | if (rc < 0) | ||
449 | return rc; | ||
450 | notes_buf += phdr_ptr->p_memsz; | ||
304 | } | 451 | } |
305 | 452 | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /* Merges all the PT_NOTE headers into one. */ | ||
457 | static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | ||
458 | char **notes_buf, size_t *notes_sz) | ||
459 | { | ||
460 | int i, nr_ptnote=0, rc=0; | ||
461 | char *tmp; | ||
462 | Elf64_Ehdr *ehdr_ptr; | ||
463 | Elf64_Phdr phdr; | ||
464 | u64 phdr_sz = 0, note_off; | ||
465 | |||
466 | ehdr_ptr = (Elf64_Ehdr *)elfptr; | ||
467 | |||
468 | rc = update_note_header_size_elf64(ehdr_ptr); | ||
469 | if (rc < 0) | ||
470 | return rc; | ||
471 | |||
472 | rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz); | ||
473 | if (rc < 0) | ||
474 | return rc; | ||
475 | |||
476 | *notes_sz = roundup(phdr_sz, PAGE_SIZE); | ||
477 | *notes_buf = alloc_elfnotes_buf(*notes_sz); | ||
478 | if (!*notes_buf) | ||
479 | return -ENOMEM; | ||
480 | |||
481 | rc = copy_notes_elf64(ehdr_ptr, *notes_buf); | ||
482 | if (rc < 0) | ||
483 | return rc; | ||
484 | |||
306 | /* Prepare merged PT_NOTE program header. */ | 485 | /* Prepare merged PT_NOTE program header. */ |
307 | phdr.p_type = PT_NOTE; | 486 | phdr.p_type = PT_NOTE; |
308 | phdr.p_flags = 0; | 487 | phdr.p_flags = 0; |
309 | note_off = sizeof(Elf64_Ehdr) + | 488 | note_off = sizeof(Elf64_Ehdr) + |
310 | (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr); | 489 | (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr); |
311 | phdr.p_offset = note_off; | 490 | phdr.p_offset = roundup(note_off, PAGE_SIZE); |
312 | phdr.p_vaddr = phdr.p_paddr = 0; | 491 | phdr.p_vaddr = phdr.p_paddr = 0; |
313 | phdr.p_filesz = phdr.p_memsz = phdr_sz; | 492 | phdr.p_filesz = phdr.p_memsz = phdr_sz; |
314 | phdr.p_align = 0; | 493 | phdr.p_align = 0; |
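The note-size loops above switch from a byte-counting for loop to while (real_sz < max_sz), adding one Elf64_Nhdr plus its name and descriptor per iteration and stopping at an empty header. A standalone sketch of that walk; the 4-byte rounding of n_namesz/n_descsz is the standard ELF note alignment, which the truncated size expression in the hunk is assumed to perform:

/* Walk a buffer of concatenated ELF notes and return the real size of
 * the used portion, mirroring update_note_header_size_elf64(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <elf.h>

static uint64_t real_note_size(const void *buf, uint64_t max_sz)
{
	uint64_t real_sz = 0;

	while (real_sz < max_sz) {
		const Elf64_Nhdr *nhdr =
			(const Elf64_Nhdr *)((const char *)buf + real_sz);
		uint64_t sz;

		if (nhdr->n_namesz == 0)	/* terminating empty note */
			break;
		sz = sizeof(Elf64_Nhdr)
		     + ((nhdr->n_namesz + 3) & ~3ULL)
		     + ((nhdr->n_descsz + 3) & ~3ULL);
		real_sz += sz;
	}
	return real_sz;
}

int main(void)
{
	/* one "CORE" note with a 6-byte descriptor, then zero padding */
	unsigned char buf[64];
	Elf64_Nhdr n = { .n_namesz = 5, .n_descsz = 6, .n_type = 1 };

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &n, sizeof(n));
	memcpy(buf + sizeof(n), "CORE", 5);

	printf("real size: %llu of %zu\n",
	       (unsigned long long)real_note_size(buf, sizeof(buf)),
	       sizeof(buf));
	return 0;
}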
@@ -322,6 +501,8 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | |||
322 | i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); | 501 | i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); |
323 | *elfsz = *elfsz - i; | 502 | *elfsz = *elfsz - i; |
324 | memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr))); | 503 | memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr))); |
504 | memset(elfptr + *elfsz, 0, i); | ||
505 | *elfsz = roundup(*elfsz, PAGE_SIZE); | ||
325 | 506 | ||
326 | /* Modify e_phnum to reflect merged headers. */ | 507 | /* Modify e_phnum to reflect merged headers. */ |
327 | ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; | 508 | ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; |
@@ -329,27 +510,27 @@ static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, | |||
329 | return 0; | 510 | return 0; |
330 | } | 511 | } |
331 | 512 | ||
332 | /* Merges all the PT_NOTE headers into one. */ | 513 | /** |
333 | static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | 514 | * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry |
334 | struct list_head *vc_list) | 515 | * |
516 | * @ehdr_ptr: ELF header | ||
517 | * | ||
518 | * This function updates p_memsz member of each PT_NOTE entry in the | ||
519 | * program header table pointed to by @ehdr_ptr to real size of ELF | ||
520 | * note segment. | ||
521 | */ | ||
522 | static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr) | ||
335 | { | 523 | { |
336 | int i, nr_ptnote=0, rc=0; | 524 | int i, rc=0; |
337 | char *tmp; | 525 | Elf32_Phdr *phdr_ptr; |
338 | Elf32_Ehdr *ehdr_ptr; | ||
339 | Elf32_Phdr phdr, *phdr_ptr; | ||
340 | Elf32_Nhdr *nhdr_ptr; | 526 | Elf32_Nhdr *nhdr_ptr; |
341 | u64 phdr_sz = 0, note_off; | ||
342 | 527 | ||
343 | ehdr_ptr = (Elf32_Ehdr *)elfptr; | 528 | phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); |
344 | phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); | ||
345 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | 529 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { |
346 | int j; | ||
347 | void *notes_section; | 530 | void *notes_section; |
348 | struct vmcore *new; | ||
349 | u64 offset, max_sz, sz, real_sz = 0; | 531 | u64 offset, max_sz, sz, real_sz = 0; |
350 | if (phdr_ptr->p_type != PT_NOTE) | 532 | if (phdr_ptr->p_type != PT_NOTE) |
351 | continue; | 533 | continue; |
352 | nr_ptnote++; | ||
353 | max_sz = phdr_ptr->p_memsz; | 534 | max_sz = phdr_ptr->p_memsz; |
354 | offset = phdr_ptr->p_offset; | 535 | offset = phdr_ptr->p_offset; |
355 | notes_section = kmalloc(max_sz, GFP_KERNEL); | 536 | notes_section = kmalloc(max_sz, GFP_KERNEL); |
@@ -361,7 +542,7 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | |||
361 | return rc; | 542 | return rc; |
362 | } | 543 | } |
363 | nhdr_ptr = notes_section; | 544 | nhdr_ptr = notes_section; |
364 | for (j = 0; j < max_sz; j += sz) { | 545 | while (real_sz < max_sz) { |
365 | if (nhdr_ptr->n_namesz == 0) | 546 | if (nhdr_ptr->n_namesz == 0) |
366 | break; | 547 | break; |
367 | sz = sizeof(Elf32_Nhdr) + | 548 | sz = sizeof(Elf32_Nhdr) + |
@@ -370,26 +551,122 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | |||
370 | real_sz += sz; | 551 | real_sz += sz; |
371 | nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz); | 552 | nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz); |
372 | } | 553 | } |
373 | |||
374 | /* Add this contiguous chunk of notes section to vmcore list.*/ | ||
375 | new = get_new_element(); | ||
376 | if (!new) { | ||
377 | kfree(notes_section); | ||
378 | return -ENOMEM; | ||
379 | } | ||
380 | new->paddr = phdr_ptr->p_offset; | ||
381 | new->size = real_sz; | ||
382 | list_add_tail(&new->list, vc_list); | ||
383 | phdr_sz += real_sz; | ||
384 | kfree(notes_section); | 554 | kfree(notes_section); |
555 | phdr_ptr->p_memsz = real_sz; | ||
556 | } | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
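The size that the loop above accumulates for each note entry is the Nhdr header plus the name and descriptor, each padded to 4 bytes. The continuation of the sz statement lies outside the visible hunk, so the padding shown here is inferred from the standard ELF note layout rather than quoted from the patch:

#include <elf.h>
#include <stdint.h>

/* one ELF note entry: header, then name and descriptor, each 4-byte aligned */
static uint64_t elf_note_entry_size(const Elf32_Nhdr *nhdr)
{
	return sizeof(Elf32_Nhdr) +
	       (((uint64_t)nhdr->n_namesz + 3) & ~3ULL) +
	       (((uint64_t)nhdr->n_descsz + 3) & ~3ULL);
}

The rewritten loop stops either when real_sz reaches the p_memsz reported by the first kernel or at an entry with n_namesz == 0, and then writes the accumulated real size back into the program header.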
561 | /** | ||
562 | * get_note_number_and_size_elf32 - get the number of PT_NOTE program | ||
563 | * headers and the total size of their ELF note segment headers and | ||
564 | * data. | ||
565 | * | ||
566 | * @ehdr_ptr: ELF header | ||
567 | * @nr_ptnote: buffer for the number of PT_NOTE program headers | ||
568 | * @sz_ptnote: buffer for size of unique PT_NOTE program header | ||
569 | * | ||
570 | * This function is used to merge multiple PT_NOTE program headers | ||
571 | * into a single merged one. The resulting entry will have | ||
572 | * @sz_ptnote in its phdr->p_memsz. | ||
573 | * | ||
574 | * It is assumed that program headers with PT_NOTE type pointed to by | ||
575 | * @ehdr_ptr have already been updated by update_note_header_size_elf32 | ||
576 | * and each of the PT_NOTE program headers has the actual ELF note segment | ||
577 | * size in its p_memsz member. | ||
578 | */ | ||
579 | static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr, | ||
580 | int *nr_ptnote, u64 *sz_ptnote) | ||
581 | { | ||
582 | int i; | ||
583 | Elf32_Phdr *phdr_ptr; | ||
584 | |||
585 | *nr_ptnote = *sz_ptnote = 0; | ||
586 | |||
587 | phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); | ||
588 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | ||
589 | if (phdr_ptr->p_type != PT_NOTE) | ||
590 | continue; | ||
591 | *nr_ptnote += 1; | ||
592 | *sz_ptnote += phdr_ptr->p_memsz; | ||
593 | } | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * copy_notes_elf32 - copy ELF note segments into a given buffer | ||
600 | * | ||
601 | * @ehdr_ptr: ELF header | ||
602 | * @notes_buf: buffer into which ELF note segments are copied | ||
603 | * | ||
604 | * This function is used to copy the ELF note segments in the 1st kernel | ||
605 | * into the buffer @notes_buf in the 2nd kernel. It is assumed that the | ||
606 | * size of the buffer @notes_buf is equal to or larger than the sum of the | ||
607 | * real ELF note segment headers and data. | ||
608 | * | ||
609 | * It is assumed that program headers with PT_NOTE type pointed to by | ||
610 | * @ehdr_ptr have already been updated by update_note_header_size_elf32 | ||
611 | * and each of the PT_NOTE program headers has the actual ELF note segment | ||
612 | * size in its p_memsz member. | ||
613 | */ | ||
614 | static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf) | ||
615 | { | ||
616 | int i, rc=0; | ||
617 | Elf32_Phdr *phdr_ptr; | ||
618 | |||
619 | phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1); | ||
620 | |||
621 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | ||
622 | u64 offset; | ||
623 | if (phdr_ptr->p_type != PT_NOTE) | ||
624 | continue; | ||
625 | offset = phdr_ptr->p_offset; | ||
626 | rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); | ||
627 | if (rc < 0) | ||
628 | return rc; | ||
629 | notes_buf += phdr_ptr->p_memsz; | ||
385 | } | 630 | } |
386 | 631 | ||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | /* Merges all the PT_NOTE headers into one. */ | ||
636 | static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | ||
637 | char **notes_buf, size_t *notes_sz) | ||
638 | { | ||
639 | int i, nr_ptnote=0, rc=0; | ||
640 | char *tmp; | ||
641 | Elf32_Ehdr *ehdr_ptr; | ||
642 | Elf32_Phdr phdr; | ||
643 | u64 phdr_sz = 0, note_off; | ||
644 | |||
645 | ehdr_ptr = (Elf32_Ehdr *)elfptr; | ||
646 | |||
647 | rc = update_note_header_size_elf32(ehdr_ptr); | ||
648 | if (rc < 0) | ||
649 | return rc; | ||
650 | |||
651 | rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz); | ||
652 | if (rc < 0) | ||
653 | return rc; | ||
654 | |||
655 | *notes_sz = roundup(phdr_sz, PAGE_SIZE); | ||
656 | *notes_buf = alloc_elfnotes_buf(*notes_sz); | ||
657 | if (!*notes_buf) | ||
658 | return -ENOMEM; | ||
659 | |||
660 | rc = copy_notes_elf32(ehdr_ptr, *notes_buf); | ||
661 | if (rc < 0) | ||
662 | return rc; | ||
663 | |||
387 | /* Prepare merged PT_NOTE program header. */ | 664 | /* Prepare merged PT_NOTE program header. */ |
388 | phdr.p_type = PT_NOTE; | 665 | phdr.p_type = PT_NOTE; |
389 | phdr.p_flags = 0; | 666 | phdr.p_flags = 0; |
390 | note_off = sizeof(Elf32_Ehdr) + | 667 | note_off = sizeof(Elf32_Ehdr) + |
391 | (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr); | 668 | (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr); |
392 | phdr.p_offset = note_off; | 669 | phdr.p_offset = roundup(note_off, PAGE_SIZE); |
393 | phdr.p_vaddr = phdr.p_paddr = 0; | 670 | phdr.p_vaddr = phdr.p_paddr = 0; |
394 | phdr.p_filesz = phdr.p_memsz = phdr_sz; | 671 | phdr.p_filesz = phdr.p_memsz = phdr_sz; |
395 | phdr.p_align = 0; | 672 | phdr.p_align = 0; |
@@ -403,6 +680,8 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | |||
403 | i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); | 680 | i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); |
404 | *elfsz = *elfsz - i; | 681 | *elfsz = *elfsz - i; |
405 | memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr))); | 682 | memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr))); |
683 | memset(elfptr + *elfsz, 0, i); | ||
684 | *elfsz = roundup(*elfsz, PAGE_SIZE); | ||
406 | 685 | ||
407 | /* Modify e_phnum to reflect merged headers. */ | 686 | /* Modify e_phnum to reflect merged headers. */ |
408 | ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; | 687 | ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; |
@@ -414,6 +693,7 @@ static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, | |||
414 | * the new offset fields of exported program headers. */ | 693 | * the new offset fields of exported program headers. */ |
415 | static int __init process_ptload_program_headers_elf64(char *elfptr, | 694 | static int __init process_ptload_program_headers_elf64(char *elfptr, |
416 | size_t elfsz, | 695 | size_t elfsz, |
696 | size_t elfnotes_sz, | ||
417 | struct list_head *vc_list) | 697 | struct list_head *vc_list) |
418 | { | 698 | { |
419 | int i; | 699 | int i; |
@@ -425,32 +705,38 @@ static int __init process_ptload_program_headers_elf64(char *elfptr, | |||
425 | ehdr_ptr = (Elf64_Ehdr *)elfptr; | 705 | ehdr_ptr = (Elf64_Ehdr *)elfptr; |
426 | phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */ | 706 | phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */ |
427 | 707 | ||
428 | /* First program header is PT_NOTE header. */ | 708 | /* Skip Elf header, program headers and Elf note segment. */ |
429 | vmcore_off = sizeof(Elf64_Ehdr) + | 709 | vmcore_off = elfsz + elfnotes_sz; |
430 | (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) + | ||
431 | phdr_ptr->p_memsz; /* Note sections */ | ||
432 | 710 | ||
433 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | 711 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { |
712 | u64 paddr, start, end, size; | ||
713 | |||
434 | if (phdr_ptr->p_type != PT_LOAD) | 714 | if (phdr_ptr->p_type != PT_LOAD) |
435 | continue; | 715 | continue; |
436 | 716 | ||
717 | paddr = phdr_ptr->p_offset; | ||
718 | start = rounddown(paddr, PAGE_SIZE); | ||
719 | end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); | ||
720 | size = end - start; | ||
721 | |||
437 | /* Add this contiguous chunk of memory to vmcore list.*/ | 722 | /* Add this contiguous chunk of memory to vmcore list.*/ |
438 | new = get_new_element(); | 723 | new = get_new_element(); |
439 | if (!new) | 724 | if (!new) |
440 | return -ENOMEM; | 725 | return -ENOMEM; |
441 | new->paddr = phdr_ptr->p_offset; | 726 | new->paddr = start; |
442 | new->size = phdr_ptr->p_memsz; | 727 | new->size = size; |
443 | list_add_tail(&new->list, vc_list); | 728 | list_add_tail(&new->list, vc_list); |
444 | 729 | ||
445 | /* Update the program header offset. */ | 730 | /* Update the program header offset. */ |
446 | phdr_ptr->p_offset = vmcore_off; | 731 | phdr_ptr->p_offset = vmcore_off + (paddr - start); |
447 | vmcore_off = vmcore_off + phdr_ptr->p_memsz; | 732 | vmcore_off = vmcore_off + size; |
448 | } | 733 | } |
449 | return 0; | 734 | return 0; |
450 | } | 735 | } |
451 | 736 | ||
452 | static int __init process_ptload_program_headers_elf32(char *elfptr, | 737 | static int __init process_ptload_program_headers_elf32(char *elfptr, |
453 | size_t elfsz, | 738 | size_t elfsz, |
739 | size_t elfnotes_sz, | ||
454 | struct list_head *vc_list) | 740 | struct list_head *vc_list) |
455 | { | 741 | { |
456 | int i; | 742 | int i; |
@@ -462,43 +748,44 @@ static int __init process_ptload_program_headers_elf32(char *elfptr, | |||
462 | ehdr_ptr = (Elf32_Ehdr *)elfptr; | 748 | ehdr_ptr = (Elf32_Ehdr *)elfptr; |
463 | phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */ | 749 | phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */ |
464 | 750 | ||
465 | /* First program header is PT_NOTE header. */ | 751 | /* Skip Elf header, program headers and Elf note segment. */ |
466 | vmcore_off = sizeof(Elf32_Ehdr) + | 752 | vmcore_off = elfsz + elfnotes_sz; |
467 | (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) + | ||
468 | phdr_ptr->p_memsz; /* Note sections */ | ||
469 | 753 | ||
470 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { | 754 | for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { |
755 | u64 paddr, start, end, size; | ||
756 | |||
471 | if (phdr_ptr->p_type != PT_LOAD) | 757 | if (phdr_ptr->p_type != PT_LOAD) |
472 | continue; | 758 | continue; |
473 | 759 | ||
760 | paddr = phdr_ptr->p_offset; | ||
761 | start = rounddown(paddr, PAGE_SIZE); | ||
762 | end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); | ||
763 | size = end - start; | ||
764 | |||
474 | /* Add this contiguous chunk of memory to vmcore list.*/ | 765 | /* Add this contiguous chunk of memory to vmcore list.*/ |
475 | new = get_new_element(); | 766 | new = get_new_element(); |
476 | if (!new) | 767 | if (!new) |
477 | return -ENOMEM; | 768 | return -ENOMEM; |
478 | new->paddr = phdr_ptr->p_offset; | 769 | new->paddr = start; |
479 | new->size = phdr_ptr->p_memsz; | 770 | new->size = size; |
480 | list_add_tail(&new->list, vc_list); | 771 | list_add_tail(&new->list, vc_list); |
481 | 772 | ||
482 | /* Update the program header offset */ | 773 | /* Update the program header offset */ |
483 | phdr_ptr->p_offset = vmcore_off; | 774 | phdr_ptr->p_offset = vmcore_off + (paddr - start); |
484 | vmcore_off = vmcore_off + phdr_ptr->p_memsz; | 775 | vmcore_off = vmcore_off + size; |
485 | } | 776 | } |
486 | return 0; | 777 | return 0; |
487 | } | 778 | } |
488 | 779 | ||
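Both PT_LOAD walkers now expand each chunk to whole pages and keep the sub-page displacement in the exported p_offset. A worked example of that arithmetic (all values are made up for illustration; rounddown()/roundup() are userspace stand-ins for the kernel macros):

#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define rounddown(x, y)  (((x) / (y)) * (y))
#define roundup(x, y)    ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long long paddr = 0x100234;	/* old p_offset (physical address) */
	unsigned long long memsz = 0x1800;	/* segment size */
	unsigned long long vmcore_off = 0x3000;	/* running offset past headers+notes */

	unsigned long long start = rounddown(paddr, PAGE_SIZE);	/* 0x100000 */
	unsigned long long end   = roundup(paddr + memsz, PAGE_SIZE);	/* 0x102000 */

	/* the exported p_offset keeps the sub-page displacement, so readers
	 * still find the data at the right place inside the padded chunk */
	printf("chunk [%#llx, %#llx) size %#llx, new p_offset %#llx\n",
	       start, end, end - start, vmcore_off + (paddr - start));
	return 0;
}

Every element added to vmcore_list is therefore page-aligned and a whole number of pages long, which keeps the exported regions safe to hand out in page-sized units.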
489 | /* Sets offset fields of vmcore elements. */ | 780 | /* Sets offset fields of vmcore elements. */ |
490 | static void __init set_vmcore_list_offsets_elf64(char *elfptr, | 781 | static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz, |
491 | struct list_head *vc_list) | 782 | struct list_head *vc_list) |
492 | { | 783 | { |
493 | loff_t vmcore_off; | 784 | loff_t vmcore_off; |
494 | Elf64_Ehdr *ehdr_ptr; | ||
495 | struct vmcore *m; | 785 | struct vmcore *m; |
496 | 786 | ||
497 | ehdr_ptr = (Elf64_Ehdr *)elfptr; | 787 | /* Skip Elf header, program headers and Elf note segment. */ |
498 | 788 | vmcore_off = elfsz + elfnotes_sz; | |
499 | /* Skip Elf header and program headers. */ | ||
500 | vmcore_off = sizeof(Elf64_Ehdr) + | ||
501 | (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr); | ||
502 | 789 | ||
503 | list_for_each_entry(m, vc_list, list) { | 790 | list_for_each_entry(m, vc_list, list) { |
504 | m->offset = vmcore_off; | 791 | m->offset = vmcore_off; |
@@ -506,24 +793,12 @@ static void __init set_vmcore_list_offsets_elf64(char *elfptr, | |||
506 | } | 793 | } |
507 | } | 794 | } |
508 | 795 | ||
509 | /* Sets offset fields of vmcore elements. */ | 796 | static void free_elfcorebuf(void) |
510 | static void __init set_vmcore_list_offsets_elf32(char *elfptr, | ||
511 | struct list_head *vc_list) | ||
512 | { | 797 | { |
513 | loff_t vmcore_off; | 798 | free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig)); |
514 | Elf32_Ehdr *ehdr_ptr; | 799 | elfcorebuf = NULL; |
515 | struct vmcore *m; | 800 | vfree(elfnotes_buf); |
516 | 801 | elfnotes_buf = NULL; | |
517 | ehdr_ptr = (Elf32_Ehdr *)elfptr; | ||
518 | |||
519 | /* Skip Elf header and program headers. */ | ||
520 | vmcore_off = sizeof(Elf32_Ehdr) + | ||
521 | (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr); | ||
522 | |||
523 | list_for_each_entry(m, vc_list, list) { | ||
524 | m->offset = vmcore_off; | ||
525 | vmcore_off += m->size; | ||
526 | } | ||
527 | } | 802 | } |
528 | 803 | ||
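free_elfcorebuf() pairs free_pages() with the __get_free_pages() allocation made further down in parse_crash_elf*_headers(), and vfree() with the buffer returned by alloc_elfnotes_buf() (not shown in this hunk, presumably vmalloc-based). get_order() turns the byte count into a page-allocation order; a simplified userspace re-implementation for illustration (the kernel's real version lives in asm-generic/getorder.h):

#include <stdio.h>

#define PAGE_SHIFT 12

static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* e.g. an ELF header plus 40 Elf64_Phdr entries is ~2.3 KiB -> order 0 */
	printf("get_order(2304)  = %d\n", get_order(2304));
	printf("get_order(16384) = %d\n", get_order(16384));
	return 0;
}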
529 | static int __init parse_crash_elf64_headers(void) | 804 | static int __init parse_crash_elf64_headers(void) |
@@ -554,31 +829,32 @@ static int __init parse_crash_elf64_headers(void) | |||
554 | } | 829 | } |
555 | 830 | ||
556 | /* Read in all elf headers. */ | 831 | /* Read in all elf headers. */ |
557 | elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr); | 832 | elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) + |
558 | elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); | 833 | ehdr.e_phnum * sizeof(Elf64_Phdr); |
834 | elfcorebuf_sz = elfcorebuf_sz_orig; | ||
835 | elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
836 | get_order(elfcorebuf_sz_orig)); | ||
559 | if (!elfcorebuf) | 837 | if (!elfcorebuf) |
560 | return -ENOMEM; | 838 | return -ENOMEM; |
561 | addr = elfcorehdr_addr; | 839 | addr = elfcorehdr_addr; |
562 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); | 840 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); |
563 | if (rc < 0) { | 841 | if (rc < 0) |
564 | kfree(elfcorebuf); | 842 | goto fail; |
565 | return rc; | ||
566 | } | ||
567 | 843 | ||
568 | /* Merge all PT_NOTE headers into one. */ | 844 | /* Merge all PT_NOTE headers into one. */ |
569 | rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list); | 845 | rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, |
570 | if (rc) { | 846 | &elfnotes_buf, &elfnotes_sz); |
571 | kfree(elfcorebuf); | 847 | if (rc) |
572 | return rc; | 848 | goto fail; |
573 | } | ||
574 | rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, | 849 | rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, |
575 | &vmcore_list); | 850 | elfnotes_sz, &vmcore_list); |
576 | if (rc) { | 851 | if (rc) |
577 | kfree(elfcorebuf); | 852 | goto fail; |
578 | return rc; | 853 | set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); |
579 | } | ||
580 | set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list); | ||
581 | return 0; | 854 | return 0; |
855 | fail: | ||
856 | free_elfcorebuf(); | ||
857 | return rc; | ||
582 | } | 858 | } |
583 | 859 | ||
584 | static int __init parse_crash_elf32_headers(void) | 860 | static int __init parse_crash_elf32_headers(void) |
@@ -609,31 +885,31 @@ static int __init parse_crash_elf32_headers(void) | |||
609 | } | 885 | } |
610 | 886 | ||
611 | /* Read in all elf headers. */ | 887 | /* Read in all elf headers. */ |
612 | elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr); | 888 | elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr); |
613 | elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); | 889 | elfcorebuf_sz = elfcorebuf_sz_orig; |
890 | elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
891 | get_order(elfcorebuf_sz_orig)); | ||
614 | if (!elfcorebuf) | 892 | if (!elfcorebuf) |
615 | return -ENOMEM; | 893 | return -ENOMEM; |
616 | addr = elfcorehdr_addr; | 894 | addr = elfcorehdr_addr; |
617 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); | 895 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); |
618 | if (rc < 0) { | 896 | if (rc < 0) |
619 | kfree(elfcorebuf); | 897 | goto fail; |
620 | return rc; | ||
621 | } | ||
622 | 898 | ||
623 | /* Merge all PT_NOTE headers into one. */ | 899 | /* Merge all PT_NOTE headers into one. */ |
624 | rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list); | 900 | rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, |
625 | if (rc) { | 901 | &elfnotes_buf, &elfnotes_sz); |
626 | kfree(elfcorebuf); | 902 | if (rc) |
627 | return rc; | 903 | goto fail; |
628 | } | ||
629 | rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, | 904 | rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, |
630 | &vmcore_list); | 905 | elfnotes_sz, &vmcore_list); |
631 | if (rc) { | 906 | if (rc) |
632 | kfree(elfcorebuf); | 907 | goto fail; |
633 | return rc; | 908 | set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); |
634 | } | ||
635 | set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list); | ||
636 | return 0; | 909 | return 0; |
910 | fail: | ||
911 | free_elfcorebuf(); | ||
912 | return rc; | ||
637 | } | 913 | } |
638 | 914 | ||
639 | static int __init parse_crash_elf_headers(void) | 915 | static int __init parse_crash_elf_headers(void) |
@@ -655,20 +931,19 @@ static int __init parse_crash_elf_headers(void) | |||
655 | rc = parse_crash_elf64_headers(); | 931 | rc = parse_crash_elf64_headers(); |
656 | if (rc) | 932 | if (rc) |
657 | return rc; | 933 | return rc; |
658 | |||
659 | /* Determine vmcore size. */ | ||
660 | vmcore_size = get_vmcore_size_elf64(elfcorebuf); | ||
661 | } else if (e_ident[EI_CLASS] == ELFCLASS32) { | 934 | } else if (e_ident[EI_CLASS] == ELFCLASS32) { |
662 | rc = parse_crash_elf32_headers(); | 935 | rc = parse_crash_elf32_headers(); |
663 | if (rc) | 936 | if (rc) |
664 | return rc; | 937 | return rc; |
665 | |||
666 | /* Determine vmcore size. */ | ||
667 | vmcore_size = get_vmcore_size_elf32(elfcorebuf); | ||
668 | } else { | 938 | } else { |
669 | pr_warn("Warning: Core image elf header is not sane\n"); | 939 | pr_warn("Warning: Core image elf header is not sane\n"); |
670 | return -EINVAL; | 940 | return -EINVAL; |
671 | } | 941 | } |
942 | |||
943 | /* Determine vmcore size. */ | ||
944 | vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz, | ||
945 | &vmcore_list); | ||
946 | |||
672 | return 0; | 947 | return 0; |
673 | } | 948 | } |
674 | 949 | ||
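With the header block, the note buffer and the PT_LOAD chunks all page-aligned, a single class-independent helper can report the total size. Its body is outside this hunk, but given the call above it presumably just sums the three regions, roughly:

/* sketch only -- the real helper's body is not part of this hunk */
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotes_sz,
				  struct list_head *vc_list)
{
	u64 size = elfsz + elfnotes_sz;	/* padded headers + padded notes */
	struct vmcore *m;

	list_for_each_entry(m, vc_list, list)
		size += m->size;	/* page-aligned PT_LOAD chunks */

	return size;
}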
@@ -711,7 +986,6 @@ void vmcore_cleanup(void) | |||
711 | list_del(&m->list); | 986 | list_del(&m->list); |
712 | kfree(m); | 987 | kfree(m); |
713 | } | 988 | } |
714 | kfree(elfcorebuf); | 989 | free_elfcorebuf(); |
715 | elfcorebuf = NULL; | ||
716 | } | 990 | } |
717 | EXPORT_SYMBOL_GPL(vmcore_cleanup); | 991 | EXPORT_SYMBOL_GPL(vmcore_cleanup); |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b1836987d506..a7126d28f4cf 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -396,6 +396,28 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
396 | #define arch_start_context_switch(prev) do {} while (0) | 396 | #define arch_start_context_switch(prev) do {} while (0) |
397 | #endif | 397 | #endif |
398 | 398 | ||
399 | #ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY | ||
400 | static inline int pte_soft_dirty(pte_t pte) | ||
401 | { | ||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static inline int pmd_soft_dirty(pmd_t pmd) | ||
406 | { | ||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | static inline pte_t pte_mksoft_dirty(pte_t pte) | ||
411 | { | ||
412 | return pte; | ||
413 | } | ||
414 | |||
415 | static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | ||
416 | { | ||
417 | return pmd; | ||
418 | } | ||
419 | #endif | ||
420 | |||
399 | #ifndef __HAVE_PFNMAP_TRACKING | 421 | #ifndef __HAVE_PFNMAP_TRACKING |
400 | /* | 422 | /* |
401 | * Interfaces that can be used by architecture code to keep track of | 423 | * Interfaces that can be used by architecture code to keep track of |
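The new stubs let generic memory-management code call the soft-dirty helpers unconditionally; only architectures that select CONFIG_HAVE_ARCH_SOFT_DIRTY supply real implementations. A hedged sketch of the calling pattern (example_mk_pte() is hypothetical):

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t example_mk_pte(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);

	/* returns pte unchanged on architectures without soft-dirty support */
	return pte_mksoft_dirty(pte);
}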
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index c1a1216e29ce..f1a24b5c3b90 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h | |||
@@ -3,6 +3,26 @@ | |||
3 | 3 | ||
4 | /* References to section boundaries */ | 4 | /* References to section boundaries */ |
5 | 5 | ||
6 | /* | ||
7 | * Usage guidelines: | ||
8 | * _text, _data: architecture specific, don't use them in arch-independent code | ||
9 | * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* | ||
10 | * and/or .init.* sections | ||
11 | * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* | ||
12 | * and/or .init.* sections. | ||
13 | * [__start_rodata, __end_rodata]: contains .rodata.* sections | ||
14 | * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* | ||
15 | * may be out of this range on some architectures. | ||
16 | * [_sinittext, _einittext]: contains .init.text.* sections | ||
17 | * [__bss_start, __bss_stop]: contains BSS sections | ||
18 | * | ||
19 | * The following global variables are optional and may be unavailable on some | ||
20 | * architectures and/or kernel configurations. | ||
21 | * _text, _data | ||
22 | * __kprobes_text_start, __kprobes_text_end | ||
23 | * __entry_text_start, __entry_text_end | ||
24 | * __ctors_start, __ctors_end | ||
25 | */ | ||
6 | extern char _text[], _stext[], _etext[]; | 26 | extern char _text[], _stext[], _etext[]; |
7 | extern char _data[], _sdata[], _edata[]; | 27 | extern char _data[], _sdata[], _edata[]; |
8 | extern char __bss_start[], __bss_stop[]; | 28 | extern char __bss_start[], __bss_stop[]; |
@@ -12,7 +32,6 @@ extern char _end[]; | |||
12 | extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; | 32 | extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; |
13 | extern char __kprobes_text_start[], __kprobes_text_end[]; | 33 | extern char __kprobes_text_start[], __kprobes_text_end[]; |
14 | extern char __entry_text_start[], __entry_text_end[]; | 34 | extern char __entry_text_start[], __entry_text_end[]; |
15 | extern char __initdata_begin[], __initdata_end[]; | ||
16 | extern char __start_rodata[], __end_rodata[]; | 35 | extern char __start_rodata[], __end_rodata[]; |
17 | 36 | ||
18 | /* Start and end of .ctors section - used for constructor calls. */ | 37 | /* Start and end of .ctors section - used for constructor calls. */ |
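The usage guidelines added above are what address-classification helpers rely on; for instance, a kernel-text check in arch-independent code can be written against [_stext, _etext) (a sketch, similar in spirit to core_kernel_text()):

#include <asm/sections.h>

static inline int addr_in_kernel_text(unsigned long addr)
{
	return addr >= (unsigned long)_stext &&
	       addr <  (unsigned long)_etext;
}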
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 4f2737208c42..c74d88baea60 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -275,13 +275,6 @@ | |||
275 | VMLINUX_SYMBOL(__end_builtin_fw) = .; \ | 275 | VMLINUX_SYMBOL(__end_builtin_fw) = .; \ |
276 | } \ | 276 | } \ |
277 | \ | 277 | \ |
278 | /* RapidIO route ops */ \ | ||
279 | .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \ | ||
280 | VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \ | ||
281 | *(.rio_switch_ops) \ | ||
282 | VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \ | ||
283 | } \ | ||
284 | \ | ||
285 | TRACEDATA \ | 278 | TRACEDATA \ |
286 | \ | 279 | \ |
287 | /* Kernel symbol table: Normal symbols */ \ | 280 | /* Kernel symbol table: Normal symbols */ \ |
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index a1c486a88e88..179b38ffd351 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
@@ -182,10 +182,6 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
182 | unsigned int src_offset, size_t len, | 182 | unsigned int src_offset, size_t len, |
183 | struct async_submit_ctl *submit); | 183 | struct async_submit_ctl *submit); |
184 | 184 | ||
185 | struct dma_async_tx_descriptor * | ||
186 | async_memset(struct page *dest, int val, unsigned int offset, | ||
187 | size_t len, struct async_submit_ctl *submit); | ||
188 | |||
189 | struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); | 185 | struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); |
190 | 186 | ||
191 | struct dma_async_tx_descriptor * | 187 | struct dma_async_tx_descriptor * |
diff --git a/include/linux/backlight.h b/include/linux/backlight.h index da9a0825e007..53b77949c79d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h | |||
@@ -114,7 +114,13 @@ static inline void backlight_update_status(struct backlight_device *bd) | |||
114 | extern struct backlight_device *backlight_device_register(const char *name, | 114 | extern struct backlight_device *backlight_device_register(const char *name, |
115 | struct device *dev, void *devdata, const struct backlight_ops *ops, | 115 | struct device *dev, void *devdata, const struct backlight_ops *ops, |
116 | const struct backlight_properties *props); | 116 | const struct backlight_properties *props); |
117 | extern struct backlight_device *devm_backlight_device_register( | ||
118 | struct device *dev, const char *name, struct device *parent, | ||
119 | void *devdata, const struct backlight_ops *ops, | ||
120 | const struct backlight_properties *props); | ||
117 | extern void backlight_device_unregister(struct backlight_device *bd); | 121 | extern void backlight_device_unregister(struct backlight_device *bd); |
122 | extern void devm_backlight_device_unregister(struct device *dev, | ||
123 | struct backlight_device *bd); | ||
118 | extern void backlight_force_update(struct backlight_device *bd, | 124 | extern void backlight_force_update(struct backlight_device *bd, |
119 | enum backlight_update_reason reason); | 125 | enum backlight_update_reason reason); |
120 | 126 | ||
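The device-managed variants tie the backlight's lifetime to the driver core, so a probe routine no longer needs a matching unregister in its error paths or remove callback. A sketch of the intended usage (the driver, its ops and property values are hypothetical):

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int example_bl_update_status(struct backlight_device *bl)
{
	/* push bl->props.brightness to the (hypothetical) hardware here */
	return 0;
}

static const struct backlight_ops example_bl_ops = {
	.update_status	= example_bl_update_status,
};

static int example_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	struct backlight_device *bl;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 255;

	/* unregistered automatically when the driver is unbound */
	bl = devm_backlight_device_register(&pdev->dev, "example-bl",
					    &pdev->dev, NULL,
					    &example_bl_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	platform_set_drvdata(pdev, bl);
	return 0;
}

devm_backlight_device_unregister() remains available for the rare driver that must drop the device before it is unbound.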
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 5f0b0e1f7c08..f1f07d31a3af 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -44,8 +44,8 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, | |||
44 | unsigned long endpfn); | 44 | unsigned long endpfn); |
45 | extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); | 45 | extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); |
46 | 46 | ||
47 | extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); | ||
48 | extern unsigned long free_all_bootmem(void); | 47 | extern unsigned long free_all_bootmem(void); |
48 | extern void reset_all_zones_managed_pages(void); | ||
49 | 49 | ||
50 | extern void free_bootmem_node(pg_data_t *pgdat, | 50 | extern void free_bootmem_node(pg_data_t *pgdat, |
51 | unsigned long addr, | 51 | unsigned long addr, |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index f5a3b838ddb0..91fa9a94ae92 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -139,6 +139,9 @@ BUFFER_FNS(Prio, prio) | |||
139 | }) | 139 | }) |
140 | #define page_has_buffers(page) PagePrivate(page) | 140 | #define page_has_buffers(page) PagePrivate(page) |
141 | 141 | ||
142 | void buffer_check_dirty_writeback(struct page *page, | ||
143 | bool *dirty, bool *writeback); | ||
144 | |||
142 | /* | 145 | /* |
143 | * Declarations | 146 | * Declarations |
144 | */ | 147 | */ |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 96d3e4ab11a9..cb286b1acdb6 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -66,7 +66,6 @@ enum dma_transaction_type { | |||
66 | DMA_PQ, | 66 | DMA_PQ, |
67 | DMA_XOR_VAL, | 67 | DMA_XOR_VAL, |
68 | DMA_PQ_VAL, | 68 | DMA_PQ_VAL, |
69 | DMA_MEMSET, | ||
70 | DMA_INTERRUPT, | 69 | DMA_INTERRUPT, |
71 | DMA_SG, | 70 | DMA_SG, |
72 | DMA_PRIVATE, | 71 | DMA_PRIVATE, |
@@ -520,7 +519,6 @@ struct dma_tx_state { | |||
520 | * @device_prep_dma_xor_val: prepares a xor validation operation | 519 | * @device_prep_dma_xor_val: prepares a xor validation operation |
521 | * @device_prep_dma_pq: prepares a pq operation | 520 | * @device_prep_dma_pq: prepares a pq operation |
522 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation | 521 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation |
523 | * @device_prep_dma_memset: prepares a memset operation | ||
524 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 522 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
525 | * @device_prep_slave_sg: prepares a slave dma operation | 523 | * @device_prep_slave_sg: prepares a slave dma operation |
526 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. | 524 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. |
@@ -573,9 +571,6 @@ struct dma_device { | |||
573 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | 571 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, |
574 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 572 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
575 | enum sum_check_flags *pqres, unsigned long flags); | 573 | enum sum_check_flags *pqres, unsigned long flags); |
576 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( | ||
577 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | ||
578 | unsigned long flags); | ||
579 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 574 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
580 | struct dma_chan *chan, unsigned long flags); | 575 | struct dma_chan *chan, unsigned long flags); |
581 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( | 576 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( |
diff --git a/include/linux/err.h b/include/linux/err.h index f2edce25a76b..221fcfb676c4 100644 --- a/include/linux/err.h +++ b/include/linux/err.h | |||
@@ -24,17 +24,17 @@ static inline void * __must_check ERR_PTR(long error) | |||
24 | return (void *) error; | 24 | return (void *) error; |
25 | } | 25 | } |
26 | 26 | ||
27 | static inline long __must_check PTR_ERR(const void *ptr) | 27 | static inline long __must_check PTR_ERR(__force const void *ptr) |
28 | { | 28 | { |
29 | return (long) ptr; | 29 | return (long) ptr; |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline long __must_check IS_ERR(const void *ptr) | 32 | static inline long __must_check IS_ERR(__force const void *ptr) |
33 | { | 33 | { |
34 | return IS_ERR_VALUE((unsigned long)ptr); | 34 | return IS_ERR_VALUE((unsigned long)ptr); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline long __must_check IS_ERR_OR_NULL(const void *ptr) | 37 | static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr) |
38 | { | 38 | { |
39 | return !ptr || IS_ERR_VALUE((unsigned long)ptr); | 39 | return !ptr || IS_ERR_VALUE((unsigned long)ptr); |
40 | } | 40 | } |
@@ -46,13 +46,13 @@ static inline long __must_check IS_ERR_OR_NULL(const void *ptr) | |||
46 | * Explicitly cast an error-valued pointer to another pointer type in such a | 46 | * Explicitly cast an error-valued pointer to another pointer type in such a |
47 | * way as to make it clear that's what's going on. | 47 | * way as to make it clear that's what's going on. |
48 | */ | 48 | */ |
49 | static inline void * __must_check ERR_CAST(const void *ptr) | 49 | static inline void * __must_check ERR_CAST(__force const void *ptr) |
50 | { | 50 | { |
51 | /* cast away the const */ | 51 | /* cast away the const */ |
52 | return (void *) ptr; | 52 | return (void *) ptr; |
53 | } | 53 | } |
54 | 54 | ||
55 | static inline int __must_check PTR_RET(const void *ptr) | 55 | static inline int __must_check PTR_RET(__force const void *ptr) |
56 | { | 56 | { |
57 | if (IS_ERR(ptr)) | 57 | if (IS_ERR(ptr)) |
58 | return PTR_ERR(ptr); | 58 | return PTR_ERR(ptr); |
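The __force annotations only affect sparse: they allow address-space-annotated pointers (__user, __iomem) to be passed to the error helpers without a warning and change nothing at runtime. The usual pattern these helpers support, shown with a hypothetical lookup function:

#include <linux/err.h>

struct example_item { int value; };
static struct example_item example_table[16];

static struct example_item *example_lookup(int key)
{
	if (key < 0 || key >= 16)
		return ERR_PTR(-EINVAL);	/* encode an errno in the pointer */
	return &example_table[key];
}

static int example_use(int key)
{
	struct example_item *item = example_lookup(key);

	if (IS_ERR(item))
		return PTR_ERR(item);		/* decode it back, e.g. -EINVAL */
	return item->value;
}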
diff --git a/include/linux/fs.h b/include/linux/fs.h index 2b82c8041490..99be011e00de 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -380,6 +380,7 @@ struct address_space_operations { | |||
380 | int (*launder_page) (struct page *); | 380 | int (*launder_page) (struct page *); |
381 | int (*is_partially_uptodate) (struct page *, read_descriptor_t *, | 381 | int (*is_partially_uptodate) (struct page *, read_descriptor_t *, |
382 | unsigned long); | 382 | unsigned long); |
383 | void (*is_dirty_writeback) (struct page *, bool *, bool *); | ||
383 | int (*error_remove_page)(struct address_space *, struct page *); | 384 | int (*error_remove_page)(struct address_space *, struct page *); |
384 | 385 | ||
385 | /* swapfile support */ | 386 | /* swapfile support */ |
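is_dirty_writeback gives reclaim a way to ask the filesystem about dirty/writeback state that is tracked in per-page buffers rather than in page flags; buffer_check_dirty_writeback(), declared in the buffer_head.h hunk above, is the generic implementation for buffer_head-based filesystems. A sketch of how a filesystem might wire it up (the aops table is hypothetical):

#include <linux/buffer_head.h>
#include <linux/fs.h>

static const struct address_space_operations example_aops = {
	/* .readpage, .writepage, ... omitted for brevity */
	.is_dirty_writeback	= buffer_check_dirty_writeback,
};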
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 89d4fbf681e7..c2b1801a160b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -55,7 +55,6 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb, | |||
55 | void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | 55 | void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, |
56 | unsigned long start, unsigned long end, | 56 | unsigned long start, unsigned long end, |
57 | struct page *ref_page); | 57 | struct page *ref_page); |
58 | int hugetlb_prefault(struct address_space *, struct vm_area_struct *); | ||
59 | void hugetlb_report_meminfo(struct seq_file *); | 58 | void hugetlb_report_meminfo(struct seq_file *); |
60 | int hugetlb_report_node_meminfo(int, char *); | 59 | int hugetlb_report_node_meminfo(int, char *); |
61 | void hugetlb_show_meminfo(void); | 60 | void hugetlb_show_meminfo(void); |
@@ -114,7 +113,6 @@ static inline unsigned long hugetlb_total_pages(void) | |||
114 | #define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) | 113 | #define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) |
115 | #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) | 114 | #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) |
116 | #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) | 115 | #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) |
117 | #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) | ||
118 | static inline void hugetlb_report_meminfo(struct seq_file *m) | 116 | static inline void hugetlb_report_meminfo(struct seq_file *m) |
119 | { | 117 | { |
120 | } | 118 | } |
diff --git a/include/linux/lcd.h b/include/linux/lcd.h index e00c3b0ebc6b..504f6246f38f 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h | |||
@@ -112,7 +112,12 @@ static inline void lcd_set_power(struct lcd_device *ld, int power) | |||
112 | 112 | ||
113 | extern struct lcd_device *lcd_device_register(const char *name, | 113 | extern struct lcd_device *lcd_device_register(const char *name, |
114 | struct device *parent, void *devdata, struct lcd_ops *ops); | 114 | struct device *parent, void *devdata, struct lcd_ops *ops); |
115 | extern struct lcd_device *devm_lcd_device_register(struct device *dev, | ||
116 | const char *name, struct device *parent, | ||
117 | void *devdata, struct lcd_ops *ops); | ||
115 | extern void lcd_device_unregister(struct lcd_device *ld); | 118 | extern void lcd_device_unregister(struct lcd_device *ld); |
119 | extern void devm_lcd_device_unregister(struct device *dev, | ||
120 | struct lcd_device *ld); | ||
116 | 121 | ||
117 | #define to_lcd_device(obj) container_of(obj, struct lcd_device, dev) | 122 | #define to_lcd_device(obj) container_of(obj, struct lcd_device, dev) |
118 | 123 | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d6183f06d8c1..7b4d9d79570b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -77,7 +77,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page); | |||
77 | 77 | ||
78 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | 78 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, |
79 | struct mem_cgroup *memcg); | 79 | struct mem_cgroup *memcg); |
80 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); | 80 | bool task_in_mem_cgroup(struct task_struct *task, |
81 | const struct mem_cgroup *memcg); | ||
81 | 82 | ||
82 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | 83 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); |
83 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 84 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
@@ -273,10 +274,10 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, | |||
273 | return true; | 274 | return true; |
274 | } | 275 | } |
275 | 276 | ||
276 | static inline int task_in_mem_cgroup(struct task_struct *task, | 277 | static inline bool task_in_mem_cgroup(struct task_struct *task, |
277 | const struct mem_cgroup *memcg) | 278 | const struct mem_cgroup *memcg) |
278 | { | 279 | { |
279 | return 1; | 280 | return true; |
280 | } | 281 | } |
281 | 282 | ||
282 | static inline struct cgroup_subsys_state | 283 | static inline struct cgroup_subsys_state |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 66d881f1d576..b87681adf0ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -25,11 +25,17 @@ struct file_ra_state; | |||
25 | struct user_struct; | 25 | struct user_struct; |
26 | struct writeback_control; | 26 | struct writeback_control; |
27 | 27 | ||
28 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ | 28 | #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ |
29 | extern unsigned long max_mapnr; | 29 | extern unsigned long max_mapnr; |
30 | |||
31 | static inline void set_max_mapnr(unsigned long limit) | ||
32 | { | ||
33 | max_mapnr = limit; | ||
34 | } | ||
35 | #else | ||
36 | static inline void set_max_mapnr(unsigned long limit) { } | ||
30 | #endif | 37 | #endif |
31 | 38 | ||
32 | extern unsigned long num_physpages; | ||
33 | extern unsigned long totalram_pages; | 39 | extern unsigned long totalram_pages; |
34 | extern void * high_memory; | 40 | extern void * high_memory; |
35 | extern int page_cluster; | 41 | extern int page_cluster; |
@@ -52,6 +58,9 @@ extern unsigned long sysctl_admin_reserve_kbytes; | |||
52 | /* to align the pointer to the (next) page boundary */ | 58 | /* to align the pointer to the (next) page boundary */ |
53 | #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) | 59 | #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) |
54 | 60 | ||
61 | /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ | ||
62 | #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE) | ||
63 | |||
55 | /* | 64 | /* |
56 | * Linux kernel virtual memory manager primitives. | 65 | * Linux kernel virtual memory manager primitives. |
57 | * The idea being to have a "virtual" mm in the same way | 66 | * The idea being to have a "virtual" mm in the same way |
@@ -1305,11 +1314,12 @@ extern void free_initmem(void); | |||
1305 | /* | 1314 | /* |
1306 | * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) | 1315 | * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) |
1307 | * into the buddy system. The freed pages will be poisoned with pattern | 1316 | * into the buddy system. The freed pages will be poisoned with pattern |
1308 | * "poison" if it's non-zero. | 1317 | * "poison" if it's within range [0, UCHAR_MAX]. |
1309 | * Return pages freed into the buddy system. | 1318 | * Return pages freed into the buddy system. |
1310 | */ | 1319 | */ |
1311 | extern unsigned long free_reserved_area(unsigned long start, unsigned long end, | 1320 | extern unsigned long free_reserved_area(void *start, void *end, |
1312 | int poison, char *s); | 1321 | int poison, char *s); |
1322 | |||
1313 | #ifdef CONFIG_HIGHMEM | 1323 | #ifdef CONFIG_HIGHMEM |
1314 | /* | 1324 | /* |
1315 | * Free a highmem page into the buddy system, adjusting totalhigh_pages | 1325 | * Free a highmem page into the buddy system, adjusting totalhigh_pages |
@@ -1318,10 +1328,8 @@ extern unsigned long free_reserved_area(unsigned long start, unsigned long end, | |||
1318 | extern void free_highmem_page(struct page *page); | 1328 | extern void free_highmem_page(struct page *page); |
1319 | #endif | 1329 | #endif |
1320 | 1330 | ||
1321 | static inline void adjust_managed_page_count(struct page *page, long count) | 1331 | extern void adjust_managed_page_count(struct page *page, long count); |
1322 | { | 1332 | extern void mem_init_print_info(const char *str); |
1323 | totalram_pages += count; | ||
1324 | } | ||
1325 | 1333 | ||
1326 | /* Free the reserved page into the buddy system, so it gets managed. */ | 1334 | /* Free the reserved page into the buddy system, so it gets managed. */ |
1327 | static inline void __free_reserved_page(struct page *page) | 1335 | static inline void __free_reserved_page(struct page *page) |
@@ -1345,18 +1353,29 @@ static inline void mark_page_reserved(struct page *page) | |||
1345 | 1353 | ||
1346 | /* | 1354 | /* |
1347 | * Default method to free all the __init memory into the buddy system. | 1355 | * Default method to free all the __init memory into the buddy system. |
1348 | * The freed pages will be poisoned with pattern "poison" if it is | 1356 | * The freed pages will be poisoned with pattern "poison" if it's within |
1349 | * non-zero. Return pages freed into the buddy system. | 1357 | * range [0, UCHAR_MAX]. |
1358 | * Return pages freed into the buddy system. | ||
1350 | */ | 1359 | */ |
1351 | static inline unsigned long free_initmem_default(int poison) | 1360 | static inline unsigned long free_initmem_default(int poison) |
1352 | { | 1361 | { |
1353 | extern char __init_begin[], __init_end[]; | 1362 | extern char __init_begin[], __init_end[]; |
1354 | 1363 | ||
1355 | return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) , | 1364 | return free_reserved_area(&__init_begin, &__init_end, |
1356 | ((unsigned long)&__init_end) & PAGE_MASK, | ||
1357 | poison, "unused kernel"); | 1365 | poison, "unused kernel"); |
1358 | } | 1366 | } |
1359 | 1367 | ||
1368 | static inline unsigned long get_num_physpages(void) | ||
1369 | { | ||
1370 | int nid; | ||
1371 | unsigned long phys_pages = 0; | ||
1372 | |||
1373 | for_each_online_node(nid) | ||
1374 | phys_pages += node_present_pages(nid); | ||
1375 | |||
1376 | return phys_pages; | ||
1377 | } | ||
1378 | |||
1360 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 1379 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
1361 | /* | 1380 | /* |
1362 | * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its | 1381 | * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its |
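free_reserved_area() now takes pointers and treats the poison argument as a byte value only when it falls in [0, UCHAR_MAX]; anything outside that range (conventionally -1) skips poisoning. A sketch of how architecture code typically calls these helpers (the initrd hook and the 0xcc pattern are illustrative assumptions):

#include <linux/mm.h>

void free_initmem(void)
{
	free_initmem_default(-1);	/* -1 is outside [0, UCHAR_MAX]: no poison */
}

void free_initrd_mem(unsigned long start, unsigned long end)
{
	/* 0xcc lies inside [0, UCHAR_MAX], so the freed range is poisoned */
	free_reserved_area((void *)start, (void *)end, 0xcc, "initrd");
}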
diff --git a/include/linux/mman.h b/include/linux/mman.h index 9aa863da287f..92dc257251e4 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h | |||
@@ -11,11 +11,17 @@ extern int sysctl_overcommit_memory; | |||
11 | extern int sysctl_overcommit_ratio; | 11 | extern int sysctl_overcommit_ratio; |
12 | extern struct percpu_counter vm_committed_as; | 12 | extern struct percpu_counter vm_committed_as; |
13 | 13 | ||
14 | #ifdef CONFIG_SMP | ||
15 | extern s32 vm_committed_as_batch; | ||
16 | #else | ||
17 | #define vm_committed_as_batch 0 | ||
18 | #endif | ||
19 | |||
14 | unsigned long vm_memory_committed(void); | 20 | unsigned long vm_memory_committed(void); |
15 | 21 | ||
16 | static inline void vm_acct_memory(long pages) | 22 | static inline void vm_acct_memory(long pages) |
17 | { | 23 | { |
18 | percpu_counter_add(&vm_committed_as, pages); | 24 | __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); |
19 | } | 25 | } |
20 | 26 | ||
21 | static inline void vm_unacct_memory(long pages) | 27 | static inline void vm_unacct_memory(long pages) |
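Batching vm_committed_as means each CPU accumulates up to vm_committed_as_batch pages locally before folding them into the global counter, trading a bounded read-side error for far fewer cache-line bounces on large machines. A reader that can live with that error uses the approximate value; this mirrors what vm_memory_committed() is expected to do:

#include <linux/mman.h>
#include <linux/percpu_counter.h>

static unsigned long committed_estimate(void)
{
	/* may lag the exact total by up to num_online_cpus() * batch pages */
	return percpu_counter_read_positive(&vm_committed_as);
}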
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5c76737d836b..ae19af5ec02c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -474,10 +474,16 @@ struct zone { | |||
474 | * frequently read in proximity to zone->lock. It's good to | 474 | * frequently read in proximity to zone->lock. It's good to |
475 | * give them a chance of being in the same cacheline. | 475 | * give them a chance of being in the same cacheline. |
476 | * | 476 | * |
477 | * Write access to present_pages and managed_pages at runtime should | 477 | * Write access to present_pages at runtime should be protected by |
478 | * be protected by lock_memory_hotplug()/unlock_memory_hotplug(). | 478 | * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't |
479 | * Any reader who can't tolerant drift of present_pages and | 479 | * tolerate drift of present_pages should hold memory hotplug lock to |
480 | * managed_pages should hold memory hotplug lock to get a stable value. | 480 | * get a stable value. |
481 | * | ||
482 | * Read access to managed_pages should be safe because it's unsigned | ||
483 | * long. Write access to zone->managed_pages and totalram_pages is | ||
484 | * protected by managed_page_count_lock at runtime. Ideally only | ||
485 | * adjust_managed_page_count() should be used instead of directly | ||
486 | * touching zone->managed_pages and totalram_pages. | ||
481 | */ | 487 | */ |
482 | unsigned long spanned_pages; | 488 | unsigned long spanned_pages; |
483 | unsigned long present_pages; | 489 | unsigned long present_pages; |
@@ -495,6 +501,13 @@ typedef enum { | |||
495 | ZONE_CONGESTED, /* zone has many dirty pages backed by | 501 | ZONE_CONGESTED, /* zone has many dirty pages backed by |
496 | * a congested BDI | 502 | * a congested BDI |
497 | */ | 503 | */ |
504 | ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found | ||
505 | * many dirty file pages at the tail | ||
506 | * of the LRU. | ||
507 | */ | ||
508 | ZONE_WRITEBACK, /* reclaim scanning has recently found | ||
509 | * many pages under writeback | ||
510 | */ | ||
498 | } zone_flags_t; | 511 | } zone_flags_t; |
499 | 512 | ||
500 | static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) | 513 | static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) |
@@ -517,6 +530,16 @@ static inline int zone_is_reclaim_congested(const struct zone *zone) | |||
517 | return test_bit(ZONE_CONGESTED, &zone->flags); | 530 | return test_bit(ZONE_CONGESTED, &zone->flags); |
518 | } | 531 | } |
519 | 532 | ||
533 | static inline int zone_is_reclaim_dirty(const struct zone *zone) | ||
534 | { | ||
535 | return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); | ||
536 | } | ||
537 | |||
538 | static inline int zone_is_reclaim_writeback(const struct zone *zone) | ||
539 | { | ||
540 | return test_bit(ZONE_WRITEBACK, &zone->flags); | ||
541 | } | ||
542 | |||
520 | static inline int zone_is_reclaim_locked(const struct zone *zone) | 543 | static inline int zone_is_reclaim_locked(const struct zone *zone) |
521 | { | 544 | { |
522 | return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); | 545 | return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); |
@@ -716,7 +739,10 @@ typedef struct pglist_data { | |||
716 | * or node_spanned_pages stay constant. Holding this will also | 739 | * or node_spanned_pages stay constant. Holding this will also |
717 | * guarantee that any pfn_valid() stays that way. | 740 | * guarantee that any pfn_valid() stays that way. |
718 | * | 741 | * |
719 | * Nests above zone->lock and zone->size_seqlock. | 742 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
743 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. | ||
744 | * | ||
745 | * Nests above zone->lock and zone->span_seqlock | ||
720 | */ | 746 | */ |
721 | spinlock_t node_size_lock; | 747 | spinlock_t node_size_lock; |
722 | #endif | 748 | #endif |
@@ -1111,6 +1137,10 @@ struct mem_section { | |||
1111 | struct page_cgroup *page_cgroup; | 1137 | struct page_cgroup *page_cgroup; |
1112 | unsigned long pad; | 1138 | unsigned long pad; |
1113 | #endif | 1139 | #endif |
1140 | /* | ||
1141 | * WARNING: mem_section must be a power-of-2 in size for the | ||
1142 | * calculation and use of SECTION_ROOT_MASK to make sense. | ||
1143 | */ | ||
1114 | }; | 1144 | }; |
1115 | 1145 | ||
1116 | #ifdef CONFIG_SPARSEMEM_EXTREME | 1146 | #ifdef CONFIG_SPARSEMEM_EXTREME |
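ZONE_TAIL_LRU_DIRTY and ZONE_WRITEBACK let reclaim remember that it has recently been hitting dirty or writeback pages at the tail of the LRU. A hedged sketch of how a reclaim path might set and consult them, modelled on the existing ZONE_CONGESTED handling (the helper names are hypothetical):

#include <linux/mmzone.h>

static void example_note_writeback_pressure(struct zone *zone,
					    unsigned long nr_writeback,
					    unsigned long nr_taken)
{
	/* every page taken off the LRU tail was already under writeback */
	if (nr_taken && nr_writeback == nr_taken)
		zone_set_flag(zone, ZONE_WRITEBACK);
}

static int example_should_throttle(struct zone *zone)
{
	return zone_is_reclaim_writeback(zone) || zone_is_reclaim_dirty(zone);
}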
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b508016fb76d..b62d4af6c667 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -456,7 +456,8 @@ enum dmi_field { | |||
456 | }; | 456 | }; |
457 | 457 | ||
458 | struct dmi_strmatch { | 458 | struct dmi_strmatch { |
459 | unsigned char slot; | 459 | unsigned char slot:7; |
460 | unsigned char exact_match:1; | ||
460 | char substr[79]; | 461 | char substr[79]; |
461 | }; | 462 | }; |
462 | 463 | ||
@@ -474,7 +475,8 @@ struct dmi_system_id { | |||
474 | */ | 475 | */ |
475 | #define dmi_device_id dmi_system_id | 476 | #define dmi_device_id dmi_system_id |
476 | 477 | ||
477 | #define DMI_MATCH(a, b) { a, b } | 478 | #define DMI_MATCH(a, b) { .slot = a, .substr = b } |
479 | #define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 } | ||
478 | 480 | ||
479 | #define PLATFORM_NAME_SIZE 20 | 481 | #define PLATFORM_NAME_SIZE 20 |
480 | #define PLATFORM_MODULE_PREFIX "platform:" | 482 | #define PLATFORM_MODULE_PREFIX "platform:" |
@@ -577,4 +579,23 @@ struct mei_cl_device_id { | |||
577 | kernel_ulong_t driver_info; | 579 | kernel_ulong_t driver_info; |
578 | }; | 580 | }; |
579 | 581 | ||
582 | /* RapidIO */ | ||
583 | |||
584 | #define RIO_ANY_ID 0xffff | ||
585 | |||
586 | /** | ||
587 | * struct rio_device_id - RIO device identifier | ||
588 | * @did: RapidIO device ID | ||
589 | * @vid: RapidIO vendor ID | ||
590 | * @asm_did: RapidIO assembly device ID | ||
591 | * @asm_vid: RapidIO assembly vendor ID | ||
592 | * | ||
593 | * Identifies a RapidIO device based on both the device/vendor IDs and | ||
594 | * the assembly device/vendor IDs. | ||
595 | */ | ||
596 | struct rio_device_id { | ||
597 | __u16 did, vid; | ||
598 | __u16 asm_did, asm_vid; | ||
599 | }; | ||
600 | |||
580 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 601 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
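With the exact_match bit, DMI tables can distinguish a substring match (DMI_MATCH, unchanged behaviour) from a full-string match (DMI_EXACT_MATCH), which matters when one board name is a prefix of another. A hypothetical quirk table:

#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
	{
		.ident = "Example Board rev A",
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "EXB-1000"),
		},
	},
	{ }	/* terminating empty entry */
};

/* e.g. in an init path: if (dmi_check_system(example_quirks)) apply the quirk */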
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 4871170a04a0..ae4981ebd18e 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
@@ -41,6 +41,7 @@ struct nbd_device { | |||
41 | u64 bytesize; | 41 | u64 bytesize; |
42 | pid_t pid; /* pid of nbd-client, if attached */ | 42 | pid_t pid; /* pid of nbd-client, if attached */ |
43 | int xmit_timeout; | 43 | int xmit_timeout; |
44 | int disconnect; /* a disconnect has been requested by user */ | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | #endif | 47 | #endif |
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index be655e4a2a75..2ee8cd2466b5 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h | |||
@@ -80,10 +80,4 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
80 | PB_migrate_skip) | 80 | PB_migrate_skip) |
81 | #endif /* CONFIG_COMPACTION */ | 81 | #endif /* CONFIG_COMPACTION */ |
82 | 82 | ||
83 | #define get_pageblock_flags(page) \ | ||
84 | get_pageblock_flags_group(page, 0, PB_migrate_end) | ||
85 | #define set_pageblock_flags(page, flags) \ | ||
86 | set_pageblock_flags_group(page, flags, \ | ||
87 | 0, PB_migrate_end) | ||
88 | |||
89 | #endif /* PAGEBLOCK_FLAGS_H */ | 83 | #endif /* PAGEBLOCK_FLAGS_H */ |
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 2aa12b8499c0..e4dbfab37729 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h | |||
@@ -21,7 +21,7 @@ struct pagevec { | |||
21 | }; | 21 | }; |
22 | 22 | ||
23 | void __pagevec_release(struct pagevec *pvec); | 23 | void __pagevec_release(struct pagevec *pvec); |
24 | void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); | 24 | void __pagevec_lru_add(struct pagevec *pvec); |
25 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, | 25 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, |
26 | pgoff_t start, unsigned nr_pages); | 26 | pgoff_t start, unsigned nr_pages); |
27 | unsigned pagevec_lookup_tag(struct pagevec *pvec, | 27 | unsigned pagevec_lookup_tag(struct pagevec *pvec, |
@@ -64,36 +64,4 @@ static inline void pagevec_release(struct pagevec *pvec) | |||
64 | __pagevec_release(pvec); | 64 | __pagevec_release(pvec); |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline void __pagevec_lru_add_anon(struct pagevec *pvec) | ||
68 | { | ||
69 | __pagevec_lru_add(pvec, LRU_INACTIVE_ANON); | ||
70 | } | ||
71 | |||
72 | static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec) | ||
73 | { | ||
74 | __pagevec_lru_add(pvec, LRU_ACTIVE_ANON); | ||
75 | } | ||
76 | |||
77 | static inline void __pagevec_lru_add_file(struct pagevec *pvec) | ||
78 | { | ||
79 | __pagevec_lru_add(pvec, LRU_INACTIVE_FILE); | ||
80 | } | ||
81 | |||
82 | static inline void __pagevec_lru_add_active_file(struct pagevec *pvec) | ||
83 | { | ||
84 | __pagevec_lru_add(pvec, LRU_ACTIVE_FILE); | ||
85 | } | ||
86 | |||
87 | static inline void pagevec_lru_add_file(struct pagevec *pvec) | ||
88 | { | ||
89 | if (pagevec_count(pvec)) | ||
90 | __pagevec_lru_add_file(pvec); | ||
91 | } | ||
92 | |||
93 | static inline void pagevec_lru_add_anon(struct pagevec *pvec) | ||
94 | { | ||
95 | if (pagevec_count(pvec)) | ||
96 | __pagevec_lru_add_anon(pvec); | ||
97 | } | ||
98 | |||
99 | #endif /* _LINUX_PAGEVEC_H */ | 67 | #endif /* _LINUX_PAGEVEC_H */ |
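With the per-LRU wrappers gone, __pagevec_lru_add() takes only the pagevec, and each page's own flags decide which LRU list it lands on. The usual batching idiom then looks roughly like this (a sketch, not a quote from the patch):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void example_add_to_lru(struct pagevec *pvec, struct page *page)
{
	page_cache_get(page);			/* the pagevec holds a reference */
	if (!pagevec_add(pvec, page))		/* 0 means the pagevec is now full */
		__pagevec_lru_add(pvec);	/* drain: each page goes to the LRU
						 * chosen from its own flags */
}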
diff --git a/include/linux/pid.h b/include/linux/pid.h index a089a3c447fc..23705a53abba 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
@@ -86,11 +86,9 @@ extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); | |||
86 | extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); | 86 | extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * attach_pid() and detach_pid() must be called with the tasklist_lock | 89 | * these helpers must be called with the tasklist_lock write-held. |
90 | * write-held. | ||
91 | */ | 90 | */ |
92 | extern void attach_pid(struct task_struct *task, enum pid_type type, | 91 | extern void attach_pid(struct task_struct *task, enum pid_type); |
93 | struct pid *pid); | ||
94 | extern void detach_pid(struct task_struct *task, enum pid_type); | 92 | extern void detach_pid(struct task_struct *task, enum pid_type); |
95 | extern void change_pid(struct task_struct *task, enum pid_type, | 93 | extern void change_pid(struct task_struct *task, enum pid_type, |
96 | struct pid *pid); | 94 | struct pid *pid); |
diff --git a/include/linux/rio.h b/include/linux/rio.h index 18e099342e6f..b71d5738e683 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/rio_regs.h> | 22 | #include <linux/rio_regs.h> |
23 | #include <linux/mod_devicetable.h> | ||
23 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | 24 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
24 | #include <linux/dmaengine.h> | 25 | #include <linux/dmaengine.h> |
25 | #endif | 26 | #endif |
@@ -91,9 +92,24 @@ union rio_pw_msg; | |||
91 | /** | 92 | /** |
92 | * struct rio_switch - RIO switch info | 93 | * struct rio_switch - RIO switch info |
93 | * @node: Node in global list of switches | 94 | * @node: Node in global list of switches |
94 | * @switchid: Switch ID that is unique across a network | ||
95 | * @route_table: Copy of switch routing table | 95 | * @route_table: Copy of switch routing table |
96 | * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 | 96 | * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 |
97 | * @ops: pointer to switch-specific operations | ||
98 | * @lock: lock to serialize operations updates | ||
99 | * @nextdev: Array of per-port pointers to the next attached device | ||
100 | */ | ||
101 | struct rio_switch { | ||
102 | struct list_head node; | ||
103 | u8 *route_table; | ||
104 | u32 port_ok; | ||
105 | struct rio_switch_ops *ops; | ||
106 | spinlock_t lock; | ||
107 | struct rio_dev *nextdev[0]; | ||
108 | }; | ||
109 | |||
110 | /** | ||
111 | * struct rio_switch_ops - Per-switch operations | ||
112 | * @owner: The module owner of this structure | ||
97 | * @add_entry: Callback for switch-specific route add function | 113 | * @add_entry: Callback for switch-specific route add function |
98 | * @get_entry: Callback for switch-specific route get function | 114 | * @get_entry: Callback for switch-specific route get function |
99 | * @clr_table: Callback for switch-specific clear route table function | 115 | * @clr_table: Callback for switch-specific clear route table function |
@@ -101,14 +117,12 @@ union rio_pw_msg; | |||
101 | * @get_domain: Callback for switch-specific domain get function | 117 | * @get_domain: Callback for switch-specific domain get function |
102 | * @em_init: Callback for switch-specific error management init function | 118 | * @em_init: Callback for switch-specific error management init function |
103 | * @em_handle: Callback for switch-specific error management handler function | 119 | * @em_handle: Callback for switch-specific error management handler function |
104 | * @sw_sysfs: Callback that initializes switch-specific sysfs attributes | 120 | * |
105 | * @nextdev: Array of per-port pointers to the next attached device | 121 | * Defines the operations that are necessary to initialize/control |
122 | * a particular RIO switch device. | ||
106 | */ | 123 | */ |
107 | struct rio_switch { | 124 | struct rio_switch_ops { |
108 | struct list_head node; | 125 | struct module *owner; |
109 | u16 switchid; | ||
110 | u8 *route_table; | ||
111 | u32 port_ok; | ||
112 | int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, | 126 | int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, |
113 | u16 table, u16 route_destid, u8 route_port); | 127 | u16 table, u16 route_destid, u8 route_port); |
114 | int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, | 128 | int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, |
@@ -121,8 +135,6 @@ struct rio_switch { | |||
121 | u8 *sw_domain); | 135 | u8 *sw_domain); |
122 | int (*em_init) (struct rio_dev *dev); | 136 | int (*em_init) (struct rio_dev *dev); |
123 | int (*em_handle) (struct rio_dev *dev, u8 swport); | 137 | int (*em_handle) (struct rio_dev *dev, u8 swport); |
124 | int (*sw_sysfs) (struct rio_dev *dev, int create); | ||
125 | struct rio_dev *nextdev[0]; | ||
126 | }; | 138 | }; |
127 | 139 | ||
128 | /** | 140 | /** |
@@ -130,6 +142,7 @@ struct rio_switch { | |||
130 | * @global_list: Node in list of all RIO devices | 142 | * @global_list: Node in list of all RIO devices |
131 | * @net_list: Node in list of RIO devices in a network | 143 | * @net_list: Node in list of RIO devices in a network |
132 | * @net: Network this device is a part of | 144 | * @net: Network this device is a part of |
145 | * @do_enum: Enumeration flag | ||
133 | * @did: Device ID | 146 | * @did: Device ID |
134 | * @vid: Vendor ID | 147 | * @vid: Vendor ID |
135 | * @device_rev: Device revision | 148 | * @device_rev: Device revision |
@@ -158,6 +171,7 @@ struct rio_dev { | |||
158 | struct list_head global_list; /* node in list of all RIO devices */ | 171 | struct list_head global_list; /* node in list of all RIO devices */ |
159 | struct list_head net_list; /* node in per net list */ | 172 | struct list_head net_list; /* node in per net list */ |
160 | struct rio_net *net; /* RIO net this device resides in */ | 173 | struct rio_net *net; /* RIO net this device resides in */ |
174 | bool do_enum; | ||
161 | u16 did; | 175 | u16 did; |
162 | u16 vid; | 176 | u16 vid; |
163 | u32 device_rev; | 177 | u32 device_rev; |
@@ -297,10 +311,6 @@ struct rio_net { | |||
297 | struct rio_id_table destid_table; /* destID allocation table */ | 311 | struct rio_id_table destid_table; /* destID allocation table */ |
298 | }; | 312 | }; |
299 | 313 | ||
300 | /* Definitions used by switch sysfs initialization callback */ | ||
301 | #define RIO_SW_SYSFS_CREATE 1 /* Create switch attributes */ | ||
302 | #define RIO_SW_SYSFS_REMOVE 0 /* Remove switch attributes */ | ||
303 | |||
304 | /* Low-level architecture-dependent routines */ | 314 | /* Low-level architecture-dependent routines */ |
305 | 315 | ||
306 | /** | 316 | /** |
@@ -385,35 +395,6 @@ struct rio_driver { | |||
385 | 395 | ||
386 | #define to_rio_driver(drv) container_of(drv,struct rio_driver, driver) | 396 | #define to_rio_driver(drv) container_of(drv,struct rio_driver, driver) |
387 | 397 | ||
388 | /** | ||
389 | * struct rio_device_id - RIO device identifier | ||
390 | * @did: RIO device ID | ||
391 | * @vid: RIO vendor ID | ||
392 | * @asm_did: RIO assembly device ID | ||
393 | * @asm_vid: RIO assembly vendor ID | ||
394 | * | ||
395 | * Identifies a RIO device based on both the device/vendor IDs and | ||
396 | * the assembly device/vendor IDs. | ||
397 | */ | ||
398 | struct rio_device_id { | ||
399 | u16 did, vid; | ||
400 | u16 asm_did, asm_vid; | ||
401 | }; | ||
402 | |||
403 | /** | ||
404 | * struct rio_switch_ops - Per-switch operations | ||
405 | * @vid: RIO vendor ID | ||
406 | * @did: RIO device ID | ||
407 | * @init_hook: Callback that performs switch device initialization | ||
408 | * | ||
409 | * Defines the operations that are necessary to initialize/control | ||
410 | * a particular RIO switch device. | ||
411 | */ | ||
412 | struct rio_switch_ops { | ||
413 | u16 vid, did; | ||
414 | int (*init_hook) (struct rio_dev *rdev, int do_enum); | ||
415 | }; | ||
416 | |||
417 | union rio_pw_msg { | 398 | union rio_pw_msg { |
418 | struct { | 399 | struct { |
419 | u32 comptag; /* Component Tag CSR */ | 400 | u32 comptag; /* Component Tag CSR */ |
@@ -468,14 +449,29 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) | |||
468 | 449 | ||
469 | /** | 450 | /** |
470 | * struct rio_scan - RIO enumeration and discovery operations | 451 | * struct rio_scan - RIO enumeration and discovery operations |
452 | * @owner: The module owner of this structure | ||
471 | * @enumerate: Callback to perform RapidIO fabric enumeration. | 453 | * @enumerate: Callback to perform RapidIO fabric enumeration. |
472 | * @discover: Callback to perform RapidIO fabric discovery. | 454 | * @discover: Callback to perform RapidIO fabric discovery. |
473 | */ | 455 | */ |
474 | struct rio_scan { | 456 | struct rio_scan { |
457 | struct module *owner; | ||
475 | int (*enumerate)(struct rio_mport *mport, u32 flags); | 458 | int (*enumerate)(struct rio_mport *mport, u32 flags); |
476 | int (*discover)(struct rio_mport *mport, u32 flags); | 459 | int (*discover)(struct rio_mport *mport, u32 flags); |
477 | }; | 460 | }; |
478 | 461 | ||
462 | /** | ||
463 | * struct rio_scan_node - list node to register RapidIO enumeration and | ||
464 | * discovery methods with RapidIO core. | ||
465 | * @mport_id: ID of an mport (net) serviced by this enumerator | ||
466 | * @node: node in global list of registered enumerators | ||
467 | * @ops: RIO enumeration and discovery operations | ||
468 | */ | ||
469 | struct rio_scan_node { | ||
470 | int mport_id; | ||
471 | struct list_head node; | ||
472 | struct rio_scan *ops; | ||
473 | }; | ||
474 | |||
479 | /* Architecture and hardware-specific functions */ | 475 | /* Architecture and hardware-specific functions */ |
480 | extern int rio_register_mport(struct rio_mport *); | 476 | extern int rio_register_mport(struct rio_mport *); |
481 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); | 477 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); |
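struct rio_switch_ops now sits next to struct rio_switch, carries a module owner and is referenced through rswitch->ops instead of being embedded, and enumerators likewise hand the core a struct rio_scan that names its owning module. A sketch of how an enumerator might hook in; the registration call rio_register_scan() and RIO_MPORT_ANY are assumed from the existing RapidIO core rather than from this hunk, and the my_* names are hypothetical:

#include <linux/module.h>
#include <linux/rio.h>

static int my_enumerate(struct rio_mport *mport, u32 flags)
{
	return 0;	/* walk and configure the fabric here */
}

static int my_discover(struct rio_mport *mport, u32 flags)
{
	return 0;	/* passively discover an already-enumerated fabric */
}

static struct rio_scan my_scan_ops = {
	.owner		= THIS_MODULE,
	.enumerate	= my_enumerate,
	.discover	= my_discover,
};

static int __init my_scan_init(void)
{
	/* serve every master port; a specific mport id works as well */
	return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
}
module_init(my_scan_init);
MODULE_LICENSE("GPL");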
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index b66d13d1bdc0..2543bc163d54 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h | |||
@@ -13,8 +13,6 @@ | |||
13 | #ifndef LINUX_RIO_IDS_H | 13 | #ifndef LINUX_RIO_IDS_H |
14 | #define LINUX_RIO_IDS_H | 14 | #define LINUX_RIO_IDS_H |
15 | 15 | ||
16 | #define RIO_ANY_ID 0xffff | ||
17 | |||
18 | #define RIO_VID_FREESCALE 0x0002 | 16 | #define RIO_VID_FREESCALE 0x0002 |
19 | #define RIO_DID_MPC8560 0x0003 | 17 | #define RIO_DID_MPC8560 0x0003 |
20 | 18 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index ec80684a0127..cdd5407b37e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1950,8 +1950,6 @@ extern struct task_struct *find_task_by_vpid(pid_t nr); | |||
1950 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, | 1950 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, |
1951 | struct pid_namespace *ns); | 1951 | struct pid_namespace *ns); |
1952 | 1952 | ||
1953 | extern void __set_special_pids(struct pid *pid); | ||
1954 | |||
1955 | /* per-UID process charging. */ | 1953 | /* per-UID process charging. */ |
1956 | extern struct user_struct * alloc_uid(kuid_t); | 1954 | extern struct user_struct * alloc_uid(kuid_t); |
1957 | static inline struct user_struct *get_uid(struct user_struct *u) | 1955 | static inline struct user_struct *get_uid(struct user_struct *u) |
diff --git a/include/linux/smp.h b/include/linux/smp.h index c8488763277f..c181399f2c20 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/irqflags.h> | ||
15 | 14 | ||
16 | extern void cpu_idle(void); | 15 | extern void cpu_idle(void); |
17 | 16 | ||
@@ -140,17 +139,14 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info) | |||
140 | } | 139 | } |
141 | #define smp_call_function(func, info, wait) \ | 140 | #define smp_call_function(func, info, wait) \ |
142 | (up_smp_call_function(func, info)) | 141 | (up_smp_call_function(func, info)) |
143 | 142 | #define on_each_cpu(func, info, wait) \ | |
144 | static inline int on_each_cpu(smp_call_func_t func, void *info, int wait) | 143 | ({ \ |
145 | { | 144 | unsigned long __flags; \ |
146 | unsigned long flags; | 145 | local_irq_save(__flags); \ |
147 | 146 | func(info); \ | |
148 | local_irq_save(flags); | 147 | local_irq_restore(__flags); \ |
149 | func(info); | 148 | 0; \ |
150 | local_irq_restore(flags); | 149 | }) |
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* | 150 | /* |
155 | * Note we still need to test the mask even for UP | 151 | * Note we still need to test the mask even for UP |
156 | * because we actually can get an empty mask from | 152 | * because we actually can get an empty mask from |
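On UP builds on_each_cpu() is now a macro rather than a static inline, and smp.h drops its <linux/irqflags.h> include in the same hunk; the behaviour is unchanged (interrupts disabled around the local call, return value 0). Call sites stay exactly as before, e.g. this small sketch:

#include <linux/atomic.h>
#include <linux/smp.h>

static atomic_t hits = ATOMIC_INIT(0);

static void bump(void *info)
{
	atomic_inc(info);
}

static void example_count_everywhere(void)
{
	/* on SMP this runs on every CPU; on UP it runs once with IRQs off */
	on_each_cpu(bump, &hits, 1);
}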
diff --git a/include/linux/swap.h b/include/linux/swap.h index 1701ce4be746..d95cde5e257d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/node.h> | 10 | #include <linux/node.h> |
11 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/atomic.h> | 12 | #include <linux/atomic.h> |
13 | #include <linux/page-flags.h> | ||
13 | #include <asm/page.h> | 14 | #include <asm/page.h> |
14 | 15 | ||
15 | struct notifier_block; | 16 | struct notifier_block; |
@@ -19,10 +20,13 @@ struct bio; | |||
19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ | 20 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | 21 | #define SWAP_FLAG_PRIO_MASK 0x7fff |
21 | #define SWAP_FLAG_PRIO_SHIFT 0 | 22 | #define SWAP_FLAG_PRIO_SHIFT 0 |
22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ | 23 | #define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */ |
24 | #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */ | ||
25 | #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */ | ||
23 | 26 | ||
24 | #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ | 27 | #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ |
25 | SWAP_FLAG_DISCARD) | 28 | SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \ |
29 | SWAP_FLAG_DISCARD_PAGES) | ||
26 | 30 | ||
27 | static inline int current_is_kswapd(void) | 31 | static inline int current_is_kswapd(void) |
28 | { | 32 | { |
@@ -146,14 +150,16 @@ struct swap_extent { | |||
146 | enum { | 150 | enum { |
147 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | 151 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ |
148 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ | 152 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ |
149 | SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ | 153 | SWP_DISCARDABLE = (1 << 2), /* blkdev support discard */ |
150 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ | 154 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
151 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 155 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
152 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | 156 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ |
153 | SWP_BLKDEV = (1 << 6), /* its a block device */ | 157 | SWP_BLKDEV = (1 << 6), /* its a block device */ |
154 | SWP_FILE = (1 << 7), /* set after swap_activate success */ | 158 | SWP_FILE = (1 << 7), /* set after swap_activate success */ |
159 | SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ | ||
160 | SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ | ||
155 | /* add others here before... */ | 161 | /* add others here before... */ |
156 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ | 162 | SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */ |
157 | }; | 163 | }; |
158 | 164 | ||
159 | #define SWAP_CLUSTER_MAX 32UL | 165 | #define SWAP_CLUSTER_MAX 32UL |
@@ -233,8 +239,8 @@ extern unsigned long nr_free_pagecache_pages(void); | |||
233 | 239 | ||
234 | 240 | ||
235 | /* linux/mm/swap.c */ | 241 | /* linux/mm/swap.c */ |
236 | extern void __lru_cache_add(struct page *, enum lru_list lru); | 242 | extern void __lru_cache_add(struct page *); |
237 | extern void lru_cache_add_lru(struct page *, enum lru_list lru); | 243 | extern void lru_cache_add(struct page *); |
238 | extern void lru_add_page_tail(struct page *page, struct page *page_tail, | 244 | extern void lru_add_page_tail(struct page *page, struct page *page_tail, |
239 | struct lruvec *lruvec, struct list_head *head); | 245 | struct lruvec *lruvec, struct list_head *head); |
240 | extern void activate_page(struct page *); | 246 | extern void activate_page(struct page *); |
@@ -254,12 +260,14 @@ extern void add_page_to_unevictable_list(struct page *page); | |||
254 | */ | 260 | */ |
255 | static inline void lru_cache_add_anon(struct page *page) | 261 | static inline void lru_cache_add_anon(struct page *page) |
256 | { | 262 | { |
257 | __lru_cache_add(page, LRU_INACTIVE_ANON); | 263 | ClearPageActive(page); |
264 | __lru_cache_add(page); | ||
258 | } | 265 | } |
259 | 266 | ||
260 | static inline void lru_cache_add_file(struct page *page) | 267 | static inline void lru_cache_add_file(struct page *page) |
261 | { | 268 | { |
262 | __lru_cache_add(page, LRU_INACTIVE_FILE); | 269 | ClearPageActive(page); |
270 | __lru_cache_add(page); | ||
263 | } | 271 | } |
264 | 272 | ||
265 | /* linux/mm/vmscan.c */ | 273 | /* linux/mm/vmscan.c */ |
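The two new swapon flags split the old blanket discard behaviour: DISCARD_ONCE requests a single discard of the whole area at swapon time, DISCARD_PAGES requests discards of page-clusters as they are freed, and passing only SWAP_FLAG_DISCARD keeps the old behaviour of doing both. A userspace sketch; the flag values mirror the header above and the device path is just an example:

#include <stdio.h>
#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD	0x10000
#endif
#ifndef SWAP_FLAG_DISCARD_ONCE
#define SWAP_FLAG_DISCARD_ONCE	0x20000
#define SWAP_FLAG_DISCARD_PAGES	0x40000
#endif

int main(void)
{
	/* discard the whole area once at swapon, then per cluster on free */
	if (swapon("/dev/sdb2", SWAP_FLAG_DISCARD |
				SWAP_FLAG_DISCARD_ONCE |
				SWAP_FLAG_DISCARD_PAGES))
		perror("swapon");
	return 0;
}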
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d5773a99f20..dd0a2c810529 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -82,6 +82,10 @@ extern void *vmap(struct page **pages, unsigned int count, | |||
82 | unsigned long flags, pgprot_t prot); | 82 | unsigned long flags, pgprot_t prot); |
83 | extern void vunmap(const void *addr); | 83 | extern void vunmap(const void *addr); |
84 | 84 | ||
85 | extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, | ||
86 | unsigned long uaddr, void *kaddr, | ||
87 | unsigned long size); | ||
88 | |||
85 | extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 89 | extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
86 | unsigned long pgoff); | 90 | unsigned long pgoff); |
87 | void vmalloc_sync_all(void); | 91 | void vmalloc_sync_all(void); |
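remap_vmalloc_range_partial() is a new export that maps a page-aligned slice of a vmalloc area at a given user address instead of the whole area, introduced elsewhere in this series for the /proc/vmcore mmap work. A sketch of a driver mmap handler built on it; the my_* state is hypothetical and the buffer is assumed to come from vmalloc_user():

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;			/* vmalloc_user() allocation */
static unsigned long my_buf_size;	/* page-aligned size */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (len > my_buf_size)
		return -EINVAL;

	/* expose only the first len bytes of the buffer to userspace */
	return remap_vmalloc_range_partial(vma, vma->vm_start, my_buf, len);
}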
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a9f4119c7e2e..a0ed78ab54d7 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -445,11 +445,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
445 | alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) | 445 | alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) |
446 | 446 | ||
447 | #define create_workqueue(name) \ | 447 | #define create_workqueue(name) \ |
448 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) | 448 | alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) |
449 | #define create_freezable_workqueue(name) \ | 449 | #define create_freezable_workqueue(name) \ |
450 | alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 450 | alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ |
451 | 1, (name)) | ||
451 | #define create_singlethread_workqueue(name) \ | 452 | #define create_singlethread_workqueue(name) \ |
452 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 453 | alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) |
453 | 454 | ||
454 | extern void destroy_workqueue(struct workqueue_struct *wq); | 455 | extern void destroy_workqueue(struct workqueue_struct *wq); |
455 | 456 | ||
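The convenience macros now feed the caller's name through an explicit "%s" instead of using it as the format string itself, so a workqueue name containing '%' can no longer be misparsed by alloc_workqueue()'s printf-style first argument. Call sites keep passing a plain name; a sketch of what the macro now expands to (the helper is illustrative):

#include <linux/workqueue.h>

static struct workqueue_struct *make_wq(const char *name)
{
	/* old expansion: alloc_workqueue(name, WQ_MEM_RECLAIM, 1)
	 *   - 'name' was interpreted as a format string
	 * new expansion: the name travels as plain data behind "%s" */
	return alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, name);
}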
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h new file mode 100644 index 000000000000..1c9fabde69e4 --- /dev/null +++ b/include/trace/events/pagemap.h | |||
@@ -0,0 +1,89 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM pagemap | ||
3 | |||
4 | #if !defined(_TRACE_PAGEMAP_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_PAGEMAP_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | #include <linux/mm.h> | ||
9 | |||
10 | #define PAGEMAP_MAPPED 0x0001u | ||
11 | #define PAGEMAP_ANONYMOUS 0x0002u | ||
12 | #define PAGEMAP_FILE 0x0004u | ||
13 | #define PAGEMAP_SWAPCACHE 0x0008u | ||
14 | #define PAGEMAP_SWAPBACKED 0x0010u | ||
15 | #define PAGEMAP_MAPPEDDISK 0x0020u | ||
16 | #define PAGEMAP_BUFFERS 0x0040u | ||
17 | |||
18 | #define trace_pagemap_flags(page) ( \ | ||
19 | (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \ | ||
20 | (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \ | ||
21 | (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \ | ||
22 | (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \ | ||
23 | (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \ | ||
24 | (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \ | ||
25 | ) | ||
26 | |||
27 | TRACE_EVENT(mm_lru_insertion, | ||
28 | |||
29 | TP_PROTO( | ||
30 | struct page *page, | ||
31 | unsigned long pfn, | ||
32 | int lru, | ||
33 | unsigned long flags | ||
34 | ), | ||
35 | |||
36 | TP_ARGS(page, pfn, lru, flags), | ||
37 | |||
38 | TP_STRUCT__entry( | ||
39 | __field(struct page *, page ) | ||
40 | __field(unsigned long, pfn ) | ||
41 | __field(int, lru ) | ||
42 | __field(unsigned long, flags ) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __entry->page = page; | ||
47 | __entry->pfn = pfn; | ||
48 | __entry->lru = lru; | ||
49 | __entry->flags = flags; | ||
50 | ), | ||
51 | |||
52 | /* Flag format is based on page-types.c formatting for pagemap */ | ||
53 | TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s", | ||
54 | __entry->page, | ||
55 | __entry->pfn, | ||
56 | __entry->lru, | ||
57 | __entry->flags & PAGEMAP_MAPPED ? "M" : " ", | ||
58 | __entry->flags & PAGEMAP_ANONYMOUS ? "a" : "f", | ||
59 | __entry->flags & PAGEMAP_SWAPCACHE ? "s" : " ", | ||
60 | __entry->flags & PAGEMAP_SWAPBACKED ? "b" : " ", | ||
61 | __entry->flags & PAGEMAP_MAPPEDDISK ? "d" : " ", | ||
62 | __entry->flags & PAGEMAP_BUFFERS ? "B" : " ") | ||
63 | ); | ||
64 | |||
65 | TRACE_EVENT(mm_lru_activate, | ||
66 | |||
67 | TP_PROTO(struct page *page, unsigned long pfn), | ||
68 | |||
69 | TP_ARGS(page, pfn), | ||
70 | |||
71 | TP_STRUCT__entry( | ||
72 | __field(struct page *, page ) | ||
73 | __field(unsigned long, pfn ) | ||
74 | ), | ||
75 | |||
76 | TP_fast_assign( | ||
77 | __entry->page = page; | ||
78 | __entry->pfn = pfn; | ||
79 | ), | ||
80 | |||
81 | /* Flag format is based on page-types.c formatting for pagemap */ | ||
82 | TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn) | ||
83 | |||
84 | ); | ||
85 | |||
86 | #endif /* _TRACE_PAGEMAP_H */ | ||
87 | |||
88 | /* This part must be outside protection */ | ||
89 | #include <trace/define_trace.h> | ||
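The new header defines two tracepoints plus a trace_pagemap_flags() helper that condenses page state into the single-letter flags printed by TP_printk. A sketch of what a call site looks like; the in-tree callers are added to mm/swap.c by this series and are not part of this hunk:

#include <linux/mm.h>
#include <trace/events/pagemap.h>

static void example_trace_insertion(struct page *page, int lru)
{
	trace_mm_lru_insertion(page, page_to_pfn(page), lru,
			       trace_pagemap_flags(page));
}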
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index 52ebcc89f306..cf1019e15f5b 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h | |||
@@ -61,6 +61,9 @@ struct ptrace_peeksiginfo_args { | |||
61 | __s32 nr; /* how may siginfos to take */ | 61 | __s32 nr; /* how may siginfos to take */ |
62 | }; | 62 | }; |
63 | 63 | ||
64 | #define PTRACE_GETSIGMASK 0x420a | ||
65 | #define PTRACE_SETSIGMASK 0x420b | ||
66 | |||
64 | /* Read signals from a shared (process wide) queue */ | 67 | /* Read signals from a shared (process wide) queue */ |
65 | #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) | 68 | #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) |
66 | 69 | ||
diff --git a/init/Kconfig b/init/Kconfig index 118895cc1f67..ef10d83bc379 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -899,7 +899,7 @@ config MEMCG | |||
899 | 899 | ||
900 | Note that setting this option increases fixed memory overhead | 900 | Note that setting this option increases fixed memory overhead |
901 | associated with each page of memory in the system. By this, | 901 | associated with each page of memory in the system. By this, |
902 | 20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory | 902 | 8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory |
903 | usage tracking struct at boot. Total amount of this is printed out | 903 | usage tracking struct at boot. Total amount of this is printed out |
904 | at boot. | 904 | at boot. |
905 | 905 | ||
diff --git a/init/do_mounts.c b/init/do_mounts.c index a2b49f2c1bd8..816014c4627e 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c | |||
@@ -536,7 +536,7 @@ void __init prepare_namespace(void) | |||
536 | int is_floppy; | 536 | int is_floppy; |
537 | 537 | ||
538 | if (root_delay) { | 538 | if (root_delay) { |
539 | printk(KERN_INFO "Waiting %dsec before mounting root device...\n", | 539 | printk(KERN_INFO "Waiting %d sec before mounting root device...\n", |
540 | root_delay); | 540 | root_delay); |
541 | ssleep(root_delay); | 541 | ssleep(root_delay); |
542 | } | 542 | } |
diff --git a/init/main.c b/init/main.c index ec549581d732..f2366533c922 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -655,8 +655,6 @@ static void __init do_ctors(void) | |||
655 | bool initcall_debug; | 655 | bool initcall_debug; |
656 | core_param(initcall_debug, initcall_debug, bool, 0644); | 656 | core_param(initcall_debug, initcall_debug, bool, 0644); |
657 | 657 | ||
658 | static char msgbuf[64]; | ||
659 | |||
660 | static int __init_or_module do_one_initcall_debug(initcall_t fn) | 658 | static int __init_or_module do_one_initcall_debug(initcall_t fn) |
661 | { | 659 | { |
662 | ktime_t calltime, delta, rettime; | 660 | ktime_t calltime, delta, rettime; |
@@ -679,6 +677,7 @@ int __init_or_module do_one_initcall(initcall_t fn) | |||
679 | { | 677 | { |
680 | int count = preempt_count(); | 678 | int count = preempt_count(); |
681 | int ret; | 679 | int ret; |
680 | char msgbuf[64]; | ||
682 | 681 | ||
683 | if (initcall_debug) | 682 | if (initcall_debug) |
684 | ret = do_one_initcall_debug(fn); | 683 | ret = do_one_initcall_debug(fn); |
diff --git a/kernel/exit.c b/kernel/exit.c index 6a057750ebbb..fafe75d9e6f6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -312,17 +312,6 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) | |||
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | void __set_special_pids(struct pid *pid) | ||
316 | { | ||
317 | struct task_struct *curr = current->group_leader; | ||
318 | |||
319 | if (task_session(curr) != pid) | ||
320 | change_pid(curr, PIDTYPE_SID, pid); | ||
321 | |||
322 | if (task_pgrp(curr) != pid) | ||
323 | change_pid(curr, PIDTYPE_PGID, pid); | ||
324 | } | ||
325 | |||
326 | /* | 315 | /* |
327 | * Let kernel threads use this to say that they allow a certain signal. | 316 | * Let kernel threads use this to say that they allow a certain signal. |
328 | * Must not be used if kthread was cloned with CLONE_SIGHAND. | 317 | * Must not be used if kthread was cloned with CLONE_SIGHAND. |
diff --git a/kernel/fork.c b/kernel/fork.c index 987b28a1f01b..6e6a1c11b3e5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1121,6 +1121,12 @@ static void posix_cpu_timers_init(struct task_struct *tsk) | |||
1121 | INIT_LIST_HEAD(&tsk->cpu_timers[2]); | 1121 | INIT_LIST_HEAD(&tsk->cpu_timers[2]); |
1122 | } | 1122 | } |
1123 | 1123 | ||
1124 | static inline void | ||
1125 | init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) | ||
1126 | { | ||
1127 | task->pids[type].pid = pid; | ||
1128 | } | ||
1129 | |||
1124 | /* | 1130 | /* |
1125 | * This creates a new process as a copy of the old one, | 1131 | * This creates a new process as a copy of the old one, |
1126 | * but does not actually start it yet. | 1132 | * but does not actually start it yet. |
@@ -1199,8 +1205,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1199 | retval = -EAGAIN; | 1205 | retval = -EAGAIN; |
1200 | if (atomic_read(&p->real_cred->user->processes) >= | 1206 | if (atomic_read(&p->real_cred->user->processes) >= |
1201 | task_rlimit(p, RLIMIT_NPROC)) { | 1207 | task_rlimit(p, RLIMIT_NPROC)) { |
1202 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && | 1208 | if (p->real_cred->user != INIT_USER && |
1203 | p->real_cred->user != INIT_USER) | 1209 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) |
1204 | goto bad_fork_free; | 1210 | goto bad_fork_free; |
1205 | } | 1211 | } |
1206 | current->flags &= ~PF_NPROC_EXCEEDED; | 1212 | current->flags &= ~PF_NPROC_EXCEEDED; |
@@ -1354,11 +1360,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1354 | goto bad_fork_cleanup_io; | 1360 | goto bad_fork_cleanup_io; |
1355 | } | 1361 | } |
1356 | 1362 | ||
1357 | p->pid = pid_nr(pid); | ||
1358 | p->tgid = p->pid; | ||
1359 | if (clone_flags & CLONE_THREAD) | ||
1360 | p->tgid = current->tgid; | ||
1361 | |||
1362 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1363 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
1363 | /* | 1364 | /* |
1364 | * Clear TID on mm_release()? | 1365 | * Clear TID on mm_release()? |
@@ -1394,12 +1395,19 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1394 | clear_all_latency_tracing(p); | 1395 | clear_all_latency_tracing(p); |
1395 | 1396 | ||
1396 | /* ok, now we should be set up.. */ | 1397 | /* ok, now we should be set up.. */ |
1397 | if (clone_flags & CLONE_THREAD) | 1398 | p->pid = pid_nr(pid); |
1399 | if (clone_flags & CLONE_THREAD) { | ||
1398 | p->exit_signal = -1; | 1400 | p->exit_signal = -1; |
1399 | else if (clone_flags & CLONE_PARENT) | 1401 | p->group_leader = current->group_leader; |
1400 | p->exit_signal = current->group_leader->exit_signal; | 1402 | p->tgid = current->tgid; |
1401 | else | 1403 | } else { |
1402 | p->exit_signal = (clone_flags & CSIGNAL); | 1404 | if (clone_flags & CLONE_PARENT) |
1405 | p->exit_signal = current->group_leader->exit_signal; | ||
1406 | else | ||
1407 | p->exit_signal = (clone_flags & CSIGNAL); | ||
1408 | p->group_leader = p; | ||
1409 | p->tgid = p->pid; | ||
1410 | } | ||
1403 | 1411 | ||
1404 | p->pdeath_signal = 0; | 1412 | p->pdeath_signal = 0; |
1405 | p->exit_state = 0; | 1413 | p->exit_state = 0; |
@@ -1408,15 +1416,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1408 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); | 1416 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); |
1409 | p->dirty_paused_when = 0; | 1417 | p->dirty_paused_when = 0; |
1410 | 1418 | ||
1411 | /* | ||
1412 | * Ok, make it visible to the rest of the system. | ||
1413 | * We dont wake it up yet. | ||
1414 | */ | ||
1415 | p->group_leader = p; | ||
1416 | INIT_LIST_HEAD(&p->thread_group); | 1419 | INIT_LIST_HEAD(&p->thread_group); |
1417 | p->task_works = NULL; | 1420 | p->task_works = NULL; |
1418 | 1421 | ||
1419 | /* Need tasklist lock for parent etc handling! */ | 1422 | /* |
1423 | * Make it visible to the rest of the system, but dont wake it up yet. | ||
1424 | * Need tasklist lock for parent etc handling! | ||
1425 | */ | ||
1420 | write_lock_irq(&tasklist_lock); | 1426 | write_lock_irq(&tasklist_lock); |
1421 | 1427 | ||
1422 | /* CLONE_PARENT re-uses the old parent */ | 1428 | /* CLONE_PARENT re-uses the old parent */ |
@@ -1446,18 +1452,14 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1446 | goto bad_fork_free_pid; | 1452 | goto bad_fork_free_pid; |
1447 | } | 1453 | } |
1448 | 1454 | ||
1449 | if (clone_flags & CLONE_THREAD) { | ||
1450 | current->signal->nr_threads++; | ||
1451 | atomic_inc(¤t->signal->live); | ||
1452 | atomic_inc(¤t->signal->sigcnt); | ||
1453 | p->group_leader = current->group_leader; | ||
1454 | list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); | ||
1455 | } | ||
1456 | |||
1457 | if (likely(p->pid)) { | 1455 | if (likely(p->pid)) { |
1458 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); | 1456 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
1459 | 1457 | ||
1458 | init_task_pid(p, PIDTYPE_PID, pid); | ||
1460 | if (thread_group_leader(p)) { | 1459 | if (thread_group_leader(p)) { |
1460 | init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); | ||
1461 | init_task_pid(p, PIDTYPE_SID, task_session(current)); | ||
1462 | |||
1461 | if (is_child_reaper(pid)) { | 1463 | if (is_child_reaper(pid)) { |
1462 | ns_of_pid(pid)->child_reaper = p; | 1464 | ns_of_pid(pid)->child_reaper = p; |
1463 | p->signal->flags |= SIGNAL_UNKILLABLE; | 1465 | p->signal->flags |= SIGNAL_UNKILLABLE; |
@@ -1465,13 +1467,19 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1465 | 1467 | ||
1466 | p->signal->leader_pid = pid; | 1468 | p->signal->leader_pid = pid; |
1467 | p->signal->tty = tty_kref_get(current->signal->tty); | 1469 | p->signal->tty = tty_kref_get(current->signal->tty); |
1468 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); | ||
1469 | attach_pid(p, PIDTYPE_SID, task_session(current)); | ||
1470 | list_add_tail(&p->sibling, &p->real_parent->children); | 1470 | list_add_tail(&p->sibling, &p->real_parent->children); |
1471 | list_add_tail_rcu(&p->tasks, &init_task.tasks); | 1471 | list_add_tail_rcu(&p->tasks, &init_task.tasks); |
1472 | attach_pid(p, PIDTYPE_PGID); | ||
1473 | attach_pid(p, PIDTYPE_SID); | ||
1472 | __this_cpu_inc(process_counts); | 1474 | __this_cpu_inc(process_counts); |
1475 | } else { | ||
1476 | current->signal->nr_threads++; | ||
1477 | atomic_inc(¤t->signal->live); | ||
1478 | atomic_inc(¤t->signal->sigcnt); | ||
1479 | list_add_tail_rcu(&p->thread_group, | ||
1480 | &p->group_leader->thread_group); | ||
1473 | } | 1481 | } |
1474 | attach_pid(p, PIDTYPE_PID, pid); | 1482 | attach_pid(p, PIDTYPE_PID); |
1475 | nr_threads++; | 1483 | nr_threads++; |
1476 | } | 1484 | } |
1477 | 1485 | ||
diff --git a/kernel/kmod.c b/kernel/kmod.c index 8241906c4b61..fb326365b694 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -147,6 +147,9 @@ int __request_module(bool wait, const char *fmt, ...) | |||
147 | */ | 147 | */ |
148 | WARN_ON_ONCE(wait && current_is_async()); | 148 | WARN_ON_ONCE(wait && current_is_async()); |
149 | 149 | ||
150 | if (!modprobe_path[0]) | ||
151 | return 0; | ||
152 | |||
150 | va_start(args, fmt); | 153 | va_start(args, fmt); |
151 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); | 154 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); |
152 | va_end(args); | 155 | va_end(args); |
@@ -569,14 +572,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) | |||
569 | int retval = 0; | 572 | int retval = 0; |
570 | 573 | ||
571 | helper_lock(); | 574 | helper_lock(); |
572 | if (!sub_info->path) { | ||
573 | retval = -EINVAL; | ||
574 | goto out; | ||
575 | } | ||
576 | |||
577 | if (sub_info->path[0] == '\0') | ||
578 | goto out; | ||
579 | |||
580 | if (!khelper_wq || usermodehelper_disabled) { | 575 | if (!khelper_wq || usermodehelper_disabled) { |
581 | retval = -EBUSY; | 576 | retval = -EBUSY; |
582 | goto out; | 577 | goto out; |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index bddf3b201a48..6e33498d665c 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -2332,6 +2332,7 @@ static ssize_t write_enabled_file_bool(struct file *file, | |||
2332 | if (copy_from_user(buf, user_buf, buf_size)) | 2332 | if (copy_from_user(buf, user_buf, buf_size)) |
2333 | return -EFAULT; | 2333 | return -EFAULT; |
2334 | 2334 | ||
2335 | buf[buf_size] = '\0'; | ||
2335 | switch (buf[0]) { | 2336 | switch (buf[0]) { |
2336 | case 'y': | 2337 | case 'y': |
2337 | case 'Y': | 2338 | case 'Y': |
@@ -2343,6 +2344,8 @@ static ssize_t write_enabled_file_bool(struct file *file, | |||
2343 | case '0': | 2344 | case '0': |
2344 | disarm_all_kprobes(); | 2345 | disarm_all_kprobes(); |
2345 | break; | 2346 | break; |
2347 | default: | ||
2348 | return -EINVAL; | ||
2346 | } | 2349 | } |
2347 | 2350 | ||
2348 | return count; | 2351 | return count; |
diff --git a/kernel/pid.c b/kernel/pid.c index 0db3e791a06d..66505c1dfc51 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -75,6 +75,7 @@ struct pid_namespace init_pid_ns = { | |||
75 | [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } | 75 | [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } |
76 | }, | 76 | }, |
77 | .last_pid = 0, | 77 | .last_pid = 0, |
78 | .nr_hashed = PIDNS_HASH_ADDING, | ||
78 | .level = 0, | 79 | .level = 0, |
79 | .child_reaper = &init_task, | 80 | .child_reaper = &init_task, |
80 | .user_ns = &init_user_ns, | 81 | .user_ns = &init_user_ns, |
@@ -373,14 +374,10 @@ EXPORT_SYMBOL_GPL(find_vpid); | |||
373 | /* | 374 | /* |
374 | * attach_pid() must be called with the tasklist_lock write-held. | 375 | * attach_pid() must be called with the tasklist_lock write-held. |
375 | */ | 376 | */ |
376 | void attach_pid(struct task_struct *task, enum pid_type type, | 377 | void attach_pid(struct task_struct *task, enum pid_type type) |
377 | struct pid *pid) | ||
378 | { | 378 | { |
379 | struct pid_link *link; | 379 | struct pid_link *link = &task->pids[type]; |
380 | 380 | hlist_add_head_rcu(&link->node, &link->pid->tasks[type]); | |
381 | link = &task->pids[type]; | ||
382 | link->pid = pid; | ||
383 | hlist_add_head_rcu(&link->node, &pid->tasks[type]); | ||
384 | } | 381 | } |
385 | 382 | ||
386 | static void __change_pid(struct task_struct *task, enum pid_type type, | 383 | static void __change_pid(struct task_struct *task, enum pid_type type, |
@@ -412,7 +409,7 @@ void change_pid(struct task_struct *task, enum pid_type type, | |||
412 | struct pid *pid) | 409 | struct pid *pid) |
413 | { | 410 | { |
414 | __change_pid(task, type, pid); | 411 | __change_pid(task, type, pid); |
415 | attach_pid(task, type, pid); | 412 | attach_pid(task, type); |
416 | } | 413 | } |
417 | 414 | ||
418 | /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ | 415 | /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ |
@@ -594,7 +591,6 @@ void __init pidmap_init(void) | |||
594 | /* Reserve PID 0. We never call free_pidmap(0) */ | 591 | /* Reserve PID 0. We never call free_pidmap(0) */ |
595 | set_bit(0, init_pid_ns.pidmap[0].page); | 592 | set_bit(0, init_pid_ns.pidmap[0].page); |
596 | atomic_dec(&init_pid_ns.pidmap[0].nr_free); | 593 | atomic_dec(&init_pid_ns.pidmap[0].nr_free); |
597 | init_pid_ns.nr_hashed = PIDNS_HASH_ADDING; | ||
598 | 594 | ||
599 | init_pid_ns.pid_cachep = KMEM_CACHE(pid, | 595 | init_pid_ns.pid_cachep = KMEM_CACHE(pid, |
600 | SLAB_HWCACHE_ALIGN | SLAB_PANIC); | 596 | SLAB_HWCACHE_ALIGN | SLAB_PANIC); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 7872a35eafe7..349587bb03e1 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1652,7 +1652,7 @@ unsigned long snapshot_get_image_size(void) | |||
1652 | static int init_header(struct swsusp_info *info) | 1652 | static int init_header(struct swsusp_info *info) |
1653 | { | 1653 | { |
1654 | memset(info, 0, sizeof(struct swsusp_info)); | 1654 | memset(info, 0, sizeof(struct swsusp_info)); |
1655 | info->num_physpages = num_physpages; | 1655 | info->num_physpages = get_num_physpages(); |
1656 | info->image_pages = nr_copy_pages; | 1656 | info->image_pages = nr_copy_pages; |
1657 | info->pages = snapshot_get_image_size(); | 1657 | info->pages = snapshot_get_image_size(); |
1658 | info->size = info->pages; | 1658 | info->size = info->pages; |
@@ -1796,7 +1796,7 @@ static int check_header(struct swsusp_info *info) | |||
1796 | char *reason; | 1796 | char *reason; |
1797 | 1797 | ||
1798 | reason = check_image_kernel(info); | 1798 | reason = check_image_kernel(info); |
1799 | if (!reason && info->num_physpages != num_physpages) | 1799 | if (!reason && info->num_physpages != get_num_physpages()) |
1800 | reason = "memory size"; | 1800 | reason = "memory size"; |
1801 | if (reason) { | 1801 | if (reason) { |
1802 | printk(KERN_ERR "PM: Image mismatch: %s\n", reason); | 1802 | printk(KERN_ERR "PM: Image mismatch: %s\n", reason); |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 335a7ae697f5..ba5e6cea181a 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -844,6 +844,47 @@ int ptrace_request(struct task_struct *child, long request, | |||
844 | ret = ptrace_setsiginfo(child, &siginfo); | 844 | ret = ptrace_setsiginfo(child, &siginfo); |
845 | break; | 845 | break; |
846 | 846 | ||
847 | case PTRACE_GETSIGMASK: | ||
848 | if (addr != sizeof(sigset_t)) { | ||
849 | ret = -EINVAL; | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) | ||
854 | ret = -EFAULT; | ||
855 | else | ||
856 | ret = 0; | ||
857 | |||
858 | break; | ||
859 | |||
860 | case PTRACE_SETSIGMASK: { | ||
861 | sigset_t new_set; | ||
862 | |||
863 | if (addr != sizeof(sigset_t)) { | ||
864 | ret = -EINVAL; | ||
865 | break; | ||
866 | } | ||
867 | |||
868 | if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) { | ||
869 | ret = -EFAULT; | ||
870 | break; | ||
871 | } | ||
872 | |||
873 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
874 | |||
875 | /* | ||
876 | * Every thread does recalc_sigpending() after resume, so | ||
877 | * retarget_shared_pending() and recalc_sigpending() are not | ||
878 | * called here. | ||
879 | */ | ||
880 | spin_lock_irq(&child->sighand->siglock); | ||
881 | child->blocked = new_set; | ||
882 | spin_unlock_irq(&child->sighand->siglock); | ||
883 | |||
884 | ret = 0; | ||
885 | break; | ||
886 | } | ||
887 | |||
847 | case PTRACE_INTERRUPT: | 888 | case PTRACE_INTERRUPT: |
848 | /* | 889 | /* |
849 | * Stop tracee without any side-effect on signal or job | 890 | * Stop tracee without any side-effect on signal or job |
@@ -948,8 +989,7 @@ int ptrace_request(struct task_struct *child, long request, | |||
948 | 989 | ||
949 | #ifdef CONFIG_HAVE_ARCH_TRACEHOOK | 990 | #ifdef CONFIG_HAVE_ARCH_TRACEHOOK |
950 | case PTRACE_GETREGSET: | 991 | case PTRACE_GETREGSET: |
951 | case PTRACE_SETREGSET: | 992 | case PTRACE_SETREGSET: { |
952 | { | ||
953 | struct iovec kiov; | 993 | struct iovec kiov; |
954 | struct iovec __user *uiov = datavp; | 994 | struct iovec __user *uiov = datavp; |
955 | 995 | ||
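PTRACE_GETSIGMASK and PTRACE_SETSIGMASK expose the tracee's blocked-signal mask; addr must equal the size of the kernel's sigset_t (eight bytes for the 64-signal Linux sigset, not glibc's much larger sigset_t), otherwise -EINVAL is returned as in the code above, and SIGKILL/SIGSTOP are stripped from any mask that is set. A userspace sketch against an already-attached, stopped tracee:

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETSIGMASK
#define PTRACE_GETSIGMASK	0x420a
#define PTRACE_SETSIGMASK	0x420b
#endif

static int clear_blocked_signals(pid_t pid)
{
	uint64_t mask;	/* kernel sigset_t: _NSIG == 64 bits */

	if (ptrace(PTRACE_GETSIGMASK, pid, (void *)sizeof(mask), &mask) == -1)
		return -1;
	mask = 0;	/* SIGKILL/SIGSTOP are filtered by the kernel anyway */
	return ptrace(PTRACE_SETSIGMASK, pid, (void *)sizeof(mask), &mask);
}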
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index cf3adc6fe001..e08abb9461ac 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -3026,7 +3026,7 @@ static int __init rcu_spawn_gp_kthread(void) | |||
3026 | struct task_struct *t; | 3026 | struct task_struct *t; |
3027 | 3027 | ||
3028 | for_each_rcu_flavor(rsp) { | 3028 | for_each_rcu_flavor(rsp) { |
3029 | t = kthread_run(rcu_gp_kthread, rsp, rsp->name); | 3029 | t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); |
3030 | BUG_ON(IS_ERR(t)); | 3030 | BUG_ON(IS_ERR(t)); |
3031 | rnp = rcu_get_root(rsp); | 3031 | rnp = rcu_get_root(rsp); |
3032 | raw_spin_lock_irqsave(&rnp->lock, flags); | 3032 | raw_spin_lock_irqsave(&rnp->lock, flags); |
diff --git a/kernel/resource.c b/kernel/resource.c index 77bf11a86c7d..3f285dce9347 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -449,7 +449,6 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
449 | struct resource *this = root->child; | 449 | struct resource *this = root->child; |
450 | struct resource tmp = *new, avail, alloc; | 450 | struct resource tmp = *new, avail, alloc; |
451 | 451 | ||
452 | tmp.flags = new->flags; | ||
453 | tmp.start = root->start; | 452 | tmp.start = root->start; |
454 | /* | 453 | /* |
455 | * Skip past an allocated resource that starts at 0, since the assignment | 454 | * Skip past an allocated resource that starts at 0, since the assignment |
diff --git a/kernel/sys.c b/kernel/sys.c index 2bbd9a73b54c..071de900c824 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -511,7 +511,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | |||
511 | case LINUX_REBOOT_CMD_HALT: | 511 | case LINUX_REBOOT_CMD_HALT: |
512 | kernel_halt(); | 512 | kernel_halt(); |
513 | do_exit(0); | 513 | do_exit(0); |
514 | panic("cannot halt"); | 514 | panic("cannot halt.\n"); |
515 | 515 | ||
516 | case LINUX_REBOOT_CMD_POWER_OFF: | 516 | case LINUX_REBOOT_CMD_POWER_OFF: |
517 | kernel_power_off(); | 517 | kernel_power_off(); |
@@ -1309,6 +1309,17 @@ out: | |||
1309 | return retval; | 1309 | return retval; |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | static void set_special_pids(struct pid *pid) | ||
1313 | { | ||
1314 | struct task_struct *curr = current->group_leader; | ||
1315 | |||
1316 | if (task_session(curr) != pid) | ||
1317 | change_pid(curr, PIDTYPE_SID, pid); | ||
1318 | |||
1319 | if (task_pgrp(curr) != pid) | ||
1320 | change_pid(curr, PIDTYPE_PGID, pid); | ||
1321 | } | ||
1322 | |||
1312 | SYSCALL_DEFINE0(setsid) | 1323 | SYSCALL_DEFINE0(setsid) |
1313 | { | 1324 | { |
1314 | struct task_struct *group_leader = current->group_leader; | 1325 | struct task_struct *group_leader = current->group_leader; |
@@ -1328,7 +1339,7 @@ SYSCALL_DEFINE0(setsid) | |||
1328 | goto out; | 1339 | goto out; |
1329 | 1340 | ||
1330 | group_leader->signal->leader = 1; | 1341 | group_leader->signal->leader = 1; |
1331 | __set_special_pids(sid); | 1342 | set_special_pids(sid); |
1332 | 1343 | ||
1333 | proc_clear_tty(group_leader); | 1344 | proc_clear_tty(group_leader); |
1334 | 1345 | ||
@@ -2355,8 +2366,7 @@ static int do_sysinfo(struct sysinfo *info) | |||
2355 | 2366 | ||
2356 | memset(info, 0, sizeof(struct sysinfo)); | 2367 | memset(info, 0, sizeof(struct sysinfo)); |
2357 | 2368 | ||
2358 | ktime_get_ts(&tp); | 2369 | get_monotonic_boottime(&tp); |
2359 | monotonic_to_bootbased(&tp); | ||
2360 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | 2370 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
2361 | 2371 | ||
2362 | get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); | 2372 | get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); |
diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 53bad099ebd6..c03154173cc7 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c | |||
@@ -6,15 +6,58 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/export.h> | 7 | #include <linux/export.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/smp.h> | ||
10 | #include <linux/atomic.h> | ||
11 | |||
12 | static void __dump_stack(void) | ||
13 | { | ||
14 | dump_stack_print_info(KERN_DEFAULT); | ||
15 | show_stack(NULL, NULL); | ||
16 | } | ||
9 | 17 | ||
10 | /** | 18 | /** |
11 | * dump_stack - dump the current task information and its stack trace | 19 | * dump_stack - dump the current task information and its stack trace |
12 | * | 20 | * |
13 | * Architectures can override this implementation by implementing its own. | 21 | * Architectures can override this implementation by implementing its own. |
14 | */ | 22 | */ |
23 | #ifdef CONFIG_SMP | ||
24 | static atomic_t dump_lock = ATOMIC_INIT(-1); | ||
25 | |||
15 | void dump_stack(void) | 26 | void dump_stack(void) |
16 | { | 27 | { |
17 | dump_stack_print_info(KERN_DEFAULT); | 28 | int was_locked; |
18 | show_stack(NULL, NULL); | 29 | int old; |
30 | int cpu; | ||
31 | |||
32 | /* | ||
33 | * Permit this cpu to perform nested stack dumps while serialising | ||
34 | * against other CPUs | ||
35 | */ | ||
36 | preempt_disable(); | ||
37 | |||
38 | retry: | ||
39 | cpu = smp_processor_id(); | ||
40 | old = atomic_cmpxchg(&dump_lock, -1, cpu); | ||
41 | if (old == -1) { | ||
42 | was_locked = 0; | ||
43 | } else if (old == cpu) { | ||
44 | was_locked = 1; | ||
45 | } else { | ||
46 | cpu_relax(); | ||
47 | goto retry; | ||
48 | } | ||
49 | |||
50 | __dump_stack(); | ||
51 | |||
52 | if (!was_locked) | ||
53 | atomic_set(&dump_lock, -1); | ||
54 | |||
55 | preempt_enable(); | ||
56 | } | ||
57 | #else | ||
58 | void dump_stack(void) | ||
59 | { | ||
60 | __dump_stack(); | ||
19 | } | 61 | } |
62 | #endif | ||
20 | EXPORT_SYMBOL(dump_stack); | 63 | EXPORT_SYMBOL(dump_stack); |
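dump_stack() is now serialised across CPUs through a single atomic owner slot: the cmpxchg either claims the slot, detects that this CPU already owns it (a nested dump, e.g. from an NMI), or spins until the other CPU finishes. The same pattern in isolation, as a sketch:

#include <linux/atomic.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/processor.h>

static atomic_t owner = ATOMIC_INIT(-1);

static void run_serialized(void (*op)(void))
{
	int cpu, old, nested = 0;

	preempt_disable();
	for (;;) {
		cpu = smp_processor_id();
		old = atomic_cmpxchg(&owner, -1, cpu);
		if (old == -1)
			break;		/* slot claimed by this CPU */
		if (old == cpu) {
			nested = 1;	/* re-entered on the owning CPU */
			break;
		}
		cpu_relax();		/* another CPU is busy; wait */
	}

	op();

	if (!nested)
		atomic_set(&owner, -1);
	preempt_enable();
}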
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
@@ -524,9 +524,7 @@ EXPORT_SYMBOL(idr_alloc_cyclic); | |||
524 | 524 | ||
525 | static void idr_remove_warning(int id) | 525 | static void idr_remove_warning(int id) |
526 | { | 526 | { |
527 | printk(KERN_WARNING | 527 | WARN(1, "idr_remove called for id=%d which is not allocated.\n", id); |
528 | "idr_remove called for id=%d which is not allocated.\n", id); | ||
529 | dump_stack(); | ||
530 | } | 528 | } |
531 | 529 | ||
532 | static void sub_remove(struct idr *idp, int shift, int id) | 530 | static void sub_remove(struct idr *idp, int shift, int id) |
@@ -1064,8 +1062,7 @@ void ida_remove(struct ida *ida, int id) | |||
1064 | return; | 1062 | return; |
1065 | 1063 | ||
1066 | err: | 1064 | err: |
1067 | printk(KERN_WARNING | 1065 | WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); |
1068 | "ida_remove called for id=%d which is not allocated.\n", id); | ||
1069 | } | 1066 | } |
1070 | EXPORT_SYMBOL(ida_remove); | 1067 | EXPORT_SYMBOL(ida_remove); |
1071 | 1068 | ||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index ba6085d9c741..1fc23a3277e1 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -80,8 +80,8 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
80 | if (count >= batch || count <= -batch) { | 80 | if (count >= batch || count <= -batch) { |
81 | raw_spin_lock(&fbc->lock); | 81 | raw_spin_lock(&fbc->lock); |
82 | fbc->count += count; | 82 | fbc->count += count; |
83 | __this_cpu_write(*fbc->counters, 0); | ||
84 | raw_spin_unlock(&fbc->lock); | 83 | raw_spin_unlock(&fbc->lock); |
84 | __this_cpu_write(*fbc->counters, 0); | ||
85 | } else { | 85 | } else { |
86 | __this_cpu_write(*fbc->counters, count); | 86 | __this_cpu_write(*fbc->counters, count); |
87 | } | 87 | } |
diff --git a/mm/Kconfig b/mm/Kconfig index f5e698e30d4a..7e28ecfa8aa4 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -477,3 +477,15 @@ config FRONTSWAP | |||
477 | and swap data is stored as normal on the matching swap device. | 477 | and swap data is stored as normal on the matching swap device. |
478 | 478 | ||
479 | If unsure, say Y to enable frontswap. | 479 | If unsure, say Y to enable frontswap. |
480 | |||
481 | config MEM_SOFT_DIRTY | ||
482 | bool "Track memory changes" | ||
483 | depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY | ||
484 | select PROC_PAGE_MONITOR | ||
485 | help | ||
486 | This option enables memory changes tracking by introducing a | ||
487 | soft-dirty bit on pte-s. This bit it set when someone writes | ||
488 | into a page just as regular dirty bit, but unlike the latter | ||
489 | it can be cleared by hands. | ||
490 | |||
491 | See Documentation/vm/soft-dirty.txt for more details. | ||
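The soft-dirty bit gives checkpoint/restore a way to find pages written since the last snapshot: the bits are cleared by writing "4" to /proc/<pid>/clear_refs, and the per-page state is bit 55 of the corresponding /proc/<pid>/pagemap entry, both per Documentation/vm/soft-dirty.txt. A userspace sketch of the read side:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns 1 if the page containing vaddr was written since the last
 * "echo 4 > /proc/<pid>/clear_refs", 0 if clean, -1 on error. */
static int page_soft_dirty(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry;
	off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry);
	int fd, ret = -1;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry), off) == sizeof(entry))
		ret = (entry >> 55) & 1;	/* soft-dirty flag */
	close(fd);
	return ret;
}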
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 502517492258..d014ee5fcbbd 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy); | |||
515 | int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, | 515 | int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, |
516 | unsigned int cap) | 516 | unsigned int cap) |
517 | { | 517 | { |
518 | char tmp[32]; | ||
519 | int err; | 518 | int err; |
520 | 519 | ||
521 | bdi->name = name; | 520 | bdi->name = name; |
@@ -524,8 +523,8 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, | |||
524 | if (err) | 523 | if (err) |
525 | return err; | 524 | return err; |
526 | 525 | ||
527 | sprintf(tmp, "%.28s%s", name, "-%d"); | 526 | err = bdi_register(bdi, NULL, "%.28s-%ld", name, |
528 | err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq)); | 527 | atomic_long_inc_return(&bdi_seq)); |
529 | if (err) { | 528 | if (err) { |
530 | bdi_destroy(bdi); | 529 | bdi_destroy(bdi); |
531 | return err; | 530 | return err; |
diff --git a/mm/bootmem.c b/mm/bootmem.c index 2b0bcb019ec2..6ab7744e692e 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -241,33 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | |||
241 | return count; | 241 | return count; |
242 | } | 242 | } |
243 | 243 | ||
244 | static void reset_node_lowmem_managed_pages(pg_data_t *pgdat) | 244 | static int reset_managed_pages_done __initdata; |
245 | |||
246 | static inline void __init reset_node_managed_pages(pg_data_t *pgdat) | ||
245 | { | 247 | { |
246 | struct zone *z; | 248 | struct zone *z; |
247 | 249 | ||
248 | /* | 250 | if (reset_managed_pages_done) |
249 | * In free_area_init_core(), highmem zone's managed_pages is set to | 251 | return; |
250 | * present_pages, and bootmem allocator doesn't allocate from highmem | 252 | |
251 | * zones. So there's no need to recalculate managed_pages because all | ||
252 | * highmem pages will be managed by the buddy system. Here highmem | ||
253 | * zone also includes highmem movable zone. | ||
254 | */ | ||
255 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) | 253 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
256 | if (!is_highmem(z)) | 254 | z->managed_pages = 0; |
257 | z->managed_pages = 0; | ||
258 | } | 255 | } |
259 | 256 | ||
260 | /** | 257 | void __init reset_all_zones_managed_pages(void) |
261 | * free_all_bootmem_node - release a node's free pages to the buddy allocator | ||
262 | * @pgdat: node to be released | ||
263 | * | ||
264 | * Returns the number of pages actually released. | ||
265 | */ | ||
266 | unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) | ||
267 | { | 258 | { |
268 | register_page_bootmem_info_node(pgdat); | 259 | struct pglist_data *pgdat; |
269 | reset_node_lowmem_managed_pages(pgdat); | 260 | |
270 | return free_all_bootmem_core(pgdat->bdata); | 261 | for_each_online_pgdat(pgdat) |
262 | reset_node_managed_pages(pgdat); | ||
263 | reset_managed_pages_done = 1; | ||
271 | } | 264 | } |
272 | 265 | ||
273 | /** | 266 | /** |
@@ -279,14 +272,14 @@ unsigned long __init free_all_bootmem(void) | |||
279 | { | 272 | { |
280 | unsigned long total_pages = 0; | 273 | unsigned long total_pages = 0; |
281 | bootmem_data_t *bdata; | 274 | bootmem_data_t *bdata; |
282 | struct pglist_data *pgdat; | ||
283 | 275 | ||
284 | for_each_online_pgdat(pgdat) | 276 | reset_all_zones_managed_pages(); |
285 | reset_node_lowmem_managed_pages(pgdat); | ||
286 | 277 | ||
287 | list_for_each_entry(bdata, &bdata_list, list) | 278 | list_for_each_entry(bdata, &bdata_list, list) |
288 | total_pages += free_all_bootmem_core(bdata); | 279 | total_pages += free_all_bootmem_core(bdata); |
289 | 280 | ||
281 | totalram_pages += total_pages; | ||
282 | |||
290 | return total_pages; | 283 | return total_pages; |
291 | } | 284 | } |
292 | 285 | ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 362c329b83fe..d8b3b850150c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1429,7 +1429,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, | |||
1429 | if (ret == 1) { | 1429 | if (ret == 1) { |
1430 | pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); | 1430 | pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); |
1431 | VM_BUG_ON(!pmd_none(*new_pmd)); | 1431 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1432 | set_pmd_at(mm, new_addr, new_pmd, pmd); | 1432 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
1433 | spin_unlock(&mm->page_table_lock); | 1433 | spin_unlock(&mm->page_table_lock); |
1434 | } | 1434 | } |
1435 | out: | 1435 | out: |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index aed085ad11a8..83aff0a4d093 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -319,7 +319,7 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) | |||
319 | 319 | ||
320 | hstate = hstate_vma(vma); | 320 | hstate = hstate_vma(vma); |
321 | 321 | ||
322 | return 1UL << (hstate->order + PAGE_SHIFT); | 322 | return 1UL << huge_page_shift(hstate); |
323 | } | 323 | } |
324 | EXPORT_SYMBOL_GPL(vma_kernel_pagesize); | 324 | EXPORT_SYMBOL_GPL(vma_kernel_pagesize); |
325 | 325 | ||
@@ -1263,7 +1263,7 @@ static void __init gather_bootmem_prealloc(void) | |||
1263 | * side-effects, like CommitLimit going negative. | 1263 | * side-effects, like CommitLimit going negative. |
1264 | */ | 1264 | */ |
1265 | if (h->order > (MAX_ORDER - 1)) | 1265 | if (h->order > (MAX_ORDER - 1)) |
1266 | totalram_pages += 1 << h->order; | 1266 | adjust_managed_page_count(page, 1 << h->order); |
1267 | } | 1267 | } |
1268 | } | 1268 | } |
1269 | 1269 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 194721839cf5..2e851f453814 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1148,6 +1148,58 @@ skip_node: | |||
1148 | return NULL; | 1148 | return NULL; |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | static void mem_cgroup_iter_invalidate(struct mem_cgroup *root) | ||
1152 | { | ||
1153 | /* | ||
1154 | * When a group in the hierarchy below root is destroyed, the | ||
1155 | * hierarchy iterator can no longer be trusted since it might | ||
1156 | * have pointed to the destroyed group. Invalidate it. | ||
1157 | */ | ||
1158 | atomic_inc(&root->dead_count); | ||
1159 | } | ||
1160 | |||
1161 | static struct mem_cgroup * | ||
1162 | mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, | ||
1163 | struct mem_cgroup *root, | ||
1164 | int *sequence) | ||
1165 | { | ||
1166 | struct mem_cgroup *position = NULL; | ||
1167 | /* | ||
1168 | * A cgroup destruction happens in two stages: offlining and | ||
1169 | * release. They are separated by a RCU grace period. | ||
1170 | * | ||
1171 | * If the iterator is valid, we may still race with an | ||
1172 | * offlining. The RCU lock ensures the object won't be | ||
1173 | * released, tryget will fail if we lost the race. | ||
1174 | */ | ||
1175 | *sequence = atomic_read(&root->dead_count); | ||
1176 | if (iter->last_dead_count == *sequence) { | ||
1177 | smp_rmb(); | ||
1178 | position = iter->last_visited; | ||
1179 | if (position && !css_tryget(&position->css)) | ||
1180 | position = NULL; | ||
1181 | } | ||
1182 | return position; | ||
1183 | } | ||
1184 | |||
1185 | static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, | ||
1186 | struct mem_cgroup *last_visited, | ||
1187 | struct mem_cgroup *new_position, | ||
1188 | int sequence) | ||
1189 | { | ||
1190 | if (last_visited) | ||
1191 | css_put(&last_visited->css); | ||
1192 | /* | ||
1193 | * We store the sequence count from the time @last_visited was | ||
1194 | * loaded successfully instead of rereading it here so that we | ||
1195 | * don't lose destruction events in between. We could have | ||
1196 | * raced with the destruction of @new_position after all. | ||
1197 | */ | ||
1198 | iter->last_visited = new_position; | ||
1199 | smp_wmb(); | ||
1200 | iter->last_dead_count = sequence; | ||
1201 | } | ||
1202 | |||
1151 | /** | 1203 | /** |
1152 | * mem_cgroup_iter - iterate over memory cgroup hierarchy | 1204 | * mem_cgroup_iter - iterate over memory cgroup hierarchy |
1153 | * @root: hierarchy root | 1205 | * @root: hierarchy root |
@@ -1171,7 +1223,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, | |||
1171 | { | 1223 | { |
1172 | struct mem_cgroup *memcg = NULL; | 1224 | struct mem_cgroup *memcg = NULL; |
1173 | struct mem_cgroup *last_visited = NULL; | 1225 | struct mem_cgroup *last_visited = NULL; |
1174 | unsigned long uninitialized_var(dead_count); | ||
1175 | 1226 | ||
1176 | if (mem_cgroup_disabled()) | 1227 | if (mem_cgroup_disabled()) |
1177 | return NULL; | 1228 | return NULL; |
@@ -1191,6 +1242,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, | |||
1191 | rcu_read_lock(); | 1242 | rcu_read_lock(); |
1192 | while (!memcg) { | 1243 | while (!memcg) { |
1193 | struct mem_cgroup_reclaim_iter *uninitialized_var(iter); | 1244 | struct mem_cgroup_reclaim_iter *uninitialized_var(iter); |
1245 | int uninitialized_var(seq); | ||
1194 | 1246 | ||
1195 | if (reclaim) { | 1247 | if (reclaim) { |
1196 | int nid = zone_to_nid(reclaim->zone); | 1248 | int nid = zone_to_nid(reclaim->zone); |
@@ -1204,37 +1256,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, | |||
1204 | goto out_unlock; | 1256 | goto out_unlock; |
1205 | } | 1257 | } |
1206 | 1258 | ||
1207 | /* | 1259 | last_visited = mem_cgroup_iter_load(iter, root, &seq); |
1208 | * If the dead_count mismatches, a destruction | ||
1209 | * has happened or is happening concurrently. | ||
1210 | * If the dead_count matches, a destruction | ||
1211 | * might still happen concurrently, but since | ||
1212 | * we checked under RCU, that destruction | ||
1213 | * won't free the object until we release the | ||
1214 | * RCU reader lock. Thus, the dead_count | ||
1215 | * check verifies the pointer is still valid, | ||
1216 | * css_tryget() verifies the cgroup pointed to | ||
1217 | * is alive. | ||
1218 | */ | ||
1219 | dead_count = atomic_read(&root->dead_count); | ||
1220 | if (dead_count == iter->last_dead_count) { | ||
1221 | smp_rmb(); | ||
1222 | last_visited = iter->last_visited; | ||
1223 | if (last_visited && | ||
1224 | !css_tryget(&last_visited->css)) | ||
1225 | last_visited = NULL; | ||
1226 | } | ||
1227 | } | 1260 | } |
1228 | 1261 | ||
1229 | memcg = __mem_cgroup_iter_next(root, last_visited); | 1262 | memcg = __mem_cgroup_iter_next(root, last_visited); |
1230 | 1263 | ||
1231 | if (reclaim) { | 1264 | if (reclaim) { |
1232 | if (last_visited) | 1265 | mem_cgroup_iter_update(iter, last_visited, memcg, seq); |
1233 | css_put(&last_visited->css); | ||
1234 | |||
1235 | iter->last_visited = memcg; | ||
1236 | smp_wmb(); | ||
1237 | iter->last_dead_count = dead_count; | ||
1238 | 1266 | ||
1239 | if (!memcg) | 1267 | if (!memcg) |
1240 | iter->generation++; | 1268 | iter->generation++; |
@@ -1448,11 +1476,12 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | |||
1448 | return ret; | 1476 | return ret; |
1449 | } | 1477 | } |
1450 | 1478 | ||
1451 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) | 1479 | bool task_in_mem_cgroup(struct task_struct *task, |
1480 | const struct mem_cgroup *memcg) | ||
1452 | { | 1481 | { |
1453 | int ret; | ||
1454 | struct mem_cgroup *curr = NULL; | 1482 | struct mem_cgroup *curr = NULL; |
1455 | struct task_struct *p; | 1483 | struct task_struct *p; |
1484 | bool ret; | ||
1456 | 1485 | ||
1457 | p = find_lock_task_mm(task); | 1486 | p = find_lock_task_mm(task); |
1458 | if (p) { | 1487 | if (p) { |
@@ -1464,14 +1493,14 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) | |||
1464 | * killer still needs to detect if they have already been oom | 1493 | * killer still needs to detect if they have already been oom |
1465 | * killed to prevent needlessly killing additional tasks. | 1494 | * killed to prevent needlessly killing additional tasks. |
1466 | */ | 1495 | */ |
1467 | task_lock(task); | 1496 | rcu_read_lock(); |
1468 | curr = mem_cgroup_from_task(task); | 1497 | curr = mem_cgroup_from_task(task); |
1469 | if (curr) | 1498 | if (curr) |
1470 | css_get(&curr->css); | 1499 | css_get(&curr->css); |
1471 | task_unlock(task); | 1500 | rcu_read_unlock(); |
1472 | } | 1501 | } |
1473 | if (!curr) | 1502 | if (!curr) |
1474 | return 0; | 1503 | return false; |
1475 | /* | 1504 | /* |
1476 | * We should check use_hierarchy of "memcg" not "curr". Because checking | 1505 | * We should check use_hierarchy of "memcg" not "curr". Because checking |
1477 | * use_hierarchy of "curr" here make this function true if hierarchy is | 1506 | * use_hierarchy of "curr" here make this function true if hierarchy is |
@@ -6317,14 +6346,14 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) | |||
6317 | struct mem_cgroup *parent = memcg; | 6346 | struct mem_cgroup *parent = memcg; |
6318 | 6347 | ||
6319 | while ((parent = parent_mem_cgroup(parent))) | 6348 | while ((parent = parent_mem_cgroup(parent))) |
6320 | atomic_inc(&parent->dead_count); | 6349 | mem_cgroup_iter_invalidate(parent); |
6321 | 6350 | ||
6322 | /* | 6351 | /* |
6323 | * if the root memcg is not hierarchical we have to check it | 6352 | * if the root memcg is not hierarchical we have to check it |
6324 | * explicitly. | 6353 | * explicitly. |
6325 | */ | 6354 | */ |
6326 | if (!root_mem_cgroup->use_hierarchy) | 6355 | if (!root_mem_cgroup->use_hierarchy) |
6327 | atomic_inc(&root_mem_cgroup->dead_count); | 6356 | mem_cgroup_iter_invalidate(root_mem_cgroup); |
6328 | } | 6357 | } |
6329 | 6358 | ||
6330 | static void mem_cgroup_css_offline(struct cgroup *cont) | 6359 | static void mem_cgroup_css_offline(struct cgroup *cont) |
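The new mem_cgroup_iter_load()/mem_cgroup_iter_update() helpers cache the last visited group together with the per-root dead_count seen when it was stored; destroying any group bumps the counter and so invalidates every cached pointer, while css_tryget() catches the remaining race with a concurrent offline. Below is a minimal user-space sketch of that sequence-count idea, using C11 atomics in place of smp_rmb()/smp_wmb(); the object type, refcount and function names are stand-ins, not the memcg API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct obj { atomic_int refs; };            /* stand-in for a cgroup */

    static atomic_uint dead_count;              /* bumped on every destruction */

    struct iter_cache {
            struct obj *_Atomic last;           /* last visited object */
            atomic_uint last_dead_count;        /* dead_count when it was stored */
    };

    static bool tryget(struct obj *o)           /* fails once the object is dying */
    {
            int r = atomic_load(&o->refs);
            while (r > 0)
                    if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
                            return true;
            return false;
    }

    /* load: only trust the cached pointer if no destruction happened since */
    static struct obj *cache_load(struct iter_cache *c, unsigned int *seq)
    {
            struct obj *pos = NULL;

            *seq = atomic_load(&dead_count);
            if (atomic_load_explicit(&c->last_dead_count,
                                     memory_order_acquire) == *seq) {
                    pos = atomic_load_explicit(&c->last, memory_order_relaxed);
                    if (pos && !tryget(pos))
                            pos = NULL;
            }
            return pos;
    }

    /* update: publish the pointer before the sequence it was loaded under */
    static void cache_update(struct iter_cache *c, struct obj *new_pos,
                             unsigned int seq)
    {
            atomic_store_explicit(&c->last, new_pos, memory_order_relaxed);
            atomic_store_explicit(&c->last_dead_count, seq, memory_order_release);
    }

    static void invalidate(void)                /* an object was destroyed */
    {
            atomic_fetch_add(&dead_count, 1);
    }

Storing the sequence captured at load time, rather than rereading dead_count in cache_update(), is what keeps a destruction that happens between load and update from being lost, exactly as the comment in the patch explains.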
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ceb0c7f1932f..2c13aa7a0164 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1410,7 +1410,8 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags) | |||
1410 | 1410 | ||
1411 | /* | 1411 | /* |
1412 | * Isolate the page, so that it doesn't get reallocated if it | 1412 | * Isolate the page, so that it doesn't get reallocated if it |
1413 | * was free. | 1413 | * was free. This flag should be kept set until the source page |
1414 | * is freed and PG_hwpoison on it is set. | ||
1414 | */ | 1415 | */ |
1415 | set_migratetype_isolate(p, true); | 1416 | set_migratetype_isolate(p, true); |
1416 | /* | 1417 | /* |
@@ -1433,7 +1434,6 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags) | |||
1433 | /* Not a free page */ | 1434 | /* Not a free page */ |
1434 | ret = 1; | 1435 | ret = 1; |
1435 | } | 1436 | } |
1436 | unset_migratetype_isolate(p, MIGRATE_MOVABLE); | ||
1437 | unlock_memory_hotplug(); | 1437 | unlock_memory_hotplug(); |
1438 | return ret; | 1438 | return ret; |
1439 | } | 1439 | } |
@@ -1494,7 +1494,6 @@ static int soft_offline_huge_page(struct page *page, int flags) | |||
1494 | atomic_long_add(1 << compound_trans_order(hpage), | 1494 | atomic_long_add(1 << compound_trans_order(hpage), |
1495 | &num_poisoned_pages); | 1495 | &num_poisoned_pages); |
1496 | } | 1496 | } |
1497 | /* keep elevated page count for bad page */ | ||
1498 | return ret; | 1497 | return ret; |
1499 | } | 1498 | } |
1500 | 1499 | ||
@@ -1559,7 +1558,7 @@ int soft_offline_page(struct page *page, int flags) | |||
1559 | atomic_long_inc(&num_poisoned_pages); | 1558 | atomic_long_inc(&num_poisoned_pages); |
1560 | } | 1559 | } |
1561 | } | 1560 | } |
1562 | /* keep elevated page count for bad page */ | 1561 | unset_migratetype_isolate(page, MIGRATE_MOVABLE); |
1563 | return ret; | 1562 | return ret; |
1564 | } | 1563 | } |
1565 | 1564 | ||
@@ -1625,7 +1624,22 @@ static int __soft_offline_page(struct page *page, int flags) | |||
1625 | if (ret > 0) | 1624 | if (ret > 0) |
1626 | ret = -EIO; | 1625 | ret = -EIO; |
1627 | } else { | 1626 | } else { |
1627 | /* | ||
1628 | * After page migration succeeds, the source page can | ||
1629 | * be trapped in pagevec and actual freeing is delayed. | ||
1630 | * Freeing code works differently based on PG_hwpoison, | ||
1631 | * so there's a race. We need to make sure that the | ||
1632 | * source page is freed back to buddy before | ||
1633 | * setting PG_hwpoison. | ||
1634 | */ | ||
1635 | if (!is_free_buddy_page(page)) | ||
1636 | lru_add_drain_all(); | ||
1637 | if (!is_free_buddy_page(page)) | ||
1638 | drain_all_pages(); | ||
1628 | SetPageHWPoison(page); | 1639 | SetPageHWPoison(page); |
1640 | if (!is_free_buddy_page(page)) | ||
1641 | pr_info("soft offline: %#lx: page leaked\n", | ||
1642 | pfn); | ||
1629 | atomic_long_inc(&num_poisoned_pages); | 1643 | atomic_long_inc(&num_poisoned_pages); |
1630 | } | 1644 | } |
1631 | } else { | 1645 | } else { |
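The comment added to __soft_offline_page() describes an ordering problem: after migration the old page may still sit in a per-cpu pagevec, so PG_hwpoison must only be set once the page has really returned to the buddy allocator, hence the lru_add_drain_all()/drain_all_pages() calls before SetPageHWPoison(). A generic user-space sketch of the same "flush deferred frees before asserting the final state" pattern; the structures are invented for illustration and are not the kernel's pagevec.

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH 8

    struct object { bool poisoned; bool freed; };

    /* deferred frees, analogous to a per-cpu pagevec */
    static struct object *pending[BATCH];
    static int npending;

    static void do_free(struct object *o) { o->freed = true; }

    static void drain_pending(void)
    {
            for (int i = 0; i < npending; i++)
                    do_free(pending[i]);
            npending = 0;
    }

    static void deferred_free(struct object *o)
    {
            if (npending == BATCH)
                    drain_pending();
            pending[npending++] = o;
    }

    /* only mark the object poisoned once it has really been freed */
    static void poison(struct object *o)
    {
            if (!o->freed)
                    drain_pending();
            if (!o->freed)
                    printf("object leaked\n");  /* mirrors the pr_info() above */
            o->poisoned = true;
    }

    int main(void)
    {
            struct object o = { 0 };

            deferred_free(&o);  /* freeing is delayed, as with a pagevec */
            poison(&o);         /* drains first, so the free precedes poisoning */
            return 0;
    }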
diff --git a/mm/memory.c b/mm/memory.c index 95d0cce63583..b68812d682b6 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -82,7 +82,6 @@ EXPORT_SYMBOL(max_mapnr); | |||
82 | EXPORT_SYMBOL(mem_map); | 82 | EXPORT_SYMBOL(mem_map); |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | unsigned long num_physpages; | ||
86 | /* | 85 | /* |
87 | * A number of key systems in x86 including ioremap() rely on the assumption | 86 | * A number of key systems in x86 including ioremap() rely on the assumption |
88 | * that high_memory defines the upper bound on direct map memory, then end | 87 | * that high_memory defines the upper bound on direct map memory, then end |
@@ -92,7 +91,6 @@ unsigned long num_physpages; | |||
92 | */ | 91 | */ |
93 | void * high_memory; | 92 | void * high_memory; |
94 | 93 | ||
95 | EXPORT_SYMBOL(num_physpages); | ||
96 | EXPORT_SYMBOL(high_memory); | 94 | EXPORT_SYMBOL(high_memory); |
97 | 95 | ||
98 | /* | 96 | /* |
@@ -1101,6 +1099,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, | |||
1101 | spinlock_t *ptl; | 1099 | spinlock_t *ptl; |
1102 | pte_t *start_pte; | 1100 | pte_t *start_pte; |
1103 | pte_t *pte; | 1101 | pte_t *pte; |
1102 | unsigned long range_start = addr; | ||
1104 | 1103 | ||
1105 | again: | 1104 | again: |
1106 | init_rss_vec(rss); | 1105 | init_rss_vec(rss); |
@@ -1206,12 +1205,14 @@ again: | |||
1206 | force_flush = 0; | 1205 | force_flush = 0; |
1207 | 1206 | ||
1208 | #ifdef HAVE_GENERIC_MMU_GATHER | 1207 | #ifdef HAVE_GENERIC_MMU_GATHER |
1209 | tlb->start = addr; | 1208 | tlb->start = range_start; |
1210 | tlb->end = end; | 1209 | tlb->end = addr; |
1211 | #endif | 1210 | #endif |
1212 | tlb_flush_mmu(tlb); | 1211 | tlb_flush_mmu(tlb); |
1213 | if (addr != end) | 1212 | if (addr != end) { |
1213 | range_start = addr; | ||
1214 | goto again; | 1214 | goto again; |
1215 | } | ||
1215 | } | 1216 | } |
1216 | 1217 | ||
1217 | return addr; | 1218 | return addr; |
@@ -2904,7 +2905,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root, | |||
2904 | details->first_index, details->last_index) { | 2905 | details->first_index, details->last_index) { |
2905 | 2906 | ||
2906 | vba = vma->vm_pgoff; | 2907 | vba = vma->vm_pgoff; |
2907 | vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; | 2908 | vea = vba + vma_pages(vma) - 1; |
2908 | /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ | 2909 | /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ |
2909 | zba = details->first_index; | 2910 | zba = details->first_index; |
2910 | if (zba < vba) | 2911 | if (zba < vba) |
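The zap_pte_range() change records range_start so that, when the loop restarts at the again: label after a forced flush, tlb->start/tlb->end cover only the addresses unmapped since the previous flush instead of the whole original range. A compact user-space sketch of that bookkeeping; the flush function and page-size constants are stand-ins, not the mmu_gather API.

    #include <stdio.h>

    /* stand-in for a TLB range flush; just reports what would be flushed */
    static void flush_range(unsigned long start, unsigned long end)
    {
            printf("flush [%#lx, %#lx)\n", start, end);
    }

    static void zap_range(unsigned long addr, unsigned long end,
                          unsigned long force_every)
    {
            unsigned long range_start = addr;

            while (addr < end) {
                    addr += 0x1000;                     /* unmap one page */
                    if (addr - range_start >= force_every && addr != end) {
                            /* flush only what was touched since the last flush */
                            flush_range(range_start, addr);
                            range_start = addr;         /* restart point, as in the patch */
                    }
            }
            flush_range(range_start, end);
    }

    int main(void)
    {
            zap_range(0x100000, 0x108000, 0x4000);
            return 0;
    }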
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 081b4d654ed6..f5ba127b2051 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -75,7 +75,7 @@ static struct resource *register_memory_resource(u64 start, u64 size) | |||
75 | res->end = start + size - 1; | 75 | res->end = start + size - 1; |
76 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 76 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
77 | if (request_resource(&iomem_resource, res) < 0) { | 77 | if (request_resource(&iomem_resource, res) < 0) { |
78 | printk("System RAM resource %pR cannot be added\n", res); | 78 | pr_debug("System RAM resource %pR cannot be added\n", res); |
79 | kfree(res); | 79 | kfree(res); |
80 | res = NULL; | 80 | res = NULL; |
81 | } | 81 | } |
@@ -101,12 +101,9 @@ void get_page_bootmem(unsigned long info, struct page *page, | |||
101 | atomic_inc(&page->_count); | 101 | atomic_inc(&page->_count); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* reference to __meminit __free_pages_bootmem is valid | 104 | void put_page_bootmem(struct page *page) |
105 | * so use __ref to tell modpost not to generate a warning */ | ||
106 | void __ref put_page_bootmem(struct page *page) | ||
107 | { | 105 | { |
108 | unsigned long type; | 106 | unsigned long type; |
109 | static DEFINE_MUTEX(ppb_lock); | ||
110 | 107 | ||
111 | type = (unsigned long) page->lru.next; | 108 | type = (unsigned long) page->lru.next; |
112 | BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || | 109 | BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || |
@@ -116,17 +113,8 @@ void __ref put_page_bootmem(struct page *page) | |||
116 | ClearPagePrivate(page); | 113 | ClearPagePrivate(page); |
117 | set_page_private(page, 0); | 114 | set_page_private(page, 0); |
118 | INIT_LIST_HEAD(&page->lru); | 115 | INIT_LIST_HEAD(&page->lru); |
119 | 116 | free_reserved_page(page); | |
120 | /* | ||
121 | * Please refer to comment for __free_pages_bootmem() | ||
122 | * for why we serialize here. | ||
123 | */ | ||
124 | mutex_lock(&ppb_lock); | ||
125 | __free_pages_bootmem(page, 0); | ||
126 | mutex_unlock(&ppb_lock); | ||
127 | totalram_pages++; | ||
128 | } | 117 | } |
129 | |||
130 | } | 118 | } |
131 | 119 | ||
132 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE | 120 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE |
@@ -309,7 +297,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2, | |||
309 | /* can't move pfns which are higher than @z2 */ | 297 | /* can't move pfns which are higher than @z2 */ |
310 | if (end_pfn > zone_end_pfn(z2)) | 298 | if (end_pfn > zone_end_pfn(z2)) |
311 | goto out_fail; | 299 | goto out_fail; |
312 | /* the move out part mast at the left most of @z2 */ | 300 | /* the move out part must be at the left most of @z2 */ |
313 | if (start_pfn > z2->zone_start_pfn) | 301 | if (start_pfn > z2->zone_start_pfn) |
314 | goto out_fail; | 302 | goto out_fail; |
315 | /* must included/overlap */ | 303 | /* must included/overlap */ |
@@ -775,29 +763,18 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback); | |||
775 | 763 | ||
776 | void __online_page_set_limits(struct page *page) | 764 | void __online_page_set_limits(struct page *page) |
777 | { | 765 | { |
778 | unsigned long pfn = page_to_pfn(page); | ||
779 | |||
780 | if (pfn >= num_physpages) | ||
781 | num_physpages = pfn + 1; | ||
782 | } | 766 | } |
783 | EXPORT_SYMBOL_GPL(__online_page_set_limits); | 767 | EXPORT_SYMBOL_GPL(__online_page_set_limits); |
784 | 768 | ||
785 | void __online_page_increment_counters(struct page *page) | 769 | void __online_page_increment_counters(struct page *page) |
786 | { | 770 | { |
787 | totalram_pages++; | 771 | adjust_managed_page_count(page, 1); |
788 | |||
789 | #ifdef CONFIG_HIGHMEM | ||
790 | if (PageHighMem(page)) | ||
791 | totalhigh_pages++; | ||
792 | #endif | ||
793 | } | 772 | } |
794 | EXPORT_SYMBOL_GPL(__online_page_increment_counters); | 773 | EXPORT_SYMBOL_GPL(__online_page_increment_counters); |
795 | 774 | ||
796 | void __online_page_free(struct page *page) | 775 | void __online_page_free(struct page *page) |
797 | { | 776 | { |
798 | ClearPageReserved(page); | 777 | __free_reserved_page(page); |
799 | init_page_count(page); | ||
800 | __free_page(page); | ||
801 | } | 778 | } |
802 | EXPORT_SYMBOL_GPL(__online_page_free); | 779 | EXPORT_SYMBOL_GPL(__online_page_free); |
803 | 780 | ||
@@ -918,6 +895,7 @@ static void node_states_set_node(int node, struct memory_notify *arg) | |||
918 | 895 | ||
919 | int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) | 896 | int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) |
920 | { | 897 | { |
898 | unsigned long flags; | ||
921 | unsigned long onlined_pages = 0; | 899 | unsigned long onlined_pages = 0; |
922 | struct zone *zone; | 900 | struct zone *zone; |
923 | int need_zonelists_rebuild = 0; | 901 | int need_zonelists_rebuild = 0; |
@@ -994,9 +972,12 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ | |||
994 | return ret; | 972 | return ret; |
995 | } | 973 | } |
996 | 974 | ||
997 | zone->managed_pages += onlined_pages; | ||
998 | zone->present_pages += onlined_pages; | 975 | zone->present_pages += onlined_pages; |
976 | |||
977 | pgdat_resize_lock(zone->zone_pgdat, &flags); | ||
999 | zone->zone_pgdat->node_present_pages += onlined_pages; | 978 | zone->zone_pgdat->node_present_pages += onlined_pages; |
979 | pgdat_resize_unlock(zone->zone_pgdat, &flags); | ||
980 | |||
1000 | if (onlined_pages) { | 981 | if (onlined_pages) { |
1001 | node_states_set_node(zone_to_nid(zone), &arg); | 982 | node_states_set_node(zone_to_nid(zone), &arg); |
1002 | if (need_zonelists_rebuild) | 983 | if (need_zonelists_rebuild) |
@@ -1487,6 +1468,7 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1487 | unsigned long pfn, nr_pages, expire; | 1468 | unsigned long pfn, nr_pages, expire; |
1488 | long offlined_pages; | 1469 | long offlined_pages; |
1489 | int ret, drain, retry_max, node; | 1470 | int ret, drain, retry_max, node; |
1471 | unsigned long flags; | ||
1490 | struct zone *zone; | 1472 | struct zone *zone; |
1491 | struct memory_notify arg; | 1473 | struct memory_notify arg; |
1492 | 1474 | ||
@@ -1578,10 +1560,12 @@ repeat: | |||
1578 | /* reset pagetype flags and makes migrate type to be MOVABLE */ | 1560 | /* reset pagetype flags and makes migrate type to be MOVABLE */ |
1579 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | 1561 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); |
1580 | /* removal success */ | 1562 | /* removal success */ |
1581 | zone->managed_pages -= offlined_pages; | 1563 | adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); |
1582 | zone->present_pages -= offlined_pages; | 1564 | zone->present_pages -= offlined_pages; |
1565 | |||
1566 | pgdat_resize_lock(zone->zone_pgdat, &flags); | ||
1583 | zone->zone_pgdat->node_present_pages -= offlined_pages; | 1567 | zone->zone_pgdat->node_present_pages -= offlined_pages; |
1584 | totalram_pages -= offlined_pages; | 1568 | pgdat_resize_unlock(zone->zone_pgdat, &flags); |
1585 | 1569 | ||
1586 | init_per_zone_wmark_min(); | 1570 | init_per_zone_wmark_min(); |
1587 | 1571 | ||
diff --git a/mm/mm_init.c b/mm/mm_init.c index c280a02ea11e..633c08863fd8 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/kobject.h> | 10 | #include <linux/kobject.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
12 | #include <linux/memory.h> | ||
13 | #include <linux/notifier.h> | ||
12 | #include "internal.h" | 14 | #include "internal.h" |
13 | 15 | ||
14 | #ifdef CONFIG_DEBUG_MEMORY_INIT | 16 | #ifdef CONFIG_DEBUG_MEMORY_INIT |
@@ -147,6 +149,51 @@ early_param("mminit_loglevel", set_mminit_loglevel); | |||
147 | struct kobject *mm_kobj; | 149 | struct kobject *mm_kobj; |
148 | EXPORT_SYMBOL_GPL(mm_kobj); | 150 | EXPORT_SYMBOL_GPL(mm_kobj); |
149 | 151 | ||
152 | #ifdef CONFIG_SMP | ||
153 | s32 vm_committed_as_batch = 32; | ||
154 | |||
155 | static void __meminit mm_compute_batch(void) | ||
156 | { | ||
157 | u64 memsized_batch; | ||
158 | s32 nr = num_present_cpus(); | ||
159 | s32 batch = max_t(s32, nr*2, 32); | ||
160 | |||
161 | /* batch size set to 0.4% of (total memory/#cpus), or max int32 */ | ||
162 | memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff); | ||
163 | |||
164 | vm_committed_as_batch = max_t(s32, memsized_batch, batch); | ||
165 | } | ||
166 | |||
167 | static int __meminit mm_compute_batch_notifier(struct notifier_block *self, | ||
168 | unsigned long action, void *arg) | ||
169 | { | ||
170 | switch (action) { | ||
171 | case MEM_ONLINE: | ||
172 | case MEM_OFFLINE: | ||
173 | mm_compute_batch(); | ||
174 | default: | ||
175 | break; | ||
176 | } | ||
177 | return NOTIFY_OK; | ||
178 | } | ||
179 | |||
180 | static struct notifier_block compute_batch_nb __meminitdata = { | ||
181 | .notifier_call = mm_compute_batch_notifier, | ||
182 | .priority = IPC_CALLBACK_PRI, /* use lowest priority */ | ||
183 | }; | ||
184 | |||
185 | static int __init mm_compute_batch_init(void) | ||
186 | { | ||
187 | mm_compute_batch(); | ||
188 | register_hotmemory_notifier(&compute_batch_nb); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | __initcall(mm_compute_batch_init); | ||
194 | |||
195 | #endif | ||
196 | |||
150 | static int __init mm_sysfs_init(void) | 197 | static int __init mm_sysfs_init(void) |
151 | { | 198 | { |
152 | mm_kobj = kobject_create_and_add("mm", kernel_kobj); | 199 | mm_kobj = kobject_create_and_add("mm", kernel_kobj); |
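mm_compute_batch() above sizes the vm_committed_as percpu-counter batch as the larger of 2 * num_present_cpus() (at least 32) and roughly 0.4% of each CPU's share of RAM, capped at INT_MAX, and the hotplug notifier recomputes it whenever memory goes on- or offline. A user-space sketch of just that arithmetic; sysconf() values stand in for totalram_pages and num_present_cpus().

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int64_t pages = sysconf(_SC_PHYS_PAGES);        /* ~ totalram_pages */
            int64_t ncpus = sysconf(_SC_NPROCESSORS_CONF);  /* ~ num_present_cpus() */

            int64_t batch = (ncpus * 2 > 32) ? ncpus * 2 : 32;

            /* 0.4% of (total memory / #cpus), capped at max int32 */
            int64_t memsized = (pages / ncpus) / 256;
            if (memsized > INT32_MAX)
                    memsized = INT32_MAX;

            if (memsized > batch)
                    batch = memsized;

            printf("vm_committed_as_batch would be %lld pages\n",
                   (long long)batch);
            return 0;
    }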
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -955,7 +955,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, | |||
955 | if (is_mergeable_vma(vma, file, vm_flags) && | 955 | if (is_mergeable_vma(vma, file, vm_flags) && |
956 | is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { | 956 | is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { |
957 | pgoff_t vm_pglen; | 957 | pgoff_t vm_pglen; |
958 | vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 958 | vm_pglen = vma_pages(vma); |
959 | if (vma->vm_pgoff + vm_pglen == vm_pgoff) | 959 | if (vma->vm_pgoff + vm_pglen == vm_pgoff) |
960 | return 1; | 960 | return 1; |
961 | } | 961 | } |
diff --git a/mm/mremap.c b/mm/mremap.c index 463a25705ac6..3708655378e9 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -126,7 +126,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
126 | continue; | 126 | continue; |
127 | pte = ptep_get_and_clear(mm, old_addr, old_pte); | 127 | pte = ptep_get_and_clear(mm, old_addr, old_pte); |
128 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); | 128 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); |
129 | set_pte_at(mm, new_addr, new_pte, pte); | 129 | set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte)); |
130 | } | 130 | } |
131 | 131 | ||
132 | arch_leave_lazy_mmu_mode(); | 132 | arch_leave_lazy_mmu_mode(); |
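This mremap.c hunk, like the pmd-move hunk at the top of this series, makes the soft-dirty bit survive when mremap() moves page-table entries, so userspace dirty tracking (e.g. CRIU) does not miss pages written before the move. A user-space sketch that reads the soft-dirty bit from /proc/self/pagemap (bit 55, per Documentation/vm/soft-dirty.txt), assuming a kernel built with CONFIG_MEM_SOFT_DIRTY; error handling is trimmed for brevity.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int soft_dirty(void *addr)
    {
            uint64_t entry = 0;
            long page = sysconf(_SC_PAGESIZE);
            int fd = open("/proc/self/pagemap", O_RDONLY);

            pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)addr / page) * sizeof(entry));
            close(fd);
            return (entry >> 55) & 1;           /* bit 55: soft-dirty */
    }

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            int fd = open("/proc/self/clear_refs", O_WRONLY);

            p[0] = 1;                           /* make sure the page exists */
            write(fd, "4", 1);                  /* clear all soft-dirty bits */
            close(fd);

            p[0] = 2;                           /* dirty it again: soft-dirty set */
            printf("before mremap: soft-dirty=%d\n", soft_dirty(p));

            p = mremap(p, page, page * 2, MREMAP_MAYMOVE);
            /* with the fix the bit is still reported for the moved mapping */
            printf("after mremap:  soft-dirty=%d\n", soft_dirty(p));
            return 0;
    }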
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index bdd3fa2fc73b..61107cf55bb3 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -137,20 +137,25 @@ static unsigned long __init free_low_memory_core_early(void) | |||
137 | return count; | 137 | return count; |
138 | } | 138 | } |
139 | 139 | ||
140 | static void reset_node_lowmem_managed_pages(pg_data_t *pgdat) | 140 | static int reset_managed_pages_done __initdata; |
141 | |||
142 | static inline void __init reset_node_managed_pages(pg_data_t *pgdat) | ||
141 | { | 143 | { |
142 | struct zone *z; | 144 | struct zone *z; |
143 | 145 | ||
144 | /* | 146 | if (reset_managed_pages_done) |
145 | * In free_area_init_core(), highmem zone's managed_pages is set to | 147 | return; |
146 | * present_pages, and bootmem allocator doesn't allocate from highmem | ||
147 | * zones. So there's no need to recalculate managed_pages because all | ||
148 | * highmem pages will be managed by the buddy system. Here highmem | ||
149 | * zone also includes highmem movable zone. | ||
150 | */ | ||
151 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) | 148 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
152 | if (!is_highmem(z)) | 149 | z->managed_pages = 0; |
153 | z->managed_pages = 0; | 150 | } |
151 | |||
152 | void __init reset_all_zones_managed_pages(void) | ||
153 | { | ||
154 | struct pglist_data *pgdat; | ||
155 | |||
156 | for_each_online_pgdat(pgdat) | ||
157 | reset_node_managed_pages(pgdat); | ||
158 | reset_managed_pages_done = 1; | ||
154 | } | 159 | } |
155 | 160 | ||
156 | /** | 161 | /** |
@@ -160,17 +165,19 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat) | |||
160 | */ | 165 | */ |
161 | unsigned long __init free_all_bootmem(void) | 166 | unsigned long __init free_all_bootmem(void) |
162 | { | 167 | { |
163 | struct pglist_data *pgdat; | 168 | unsigned long pages; |
164 | 169 | ||
165 | for_each_online_pgdat(pgdat) | 170 | reset_all_zones_managed_pages(); |
166 | reset_node_lowmem_managed_pages(pgdat); | ||
167 | 171 | ||
168 | /* | 172 | /* |
169 | * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id | 173 | * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id |
170 | * because in some case like Node0 doesn't have RAM installed | 174 | * because in some case like Node0 doesn't have RAM installed |
171 | * low ram will be on Node1 | 175 | * low ram will be on Node1 |
172 | */ | 176 | */ |
173 | return free_low_memory_core_early(); | 177 | pages = free_low_memory_core_early(); |
178 | totalram_pages += pages; | ||
179 | |||
180 | return pages; | ||
174 | } | 181 | } |
175 | 182 | ||
176 | /** | 183 | /** |
diff --git a/mm/nommu.c b/mm/nommu.c index 298884dcd6e7..e44e6e0a125c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -56,7 +56,6 @@ | |||
56 | void *high_memory; | 56 | void *high_memory; |
57 | struct page *mem_map; | 57 | struct page *mem_map; |
58 | unsigned long max_mapnr; | 58 | unsigned long max_mapnr; |
59 | unsigned long num_physpages; | ||
60 | unsigned long highest_memmap_pfn; | 59 | unsigned long highest_memmap_pfn; |
61 | struct percpu_counter vm_committed_as; | 60 | struct percpu_counter vm_committed_as; |
62 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 61 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
@@ -85,7 +84,6 @@ unsigned long vm_memory_committed(void) | |||
85 | EXPORT_SYMBOL_GPL(vm_memory_committed); | 84 | EXPORT_SYMBOL_GPL(vm_memory_committed); |
86 | 85 | ||
87 | EXPORT_SYMBOL(mem_map); | 86 | EXPORT_SYMBOL(mem_map); |
88 | EXPORT_SYMBOL(num_physpages); | ||
89 | 87 | ||
90 | /* list of mapped, potentially shareable regions */ | 88 | /* list of mapped, potentially shareable regions */ |
91 | static struct kmem_cache *vm_region_jar; | 89 | static struct kmem_cache *vm_region_jar; |
@@ -282,6 +280,10 @@ EXPORT_SYMBOL(vmalloc_to_pfn); | |||
282 | 280 | ||
283 | long vread(char *buf, char *addr, unsigned long count) | 281 | long vread(char *buf, char *addr, unsigned long count) |
284 | { | 282 | { |
283 | /* Don't allow overflow */ | ||
284 | if ((unsigned long) buf + count < count) | ||
285 | count = -(unsigned long) buf; | ||
286 | |||
285 | memcpy(buf, addr, count); | 287 | memcpy(buf, addr, count); |
286 | return count; | 288 | return count; |
287 | } | 289 | } |
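The nommu vread() change guards against buf + count wrapping past the end of the address space: if the sum overflows, count is clamped to the number of bytes left before the wrap, which is what -(unsigned long)buf evaluates to. A minimal sketch of the same check on size_t values; the function name is invented.

    #include <stdint.h>
    #include <stdio.h>

    /* clamp count so that addr + count cannot wrap around */
    static size_t clamp_count(uintptr_t addr, size_t count)
    {
            if ((size_t)addr + count < count)   /* wrapped */
                    count = -(size_t)addr;      /* bytes left up to the top of the space */
            return count;
    }

    int main(void)
    {
            printf("%zu\n", clamp_count(0x1000, 0x100));        /* unchanged */
            printf("%zu\n", clamp_count(UINTPTR_MAX - 15, 64)); /* clamped to 16 */
            return 0;
    }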
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c3edb624fccf..327516b7aee9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -61,10 +61,14 @@ | |||
61 | #include <linux/hugetlb.h> | 61 | #include <linux/hugetlb.h> |
62 | #include <linux/sched/rt.h> | 62 | #include <linux/sched/rt.h> |
63 | 63 | ||
64 | #include <asm/sections.h> | ||
64 | #include <asm/tlbflush.h> | 65 | #include <asm/tlbflush.h> |
65 | #include <asm/div64.h> | 66 | #include <asm/div64.h> |
66 | #include "internal.h" | 67 | #include "internal.h" |
67 | 68 | ||
69 | /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ | ||
70 | static DEFINE_MUTEX(pcp_batch_high_lock); | ||
71 | |||
68 | #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID | 72 | #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID |
69 | DEFINE_PER_CPU(int, numa_node); | 73 | DEFINE_PER_CPU(int, numa_node); |
70 | EXPORT_PER_CPU_SYMBOL(numa_node); | 74 | EXPORT_PER_CPU_SYMBOL(numa_node); |
@@ -100,6 +104,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { | |||
100 | }; | 104 | }; |
101 | EXPORT_SYMBOL(node_states); | 105 | EXPORT_SYMBOL(node_states); |
102 | 106 | ||
107 | /* Protect totalram_pages and zone->managed_pages */ | ||
108 | static DEFINE_SPINLOCK(managed_page_count_lock); | ||
109 | |||
103 | unsigned long totalram_pages __read_mostly; | 110 | unsigned long totalram_pages __read_mostly; |
104 | unsigned long totalreserve_pages __read_mostly; | 111 | unsigned long totalreserve_pages __read_mostly; |
105 | /* | 112 | /* |
@@ -739,14 +746,7 @@ static void __free_pages_ok(struct page *page, unsigned int order) | |||
739 | local_irq_restore(flags); | 746 | local_irq_restore(flags); |
740 | } | 747 | } |
741 | 748 | ||
742 | /* | 749 | void __init __free_pages_bootmem(struct page *page, unsigned int order) |
743 | * Read access to zone->managed_pages is safe because it's unsigned long, | ||
744 | * but we still need to serialize writers. Currently all callers of | ||
745 | * __free_pages_bootmem() except put_page_bootmem() should only be used | ||
746 | * at boot time. So for shorter boot time, we shift the burden to | ||
747 | * put_page_bootmem() to serialize writers. | ||
748 | */ | ||
749 | void __meminit __free_pages_bootmem(struct page *page, unsigned int order) | ||
750 | { | 750 | { |
751 | unsigned int nr_pages = 1 << order; | 751 | unsigned int nr_pages = 1 << order; |
752 | unsigned int loop; | 752 | unsigned int loop; |
@@ -781,11 +781,7 @@ void __init init_cma_reserved_pageblock(struct page *page) | |||
781 | set_page_refcounted(page); | 781 | set_page_refcounted(page); |
782 | set_pageblock_migratetype(page, MIGRATE_CMA); | 782 | set_pageblock_migratetype(page, MIGRATE_CMA); |
783 | __free_pages(page, pageblock_order); | 783 | __free_pages(page, pageblock_order); |
784 | totalram_pages += pageblock_nr_pages; | 784 | adjust_managed_page_count(page, pageblock_nr_pages); |
785 | #ifdef CONFIG_HIGHMEM | ||
786 | if (PageHighMem(page)) | ||
787 | totalhigh_pages += pageblock_nr_pages; | ||
788 | #endif | ||
789 | } | 785 | } |
790 | #endif | 786 | #endif |
791 | 787 | ||
@@ -1179,10 +1175,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) | |||
1179 | { | 1175 | { |
1180 | unsigned long flags; | 1176 | unsigned long flags; |
1181 | int to_drain; | 1177 | int to_drain; |
1178 | unsigned long batch; | ||
1182 | 1179 | ||
1183 | local_irq_save(flags); | 1180 | local_irq_save(flags); |
1184 | if (pcp->count >= pcp->batch) | 1181 | batch = ACCESS_ONCE(pcp->batch); |
1185 | to_drain = pcp->batch; | 1182 | if (pcp->count >= batch) |
1183 | to_drain = batch; | ||
1186 | else | 1184 | else |
1187 | to_drain = pcp->count; | 1185 | to_drain = pcp->count; |
1188 | if (to_drain > 0) { | 1186 | if (to_drain > 0) { |
@@ -1350,8 +1348,9 @@ void free_hot_cold_page(struct page *page, int cold) | |||
1350 | list_add(&page->lru, &pcp->lists[migratetype]); | 1348 | list_add(&page->lru, &pcp->lists[migratetype]); |
1351 | pcp->count++; | 1349 | pcp->count++; |
1352 | if (pcp->count >= pcp->high) { | 1350 | if (pcp->count >= pcp->high) { |
1353 | free_pcppages_bulk(zone, pcp->batch, pcp); | 1351 | unsigned long batch = ACCESS_ONCE(pcp->batch); |
1354 | pcp->count -= pcp->batch; | 1352 | free_pcppages_bulk(zone, batch, pcp); |
1353 | pcp->count -= batch; | ||
1355 | } | 1354 | } |
1356 | 1355 | ||
1357 | out: | 1356 | out: |
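drain_zone_pages() and free_hot_cold_page() now copy pcp->batch into a local through ACCESS_ONCE() because, with the new pageset_update() path, another CPU may change ->batch while these functions run; reading the field twice could observe two different values. A user-space sketch of the same "snapshot once, then use the snapshot" rule, modelling the concurrently-updated field with a C11 atomic; names are stand-ins.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong pcp_batch = 31;         /* may be changed by another thread */

    static unsigned long to_drain(unsigned long count)
    {
            /* snapshot once: the same value is used for the test and the result */
            unsigned long batch = atomic_load_explicit(&pcp_batch,
                                                       memory_order_relaxed);

            return count >= batch ? batch : count;
    }

    int main(void)
    {
            printf("%lu\n", to_drain(100));     /* -> 31 */
            printf("%lu\n", to_drain(10));      /* -> 10 */
            return 0;
    }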
@@ -2839,7 +2838,7 @@ EXPORT_SYMBOL(free_pages_exact); | |||
2839 | * nr_free_zone_pages() counts the number of counts pages which are beyond the | 2838 | * nr_free_zone_pages() counts the number of counts pages which are beyond the |
2840 | * high watermark within all zones at or below a given zone index. For each | 2839 | * high watermark within all zones at or below a given zone index. For each |
2841 | * zone, the number of pages is calculated as: | 2840 | * zone, the number of pages is calculated as: |
2842 | * present_pages - high_pages | 2841 | * managed_pages - high_pages |
2843 | */ | 2842 | */ |
2844 | static unsigned long nr_free_zone_pages(int offset) | 2843 | static unsigned long nr_free_zone_pages(int offset) |
2845 | { | 2844 | { |
@@ -2906,9 +2905,13 @@ EXPORT_SYMBOL(si_meminfo); | |||
2906 | #ifdef CONFIG_NUMA | 2905 | #ifdef CONFIG_NUMA |
2907 | void si_meminfo_node(struct sysinfo *val, int nid) | 2906 | void si_meminfo_node(struct sysinfo *val, int nid) |
2908 | { | 2907 | { |
2908 | int zone_type; /* needs to be signed */ | ||
2909 | unsigned long managed_pages = 0; | ||
2909 | pg_data_t *pgdat = NODE_DATA(nid); | 2910 | pg_data_t *pgdat = NODE_DATA(nid); |
2910 | 2911 | ||
2911 | val->totalram = pgdat->node_present_pages; | 2912 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) |
2913 | managed_pages += pgdat->node_zones[zone_type].managed_pages; | ||
2914 | val->totalram = managed_pages; | ||
2912 | val->freeram = node_page_state(nid, NR_FREE_PAGES); | 2915 | val->freeram = node_page_state(nid, NR_FREE_PAGES); |
2913 | #ifdef CONFIG_HIGHMEM | 2916 | #ifdef CONFIG_HIGHMEM |
2914 | val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; | 2917 | val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; |
@@ -3250,18 +3253,25 @@ int numa_zonelist_order_handler(ctl_table *table, int write, | |||
3250 | static DEFINE_MUTEX(zl_order_mutex); | 3253 | static DEFINE_MUTEX(zl_order_mutex); |
3251 | 3254 | ||
3252 | mutex_lock(&zl_order_mutex); | 3255 | mutex_lock(&zl_order_mutex); |
3253 | if (write) | 3256 | if (write) { |
3254 | strcpy(saved_string, (char*)table->data); | 3257 | if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { |
3258 | ret = -EINVAL; | ||
3259 | goto out; | ||
3260 | } | ||
3261 | strcpy(saved_string, (char *)table->data); | ||
3262 | } | ||
3255 | ret = proc_dostring(table, write, buffer, length, ppos); | 3263 | ret = proc_dostring(table, write, buffer, length, ppos); |
3256 | if (ret) | 3264 | if (ret) |
3257 | goto out; | 3265 | goto out; |
3258 | if (write) { | 3266 | if (write) { |
3259 | int oldval = user_zonelist_order; | 3267 | int oldval = user_zonelist_order; |
3260 | if (__parse_numa_zonelist_order((char*)table->data)) { | 3268 | |
3269 | ret = __parse_numa_zonelist_order((char *)table->data); | ||
3270 | if (ret) { | ||
3261 | /* | 3271 | /* |
3262 | * bogus value. restore saved string | 3272 | * bogus value. restore saved string |
3263 | */ | 3273 | */ |
3264 | strncpy((char*)table->data, saved_string, | 3274 | strncpy((char *)table->data, saved_string, |
3265 | NUMA_ZONELIST_ORDER_LEN); | 3275 | NUMA_ZONELIST_ORDER_LEN); |
3266 | user_zonelist_order = oldval; | 3276 | user_zonelist_order = oldval; |
3267 | } else if (oldval != user_zonelist_order) { | 3277 | } else if (oldval != user_zonelist_order) { |
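The numa_zonelist_order_handler() fix checks the incoming string length against NUMA_ZONELIST_ORDER_LEN before strcpy()ing it into the fixed-size saved_string buffer, returning -EINVAL instead of overflowing. A tiny sketch of the same guard; the buffer size and function name are illustrative.

    #include <errno.h>
    #include <string.h>

    #define ORDER_LEN 16

    static char saved_string[ORDER_LEN];

    static int save_order(const char *data)
    {
            /* reject anything that would not fit, including the NUL */
            if (strlen(data) >= sizeof(saved_string))
                    return -EINVAL;
            strcpy(saved_string, data);
            return 0;
    }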
@@ -3425,8 +3435,8 @@ static int default_zonelist_order(void) | |||
3425 | z = &NODE_DATA(nid)->node_zones[zone_type]; | 3435 | z = &NODE_DATA(nid)->node_zones[zone_type]; |
3426 | if (populated_zone(z)) { | 3436 | if (populated_zone(z)) { |
3427 | if (zone_type < ZONE_NORMAL) | 3437 | if (zone_type < ZONE_NORMAL) |
3428 | low_kmem_size += z->present_pages; | 3438 | low_kmem_size += z->managed_pages; |
3429 | total_size += z->present_pages; | 3439 | total_size += z->managed_pages; |
3430 | } else if (zone_type == ZONE_NORMAL) { | 3440 | } else if (zone_type == ZONE_NORMAL) { |
3431 | /* | 3441 | /* |
3432 | * If any node has only lowmem, then node order | 3442 | * If any node has only lowmem, then node order |
@@ -3705,12 +3715,12 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) | |||
3705 | mminit_verify_zonelist(); | 3715 | mminit_verify_zonelist(); |
3706 | cpuset_init_current_mems_allowed(); | 3716 | cpuset_init_current_mems_allowed(); |
3707 | } else { | 3717 | } else { |
3708 | /* we have to stop all cpus to guarantee there is no user | ||
3709 | of zonelist */ | ||
3710 | #ifdef CONFIG_MEMORY_HOTPLUG | 3718 | #ifdef CONFIG_MEMORY_HOTPLUG |
3711 | if (zone) | 3719 | if (zone) |
3712 | setup_zone_pageset(zone); | 3720 | setup_zone_pageset(zone); |
3713 | #endif | 3721 | #endif |
3722 | /* we have to stop all cpus to guarantee there is no user | ||
3723 | of zonelist */ | ||
3714 | stop_machine(__build_all_zonelists, pgdat, NULL); | 3724 | stop_machine(__build_all_zonelists, pgdat, NULL); |
3715 | /* cpuset refresh routine should be here */ | 3725 | /* cpuset refresh routine should be here */ |
3716 | } | 3726 | } |
@@ -4032,7 +4042,40 @@ static int __meminit zone_batchsize(struct zone *zone) | |||
4032 | #endif | 4042 | #endif |
4033 | } | 4043 | } |
4034 | 4044 | ||
4035 | static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | 4045 | /* |
4046 | * pcp->high and pcp->batch values are related and dependent on one another: | ||
4047 | * ->batch must never be higher than ->high. | ||
4048 | * The following function updates them in a safe manner without read side | ||
4049 | * locking. | ||
4050 | * | ||
4051 | * Any new users of pcp->batch and pcp->high should ensure they can cope with | ||
4052 | * those fields changing asynchronously (according to the above rule). | ||
4053 | * | ||
4054 | * mutex_is_locked(&pcp_batch_high_lock) required when calling this function | ||
4055 | * outside of boot time (or some other assurance that no concurrent updaters | ||
4056 | * exist). | ||
4057 | */ | ||
4058 | static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, | ||
4059 | unsigned long batch) | ||
4060 | { | ||
4061 | /* start with a fail safe value for batch */ | ||
4062 | pcp->batch = 1; | ||
4063 | smp_wmb(); | ||
4064 | |||
4065 | /* Update high, then batch, in order */ | ||
4066 | pcp->high = high; | ||
4067 | smp_wmb(); | ||
4068 | |||
4069 | pcp->batch = batch; | ||
4070 | } | ||
4071 | |||
4072 | /* a companion to pageset_set_high() */ | ||
4073 | static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) | ||
4074 | { | ||
4075 | pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); | ||
4076 | } | ||
4077 | |||
4078 | static void pageset_init(struct per_cpu_pageset *p) | ||
4036 | { | 4079 | { |
4037 | struct per_cpu_pages *pcp; | 4080 | struct per_cpu_pages *pcp; |
4038 | int migratetype; | 4081 | int migratetype; |
@@ -4041,45 +4084,55 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | |||
4041 | 4084 | ||
4042 | pcp = &p->pcp; | 4085 | pcp = &p->pcp; |
4043 | pcp->count = 0; | 4086 | pcp->count = 0; |
4044 | pcp->high = 6 * batch; | ||
4045 | pcp->batch = max(1UL, 1 * batch); | ||
4046 | for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) | 4087 | for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) |
4047 | INIT_LIST_HEAD(&pcp->lists[migratetype]); | 4088 | INIT_LIST_HEAD(&pcp->lists[migratetype]); |
4048 | } | 4089 | } |
4049 | 4090 | ||
4091 | static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | ||
4092 | { | ||
4093 | pageset_init(p); | ||
4094 | pageset_set_batch(p, batch); | ||
4095 | } | ||
4096 | |||
4050 | /* | 4097 | /* |
4051 | * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist | 4098 | * pageset_set_high() sets the high water mark for hot per_cpu_pagelist |
4052 | * to the value high for the pageset p. | 4099 | * to the value high for the pageset p. |
4053 | */ | 4100 | */ |
4054 | 4101 | static void pageset_set_high(struct per_cpu_pageset *p, | |
4055 | static void setup_pagelist_highmark(struct per_cpu_pageset *p, | ||
4056 | unsigned long high) | 4102 | unsigned long high) |
4057 | { | 4103 | { |
4058 | struct per_cpu_pages *pcp; | 4104 | unsigned long batch = max(1UL, high / 4); |
4105 | if ((high / 4) > (PAGE_SHIFT * 8)) | ||
4106 | batch = PAGE_SHIFT * 8; | ||
4059 | 4107 | ||
4060 | pcp = &p->pcp; | 4108 | pageset_update(&p->pcp, high, batch); |
4061 | pcp->high = high; | ||
4062 | pcp->batch = max(1UL, high/4); | ||
4063 | if ((high/4) > (PAGE_SHIFT * 8)) | ||
4064 | pcp->batch = PAGE_SHIFT * 8; | ||
4065 | } | 4109 | } |
4066 | 4110 | ||
4067 | static void __meminit setup_zone_pageset(struct zone *zone) | 4111 | static void __meminit pageset_set_high_and_batch(struct zone *zone, |
4112 | struct per_cpu_pageset *pcp) | ||
4068 | { | 4113 | { |
4069 | int cpu; | 4114 | if (percpu_pagelist_fraction) |
4070 | 4115 | pageset_set_high(pcp, | |
4071 | zone->pageset = alloc_percpu(struct per_cpu_pageset); | 4116 | (zone->managed_pages / |
4117 | percpu_pagelist_fraction)); | ||
4118 | else | ||
4119 | pageset_set_batch(pcp, zone_batchsize(zone)); | ||
4120 | } | ||
4072 | 4121 | ||
4073 | for_each_possible_cpu(cpu) { | 4122 | static void __meminit zone_pageset_init(struct zone *zone, int cpu) |
4074 | struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); | 4123 | { |
4124 | struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); | ||
4075 | 4125 | ||
4076 | setup_pageset(pcp, zone_batchsize(zone)); | 4126 | pageset_init(pcp); |
4127 | pageset_set_high_and_batch(zone, pcp); | ||
4128 | } | ||
4077 | 4129 | ||
4078 | if (percpu_pagelist_fraction) | 4130 | static void __meminit setup_zone_pageset(struct zone *zone) |
4079 | setup_pagelist_highmark(pcp, | 4131 | { |
4080 | (zone->managed_pages / | 4132 | int cpu; |
4081 | percpu_pagelist_fraction)); | 4133 | zone->pageset = alloc_percpu(struct per_cpu_pageset); |
4082 | } | 4134 | for_each_possible_cpu(cpu) |
4135 | zone_pageset_init(zone, cpu); | ||
4083 | } | 4136 | } |
4084 | 4137 | ||
4085 | /* | 4138 | /* |
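pageset_update() lets ->high and ->batch change without read-side locking by never letting a reader observe batch > high: batch is first dropped to a fail-safe 1, high is published, and only then the real batch, with write barriers between the steps. A user-space sketch of that ordering, using C11 fences in place of smp_wmb(); the struct and function names mirror the patch but are stand-ins.

    #include <stdatomic.h>

    struct pcp_like {
            atomic_ulong high;          /* flush threshold */
            atomic_ulong batch;         /* how much to flush; must never exceed high */
    };

    /* safe against lock-free readers: batch never observably exceeds high */
    static void pageset_update_like(struct pcp_like *pcp,
                                    unsigned long high, unsigned long batch)
    {
            /* start with a fail-safe batch */
            atomic_store_explicit(&pcp->batch, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);  /* ~ smp_wmb() */

            /* publish high, then the real batch, in that order */
            atomic_store_explicit(&pcp->high, high, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);  /* ~ smp_wmb() */

            atomic_store_explicit(&pcp->batch, batch, memory_order_relaxed);
    }

The kernel readers only ever take a single snapshot of ->batch (the ACCESS_ONCE() earlier in this file), so they need no read-side barrier; concurrent updaters still serialize on pcp_batch_high_lock.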
@@ -5150,35 +5203,101 @@ early_param("movablecore", cmdline_parse_movablecore); | |||
5150 | 5203 | ||
5151 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 5204 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
5152 | 5205 | ||
5153 | unsigned long free_reserved_area(unsigned long start, unsigned long end, | 5206 | void adjust_managed_page_count(struct page *page, long count) |
5154 | int poison, char *s) | 5207 | { |
5208 | spin_lock(&managed_page_count_lock); | ||
5209 | page_zone(page)->managed_pages += count; | ||
5210 | totalram_pages += count; | ||
5211 | #ifdef CONFIG_HIGHMEM | ||
5212 | if (PageHighMem(page)) | ||
5213 | totalhigh_pages += count; | ||
5214 | #endif | ||
5215 | spin_unlock(&managed_page_count_lock); | ||
5216 | } | ||
5217 | EXPORT_SYMBOL(adjust_managed_page_count); | ||
5218 | |||
5219 | unsigned long free_reserved_area(void *start, void *end, int poison, char *s) | ||
5155 | { | 5220 | { |
5156 | unsigned long pages, pos; | 5221 | void *pos; |
5222 | unsigned long pages = 0; | ||
5157 | 5223 | ||
5158 | pos = start = PAGE_ALIGN(start); | 5224 | start = (void *)PAGE_ALIGN((unsigned long)start); |
5159 | end &= PAGE_MASK; | 5225 | end = (void *)((unsigned long)end & PAGE_MASK); |
5160 | for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) { | 5226 | for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { |
5161 | if (poison) | 5227 | if ((unsigned int)poison <= 0xFF) |
5162 | memset((void *)pos, poison, PAGE_SIZE); | 5228 | memset(pos, poison, PAGE_SIZE); |
5163 | free_reserved_page(virt_to_page((void *)pos)); | 5229 | free_reserved_page(virt_to_page(pos)); |
5164 | } | 5230 | } |
5165 | 5231 | ||
5166 | if (pages && s) | 5232 | if (pages && s) |
5167 | pr_info("Freeing %s memory: %ldK (%lx - %lx)\n", | 5233 | pr_info("Freeing %s memory: %ldK (%p - %p)\n", |
5168 | s, pages << (PAGE_SHIFT - 10), start, end); | 5234 | s, pages << (PAGE_SHIFT - 10), start, end); |
5169 | 5235 | ||
5170 | return pages; | 5236 | return pages; |
5171 | } | 5237 | } |
5238 | EXPORT_SYMBOL(free_reserved_area); | ||
5172 | 5239 | ||
5173 | #ifdef CONFIG_HIGHMEM | 5240 | #ifdef CONFIG_HIGHMEM |
5174 | void free_highmem_page(struct page *page) | 5241 | void free_highmem_page(struct page *page) |
5175 | { | 5242 | { |
5176 | __free_reserved_page(page); | 5243 | __free_reserved_page(page); |
5177 | totalram_pages++; | 5244 | totalram_pages++; |
5245 | page_zone(page)->managed_pages++; | ||
5178 | totalhigh_pages++; | 5246 | totalhigh_pages++; |
5179 | } | 5247 | } |
5180 | #endif | 5248 | #endif |
5181 | 5249 | ||
5250 | |||
5251 | void __init mem_init_print_info(const char *str) | ||
5252 | { | ||
5253 | unsigned long physpages, codesize, datasize, rosize, bss_size; | ||
5254 | unsigned long init_code_size, init_data_size; | ||
5255 | |||
5256 | physpages = get_num_physpages(); | ||
5257 | codesize = _etext - _stext; | ||
5258 | datasize = _edata - _sdata; | ||
5259 | rosize = __end_rodata - __start_rodata; | ||
5260 | bss_size = __bss_stop - __bss_start; | ||
5261 | init_data_size = __init_end - __init_begin; | ||
5262 | init_code_size = _einittext - _sinittext; | ||
5263 | |||
5264 | /* | ||
5265 | * Detect special cases and adjust section sizes accordingly: | ||
5266 | * 1) .init.* may be embedded into .data sections | ||
5267 | * 2) .init.text.* may be out of [__init_begin, __init_end], | ||
5268 | * please refer to arch/tile/kernel/vmlinux.lds.S. | ||
5269 | * 3) .rodata.* may be embedded into .text or .data sections. | ||
5270 | */ | ||
5271 | #define adj_init_size(start, end, size, pos, adj) \ | ||
5272 | if (start <= pos && pos < end && size > adj) \ | ||
5273 | size -= adj; | ||
5274 | |||
5275 | adj_init_size(__init_begin, __init_end, init_data_size, | ||
5276 | _sinittext, init_code_size); | ||
5277 | adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); | ||
5278 | adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); | ||
5279 | adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); | ||
5280 | adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); | ||
5281 | |||
5282 | #undef adj_init_size | ||
5283 | |||
5284 | printk("Memory: %luK/%luK available " | ||
5285 | "(%luK kernel code, %luK rwdata, %luK rodata, " | ||
5286 | "%luK init, %luK bss, %luK reserved" | ||
5287 | #ifdef CONFIG_HIGHMEM | ||
5288 | ", %luK highmem" | ||
5289 | #endif | ||
5290 | "%s%s)\n", | ||
5291 | nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10), | ||
5292 | codesize >> 10, datasize >> 10, rosize >> 10, | ||
5293 | (init_data_size + init_code_size) >> 10, bss_size >> 10, | ||
5294 | (physpages - totalram_pages) << (PAGE_SHIFT-10), | ||
5295 | #ifdef CONFIG_HIGHMEM | ||
5296 | totalhigh_pages << (PAGE_SHIFT-10), | ||
5297 | #endif | ||
5298 | str ? ", " : "", str ? str : ""); | ||
5299 | } | ||
5300 | |||
5182 | /** | 5301 | /** |
5183 | * set_dma_reserve - set the specified number of pages reserved in the first zone | 5302 | * set_dma_reserve - set the specified number of pages reserved in the first zone |
5184 | * @new_dma_reserve: The number of pages to mark reserved | 5303 | * @new_dma_reserve: The number of pages to mark reserved |
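adjust_managed_page_count(), introduced in the hunk above, gathers the scattered totalram_pages/totalhigh_pages/zone->managed_pages updates behind a single spinlock, so hotplug, CMA and hugetlb adjust the counters through one audited path while readers keep doing plain loads of the unsigned long values. A minimal pthread sketch of that "one locked writer path, unlocked readers" shape; the counter names mimic the patch, everything else is a stand-in.

    #include <pthread.h>

    static pthread_mutex_t managed_page_count_lock = PTHREAD_MUTEX_INITIALIZER;

    static unsigned long totalram_pages;
    static unsigned long totalhigh_pages;

    struct zone_like { unsigned long managed_pages; int highmem; };

    /* the only place the managed-page counters are written */
    static void adjust_managed_page_count_like(struct zone_like *zone, long count)
    {
            pthread_mutex_lock(&managed_page_count_lock);
            zone->managed_pages += count;
            totalram_pages += count;
            if (zone->highmem)
                    totalhigh_pages += count;
            pthread_mutex_unlock(&managed_page_count_lock);
    }

Readers simply load the counters; only concurrent writers take the lock, matching the comment added next to managed_page_count_lock earlier in this file.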
@@ -5540,7 +5659,6 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, | |||
5540 | * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist | 5659 | * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist |
5541 | * can have before it gets flushed back to buddy allocator. | 5660 | * can have before it gets flushed back to buddy allocator. |
5542 | */ | 5661 | */ |
5543 | |||
5544 | int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, | 5662 | int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, |
5545 | void __user *buffer, size_t *length, loff_t *ppos) | 5663 | void __user *buffer, size_t *length, loff_t *ppos) |
5546 | { | 5664 | { |
@@ -5551,14 +5669,16 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, | |||
5551 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); | 5669 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); |
5552 | if (!write || (ret < 0)) | 5670 | if (!write || (ret < 0)) |
5553 | return ret; | 5671 | return ret; |
5672 | |||
5673 | mutex_lock(&pcp_batch_high_lock); | ||
5554 | for_each_populated_zone(zone) { | 5674 | for_each_populated_zone(zone) { |
5555 | for_each_possible_cpu(cpu) { | 5675 | unsigned long high; |
5556 | unsigned long high; | 5676 | high = zone->managed_pages / percpu_pagelist_fraction; |
5557 | high = zone->managed_pages / percpu_pagelist_fraction; | 5677 | for_each_possible_cpu(cpu) |
5558 | setup_pagelist_highmark( | 5678 | pageset_set_high(per_cpu_ptr(zone->pageset, cpu), |
5559 | per_cpu_ptr(zone->pageset, cpu), high); | 5679 | high); |
5560 | } | ||
5561 | } | 5680 | } |
5681 | mutex_unlock(&pcp_batch_high_lock); | ||
5562 | return 0; | 5682 | return 0; |
5563 | } | 5683 | } |
5564 | 5684 | ||
@@ -6047,32 +6167,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages) | |||
6047 | #endif | 6167 | #endif |
6048 | 6168 | ||
6049 | #ifdef CONFIG_MEMORY_HOTPLUG | 6169 | #ifdef CONFIG_MEMORY_HOTPLUG |
6050 | static int __meminit __zone_pcp_update(void *data) | 6170 | /* |
6051 | { | 6171 | * The zone indicated has a new number of managed_pages; batch sizes and percpu |
6052 | struct zone *zone = data; | 6172 | * page high values need to be recalculated. |
6053 | int cpu; | 6173 | */ |
6054 | unsigned long batch = zone_batchsize(zone), flags; | ||
6055 | |||
6056 | for_each_possible_cpu(cpu) { | ||
6057 | struct per_cpu_pageset *pset; | ||
6058 | struct per_cpu_pages *pcp; | ||
6059 | |||
6060 | pset = per_cpu_ptr(zone->pageset, cpu); | ||
6061 | pcp = &pset->pcp; | ||
6062 | |||
6063 | local_irq_save(flags); | ||
6064 | if (pcp->count > 0) | ||
6065 | free_pcppages_bulk(zone, pcp->count, pcp); | ||
6066 | drain_zonestat(zone, pset); | ||
6067 | setup_pageset(pset, batch); | ||
6068 | local_irq_restore(flags); | ||
6069 | } | ||
6070 | return 0; | ||
6071 | } | ||
6072 | |||
6073 | void __meminit zone_pcp_update(struct zone *zone) | 6174 | void __meminit zone_pcp_update(struct zone *zone) |
6074 | { | 6175 | { |
6075 | stop_machine(__zone_pcp_update, zone, NULL); | 6176 | unsigned cpu; |
6177 | mutex_lock(&pcp_batch_high_lock); | ||
6178 | for_each_possible_cpu(cpu) | ||
6179 | pageset_set_high_and_batch(zone, | ||
6180 | per_cpu_ptr(zone->pageset, cpu)); | ||
6181 | mutex_unlock(&pcp_batch_high_lock); | ||
6076 | } | 6182 | } |
6077 | #endif | 6183 | #endif |
6078 | 6184 | ||
@@ -6142,6 +6248,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) | |||
6142 | list_del(&page->lru); | 6248 | list_del(&page->lru); |
6143 | rmv_page_order(page); | 6249 | rmv_page_order(page); |
6144 | zone->free_area[order].nr_free--; | 6250 | zone->free_area[order].nr_free--; |
6251 | #ifdef CONFIG_HIGHMEM | ||
6252 | if (PageHighMem(page)) | ||
6253 | totalhigh_pages -= 1 << order; | ||
6254 | #endif | ||
6145 | for (i = 0; i < (1 << order); i++) | 6255 | for (i = 0; i < (1 << order); i++) |
6146 | SetPageReserved((page+i)); | 6256 | SetPageReserved((page+i)); |
6147 | pfn += (1 << order); | 6257 | pfn += (1 << order); |
diff --git a/mm/page_io.c b/mm/page_io.c index a8a3ef45fed7..ba05b64e5d8d 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/writeback.h> | 21 | #include <linux/writeback.h> |
22 | #include <linux/frontswap.h> | 22 | #include <linux/frontswap.h> |
23 | #include <linux/aio.h> | 23 | #include <linux/aio.h> |
24 | #include <linux/blkdev.h> | ||
24 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
25 | 26 | ||
26 | static struct bio *get_swap_bio(gfp_t gfp_flags, | 27 | static struct bio *get_swap_bio(gfp_t gfp_flags, |
@@ -80,9 +81,54 @@ void end_swap_bio_read(struct bio *bio, int err) | |||
80 | imajor(bio->bi_bdev->bd_inode), | 81 | imajor(bio->bi_bdev->bd_inode), |
81 | iminor(bio->bi_bdev->bd_inode), | 82 | iminor(bio->bi_bdev->bd_inode), |
82 | (unsigned long long)bio->bi_sector); | 83 | (unsigned long long)bio->bi_sector); |
83 | } else { | 84 | goto out; |
84 | SetPageUptodate(page); | ||
85 | } | 85 | } |
86 | |||
87 | SetPageUptodate(page); | ||
88 | |||
89 | /* | ||
90 | * There is no guarantee that the page is in swap cache - the software | ||
91 | * suspend code (at least) uses end_swap_bio_read() against a non- | ||
92 | * swapcache page. So we must check PG_swapcache before proceeding with | ||
93 | * this optimization. | ||
94 | */ | ||
95 | if (likely(PageSwapCache(page))) { | ||
96 | struct swap_info_struct *sis; | ||
97 | |||
98 | sis = page_swap_info(page); | ||
99 | if (sis->flags & SWP_BLKDEV) { | ||
100 | /* | ||
101 | * The swap subsystem performs lazy swap slot freeing, | ||
102 | * expecting that the page will be swapped out again. | ||
103 | * So we can avoid an unnecessary write if the page | ||
104 | * isn't redirtied. | ||
105 | * This is good for real swap storage because we can | ||
106 | * reduce unnecessary I/O and enhance wear-leveling | ||
107 | * if an SSD is used as the swap device. | ||
108 | * But if in-memory swap device (eg zram) is used, | ||
109 | * this causes a duplicated copy between uncompressed | ||
110 | * data in VM-owned memory and compressed data in | ||
111 | * zram-owned memory. So let's free zram-owned memory | ||
112 | * and make the VM-owned decompressed page *dirty*, | ||
113 | * so the page should be swapped out somewhere again if | ||
114 | * we again wish to reclaim it. | ||
115 | */ | ||
116 | struct gendisk *disk = sis->bdev->bd_disk; | ||
117 | if (disk->fops->swap_slot_free_notify) { | ||
118 | swp_entry_t entry; | ||
119 | unsigned long offset; | ||
120 | |||
121 | entry.val = page_private(page); | ||
122 | offset = swp_offset(entry); | ||
123 | |||
124 | SetPageDirty(page); | ||
125 | disk->fops->swap_slot_free_notify(sis->bdev, | ||
126 | offset); | ||
127 | } | ||
128 | } | ||
129 | } | ||
130 | |||
131 | out: | ||
86 | unlock_page(page); | 132 | unlock_page(page); |
87 | bio_put(bio); | 133 | bio_put(bio); |
88 | } | 134 | } |
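The long comment added to end_swap_bio_read() explains why, when the swap device is an in-memory store such as zram, the just-read page is marked dirty and the device is told to free the swap slot: otherwise the data would exist twice, compressed in zram and uncompressed in the page. A toy sketch of that "drop the backing copy after read and re-dirty the object" idea; the structures are invented stand-ins for page/swap_info_struct, not the swap API.

    #include <stdbool.h>
    #include <stdlib.h>

    struct slot { void *compressed; };          /* zram-like in-memory backing copy */
    struct object { bool dirty; struct slot *slot; };

    static void slot_free(struct slot *s)
    {
            free(s->compressed);
            s->compressed = NULL;
    }

    /* after reading the object back in, drop the duplicate backing copy */
    static void end_read(struct object *obj)
    {
            if (obj->slot && obj->slot->compressed) {
                    obj->dirty = true;          /* must be written out again if evicted */
                    slot_free(obj->slot);
            }
    }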
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page, | |||
1093 | else | 1093 | else |
1094 | __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); | 1094 | __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); |
1095 | __page_set_anon_rmap(page, vma, address, 1); | 1095 | __page_set_anon_rmap(page, vma, address, 1); |
1096 | if (!mlocked_vma_newpage(vma, page)) | 1096 | if (!mlocked_vma_newpage(vma, page)) { |
1097 | lru_cache_add_lru(page, LRU_ACTIVE_ANON); | 1097 | SetPageActive(page); |
1098 | else | 1098 | lru_cache_add(page); |
1099 | } else | ||
1099 | add_page_to_unevictable_list(page); | 1100 | add_page_to_unevictable_list(page); |
1100 | } | 1101 | } |
1101 | 1102 | ||
diff --git a/mm/sparse.c b/mm/sparse.c index 1c91f0d3f6ab..3194ec414728 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -481,6 +481,9 @@ void __init sparse_init(void) | |||
481 | struct page **map_map; | 481 | struct page **map_map; |
482 | #endif | 482 | #endif |
483 | 483 | ||
484 | /* see include/linux/mmzone.h 'struct mem_section' definition */ | ||
485 | BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section))); | ||
486 | |||
484 | /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ | 487 | /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ |
485 | set_pageblock_order(); | 488 | set_pageblock_order(); |
486 | 489 | ||
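sparse_init() now has a BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section))), as noted in the added comment, so the section lookup arithmetic can rely on shifts and masks. The same compile-time check expressed in plain C11, with a stand-in struct rather than the real mem_section.

    struct mem_section_like {
            unsigned long section_mem_map;
            unsigned long *pageblock_flags;
    };

    /* fails to compile if the struct size ever stops being a power of two */
    _Static_assert((sizeof(struct mem_section_like) &
                    (sizeof(struct mem_section_like) - 1)) == 0,
                   "struct mem_section_like size must be a power of 2");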
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -34,10 +34,13 @@ | |||
34 | 34 | ||
35 | #include "internal.h" | 35 | #include "internal.h" |
36 | 36 | ||
37 | #define CREATE_TRACE_POINTS | ||
38 | #include <trace/events/pagemap.h> | ||
39 | |||
37 | /* How many pages do we try to swap or page in/out together? */ | 40 | /* How many pages do we try to swap or page in/out together? */ |
38 | int page_cluster; | 41 | int page_cluster; |
39 | 42 | ||
40 | static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); | 43 | static DEFINE_PER_CPU(struct pagevec, lru_add_pvec); |
41 | static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); | 44 | static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); |
42 | static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); | 45 | static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); |
43 | 46 | ||
@@ -384,6 +387,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, | |||
384 | SetPageActive(page); | 387 | SetPageActive(page); |
385 | lru += LRU_ACTIVE; | 388 | lru += LRU_ACTIVE; |
386 | add_page_to_lru_list(page, lruvec, lru); | 389 | add_page_to_lru_list(page, lruvec, lru); |
390 | trace_mm_lru_activate(page, page_to_pfn(page)); | ||
387 | 391 | ||
388 | __count_vm_event(PGACTIVATE); | 392 | __count_vm_event(PGACTIVATE); |
389 | update_page_reclaim_stat(lruvec, file, 1); | 393 | update_page_reclaim_stat(lruvec, file, 1); |
@@ -428,6 +432,33 @@ void activate_page(struct page *page) | |||
428 | } | 432 | } |
429 | #endif | 433 | #endif |
430 | 434 | ||
435 | static void __lru_cache_activate_page(struct page *page) | ||
436 | { | ||
437 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); | ||
438 | int i; | ||
439 | |||
440 | /* | ||
441 | * Search backwards on the optimistic assumption that the page being | ||
442 | * activated has just been added to this pagevec. Note that only | ||
443 | * the local pagevec is examined as a !PageLRU page could be in the | ||
444 | * process of being released, reclaimed, migrated or on a remote | ||
445 | * pagevec that is currently being drained. Furthermore, marking | ||
446 | * a remote pagevec's page PageActive potentially hits a race where | ||
447 | * a page is marked PageActive just after it is added to the inactive | ||
448 | * list causing accounting errors and BUG_ON checks to trigger. | ||
449 | */ | ||
450 | for (i = pagevec_count(pvec) - 1; i >= 0; i--) { | ||
451 | struct page *pagevec_page = pvec->pages[i]; | ||
452 | |||
453 | if (pagevec_page == page) { | ||
454 | SetPageActive(page); | ||
455 | break; | ||
456 | } | ||
457 | } | ||
458 | |||
459 | put_cpu_var(lru_add_pvec); | ||
460 | } | ||
461 | |||
431 | /* | 462 | /* |
432 | * Mark a page as having seen activity. | 463 | * Mark a page as having seen activity. |
433 | * | 464 | * |
@@ -438,8 +469,18 @@ void activate_page(struct page *page) | |||
438 | void mark_page_accessed(struct page *page) | 469 | void mark_page_accessed(struct page *page) |
439 | { | 470 | { |
440 | if (!PageActive(page) && !PageUnevictable(page) && | 471 | if (!PageActive(page) && !PageUnevictable(page) && |
441 | PageReferenced(page) && PageLRU(page)) { | 472 | PageReferenced(page)) { |
442 | activate_page(page); | 473 | |
474 | /* | ||
475 | * If the page is on the LRU, queue it for activation via | ||
476 | * activate_page_pvecs. Otherwise, assume the page is on a | ||
477 | * pagevec, mark it active and it'll be moved to the active | ||
478 | * LRU on the next drain. | ||
479 | */ | ||
480 | if (PageLRU(page)) | ||
481 | activate_page(page); | ||
482 | else | ||
483 | __lru_cache_activate_page(page); | ||
443 | ClearPageReferenced(page); | 484 | ClearPageReferenced(page); |
444 | } else if (!PageReferenced(page)) { | 485 | } else if (!PageReferenced(page)) { |
445 | SetPageReferenced(page); | 486 | SetPageReferenced(page); |
@@ -448,42 +489,37 @@ void mark_page_accessed(struct page *page) | |||
448 | EXPORT_SYMBOL(mark_page_accessed); | 489 | EXPORT_SYMBOL(mark_page_accessed); |
449 | 490 | ||
450 | /* | 491 | /* |
451 | * Order of operations is important: flush the pagevec when it's already | 492 | * Queue the page for addition to the LRU via pagevec. The decision on whether |
452 | * full, not when adding the last page, to make sure that last page is | 493 | * to add the page to the [in]active [file|anon] list is deferred until the |
453 | * not added to the LRU directly when passed to this function. Because | 494 | * pagevec is drained. This gives a chance for the caller of __lru_cache_add() |
454 | * mark_page_accessed() (called after this when writing) only activates | 495 | * to have the page added to the active list using mark_page_accessed(). |
455 | * pages that are on the LRU, linear writes in subpage chunks would see | ||
456 | * every PAGEVEC_SIZE page activated, which is unexpected. | ||
457 | */ | 496 | */ |
458 | void __lru_cache_add(struct page *page, enum lru_list lru) | 497 | void __lru_cache_add(struct page *page) |
459 | { | 498 | { |
460 | struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; | 499 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
461 | 500 | ||
462 | page_cache_get(page); | 501 | page_cache_get(page); |
463 | if (!pagevec_space(pvec)) | 502 | if (!pagevec_space(pvec)) |
464 | __pagevec_lru_add(pvec, lru); | 503 | __pagevec_lru_add(pvec); |
465 | pagevec_add(pvec, page); | 504 | pagevec_add(pvec, page); |
466 | put_cpu_var(lru_add_pvecs); | 505 | put_cpu_var(lru_add_pvec); |
467 | } | 506 | } |
468 | EXPORT_SYMBOL(__lru_cache_add); | 507 | EXPORT_SYMBOL(__lru_cache_add); |
469 | 508 | ||
470 | /** | 509 | /** |
471 | * lru_cache_add_lru - add a page to a page list | 510 | * lru_cache_add - add a page to a page list |
472 | * @page: the page to be added to the LRU. | 511 | * @page: the page to be added to the LRU. |
473 | * @lru: the LRU list to which the page is added. | ||
474 | */ | 512 | */ |
475 | void lru_cache_add_lru(struct page *page, enum lru_list lru) | 513 | void lru_cache_add(struct page *page) |
476 | { | 514 | { |
477 | if (PageActive(page)) { | 515 | if (PageActive(page)) { |
478 | VM_BUG_ON(PageUnevictable(page)); | 516 | VM_BUG_ON(PageUnevictable(page)); |
479 | ClearPageActive(page); | ||
480 | } else if (PageUnevictable(page)) { | 517 | } else if (PageUnevictable(page)) { |
481 | VM_BUG_ON(PageActive(page)); | 518 | VM_BUG_ON(PageActive(page)); |
482 | ClearPageUnevictable(page); | ||
483 | } | 519 | } |
484 | 520 | ||
485 | VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page)); | 521 | VM_BUG_ON(PageLRU(page)); |
486 | __lru_cache_add(page, lru); | 522 | __lru_cache_add(page); |
487 | } | 523 | } |
488 | 524 | ||
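For illustration, a minimal sketch of the call pattern the reworked API expects (assumed call site, not code from this series): a caller either sets PageActive before queueing, as the page_add_new_anon_rmap() hunk above now does, or queues the page and promotes it afterwards.

	lru_cache_add(page);		/* page is queued on this CPU's lru_add_pvec */
	mark_page_accessed(page);	/* first touch only sets PageReferenced */
	mark_page_accessed(page);	/* second touch: the page is still !PageLRU, so
					 * __lru_cache_activate_page() finds it in the
					 * local pagevec and sets PageActive */
	/*
	 * When the pagevec is drained, __pagevec_lru_add_fn() derives the list
	 * from page_lru(page) and files the page straight onto the active LRU.
	 */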
489 | /** | 525 | /** |
@@ -583,15 +619,10 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, | |||
583 | */ | 619 | */ |
584 | void lru_add_drain_cpu(int cpu) | 620 | void lru_add_drain_cpu(int cpu) |
585 | { | 621 | { |
586 | struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu); | 622 | struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); |
587 | struct pagevec *pvec; | ||
588 | int lru; | ||
589 | 623 | ||
590 | for_each_lru(lru) { | 624 | if (pagevec_count(pvec)) |
591 | pvec = &pvecs[lru - LRU_BASE]; | 625 | __pagevec_lru_add(pvec); |
592 | if (pagevec_count(pvec)) | ||
593 | __pagevec_lru_add(pvec, lru); | ||
594 | } | ||
595 | 626 | ||
596 | pvec = &per_cpu(lru_rotate_pvecs, cpu); | 627 | pvec = &per_cpu(lru_rotate_pvecs, cpu); |
597 | if (pagevec_count(pvec)) { | 628 | if (pagevec_count(pvec)) { |
@@ -708,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold) | |||
708 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); | 739 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); |
709 | } | 740 | } |
710 | 741 | ||
742 | /* Clear Active bit in case of parallel mark_page_accessed */ | ||
743 | ClearPageActive(page); | ||
744 | |||
711 | list_add(&page->lru, &pages_to_free); | 745 | list_add(&page->lru, &pages_to_free); |
712 | } | 746 | } |
713 | if (zone) | 747 | if (zone) |
@@ -795,30 +829,26 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
795 | static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, | 829 | static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, |
796 | void *arg) | 830 | void *arg) |
797 | { | 831 | { |
798 | enum lru_list lru = (enum lru_list)arg; | 832 | int file = page_is_file_cache(page); |
799 | int file = is_file_lru(lru); | 833 | int active = PageActive(page); |
800 | int active = is_active_lru(lru); | 834 | enum lru_list lru = page_lru(page); |
801 | 835 | ||
802 | VM_BUG_ON(PageActive(page)); | ||
803 | VM_BUG_ON(PageUnevictable(page)); | 836 | VM_BUG_ON(PageUnevictable(page)); |
804 | VM_BUG_ON(PageLRU(page)); | 837 | VM_BUG_ON(PageLRU(page)); |
805 | 838 | ||
806 | SetPageLRU(page); | 839 | SetPageLRU(page); |
807 | if (active) | ||
808 | SetPageActive(page); | ||
809 | add_page_to_lru_list(page, lruvec, lru); | 840 | add_page_to_lru_list(page, lruvec, lru); |
810 | update_page_reclaim_stat(lruvec, file, active); | 841 | update_page_reclaim_stat(lruvec, file, active); |
842 | trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page)); | ||
811 | } | 843 | } |
812 | 844 | ||
813 | /* | 845 | /* |
814 | * Add the passed pages to the LRU, then drop the caller's refcount | 846 | * Add the passed pages to the LRU, then drop the caller's refcount |
815 | * on them. Reinitialises the caller's pagevec. | 847 | * on them. Reinitialises the caller's pagevec. |
816 | */ | 848 | */ |
817 | void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) | 849 | void __pagevec_lru_add(struct pagevec *pvec) |
818 | { | 850 | { |
819 | VM_BUG_ON(is_unevictable_lru(lru)); | 851 | pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL); |
820 | |||
821 | pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru); | ||
822 | } | 852 | } |
823 | EXPORT_SYMBOL(__pagevec_lru_add); | 853 | EXPORT_SYMBOL(__pagevec_lru_add); |
824 | 854 | ||
diff --git a/mm/swapfile.c b/mm/swapfile.c index 746af55b8455..36af6eeaa67e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -212,7 +212,7 @@ static unsigned long scan_swap_map(struct swap_info_struct *si, | |||
212 | si->cluster_nr = SWAPFILE_CLUSTER - 1; | 212 | si->cluster_nr = SWAPFILE_CLUSTER - 1; |
213 | goto checks; | 213 | goto checks; |
214 | } | 214 | } |
215 | if (si->flags & SWP_DISCARDABLE) { | 215 | if (si->flags & SWP_PAGE_DISCARD) { |
216 | /* | 216 | /* |
217 | * Start range check on racing allocations, in case | 217 | * Start range check on racing allocations, in case |
218 | * they overlap the cluster we eventually decide on | 218 | * they overlap the cluster we eventually decide on |
@@ -322,7 +322,7 @@ checks: | |||
322 | 322 | ||
323 | if (si->lowest_alloc) { | 323 | if (si->lowest_alloc) { |
324 | /* | 324 | /* |
325 | * Only set when SWP_DISCARDABLE, and there's a scan | 325 | * Only set when SWP_PAGE_DISCARD, and there's a scan |
326 | * for a free cluster in progress or just completed. | 326 | * for a free cluster in progress or just completed. |
327 | */ | 327 | */ |
328 | if (found_free_cluster) { | 328 | if (found_free_cluster) { |
@@ -2016,6 +2016,20 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, | |||
2016 | return nr_extents; | 2016 | return nr_extents; |
2017 | } | 2017 | } |
2018 | 2018 | ||
2019 | /* | ||
2020 | * Helper to sys_swapon determining if a given swap | ||
2021 | * backing device queue supports DISCARD operations. | ||
2022 | */ | ||
2023 | static bool swap_discardable(struct swap_info_struct *si) | ||
2024 | { | ||
2025 | struct request_queue *q = bdev_get_queue(si->bdev); | ||
2026 | |||
2027 | if (!q || !blk_queue_discard(q)) | ||
2028 | return false; | ||
2029 | |||
2030 | return true; | ||
2031 | } | ||
2032 | |||
2019 | SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | 2033 | SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) |
2020 | { | 2034 | { |
2021 | struct swap_info_struct *p; | 2035 | struct swap_info_struct *p; |
@@ -2123,8 +2137,37 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2123 | p->flags |= SWP_SOLIDSTATE; | 2137 | p->flags |= SWP_SOLIDSTATE; |
2124 | p->cluster_next = 1 + (prandom_u32() % p->highest_bit); | 2138 | p->cluster_next = 1 + (prandom_u32() % p->highest_bit); |
2125 | } | 2139 | } |
2126 | if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0) | 2140 | |
2127 | p->flags |= SWP_DISCARDABLE; | 2141 | if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { |
2142 | /* | ||
2143 | * When discard is enabled for swap with no particular | ||
2144 | * policy flagged, we set all swap discard flags here in | ||
2145 | * order to sustain backward compatibility with older | ||
2146 | * swapon(8) releases. | ||
2147 | */ | ||
2148 | p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | | ||
2149 | SWP_PAGE_DISCARD); | ||
2150 | |||
2151 | /* | ||
2152 | * By flagging sys_swapon, a sysadmin can tell us to | ||
2153 | * either do single-time area discards only, or to just | ||
2154 | * perform discards for released swap page-clusters. | ||
2155 | * Now it's time to adjust the p->flags accordingly. | ||
2156 | */ | ||
2157 | if (swap_flags & SWAP_FLAG_DISCARD_ONCE) | ||
2158 | p->flags &= ~SWP_PAGE_DISCARD; | ||
2159 | else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) | ||
2160 | p->flags &= ~SWP_AREA_DISCARD; | ||
2161 | |||
2162 | /* issue a swapon-time discard if it's still required */ | ||
2163 | if (p->flags & SWP_AREA_DISCARD) { | ||
2164 | int err = discard_swap(p); | ||
2165 | if (unlikely(err)) | ||
2166 | printk(KERN_ERR | ||
2167 | "swapon: discard_swap(%p): %d\n", | ||
2168 | p, err); | ||
2169 | } | ||
2170 | } | ||
2128 | } | 2171 | } |
2129 | 2172 | ||
2130 | mutex_lock(&swapon_mutex); | 2173 | mutex_lock(&swapon_mutex); |
@@ -2135,11 +2178,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2135 | enable_swap_info(p, prio, swap_map, frontswap_map); | 2178 | enable_swap_info(p, prio, swap_map, frontswap_map); |
2136 | 2179 | ||
2137 | printk(KERN_INFO "Adding %uk swap on %s. " | 2180 | printk(KERN_INFO "Adding %uk swap on %s. " |
2138 | "Priority:%d extents:%d across:%lluk %s%s%s\n", | 2181 | "Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", |
2139 | p->pages<<(PAGE_SHIFT-10), name->name, p->prio, | 2182 | p->pages<<(PAGE_SHIFT-10), name->name, p->prio, |
2140 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), | 2183 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), |
2141 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", | 2184 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", |
2142 | (p->flags & SWP_DISCARDABLE) ? "D" : "", | 2185 | (p->flags & SWP_DISCARDABLE) ? "D" : "", |
2186 | (p->flags & SWP_AREA_DISCARD) ? "s" : "", | ||
2187 | (p->flags & SWP_PAGE_DISCARD) ? "c" : "", | ||
2143 | (frontswap_map) ? "FS" : ""); | 2188 | (frontswap_map) ? "FS" : ""); |
2144 | 2189 | ||
2145 | mutex_unlock(&swapon_mutex); | 2190 | mutex_unlock(&swapon_mutex); |
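For context, a hedged userspace sketch of how the new discard policy would be requested (hypothetical call, not part of the patch; SWAP_FLAG_DISCARD_ONCE and SWAP_FLAG_DISCARD_PAGES are kernel-side definitions here, so the constants below are assumptions a caller such as util-linux would define locally):

#include <stdio.h>
#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD	0x10000	/* assumed to mirror <linux/swap.h> */
#endif
#ifndef SWAP_FLAG_DISCARD_PAGES
#define SWAP_FLAG_DISCARD_PAGES	0x40000	/* assumed to mirror <linux/swap.h> */
#endif

int main(void)
{
	/* request per-page-cluster discards only, skipping the swapon-time area discard */
	if (swapon("/dev/sdb2", SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES) < 0) {
		perror("swapon");
		return 1;
	}
	return 0;
}

With these flags the "Adding ...k swap" line printed above would show "D" plus "c" for page-cluster discards; selecting SWAP_FLAG_DISCARD_ONCE instead keeps only the swapon-time area discard and prints "s".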
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d365724feb05..91a10472a39a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -292,7 +292,7 @@ static struct vmap_area *__find_vmap_area(unsigned long addr) | |||
292 | va = rb_entry(n, struct vmap_area, rb_node); | 292 | va = rb_entry(n, struct vmap_area, rb_node); |
293 | if (addr < va->va_start) | 293 | if (addr < va->va_start) |
294 | n = n->rb_left; | 294 | n = n->rb_left; |
295 | else if (addr > va->va_start) | 295 | else if (addr >= va->va_end) |
296 | n = n->rb_right; | 296 | n = n->rb_right; |
297 | else | 297 | else |
298 | return va; | 298 | return va; |
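A worked example makes the intent of the relaxed comparison clearer (addresses assumed for illustration):

/*
 * Suppose the current rbtree node is a vmap_area with va_start = 0x1000
 * and va_end = 0x3000, and __find_vmap_area() is looking up addr = 0x1800.
 *
 *   old test: addr > va->va_start  -> 0x1800 > 0x1000, descend right and
 *             miss the area; only addr == va_start could ever match
 *   new test: addr >= va->va_end   -> 0x1800 >= 0x3000 is false, so the
 *             node is recognised as covering addr and is returned
 */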
@@ -1322,13 +1322,6 @@ static void clear_vm_unlist(struct vm_struct *vm) | |||
1322 | vm->flags &= ~VM_UNLIST; | 1322 | vm->flags &= ~VM_UNLIST; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, | ||
1326 | unsigned long flags, const void *caller) | ||
1327 | { | ||
1328 | setup_vmalloc_vm(vm, va, flags, caller); | ||
1329 | clear_vm_unlist(vm); | ||
1330 | } | ||
1331 | |||
1332 | static struct vm_struct *__get_vm_area_node(unsigned long size, | 1325 | static struct vm_struct *__get_vm_area_node(unsigned long size, |
1333 | unsigned long align, unsigned long flags, unsigned long start, | 1326 | unsigned long align, unsigned long flags, unsigned long start, |
1334 | unsigned long end, int node, gfp_t gfp_mask, const void *caller) | 1327 | unsigned long end, int node, gfp_t gfp_mask, const void *caller) |
@@ -1337,16 +1330,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, | |||
1337 | struct vm_struct *area; | 1330 | struct vm_struct *area; |
1338 | 1331 | ||
1339 | BUG_ON(in_interrupt()); | 1332 | BUG_ON(in_interrupt()); |
1340 | if (flags & VM_IOREMAP) { | 1333 | if (flags & VM_IOREMAP) |
1341 | int bit = fls(size); | 1334 | align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); |
1342 | |||
1343 | if (bit > IOREMAP_MAX_ORDER) | ||
1344 | bit = IOREMAP_MAX_ORDER; | ||
1345 | else if (bit < PAGE_SHIFT) | ||
1346 | bit = PAGE_SHIFT; | ||
1347 | |||
1348 | align = 1ul << bit; | ||
1349 | } | ||
1350 | 1335 | ||
1351 | size = PAGE_ALIGN(size); | 1336 | size = PAGE_ALIGN(size); |
1352 | if (unlikely(!size)) | 1337 | if (unlikely(!size)) |
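The clamp() form preserves the behaviour of the removed branch ladder; a quick worked example with assumed constants (4KiB pages, so PAGE_SHIFT = 12, and the generic IOREMAP_MAX_ORDER fallback of 7 + PAGE_SHIFT = 19):

/*
 *   size = 0x3000 (12KiB):  fls(0x3000) = 14, clamp(14, 12, 19) = 14,
 *                           align = 1ul << 14 = 16KiB
 *   size = 0x200:           fls(0x200) = 10, clamped up to 12,
 *                           align = 4KiB (page alignment)
 *   size = 4MiB:            fls() = 23, clamped down to 19,
 *                           align = 512KiB
 */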
@@ -1367,16 +1352,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, | |||
1367 | return NULL; | 1352 | return NULL; |
1368 | } | 1353 | } |
1369 | 1354 | ||
1370 | /* | 1355 | setup_vmalloc_vm(area, va, flags, caller); |
1371 | * When this function is called from __vmalloc_node_range, | ||
1372 | * we add VM_UNLIST flag to avoid accessing uninitialized | ||
1373 | * members of vm_struct such as pages and nr_pages fields. | ||
1374 | * They will be set later. | ||
1375 | */ | ||
1376 | if (flags & VM_UNLIST) | ||
1377 | setup_vmalloc_vm(area, va, flags, caller); | ||
1378 | else | ||
1379 | insert_vmalloc_vm(area, va, flags, caller); | ||
1380 | 1356 | ||
1381 | return area; | 1357 | return area; |
1382 | } | 1358 | } |
@@ -1476,10 +1452,9 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
1476 | if (!addr) | 1452 | if (!addr) |
1477 | return; | 1453 | return; |
1478 | 1454 | ||
1479 | if ((PAGE_SIZE-1) & (unsigned long)addr) { | 1455 | if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", |
1480 | WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr); | 1456 | addr)) |
1481 | return; | 1457 | return; |
1482 | } | ||
1483 | 1458 | ||
1484 | area = remove_vm_area(addr); | 1459 | area = remove_vm_area(addr); |
1485 | if (unlikely(!area)) { | 1460 | if (unlikely(!area)) { |
@@ -2148,42 +2123,43 @@ finished: | |||
2148 | } | 2123 | } |
2149 | 2124 | ||
2150 | /** | 2125 | /** |
2151 | * remap_vmalloc_range - map vmalloc pages to userspace | 2126 | * remap_vmalloc_range_partial - map vmalloc pages to userspace |
2152 | * @vma: vma to cover (map full range of vma) | 2127 | * @vma: vma to cover |
2153 | * @addr: vmalloc memory | 2128 | * @uaddr: target user address to start at |
2154 | * @pgoff: number of pages into addr before first page to map | 2129 | * @kaddr: virtual address of vmalloc kernel memory |
2130 | * @size: size of map area | ||
2155 | * | 2131 | * |
2156 | * Returns: 0 for success, -Exxx on failure | 2132 | * Returns: 0 for success, -Exxx on failure |
2157 | * | 2133 | * |
2158 | * This function checks that addr is a valid vmalloc'ed area, and | 2134 | * This function checks that @kaddr is a valid vmalloc'ed area, |
2159 | * that it is big enough to cover the vma. Will return failure if | 2135 | * and that it is big enough to cover the range starting at |
2160 | * that criteria isn't met. | 2136 | * @uaddr in @vma. Will return failure if that criteria isn't |
2137 | * met. | ||
2161 | * | 2138 | * |
2162 | * Similar to remap_pfn_range() (see mm/memory.c) | 2139 | * Similar to remap_pfn_range() (see mm/memory.c) |
2163 | */ | 2140 | */ |
2164 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 2141 | int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, |
2165 | unsigned long pgoff) | 2142 | void *kaddr, unsigned long size) |
2166 | { | 2143 | { |
2167 | struct vm_struct *area; | 2144 | struct vm_struct *area; |
2168 | unsigned long uaddr = vma->vm_start; | ||
2169 | unsigned long usize = vma->vm_end - vma->vm_start; | ||
2170 | 2145 | ||
2171 | if ((PAGE_SIZE-1) & (unsigned long)addr) | 2146 | size = PAGE_ALIGN(size); |
2147 | |||
2148 | if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) | ||
2172 | return -EINVAL; | 2149 | return -EINVAL; |
2173 | 2150 | ||
2174 | area = find_vm_area(addr); | 2151 | area = find_vm_area(kaddr); |
2175 | if (!area) | 2152 | if (!area) |
2176 | return -EINVAL; | 2153 | return -EINVAL; |
2177 | 2154 | ||
2178 | if (!(area->flags & VM_USERMAP)) | 2155 | if (!(area->flags & VM_USERMAP)) |
2179 | return -EINVAL; | 2156 | return -EINVAL; |
2180 | 2157 | ||
2181 | if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) | 2158 | if (kaddr + size > area->addr + area->size) |
2182 | return -EINVAL; | 2159 | return -EINVAL; |
2183 | 2160 | ||
2184 | addr += pgoff << PAGE_SHIFT; | ||
2185 | do { | 2161 | do { |
2186 | struct page *page = vmalloc_to_page(addr); | 2162 | struct page *page = vmalloc_to_page(kaddr); |
2187 | int ret; | 2163 | int ret; |
2188 | 2164 | ||
2189 | ret = vm_insert_page(vma, uaddr, page); | 2165 | ret = vm_insert_page(vma, uaddr, page); |
@@ -2191,14 +2167,37 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | |||
2191 | return ret; | 2167 | return ret; |
2192 | 2168 | ||
2193 | uaddr += PAGE_SIZE; | 2169 | uaddr += PAGE_SIZE; |
2194 | addr += PAGE_SIZE; | 2170 | kaddr += PAGE_SIZE; |
2195 | usize -= PAGE_SIZE; | 2171 | size -= PAGE_SIZE; |
2196 | } while (usize > 0); | 2172 | } while (size > 0); |
2197 | 2173 | ||
2198 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; | 2174 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; |
2199 | 2175 | ||
2200 | return 0; | 2176 | return 0; |
2201 | } | 2177 | } |
2178 | EXPORT_SYMBOL(remap_vmalloc_range_partial); | ||
2179 | |||
2180 | /** | ||
2181 | * remap_vmalloc_range - map vmalloc pages to userspace | ||
2182 | * @vma: vma to cover (map full range of vma) | ||
2183 | * @addr: vmalloc memory | ||
2184 | * @pgoff: number of pages into addr before first page to map | ||
2185 | * | ||
2186 | * Returns: 0 for success, -Exxx on failure | ||
2187 | * | ||
2188 | * This function checks that addr is a valid vmalloc'ed area, and | ||
2189 | * that it is big enough to cover the vma. Will return failure if | ||
2190 | * that criteria isn't met. | ||
2191 | * | ||
2192 | * Similar to remap_pfn_range() (see mm/memory.c) | ||
2193 | */ | ||
2194 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | ||
2195 | unsigned long pgoff) | ||
2196 | { | ||
2197 | return remap_vmalloc_range_partial(vma, vma->vm_start, | ||
2198 | addr + (pgoff << PAGE_SHIFT), | ||
2199 | vma->vm_end - vma->vm_start); | ||
2200 | } | ||
2202 | EXPORT_SYMBOL(remap_vmalloc_range); | 2201 | EXPORT_SYMBOL(remap_vmalloc_range); |
2203 | 2202 | ||
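A minimal sketch of a caller of the new partial variant (hypothetical driver code, not from this series; example_buf and BUF_SIZE are assumptions, and the buffer must come from vmalloc_user() so that VM_USERMAP is set):

#define BUF_SIZE	(256 * PAGE_SIZE)	/* assumed buffer size */
static void *example_buf;			/* assume example_buf = vmalloc_user(BUF_SIZE) at init */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off >= BUF_SIZE || len > BUF_SIZE - off)
		return -EINVAL;

	/* map 'len' bytes of the kernel buffer, starting 'off' bytes in */
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   example_buf + off, len);
}

remap_vmalloc_range() below is now just this pattern with the offset taken from pgoff and the length covering the whole vma; the partial variant lets a caller (the /proc/vmcore mmap support added elsewhere in this series) place pieces of several kernel buffers at different user addresses within one vma.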
2204 | /* | 2203 | /* |
@@ -2512,8 +2511,8 @@ found: | |||
2512 | 2511 | ||
2513 | /* insert all vm's */ | 2512 | /* insert all vm's */ |
2514 | for (area = 0; area < nr_vms; area++) | 2513 | for (area = 0; area < nr_vms; area++) |
2515 | insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC, | 2514 | setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, |
2516 | pcpu_get_vm_areas); | 2515 | pcpu_get_vm_areas); |
2517 | 2516 | ||
2518 | kfree(vas); | 2517 | kfree(vas); |
2519 | return vms; | 2518 | return vms; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index fa6a85378ee4..99b3ac7771ad 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page) | |||
546 | void putback_lru_page(struct page *page) | 546 | void putback_lru_page(struct page *page) |
547 | { | 547 | { |
548 | int lru; | 548 | int lru; |
549 | int active = !!TestClearPageActive(page); | ||
550 | int was_unevictable = PageUnevictable(page); | 549 | int was_unevictable = PageUnevictable(page); |
551 | 550 | ||
552 | VM_BUG_ON(PageLRU(page)); | 551 | VM_BUG_ON(PageLRU(page)); |
@@ -561,8 +560,8 @@ redo: | |||
561 | * unevictable page on [in]active list. | 560 | * unevictable page on [in]active list. |
562 | * We know how to handle that. | 561 | * We know how to handle that. |
563 | */ | 562 | */ |
564 | lru = active + page_lru_base_type(page); | 563 | lru = page_lru_base_type(page); |
565 | lru_cache_add_lru(page, lru); | 564 | lru_cache_add(page); |
566 | } else { | 565 | } else { |
567 | /* | 566 | /* |
568 | * Put unevictable pages directly on zone's unevictable | 567 | * Put unevictable pages directly on zone's unevictable |
@@ -669,6 +668,35 @@ static enum page_references page_check_references(struct page *page, | |||
669 | return PAGEREF_RECLAIM; | 668 | return PAGEREF_RECLAIM; |
670 | } | 669 | } |
671 | 670 | ||
671 | /* Check if a page is dirty or under writeback */ | ||
672 | static void page_check_dirty_writeback(struct page *page, | ||
673 | bool *dirty, bool *writeback) | ||
674 | { | ||
675 | struct address_space *mapping; | ||
676 | |||
677 | /* | ||
678 | * Anonymous pages are not handled by flushers and must be written | ||
679 | * from reclaim context. Do not stall reclaim based on them | ||
680 | */ | ||
681 | if (!page_is_file_cache(page)) { | ||
682 | *dirty = false; | ||
683 | *writeback = false; | ||
684 | return; | ||
685 | } | ||
686 | |||
687 | /* By default assume that the page flags are accurate */ | ||
688 | *dirty = PageDirty(page); | ||
689 | *writeback = PageWriteback(page); | ||
690 | |||
691 | /* Verify dirty/writeback state if the filesystem supports it */ | ||
692 | if (!page_has_private(page)) | ||
693 | return; | ||
694 | |||
695 | mapping = page_mapping(page); | ||
696 | if (mapping && mapping->a_ops->is_dirty_writeback) | ||
697 | mapping->a_ops->is_dirty_writeback(page, dirty, writeback); | ||
698 | } | ||
699 | |||
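For reference, a hedged sketch of the filesystem side of this hook (examplefs and its private structure are hypothetical; only the callback signature used above comes from the patch): a filesystem that tracks dirty and writeback state in per-page private data can report it here instead of relying on the page flags.

struct examplefs_page_priv {			/* assumed private per-page state */
	unsigned int nr_dirty_blocks;
	unsigned int nr_blocks_under_io;
};

static void examplefs_is_dirty_writeback(struct page *page,
					 bool *dirty, bool *writeback)
{
	struct examplefs_page_priv *priv =
		(struct examplefs_page_priv *)page_private(page);

	*dirty = priv && priv->nr_dirty_blocks;
	*writeback = priv && priv->nr_blocks_under_io;
}

static const struct address_space_operations examplefs_aops = {
	/* ...readpage/writepage and friends... */
	.is_dirty_writeback = examplefs_is_dirty_writeback,
};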
672 | /* | 700 | /* |
673 | * shrink_page_list() returns the number of reclaimed pages | 701 | * shrink_page_list() returns the number of reclaimed pages |
674 | */ | 702 | */ |
@@ -677,16 +705,21 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
677 | struct scan_control *sc, | 705 | struct scan_control *sc, |
678 | enum ttu_flags ttu_flags, | 706 | enum ttu_flags ttu_flags, |
679 | unsigned long *ret_nr_dirty, | 707 | unsigned long *ret_nr_dirty, |
708 | unsigned long *ret_nr_unqueued_dirty, | ||
709 | unsigned long *ret_nr_congested, | ||
680 | unsigned long *ret_nr_writeback, | 710 | unsigned long *ret_nr_writeback, |
711 | unsigned long *ret_nr_immediate, | ||
681 | bool force_reclaim) | 712 | bool force_reclaim) |
682 | { | 713 | { |
683 | LIST_HEAD(ret_pages); | 714 | LIST_HEAD(ret_pages); |
684 | LIST_HEAD(free_pages); | 715 | LIST_HEAD(free_pages); |
685 | int pgactivate = 0; | 716 | int pgactivate = 0; |
717 | unsigned long nr_unqueued_dirty = 0; | ||
686 | unsigned long nr_dirty = 0; | 718 | unsigned long nr_dirty = 0; |
687 | unsigned long nr_congested = 0; | 719 | unsigned long nr_congested = 0; |
688 | unsigned long nr_reclaimed = 0; | 720 | unsigned long nr_reclaimed = 0; |
689 | unsigned long nr_writeback = 0; | 721 | unsigned long nr_writeback = 0; |
722 | unsigned long nr_immediate = 0; | ||
690 | 723 | ||
691 | cond_resched(); | 724 | cond_resched(); |
692 | 725 | ||
@@ -696,6 +729,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
696 | struct page *page; | 729 | struct page *page; |
697 | int may_enter_fs; | 730 | int may_enter_fs; |
698 | enum page_references references = PAGEREF_RECLAIM_CLEAN; | 731 | enum page_references references = PAGEREF_RECLAIM_CLEAN; |
732 | bool dirty, writeback; | ||
699 | 733 | ||
700 | cond_resched(); | 734 | cond_resched(); |
701 | 735 | ||
@@ -723,25 +757,77 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
723 | may_enter_fs = (sc->gfp_mask & __GFP_FS) || | 757 | may_enter_fs = (sc->gfp_mask & __GFP_FS) || |
724 | (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); | 758 | (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); |
725 | 759 | ||
760 | /* | ||
761 | * The number of dirty pages determines if a zone is marked | ||
762 | * reclaim_congested which affects wait_iff_congested. kswapd | ||
763 | * will stall and start writing pages if the tail of the LRU | ||
764 | * is all dirty unqueued pages. | ||
765 | */ | ||
766 | page_check_dirty_writeback(page, &dirty, &writeback); | ||
767 | if (dirty || writeback) | ||
768 | nr_dirty++; | ||
769 | |||
770 | if (dirty && !writeback) | ||
771 | nr_unqueued_dirty++; | ||
772 | |||
773 | /* | ||
774 | * Treat this page as congested if the underlying BDI is or if | ||
775 | * pages are cycling through the LRU so quickly that the | ||
776 | * pages marked for immediate reclaim are making it to the | ||
777 | * end of the LRU a second time. | ||
778 | */ | ||
779 | mapping = page_mapping(page); | ||
780 | if ((mapping && bdi_write_congested(mapping->backing_dev_info)) || | ||
781 | (writeback && PageReclaim(page))) | ||
782 | nr_congested++; | ||
783 | |||
784 | /* | ||
785 | * If a page at the tail of the LRU is under writeback, there | ||
786 | * are three cases to consider. | ||
787 | * | ||
788 | * 1) If reclaim is encountering an excessive number of pages | ||
789 | * under writeback and this page is both under writeback and | ||
790 | * PageReclaim then it indicates that pages are being queued | ||
791 | * for IO but are being recycled through the LRU before the | ||
792 | * IO can complete. Waiting on the page itself risks an | ||
793 | * indefinite stall if it is impossible to writeback the | ||
794 | * page due to IO error or disconnected storage so instead | ||
795 | * note that the LRU is being scanned too quickly and the | ||
796 | * caller can stall after page list has been processed. | ||
797 | * | ||
798 | * 2) Global reclaim encounters a page, memcg encounters a | ||
799 | * page that is not marked for immediate reclaim or | ||
800 | * the caller does not have __GFP_IO. In this case mark | ||
801 | * the page for immediate reclaim and continue scanning. | ||
802 | * | ||
803 | * __GFP_IO is checked because a loop driver thread might | ||
804 | * enter reclaim, and deadlock if it waits on a page for | ||
805 | * which it is needed to do the write (loop masks off | ||
806 | * __GFP_IO|__GFP_FS for this reason); but more thought | ||
807 | * would probably show more reasons. | ||
808 | * | ||
809 | * Don't require __GFP_FS, since we're not going into the | ||
810 | * FS, just waiting on its writeback completion. Worryingly, | ||
811 | * ext4 gfs2 and xfs allocate pages with | ||
812 | * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing | ||
813 | * may_enter_fs here is liable to OOM on them. | ||
814 | * | ||
815 | * 3) memcg encounters a page that is not already marked | ||
816 | * PageReclaim. memcg does not have any dirty pages | ||
817 | * throttling so we could easily OOM just because too many | ||
818 | * pages are in writeback and there is nothing else to | ||
819 | * reclaim. Wait for the writeback to complete. | ||
820 | */ | ||
726 | if (PageWriteback(page)) { | 821 | if (PageWriteback(page)) { |
727 | /* | 822 | /* Case 1 above */ |
728 | * memcg doesn't have any dirty pages throttling so we | 823 | if (current_is_kswapd() && |
729 | * could easily OOM just because too many pages are in | 824 | PageReclaim(page) && |
730 | * writeback and there is nothing else to reclaim. | 825 | zone_is_reclaim_writeback(zone)) { |
731 | * | 826 | nr_immediate++; |
732 | * Check __GFP_IO, certainly because a loop driver | 827 | goto keep_locked; |
733 | * thread might enter reclaim, and deadlock if it waits | 828 | |
734 | * on a page for which it is needed to do the write | 829 | /* Case 2 above */ |
735 | * (loop masks off __GFP_IO|__GFP_FS for this reason); | 830 | } else if (global_reclaim(sc) || |
736 | * but more thought would probably show more reasons. | ||
737 | * | ||
738 | * Don't require __GFP_FS, since we're not going into | ||
739 | * the FS, just waiting on its writeback completion. | ||
740 | * Worryingly, ext4 gfs2 and xfs allocate pages with | ||
741 | * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so | ||
742 | * testing may_enter_fs here is liable to OOM on them. | ||
743 | */ | ||
744 | if (global_reclaim(sc) || | ||
745 | !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { | 831 | !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { |
746 | /* | 832 | /* |
747 | * This is slightly racy - end_page_writeback() | 833 | * This is slightly racy - end_page_writeback() |
@@ -756,9 +842,13 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
756 | */ | 842 | */ |
757 | SetPageReclaim(page); | 843 | SetPageReclaim(page); |
758 | nr_writeback++; | 844 | nr_writeback++; |
845 | |||
759 | goto keep_locked; | 846 | goto keep_locked; |
847 | |||
848 | /* Case 3 above */ | ||
849 | } else { | ||
850 | wait_on_page_writeback(page); | ||
760 | } | 851 | } |
761 | wait_on_page_writeback(page); | ||
762 | } | 852 | } |
763 | 853 | ||
764 | if (!force_reclaim) | 854 | if (!force_reclaim) |
@@ -784,9 +874,10 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
784 | if (!add_to_swap(page, page_list)) | 874 | if (!add_to_swap(page, page_list)) |
785 | goto activate_locked; | 875 | goto activate_locked; |
786 | may_enter_fs = 1; | 876 | may_enter_fs = 1; |
787 | } | ||
788 | 877 | ||
789 | mapping = page_mapping(page); | 878 | /* Adding to swap updated mapping */ |
879 | mapping = page_mapping(page); | ||
880 | } | ||
790 | 881 | ||
791 | /* | 882 | /* |
792 | * The page is mapped into the page tables of one or more | 883 | * The page is mapped into the page tables of one or more |
@@ -806,16 +897,14 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
806 | } | 897 | } |
807 | 898 | ||
808 | if (PageDirty(page)) { | 899 | if (PageDirty(page)) { |
809 | nr_dirty++; | ||
810 | |||
811 | /* | 900 | /* |
812 | * Only kswapd can writeback filesystem pages to | 901 | * Only kswapd can writeback filesystem pages to |
813 | * avoid risk of stack overflow but do not writeback | 902 | * avoid risk of stack overflow but only writeback |
814 | * unless under significant pressure. | 903 | * if many dirty pages have been encountered. |
815 | */ | 904 | */ |
816 | if (page_is_file_cache(page) && | 905 | if (page_is_file_cache(page) && |
817 | (!current_is_kswapd() || | 906 | (!current_is_kswapd() || |
818 | sc->priority >= DEF_PRIORITY - 2)) { | 907 | !zone_is_reclaim_dirty(zone))) { |
819 | /* | 908 | /* |
820 | * Immediately reclaim when written back. | 909 | * Immediately reclaim when written back. |
821 | * Similar in principle to deactivate_page() | 910 | * Similar in principle to deactivate_page() |
@@ -838,7 +927,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
838 | /* Page is dirty, try to write it out here */ | 927 | /* Page is dirty, try to write it out here */ |
839 | switch (pageout(page, mapping, sc)) { | 928 | switch (pageout(page, mapping, sc)) { |
840 | case PAGE_KEEP: | 929 | case PAGE_KEEP: |
841 | nr_congested++; | ||
842 | goto keep_locked; | 930 | goto keep_locked; |
843 | case PAGE_ACTIVATE: | 931 | case PAGE_ACTIVATE: |
844 | goto activate_locked; | 932 | goto activate_locked; |
@@ -946,22 +1034,16 @@ keep: | |||
946 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); | 1034 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); |
947 | } | 1035 | } |
948 | 1036 | ||
949 | /* | ||
950 | * Tag a zone as congested if all the dirty pages encountered were | ||
951 | * backed by a congested BDI. In this case, reclaimers should just | ||
952 | * back off and wait for congestion to clear because further reclaim | ||
953 | * will encounter the same problem | ||
954 | */ | ||
955 | if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc)) | ||
956 | zone_set_flag(zone, ZONE_CONGESTED); | ||
957 | |||
958 | free_hot_cold_page_list(&free_pages, 1); | 1037 | free_hot_cold_page_list(&free_pages, 1); |
959 | 1038 | ||
960 | list_splice(&ret_pages, page_list); | 1039 | list_splice(&ret_pages, page_list); |
961 | count_vm_events(PGACTIVATE, pgactivate); | 1040 | count_vm_events(PGACTIVATE, pgactivate); |
962 | mem_cgroup_uncharge_end(); | 1041 | mem_cgroup_uncharge_end(); |
963 | *ret_nr_dirty += nr_dirty; | 1042 | *ret_nr_dirty += nr_dirty; |
1043 | *ret_nr_congested += nr_congested; | ||
1044 | *ret_nr_unqueued_dirty += nr_unqueued_dirty; | ||
964 | *ret_nr_writeback += nr_writeback; | 1045 | *ret_nr_writeback += nr_writeback; |
1046 | *ret_nr_immediate += nr_immediate; | ||
965 | return nr_reclaimed; | 1047 | return nr_reclaimed; |
966 | } | 1048 | } |
967 | 1049 | ||
@@ -973,7 +1055,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, | |||
973 | .priority = DEF_PRIORITY, | 1055 | .priority = DEF_PRIORITY, |
974 | .may_unmap = 1, | 1056 | .may_unmap = 1, |
975 | }; | 1057 | }; |
976 | unsigned long ret, dummy1, dummy2; | 1058 | unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5; |
977 | struct page *page, *next; | 1059 | struct page *page, *next; |
978 | LIST_HEAD(clean_pages); | 1060 | LIST_HEAD(clean_pages); |
979 | 1061 | ||
@@ -985,8 +1067,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, | |||
985 | } | 1067 | } |
986 | 1068 | ||
987 | ret = shrink_page_list(&clean_pages, zone, &sc, | 1069 | ret = shrink_page_list(&clean_pages, zone, &sc, |
988 | TTU_UNMAP|TTU_IGNORE_ACCESS, | 1070 | TTU_UNMAP|TTU_IGNORE_ACCESS, |
989 | &dummy1, &dummy2, true); | 1071 | &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true); |
990 | list_splice(&clean_pages, page_list); | 1072 | list_splice(&clean_pages, page_list); |
991 | __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); | 1073 | __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); |
992 | return ret; | 1074 | return ret; |
@@ -1281,7 +1363,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1281 | unsigned long nr_reclaimed = 0; | 1363 | unsigned long nr_reclaimed = 0; |
1282 | unsigned long nr_taken; | 1364 | unsigned long nr_taken; |
1283 | unsigned long nr_dirty = 0; | 1365 | unsigned long nr_dirty = 0; |
1366 | unsigned long nr_congested = 0; | ||
1367 | unsigned long nr_unqueued_dirty = 0; | ||
1284 | unsigned long nr_writeback = 0; | 1368 | unsigned long nr_writeback = 0; |
1369 | unsigned long nr_immediate = 0; | ||
1285 | isolate_mode_t isolate_mode = 0; | 1370 | isolate_mode_t isolate_mode = 0; |
1286 | int file = is_file_lru(lru); | 1371 | int file = is_file_lru(lru); |
1287 | struct zone *zone = lruvec_zone(lruvec); | 1372 | struct zone *zone = lruvec_zone(lruvec); |
@@ -1323,7 +1408,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1323 | return 0; | 1408 | return 0; |
1324 | 1409 | ||
1325 | nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, | 1410 | nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, |
1326 | &nr_dirty, &nr_writeback, false); | 1411 | &nr_dirty, &nr_unqueued_dirty, &nr_congested, |
1412 | &nr_writeback, &nr_immediate, | ||
1413 | false); | ||
1327 | 1414 | ||
1328 | spin_lock_irq(&zone->lru_lock); | 1415 | spin_lock_irq(&zone->lru_lock); |
1329 | 1416 | ||
@@ -1357,7 +1444,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1357 | * same way balance_dirty_pages() manages. | 1444 | * same way balance_dirty_pages() manages. |
1358 | * | 1445 | * |
1359 | * This scales the number of dirty pages that must be under writeback | 1446 | * This scales the number of dirty pages that must be under writeback |
1360 | * before throttling depending on priority. It is a simple backoff | 1447 | * before a zone gets flagged ZONE_WRITEBACK. It is a simple backoff |
1361 | * function that has the most effect in the range DEF_PRIORITY to | 1448 | * function that has the most effect in the range DEF_PRIORITY to |
1362 | * DEF_PRIORITY-2 which is the priority reclaim is considered to be | 1449 | * DEF_PRIORITY-2 which is the priority reclaim is considered to be |
1363 | * in trouble and reclaim is considered to be in trouble. | 1450 | * in trouble and reclaim is considered to be in trouble. |
@@ -1368,9 +1455,53 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1368 | * ... | 1455 | * ... |
1369 | * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any | 1456 | * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any |
1370 | * isolated page is PageWriteback | 1457 | * isolated page is PageWriteback |
1458 | * | ||
1459 | * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number | ||
1460 | * of pages under writeback flagged for immediate reclaim and stall if any | ||
1461 | * are encountered in the nr_immediate check below. | ||
1371 | */ | 1462 | */ |
1372 | if (nr_writeback && nr_writeback >= | 1463 | if (nr_writeback && nr_writeback >= |
1373 | (nr_taken >> (DEF_PRIORITY - sc->priority))) | 1464 | (nr_taken >> (DEF_PRIORITY - sc->priority))) |
1465 | zone_set_flag(zone, ZONE_WRITEBACK); | ||
1466 | |||
1467 | /* | ||
1468 | * memcg will stall in page writeback so only consider forcibly | ||
1469 | * stalling for global reclaim | ||
1470 | */ | ||
1471 | if (global_reclaim(sc)) { | ||
1472 | /* | ||
1473 | * Tag a zone as congested if all the dirty pages scanned were | ||
1474 | * backed by a congested BDI and wait_iff_congested will stall. | ||
1475 | */ | ||
1476 | if (nr_dirty && nr_dirty == nr_congested) | ||
1477 | zone_set_flag(zone, ZONE_CONGESTED); | ||
1478 | |||
1479 | /* | ||
1480 | * If dirty pages are scanned that are not queued for IO, it | ||
1481 | * implies that flushers are not keeping up. In this case, flag | ||
1482 | * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing | ||
1483 | * pages from reclaim context. It will forcibly stall in the | ||
1484 | * next check. | ||
1485 | */ | ||
1486 | if (nr_unqueued_dirty == nr_taken) | ||
1487 | zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY); | ||
1488 | |||
1489 | /* | ||
1490 | * In addition, if kswapd scans pages marked for | ||
1491 | * immediate reclaim and under writeback (nr_immediate), it | ||
1492 | * implies that pages are cycling through the LRU faster than | ||
1493 | * they are written so also forcibly stall. | ||
1494 | */ | ||
1495 | if (nr_unqueued_dirty == nr_taken || nr_immediate) | ||
1496 | congestion_wait(BLK_RW_ASYNC, HZ/10); | ||
1497 | } | ||
1498 | |||
1499 | /* | ||
1500 | * Stall direct reclaim for IO completions if underlying BDIs or zone | ||
1501 | * is congested. Allow kswapd to continue until it starts encountering | ||
1502 | * unqueued dirty pages or cycling through the LRU too quickly. | ||
1503 | */ | ||
1504 | if (!sc->hibernation_mode && !current_is_kswapd()) | ||
1374 | wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); | 1505 | wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); |
1375 | 1506 | ||
1376 | trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, | 1507 | trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, |
@@ -1822,17 +1953,25 @@ out: | |||
1822 | static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) | 1953 | static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) |
1823 | { | 1954 | { |
1824 | unsigned long nr[NR_LRU_LISTS]; | 1955 | unsigned long nr[NR_LRU_LISTS]; |
1956 | unsigned long targets[NR_LRU_LISTS]; | ||
1825 | unsigned long nr_to_scan; | 1957 | unsigned long nr_to_scan; |
1826 | enum lru_list lru; | 1958 | enum lru_list lru; |
1827 | unsigned long nr_reclaimed = 0; | 1959 | unsigned long nr_reclaimed = 0; |
1828 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; | 1960 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; |
1829 | struct blk_plug plug; | 1961 | struct blk_plug plug; |
1962 | bool scan_adjusted = false; | ||
1830 | 1963 | ||
1831 | get_scan_count(lruvec, sc, nr); | 1964 | get_scan_count(lruvec, sc, nr); |
1832 | 1965 | ||
1966 | /* Record the original scan target for proportional adjustments later */ | ||
1967 | memcpy(targets, nr, sizeof(nr)); | ||
1968 | |||
1833 | blk_start_plug(&plug); | 1969 | blk_start_plug(&plug); |
1834 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || | 1970 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || |
1835 | nr[LRU_INACTIVE_FILE]) { | 1971 | nr[LRU_INACTIVE_FILE]) { |
1972 | unsigned long nr_anon, nr_file, percentage; | ||
1973 | unsigned long nr_scanned; | ||
1974 | |||
1836 | for_each_evictable_lru(lru) { | 1975 | for_each_evictable_lru(lru) { |
1837 | if (nr[lru]) { | 1976 | if (nr[lru]) { |
1838 | nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); | 1977 | nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); |
@@ -1842,17 +1981,60 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) | |||
1842 | lruvec, sc); | 1981 | lruvec, sc); |
1843 | } | 1982 | } |
1844 | } | 1983 | } |
1984 | |||
1985 | if (nr_reclaimed < nr_to_reclaim || scan_adjusted) | ||
1986 | continue; | ||
1987 | |||
1845 | /* | 1988 | /* |
1846 | * On large memory systems, scan >> priority can become | 1989 | * For global direct reclaim, reclaim only the number of pages |
1847 | * really large. This is fine for the starting priority; | 1990 | * requested. Less care is taken to scan proportionally as it |
1848 | * we want to put equal scanning pressure on each zone. | 1991 | * is more important to minimise direct reclaim stall latency |
1849 | * However, if the VM has a harder time of freeing pages, | 1992 | * than it is to properly age the LRU lists. |
1850 | * with multiple processes reclaiming pages, the total | ||
1851 | * freeing target can get unreasonably large. | ||
1852 | */ | 1993 | */ |
1853 | if (nr_reclaimed >= nr_to_reclaim && | 1994 | if (global_reclaim(sc) && !current_is_kswapd()) |
1854 | sc->priority < DEF_PRIORITY) | ||
1855 | break; | 1995 | break; |
1996 | |||
1997 | /* | ||
1998 | * For kswapd and memcg, reclaim at least the number of pages | ||
1999 | * requested. Ensure that the anon and file LRUs shrink | ||
2000 | * proportionally what was requested by get_scan_count(). We | ||
2001 | * stop reclaiming one LRU and reduce the amount scanning | ||
2002 | * proportional to the original scan target. | ||
2003 | */ | ||
2004 | nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; | ||
2005 | nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; | ||
2006 | |||
2007 | if (nr_file > nr_anon) { | ||
2008 | unsigned long scan_target = targets[LRU_INACTIVE_ANON] + | ||
2009 | targets[LRU_ACTIVE_ANON] + 1; | ||
2010 | lru = LRU_BASE; | ||
2011 | percentage = nr_anon * 100 / scan_target; | ||
2012 | } else { | ||
2013 | unsigned long scan_target = targets[LRU_INACTIVE_FILE] + | ||
2014 | targets[LRU_ACTIVE_FILE] + 1; | ||
2015 | lru = LRU_FILE; | ||
2016 | percentage = nr_file * 100 / scan_target; | ||
2017 | } | ||
2018 | |||
2019 | /* Stop scanning the smaller of the LRU */ | ||
2020 | nr[lru] = 0; | ||
2021 | nr[lru + LRU_ACTIVE] = 0; | ||
2022 | |||
2023 | /* | ||
2024 | * Recalculate the other LRU scan count based on its original | ||
2025 | * scan target and the percentage scanning already complete | ||
2026 | */ | ||
2027 | lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; | ||
2028 | nr_scanned = targets[lru] - nr[lru]; | ||
2029 | nr[lru] = targets[lru] * (100 - percentage) / 100; | ||
2030 | nr[lru] -= min(nr[lru], nr_scanned); | ||
2031 | |||
2032 | lru += LRU_ACTIVE; | ||
2033 | nr_scanned = targets[lru] - nr[lru]; | ||
2034 | nr[lru] = targets[lru] * (100 - percentage) / 100; | ||
2035 | nr[lru] -= min(nr[lru], nr_scanned); | ||
2036 | |||
2037 | scan_adjusted = true; | ||
1856 | } | 2038 | } |
1857 | blk_finish_plug(&plug); | 2039 | blk_finish_plug(&plug); |
1858 | sc->nr_reclaimed += nr_reclaimed; | 2040 | sc->nr_reclaimed += nr_reclaimed; |
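The rescaling above is easiest to follow with assumed numbers (illustration only, not taken from the patch):

/*
 * Say get_scan_count() set targets of 100 anon pages (all inactive) and
 * 400 file pages (all inactive), and nr_to_reclaim is met after one
 * SWAP_CLUSTER_MAX (32 page) pass over each list:
 *
 *   nr_anon = 100 - 32 = 68,  nr_file = 400 - 32 = 368
 *   nr_file > nr_anon, so both anon counts are zeroed and
 *   percentage = 68 * 100 / (100 + 0 + 1) = 67   (67% of anon left unscanned)
 *
 *   file rescale: 400 * (100 - 67) / 100 = 132, minus the 32 already
 *   scanned, leaves 100 more file pages to scan
 *
 * Both LRUs therefore end up scanned to roughly a third of their original
 * targets, and scan_adjusted stops the rescale being applied a second time.
 */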
@@ -2222,17 +2404,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2222 | WB_REASON_TRY_TO_FREE_PAGES); | 2404 | WB_REASON_TRY_TO_FREE_PAGES); |
2223 | sc->may_writepage = 1; | 2405 | sc->may_writepage = 1; |
2224 | } | 2406 | } |
2225 | |||
2226 | /* Take a nap, wait for some writeback to complete */ | ||
2227 | if (!sc->hibernation_mode && sc->nr_scanned && | ||
2228 | sc->priority < DEF_PRIORITY - 2) { | ||
2229 | struct zone *preferred_zone; | ||
2230 | |||
2231 | first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), | ||
2232 | &cpuset_current_mems_allowed, | ||
2233 | &preferred_zone); | ||
2234 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); | ||
2235 | } | ||
2236 | } while (--sc->priority >= 0); | 2407 | } while (--sc->priority >= 0); |
2237 | 2408 | ||
2238 | out: | 2409 | out: |
@@ -2601,6 +2772,91 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, | |||
2601 | } | 2772 | } |
2602 | 2773 | ||
2603 | /* | 2774 | /* |
2775 | * kswapd shrinks the zone by the number of pages required to reach | ||
2776 | * the high watermark. | ||
2777 | * | ||
2778 | * Returns true if kswapd scanned at least the requested number of pages to | ||
2779 | * reclaim or if the lack of progress was due to pages under writeback. | ||
2780 | * This is used to determine if the scanning priority needs to be raised. | ||
2781 | */ | ||
2782 | static bool kswapd_shrink_zone(struct zone *zone, | ||
2783 | int classzone_idx, | ||
2784 | struct scan_control *sc, | ||
2785 | unsigned long lru_pages, | ||
2786 | unsigned long *nr_attempted) | ||
2787 | { | ||
2788 | unsigned long nr_slab; | ||
2789 | int testorder = sc->order; | ||
2790 | unsigned long balance_gap; | ||
2791 | struct reclaim_state *reclaim_state = current->reclaim_state; | ||
2792 | struct shrink_control shrink = { | ||
2793 | .gfp_mask = sc->gfp_mask, | ||
2794 | }; | ||
2795 | bool lowmem_pressure; | ||
2796 | |||
2797 | /* Reclaim above the high watermark. */ | ||
2798 | sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); | ||
2799 | |||
2800 | /* | ||
2801 | * Kswapd reclaims only single pages with compaction enabled. Trying | ||
2802 | * too hard to reclaim until contiguous free pages have become | ||
2803 | * available can hurt performance by evicting too much useful data | ||
2804 | * from memory. Do not reclaim more than needed for compaction. | ||
2805 | */ | ||
2806 | if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && | ||
2807 | compaction_suitable(zone, sc->order) != | ||
2808 | COMPACT_SKIPPED) | ||
2809 | testorder = 0; | ||
2810 | |||
2811 | /* | ||
2812 | * We put equal pressure on every zone, unless one zone has way too | ||
2813 | * many pages free already. The "too many pages" is defined as the | ||
2814 | * high wmark plus a "gap" where the gap is either the low | ||
2815 | * watermark or 1% of the zone, whichever is smaller. | ||
2816 | */ | ||
2817 | balance_gap = min(low_wmark_pages(zone), | ||
2818 | (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / | ||
2819 | KSWAPD_ZONE_BALANCE_GAP_RATIO); | ||
2820 | |||
2821 | /* | ||
2822 | * If there is no low memory pressure or the zone is balanced then no | ||
2823 | * reclaim is necessary | ||
2824 | */ | ||
2825 | lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone)); | ||
2826 | if (!lowmem_pressure && zone_balanced(zone, testorder, | ||
2827 | balance_gap, classzone_idx)) | ||
2828 | return true; | ||
2829 | |||
2830 | shrink_zone(zone, sc); | ||
2831 | |||
2832 | reclaim_state->reclaimed_slab = 0; | ||
2833 | nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages); | ||
2834 | sc->nr_reclaimed += reclaim_state->reclaimed_slab; | ||
2835 | |||
2836 | /* Account for the number of pages attempted to reclaim */ | ||
2837 | *nr_attempted += sc->nr_to_reclaim; | ||
2838 | |||
2839 | if (nr_slab == 0 && !zone_reclaimable(zone)) | ||
2840 | zone->all_unreclaimable = 1; | ||
2841 | |||
2842 | zone_clear_flag(zone, ZONE_WRITEBACK); | ||
2843 | |||
2844 | /* | ||
2845 | * If a zone reaches its high watermark, consider it to be no longer | ||
2846 | * congested. It's possible there are dirty pages backed by congested | ||
2847 | * BDIs but as pressure is relieved, speculatively avoid congestion | ||
2848 | * waits. | ||
2849 | */ | ||
2850 | if (!zone->all_unreclaimable && | ||
2851 | zone_balanced(zone, testorder, 0, classzone_idx)) { | ||
2852 | zone_clear_flag(zone, ZONE_CONGESTED); | ||
2853 | zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); | ||
2854 | } | ||
2855 | |||
2856 | return sc->nr_scanned >= sc->nr_to_reclaim; | ||
2857 | } | ||
2858 | |||
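The balance_gap computed in kswapd_shrink_zone() above is small in practice; a worked example with assumed numbers, taking KSWAPD_ZONE_BALANCE_GAP_RATIO at its value of 100:

/*
 * Zone with managed_pages = 1,000,000 (roughly 4GiB of 4KiB pages):
 *
 *   1% candidate  : (1,000,000 + 100 - 1) / 100 = 10,000 pages (~39MiB)
 *   low watermark : assume low_wmark_pages(zone) = 5,000 pages
 *   balance_gap   : min(5,000, 10,000) = 5,000 pages
 *
 * The zone counts as balanced, and reclaim is skipped, only once free
 * pages clear high_wmark_pages(zone) + balance_gap, while
 * sc->nr_to_reclaim is max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)).
 */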
2859 | /* | ||
2604 | * For kswapd, balance_pgdat() will work across all this node's zones until | 2860 | * For kswapd, balance_pgdat() will work across all this node's zones until |
2605 | * they are all at high_wmark_pages(zone). | 2861 | * they are all at high_wmark_pages(zone). |
2606 | * | 2862 | * |
@@ -2624,35 +2880,28 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, | |||
2624 | static unsigned long balance_pgdat(pg_data_t *pgdat, int order, | 2880 | static unsigned long balance_pgdat(pg_data_t *pgdat, int order, |
2625 | int *classzone_idx) | 2881 | int *classzone_idx) |
2626 | { | 2882 | { |
2627 | bool pgdat_is_balanced = false; | ||
2628 | int i; | 2883 | int i; |
2629 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ | 2884 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ |
2630 | struct reclaim_state *reclaim_state = current->reclaim_state; | ||
2631 | unsigned long nr_soft_reclaimed; | 2885 | unsigned long nr_soft_reclaimed; |
2632 | unsigned long nr_soft_scanned; | 2886 | unsigned long nr_soft_scanned; |
2633 | struct scan_control sc = { | 2887 | struct scan_control sc = { |
2634 | .gfp_mask = GFP_KERNEL, | 2888 | .gfp_mask = GFP_KERNEL, |
2889 | .priority = DEF_PRIORITY, | ||
2635 | .may_unmap = 1, | 2890 | .may_unmap = 1, |
2636 | .may_swap = 1, | 2891 | .may_swap = 1, |
2637 | /* | 2892 | .may_writepage = !laptop_mode, |
2638 | * kswapd doesn't want to be bailed out while reclaim. because | ||
2639 | * we want to put equal scanning pressure on each zone. | ||
2640 | */ | ||
2641 | .nr_to_reclaim = ULONG_MAX, | ||
2642 | .order = order, | 2893 | .order = order, |
2643 | .target_mem_cgroup = NULL, | 2894 | .target_mem_cgroup = NULL, |
2644 | }; | 2895 | }; |
2645 | struct shrink_control shrink = { | ||
2646 | .gfp_mask = sc.gfp_mask, | ||
2647 | }; | ||
2648 | loop_again: | ||
2649 | sc.priority = DEF_PRIORITY; | ||
2650 | sc.nr_reclaimed = 0; | ||
2651 | sc.may_writepage = !laptop_mode; | ||
2652 | count_vm_event(PAGEOUTRUN); | 2896 | count_vm_event(PAGEOUTRUN); |
2653 | 2897 | ||
2654 | do { | 2898 | do { |
2655 | unsigned long lru_pages = 0; | 2899 | unsigned long lru_pages = 0; |
2900 | unsigned long nr_attempted = 0; | ||
2901 | bool raise_priority = true; | ||
2902 | bool pgdat_needs_compaction = (order > 0); | ||
2903 | |||
2904 | sc.nr_reclaimed = 0; | ||
2656 | 2905 | ||
2657 | /* | 2906 | /* |
2658 | * Scan in the highmem->dma direction for the highest | 2907 | * Scan in the highmem->dma direction for the highest |
@@ -2689,23 +2938,46 @@ loop_again: | |||
2689 | end_zone = i; | 2938 | end_zone = i; |
2690 | break; | 2939 | break; |
2691 | } else { | 2940 | } else { |
2692 | /* If balanced, clear the congested flag */ | 2941 | /* |
2942 | * If balanced, clear the dirty and congested | ||
2943 | * flags | ||
2944 | */ | ||
2693 | zone_clear_flag(zone, ZONE_CONGESTED); | 2945 | zone_clear_flag(zone, ZONE_CONGESTED); |
2946 | zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); | ||
2694 | } | 2947 | } |
2695 | } | 2948 | } |
2696 | 2949 | ||
2697 | if (i < 0) { | 2950 | if (i < 0) |
2698 | pgdat_is_balanced = true; | ||
2699 | goto out; | 2951 | goto out; |
2700 | } | ||
2701 | 2952 | ||
2702 | for (i = 0; i <= end_zone; i++) { | 2953 | for (i = 0; i <= end_zone; i++) { |
2703 | struct zone *zone = pgdat->node_zones + i; | 2954 | struct zone *zone = pgdat->node_zones + i; |
2704 | 2955 | ||
2956 | if (!populated_zone(zone)) | ||
2957 | continue; | ||
2958 | |||
2705 | lru_pages += zone_reclaimable_pages(zone); | 2959 | lru_pages += zone_reclaimable_pages(zone); |
2960 | |||
2961 | /* | ||
2962 | * If any zone is currently balanced then kswapd will | ||
2963 | * not call compaction as it is expected that the | ||
2964 | * necessary pages are already available. | ||
2965 | */ | ||
2966 | if (pgdat_needs_compaction && | ||
2967 | zone_watermark_ok(zone, order, | ||
2968 | low_wmark_pages(zone), | ||
2969 | *classzone_idx, 0)) | ||
2970 | pgdat_needs_compaction = false; | ||
2706 | } | 2971 | } |
2707 | 2972 | ||
2708 | /* | 2973 | /* |
2974 | * If we're getting trouble reclaiming, start doing writepage | ||
2975 | * even in laptop mode. | ||
2976 | */ | ||
2977 | if (sc.priority < DEF_PRIORITY - 2) | ||
2978 | sc.may_writepage = 1; | ||
2979 | |||
2980 | /* | ||
2709 | * Now scan the zone in the dma->highmem direction, stopping | 2981 | * Now scan the zone in the dma->highmem direction, stopping |
2710 | * at the last zone which needs scanning. | 2982 | * at the last zone which needs scanning. |
2711 | * | 2983 | * |
@@ -2716,8 +2988,6 @@ loop_again: | |||
2716 | */ | 2988 | */ |
2717 | for (i = 0; i <= end_zone; i++) { | 2989 | for (i = 0; i <= end_zone; i++) { |
2718 | struct zone *zone = pgdat->node_zones + i; | 2990 | struct zone *zone = pgdat->node_zones + i; |
2719 | int nr_slab, testorder; | ||
2720 | unsigned long balance_gap; | ||
2721 | 2991 | ||
2722 | if (!populated_zone(zone)) | 2992 | if (!populated_zone(zone)) |
2723 | continue; | 2993 | continue; |
@@ -2738,65 +3008,14 @@ loop_again: | |||
2738 | sc.nr_reclaimed += nr_soft_reclaimed; | 3008 | sc.nr_reclaimed += nr_soft_reclaimed; |
2739 | 3009 | ||
2740 | /* | 3010 | /* |
2741 | * We put equal pressure on every zone, unless | 3011 | * There should be no need to raise the scanning |
2742 | * one zone has way too many pages free | 3012 | * priority if enough pages are already being scanned |
2743 | * already. The "too many pages" is defined | 3013 | * that the high watermark would be met at 100% |
2744 | * as the high wmark plus a "gap" where the | 3014 | * efficiency. |
2745 | * gap is either the low watermark or 1% | ||
2746 | * of the zone, whichever is smaller. | ||
2747 | */ | 3015 | */ |
2748 | balance_gap = min(low_wmark_pages(zone), | 3016 | if (kswapd_shrink_zone(zone, end_zone, &sc, |
2749 | (zone->managed_pages + | 3017 | lru_pages, &nr_attempted)) |
2750 | KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / | 3018 | raise_priority = false; |
2751 | KSWAPD_ZONE_BALANCE_GAP_RATIO); | ||
2752 | /* | ||
2753 | * Kswapd reclaims only single pages with compaction | ||
2754 | * enabled. Trying too hard to reclaim until contiguous | ||
2755 | * free pages have become available can hurt performance | ||
2756 | * by evicting too much useful data from memory. | ||
2757 | * Do not reclaim more than needed for compaction. | ||
2758 | */ | ||
2759 | testorder = order; | ||
2760 | if (IS_ENABLED(CONFIG_COMPACTION) && order && | ||
2761 | compaction_suitable(zone, order) != | ||
2762 | COMPACT_SKIPPED) | ||
2763 | testorder = 0; | ||
2764 | |||
2765 | if ((buffer_heads_over_limit && is_highmem_idx(i)) || | ||
2766 | !zone_balanced(zone, testorder, | ||
2767 | balance_gap, end_zone)) { | ||
2768 | shrink_zone(zone, &sc); | ||
2769 | |||
2770 | reclaim_state->reclaimed_slab = 0; | ||
2771 | nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); | ||
2772 | sc.nr_reclaimed += reclaim_state->reclaimed_slab; | ||
2773 | |||
2774 | if (nr_slab == 0 && !zone_reclaimable(zone)) | ||
2775 | zone->all_unreclaimable = 1; | ||
2776 | } | ||
2777 | |||
2778 | /* | ||
2779 | * If we're getting trouble reclaiming, start doing | ||
2780 | * writepage even in laptop mode. | ||
2781 | */ | ||
2782 | if (sc.priority < DEF_PRIORITY - 2) | ||
2783 | sc.may_writepage = 1; | ||
2784 | |||
2785 | if (zone->all_unreclaimable) { | ||
2786 | if (end_zone && end_zone == i) | ||
2787 | end_zone--; | ||
2788 | continue; | ||
2789 | } | ||
2790 | |||
2791 | if (zone_balanced(zone, testorder, 0, end_zone)) | ||
2792 | /* | ||
2793 | * If a zone reaches its high watermark, | ||
2794 | * consider it to be no longer congested. It's | ||
2795 | * possible there are dirty pages backed by | ||
2796 | * congested BDIs but as pressure is relieved, | ||
2797 | * speculatively avoid congestion waits | ||
2798 | */ | ||
2799 | zone_clear_flag(zone, ZONE_CONGESTED); | ||
2800 | } | 3019 | } |
2801 | 3020 | ||
2802 | /* | 3021 | /* |
@@ -2808,74 +3027,38 @@ loop_again: | |||
2808 | pfmemalloc_watermark_ok(pgdat)) | 3027 | pfmemalloc_watermark_ok(pgdat)) |
2809 | wake_up(&pgdat->pfmemalloc_wait); | 3028 | wake_up(&pgdat->pfmemalloc_wait); |
2810 | 3029 | ||
2811 | if (pgdat_balanced(pgdat, order, *classzone_idx)) { | ||
2812 | pgdat_is_balanced = true; | ||
2813 | break; /* kswapd: all done */ | ||
2814 | } | ||
2815 | |||
2816 | /* | 3030 | /* |
2817 | * We do this so kswapd doesn't build up large priorities for | 3031 | * Fragmentation may mean that the system cannot be rebalanced |
2818 | * example when it is freeing in parallel with allocators. It | 3032 | * for high-order allocations in all zones. If twice the |
2819 | * matches the direct reclaim path behaviour in terms of impact | 3033 | * allocation size has been reclaimed and the zones are still |
2820 | * on zone->*_priority. | 3034 | * not balanced then recheck the watermarks at order-0 to |
3035 | * prevent kswapd reclaiming excessively. Assume that a | ||
3036 | * process that requested a high-order allocation can direct reclaim/compact. | ||
2821 | */ | 3037 | */ |
2822 | if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) | 3038 | if (order && sc.nr_reclaimed >= 2UL << order) |
2823 | break; | 3039 | order = sc.order = 0; |
2824 | } while (--sc.priority >= 0); | ||
2825 | |||
2826 | out: | ||
2827 | if (!pgdat_is_balanced) { | ||
2828 | cond_resched(); | ||
2829 | 3040 | ||
2830 | try_to_freeze(); | 3041 | /* Check if kswapd should be suspending */ |
3042 | if (try_to_freeze() || kthread_should_stop()) | ||
3043 | break; | ||
2831 | 3044 | ||
2832 | /* | 3045 | /* |
2833 | * Fragmentation may mean that the system cannot be | 3046 | * Compact if necessary and kswapd is reclaiming at least the |
2834 | * rebalanced for high-order allocations in all zones. | 3047 | * high watermark number of pages as requested |
2835 | * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, | ||
2836 | * it means the zones have been fully scanned and are still | ||
2837 | * not balanced. For high-order allocations, there is | ||
2838 | * little point trying all over again as kswapd may | ||
2839 | * infinite loop. | ||
2840 | * | ||
2841 | * Instead, recheck all watermarks at order-0 as they | ||
2842 | * are the most important. If watermarks are ok, kswapd will go | ||
2843 | * back to sleep. High-order users can still perform direct | ||
2844 | * reclaim if they wish. | ||
2845 | */ | 3048 | */ |
2846 | if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) | 3049 | if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted) |
2847 | order = sc.order = 0; | ||
2848 | |||
2849 | goto loop_again; | ||
2850 | } | ||
2851 | |||
2852 | /* | ||
2853 | * If kswapd was reclaiming at a higher order, it has the option of | ||
2854 | * sleeping without all zones being balanced. Before it does, it must | ||
2855 | * ensure that the watermarks for order-0 on *all* zones are met and | ||
2856 | * that the congestion flags are cleared. The congestion flag must | ||
2857 | * be cleared as kswapd is the only mechanism that clears the flag | ||
2858 | * and it is potentially going to sleep here. | ||
2859 | */ | ||
2860 | if (order) { | ||
2861 | int zones_need_compaction = 1; | ||
2862 | |||
2863 | for (i = 0; i <= end_zone; i++) { | ||
2864 | struct zone *zone = pgdat->node_zones + i; | ||
2865 | |||
2866 | if (!populated_zone(zone)) | ||
2867 | continue; | ||
2868 | |||
2869 | /* Check if the memory needs to be defragmented. */ | ||
2870 | if (zone_watermark_ok(zone, order, | ||
2871 | low_wmark_pages(zone), *classzone_idx, 0)) | ||
2872 | zones_need_compaction = 0; | ||
2873 | } | ||
2874 | |||
2875 | if (zones_need_compaction) | ||
2876 | compact_pgdat(pgdat, order); | 3050 | compact_pgdat(pgdat, order); |
2877 | } | ||
2878 | 3051 | ||
3052 | /* | ||
3053 | * Raise priority if scanning rate is too low or there was no | ||
3054 | * progress in reclaiming pages | ||
3055 | */ | ||
3056 | if (raise_priority || !sc.nr_reclaimed) | ||
3057 | sc.priority--; | ||
3058 | } while (sc.priority >= 1 && | ||
3059 | !pgdat_balanced(pgdat, order, *classzone_idx)); | ||
3060 | |||
3061 | out: | ||
2879 | /* | 3062 | /* |
2880 | * Return the order we were reclaiming at so prepare_kswapd_sleep() | 3063 | * Return the order we were reclaiming at so prepare_kswapd_sleep() |
2881 | * makes a decision on the order we were last reclaiming at. However, | 3064 | * makes a decision on the order we were last reclaiming at. However, |
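The vmscan.c hunks above replace the old loop_again/goto scheme in balance_pgdat() with a single do/while driven by pgdat_balanced(): kswapd only moves to a more aggressive priority when raise_priority stayed true or nothing was reclaimed, resets order to 0 once twice the requested allocation size has been reclaimed, and calls compact_pgdat() while pgdat_needs_compaction is still set. The userspace-only toy below sketches just that control flow; scan_zones(), node_is_balanced() and the constants are made-up stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12

static unsigned long freed;                    /* stand-in for sc.nr_reclaimed   */

static bool node_is_balanced(void)             /* stand-in for pgdat_balanced()  */
{
	return freed >= 64;
}

static unsigned long scan_zones(int priority)  /* stand-in for the kswapd_shrink_zone() pass */
{
	return 32UL / priority;                /* a lower priority scans a larger window */
}

int main(void)
{
	int priority = DEF_PRIORITY;

	do {
		bool raise_priority = true;
		unsigned long reclaimed = scan_zones(priority);

		freed += reclaimed;
		if (reclaimed)
			raise_priority = false;    /* progress made, keep this priority */

		/* mirrors: if (raise_priority || !sc.nr_reclaimed) sc.priority--; */
		if (raise_priority || !reclaimed)
			priority--;
	} while (priority >= 1 && !node_is_balanced());

	printf("stopped at priority %d after freeing %lu pages\n", priority, freed);
	return 0;
}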
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ace5e55fe5a3..db7de80b88a2 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -2211,16 +2211,15 @@ int hci_register_dev(struct hci_dev *hdev) | |||
2211 | list_add(&hdev->list, &hci_dev_list); | 2211 | list_add(&hdev->list, &hci_dev_list); |
2212 | write_unlock(&hci_dev_list_lock); | 2212 | write_unlock(&hci_dev_list_lock); |
2213 | 2213 | ||
2214 | hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | | 2214 | hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | |
2215 | WQ_MEM_RECLAIM, 1); | 2215 | WQ_MEM_RECLAIM, 1, hdev->name); |
2216 | if (!hdev->workqueue) { | 2216 | if (!hdev->workqueue) { |
2217 | error = -ENOMEM; | 2217 | error = -ENOMEM; |
2218 | goto err; | 2218 | goto err; |
2219 | } | 2219 | } |
2220 | 2220 | ||
2221 | hdev->req_workqueue = alloc_workqueue(hdev->name, | 2221 | hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | |
2222 | WQ_HIGHPRI | WQ_UNBOUND | | 2222 | WQ_MEM_RECLAIM, 1, hdev->name); |
2223 | WQ_MEM_RECLAIM, 1); | ||
2224 | if (!hdev->req_workqueue) { | 2223 | if (!hdev->req_workqueue) { |
2225 | destroy_workqueue(hdev->workqueue); | 2224 | destroy_workqueue(hdev->workqueue); |
2226 | error = -ENOMEM; | 2225 | error = -ENOMEM; |
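The hci_core.c change above, like the mac80211 and sunrpc hunks further down, stops passing a caller-controlled name directly as the printf-style format of alloc_workqueue()/kthread_create_on_node() and passes a literal "%s" plus the name as an argument instead: a name containing '%' would otherwise be parsed as conversion specifiers. A small userspace sketch of the same hazard, with a hypothetical device name:

#include <stdio.h>

int main(void)
{
	const char *name = "bt%d";   /* hypothetical device name that happens to contain '%' */
	char buf[32];

	/*
	 * The pattern removed by the hunks above is the equivalent of
	 *
	 *	snprintf(buf, sizeof(buf), name);
	 *
	 * where the "%d" inside the name tries to consume an argument that
	 * was never passed.
	 */

	/* The pattern now used: the name is only ever data for "%s". */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("%s\n", buf);
	return 0;
}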
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 7e06641e36ae..cec539458307 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -93,7 +93,7 @@ void inet_frags_init(struct inet_frags *f) | |||
93 | } | 93 | } |
94 | rwlock_init(&f->lock); | 94 | rwlock_init(&f->lock); |
95 | 95 | ||
96 | f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ | 96 | f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^ |
97 | (jiffies ^ (jiffies >> 6))); | 97 | (jiffies ^ (jiffies >> 6))); |
98 | 98 | ||
99 | setup_timer(&f->secret_timer, inet_frag_secret_rebuild, | 99 | setup_timer(&f->secret_timer, inet_frag_secret_rebuild, |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8a7bfc47d577..8eae74ac4e1e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -921,7 +921,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
921 | hw->queues = IEEE80211_MAX_QUEUES; | 921 | hw->queues = IEEE80211_MAX_QUEUES; |
922 | 922 | ||
923 | local->workqueue = | 923 | local->workqueue = |
924 | alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0); | 924 | alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); |
925 | if (!local->workqueue) { | 925 | if (!local->workqueue) { |
926 | result = -ENOMEM; | 926 | result = -ENOMEM; |
927 | goto fail_workqueue; | 927 | goto fail_workqueue; |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 89a588b4478b..b974571126fe 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -740,7 +740,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) | |||
740 | 740 | ||
741 | __module_get(serv->sv_module); | 741 | __module_get(serv->sv_module); |
742 | task = kthread_create_on_node(serv->sv_function, rqstp, | 742 | task = kthread_create_on_node(serv->sv_function, rqstp, |
743 | node, serv->sv_name); | 743 | node, "%s", serv->sv_name); |
744 | if (IS_ERR(task)) { | 744 | if (IS_ERR(task)) { |
745 | error = PTR_ERR(task); | 745 | error = PTR_ERR(task); |
746 | module_put(serv->sv_module); | 746 | module_put(serv->sv_module); |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b954de58304f..6afcd1239ca5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -27,9 +27,11 @@ my $summary = 1; | |||
27 | my $mailback = 0; | 27 | my $mailback = 0; |
28 | my $summary_file = 0; | 28 | my $summary_file = 0; |
29 | my $show_types = 0; | 29 | my $show_types = 0; |
30 | my $fix = 0; | ||
30 | my $root; | 31 | my $root; |
31 | my %debug; | 32 | my %debug; |
32 | my %ignore_type = (); | 33 | my %ignore_type = (); |
34 | my %camelcase = (); | ||
33 | my @ignore = (); | 35 | my @ignore = (); |
34 | my $help = 0; | 36 | my $help = 0; |
35 | my $configuration_file = ".checkpatch.conf"; | 37 | my $configuration_file = ".checkpatch.conf"; |
@@ -63,6 +65,11 @@ Options: | |||
63 | is all off) | 65 | is all off) |
64 | --test-only=WORD report only warnings/errors containing WORD | 66 | --test-only=WORD report only warnings/errors containing WORD |
65 | literally | 67 | literally |
68 | --fix EXPERIMENTAL - may create horrible results | ||
69 | If correctable single-line errors exist, create | ||
70 | "<inputfile>.EXPERIMENTAL-checkpatch-fixes" | ||
71 | with potential errors corrected to the preferred | ||
72 | checkpatch style | ||
66 | -h, --help, --version display this help and exit | 73 | -h, --help, --version display this help and exit |
67 | 74 | ||
68 | When FILE is - read standard input. | 75 | When FILE is - read standard input. |
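The new --fix mode never rewrites the input; it emits a "<inputfile>.EXPERIMENTAL-checkpatch-fixes" copy containing only the single-line problems it knows how to correct. A hypothetical before/after pair, limited to fix-ups that are wired in later in this patch (C99 comments, pointer placement, stray space before a semicolon):

/* hypothetical fragment fed to checkpatch --fix */
static int demo_before(void)
{
	int x = 1;              // C99 comment flagged by C99_COMMENTS
	char* name = "demo" ;   /* pointer glued to the type, space before ';' */
	return x && name[0];
}

/* roughly what the EXPERIMENTAL-checkpatch-fixes copy would contain */
static int demo_after(void)
{
	int x = 1;              /* C99 comment flagged by C99_COMMENTS */
	char *name = "demo";    /* pointer moved, stray space dropped */
	return x && name[0];
}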
@@ -114,7 +121,7 @@ GetOptions( | |||
114 | 'summary!' => \$summary, | 121 | 'summary!' => \$summary, |
115 | 'mailback!' => \$mailback, | 122 | 'mailback!' => \$mailback, |
116 | 'summary-file!' => \$summary_file, | 123 | 'summary-file!' => \$summary_file, |
117 | 124 | 'fix!' => \$fix, | |
118 | 'debug=s' => \%debug, | 125 | 'debug=s' => \%debug, |
119 | 'test-only=s' => \$tst_only, | 126 | 'test-only=s' => \$tst_only, |
120 | 'h|help' => \$help, | 127 | 'h|help' => \$help, |
@@ -230,17 +237,22 @@ our $Inline = qr{inline|__always_inline|noinline}; | |||
230 | our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; | 237 | our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; |
231 | our $Lval = qr{$Ident(?:$Member)*}; | 238 | our $Lval = qr{$Ident(?:$Member)*}; |
232 | 239 | ||
240 | our $Int_type = qr{(?i)llu|ull|ll|lu|ul|l|u}; | ||
241 | our $Binary = qr{(?i)0b[01]+$Int_type?}; | ||
242 | our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?}; | ||
243 | our $Int = qr{[0-9]+$Int_type?}; | ||
233 | our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; | 244 | our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; |
234 | our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; | 245 | our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; |
235 | our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; | 246 | our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; |
236 | our $Float = qr{$Float_hex|$Float_dec|$Float_int}; | 247 | our $Float = qr{$Float_hex|$Float_dec|$Float_int}; |
237 | our $Constant = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; | 248 | our $Constant = qr{$Float|$Binary|$Hex|$Int}; |
238 | our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; | 249 | our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; |
239 | our $Compare = qr{<=|>=|==|!=|<|>}; | 250 | our $Compare = qr{<=|>=|==|!=|<|>}; |
251 | our $Arithmetic = qr{\+|-|\*|\/|%}; | ||
240 | our $Operators = qr{ | 252 | our $Operators = qr{ |
241 | <=|>=|==|!=| | 253 | <=|>=|==|!=| |
242 | =>|->|<<|>>|<|>|!|~| | 254 | =>|->|<<|>>|<|>|!|~| |
243 | &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|% | 255 | &&|\|\||,|\^|\+\+|--|&|\||$Arithmetic |
244 | }x; | 256 | }x; |
245 | 257 | ||
246 | our $NonptrType; | 258 | our $NonptrType; |
@@ -269,7 +281,7 @@ our $typeTypedefs = qr{(?x: | |||
269 | 281 | ||
270 | our $logFunctions = qr{(?x: | 282 | our $logFunctions = qr{(?x: |
271 | printk(?:_ratelimited|_once|)| | 283 | printk(?:_ratelimited|_once|)| |
272 | [a-z0-9]+_(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)| | 284 | (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)| |
273 | WARN(?:_RATELIMIT|_ONCE|)| | 285 | WARN(?:_RATELIMIT|_ONCE|)| |
274 | panic| | 286 | panic| |
275 | MODULE_[A-Z_]+ | 287 | MODULE_[A-Z_]+ |
@@ -338,7 +350,6 @@ sub build_types { | |||
338 | } | 350 | } |
339 | build_types(); | 351 | build_types(); |
340 | 352 | ||
341 | |||
342 | our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*}; | 353 | our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*}; |
343 | 354 | ||
344 | # Using $balanced_parens, $LvalOrFunc, or $FuncArg | 355 | # Using $balanced_parens, $LvalOrFunc, or $FuncArg |
@@ -358,10 +369,79 @@ sub deparenthesize { | |||
358 | return $string; | 369 | return $string; |
359 | } | 370 | } |
360 | 371 | ||
372 | sub seed_camelcase_file { | ||
373 | my ($file) = @_; | ||
374 | |||
375 | return if (!(-f $file)); | ||
376 | |||
377 | local $/; | ||
378 | |||
379 | open(my $include_file, '<', "$file") | ||
380 | or warn "$P: Can't read '$file' $!\n"; | ||
381 | my $text = <$include_file>; | ||
382 | close($include_file); | ||
383 | |||
384 | my @lines = split('\n', $text); | ||
385 | |||
386 | foreach my $line (@lines) { | ||
387 | next if ($line !~ /(?:[A-Z][a-z]|[a-z][A-Z])/); | ||
388 | if ($line =~ /^[ \t]*(?:#[ \t]*define|typedef\s+$Type)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)/) { | ||
389 | $camelcase{$1} = 1; | ||
390 | } | ||
391 | elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*\(/) { | ||
392 | $camelcase{$1} = 1; | ||
393 | } | ||
394 | } | ||
395 | } | ||
396 | |||
397 | my $camelcase_seeded = 0; | ||
398 | sub seed_camelcase_includes { | ||
399 | return if ($camelcase_seeded); | ||
400 | |||
401 | my $files; | ||
402 | my $camelcase_git_file = ""; | ||
403 | |||
404 | if (-d ".git") { | ||
405 | my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`; | ||
406 | chomp $git_last_include_commit; | ||
407 | $camelcase_git_file = ".checkpatch-camelcase.$git_last_include_commit"; | ||
408 | if (-f $camelcase_git_file) { | ||
409 | open(my $camelcase_file, '<', "$camelcase_git_file") | ||
410 | or warn "$P: Can't read '$camelcase_git_file' $!\n"; | ||
411 | while (<$camelcase_file>) { | ||
412 | chomp; | ||
413 | $camelcase{$_} = 1; | ||
414 | } | ||
415 | close($camelcase_file); | ||
416 | |||
417 | return; | ||
418 | } | ||
419 | $files = `git ls-files include`; | ||
420 | } else { | ||
421 | $files = `find $root/include -name "*.h"`; | ||
422 | } | ||
423 | my @include_files = split('\n', $files); | ||
424 | foreach my $file (@include_files) { | ||
425 | seed_camelcase_file($file); | ||
426 | } | ||
427 | $camelcase_seeded = 1; | ||
428 | |||
429 | if ($camelcase_git_file ne "") { | ||
430 | unlink glob ".checkpatch-camelcase.*"; | ||
431 | open(my $camelcase_file, '>', "$camelcase_git_file") | ||
432 | or warn "$P: Can't write '$camelcase_git_file' $!\n"; | ||
433 | foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) { | ||
434 | print $camelcase_file ("$_\n"); | ||
435 | } | ||
436 | close($camelcase_file); | ||
437 | } | ||
438 | } | ||
439 | |||
361 | $chk_signoff = 0 if ($file); | 440 | $chk_signoff = 0 if ($file); |
362 | 441 | ||
363 | my @rawlines = (); | 442 | my @rawlines = (); |
364 | my @lines = (); | 443 | my @lines = (); |
444 | my @fixed = (); | ||
365 | my $vname; | 445 | my $vname; |
366 | for my $filename (@ARGV) { | 446 | for my $filename (@ARGV) { |
367 | my $FILE; | 447 | my $FILE; |
@@ -389,6 +469,7 @@ for my $filename (@ARGV) { | |||
389 | } | 469 | } |
390 | @rawlines = (); | 470 | @rawlines = (); |
391 | @lines = (); | 471 | @lines = (); |
472 | @fixed = (); | ||
392 | } | 473 | } |
393 | 474 | ||
394 | exit($exit); | 475 | exit($exit); |
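seed_camelcase_includes() above pre-loads %camelcase with identifiers already exported by the tree's headers (via git ls-files include when a .git directory exists, falling back to find), caching the result in a .checkpatch-camelcase.<commit> file so established names are not re-reported. The hypothetical header lines below show the three shapes the seeding regexes match; every identifier is invented for illustration:

/* hypothetical include/linux/example.h fragment */
struct widget;

#define MaxRetries              8               /* picked up by the '#define' rule        */
typedef unsigned long           SectorCount;    /* picked up by the 'typedef $Type' rule  */
extern int RegisterWidget(struct widget *w);    /* picked up by the '$Declare name(' rule */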
@@ -429,7 +510,7 @@ sub parse_email { | |||
429 | $comment = $2 if defined $2; | 510 | $comment = $2 if defined $2; |
430 | $formatted_email =~ s/$address.*$//; | 511 | $formatted_email =~ s/$address.*$//; |
431 | $name = $formatted_email; | 512 | $name = $formatted_email; |
432 | $name =~ s/^\s+|\s+$//g; | 513 | $name = trim($name); |
433 | $name =~ s/^\"|\"$//g; | 514 | $name =~ s/^\"|\"$//g; |
434 | # If there's a name left after stripping spaces and | 515 | # If there's a name left after stripping spaces and |
435 | # leading quotes, and the address doesn't have both | 516 | # leading quotes, and the address doesn't have both |
@@ -444,9 +525,9 @@ sub parse_email { | |||
444 | } | 525 | } |
445 | } | 526 | } |
446 | 527 | ||
447 | $name =~ s/^\s+|\s+$//g; | 528 | $name = trim($name); |
448 | $name =~ s/^\"|\"$//g; | 529 | $name =~ s/^\"|\"$//g; |
449 | $address =~ s/^\s+|\s+$//g; | 530 | $address = trim($address); |
450 | $address =~ s/^\<|\>$//g; | 531 | $address =~ s/^\<|\>$//g; |
451 | 532 | ||
452 | if ($name =~ /[^\w \-]/i) { ##has "must quote" chars | 533 | if ($name =~ /[^\w \-]/i) { ##has "must quote" chars |
@@ -462,9 +543,9 @@ sub format_email { | |||
462 | 543 | ||
463 | my $formatted_email; | 544 | my $formatted_email; |
464 | 545 | ||
465 | $name =~ s/^\s+|\s+$//g; | 546 | $name = trim($name); |
466 | $name =~ s/^\"|\"$//g; | 547 | $name =~ s/^\"|\"$//g; |
467 | $address =~ s/^\s+|\s+$//g; | 548 | $address = trim($address); |
468 | 549 | ||
469 | if ($name =~ /[^\w \-]/i) { ##has "must quote" chars | 550 | if ($name =~ /[^\w \-]/i) { ##has "must quote" chars |
470 | $name =~ s/(?<!\\)"/\\"/g; ##escape quotes | 551 | $name =~ s/(?<!\\)"/\\"/g; ##escape quotes |
@@ -1286,19 +1367,25 @@ sub ERROR { | |||
1286 | if (report("ERROR", $_[0], $_[1])) { | 1367 | if (report("ERROR", $_[0], $_[1])) { |
1287 | our $clean = 0; | 1368 | our $clean = 0; |
1288 | our $cnt_error++; | 1369 | our $cnt_error++; |
1370 | return 1; | ||
1289 | } | 1371 | } |
1372 | return 0; | ||
1290 | } | 1373 | } |
1291 | sub WARN { | 1374 | sub WARN { |
1292 | if (report("WARNING", $_[0], $_[1])) { | 1375 | if (report("WARNING", $_[0], $_[1])) { |
1293 | our $clean = 0; | 1376 | our $clean = 0; |
1294 | our $cnt_warn++; | 1377 | our $cnt_warn++; |
1378 | return 1; | ||
1295 | } | 1379 | } |
1380 | return 0; | ||
1296 | } | 1381 | } |
1297 | sub CHK { | 1382 | sub CHK { |
1298 | if ($check && report("CHECK", $_[0], $_[1])) { | 1383 | if ($check && report("CHECK", $_[0], $_[1])) { |
1299 | our $clean = 0; | 1384 | our $clean = 0; |
1300 | our $cnt_chk++; | 1385 | our $cnt_chk++; |
1386 | return 1; | ||
1301 | } | 1387 | } |
1388 | return 0; | ||
1302 | } | 1389 | } |
1303 | 1390 | ||
1304 | sub check_absolute_file { | 1391 | sub check_absolute_file { |
@@ -1329,6 +1416,29 @@ sub check_absolute_file { | |||
1329 | } | 1416 | } |
1330 | } | 1417 | } |
1331 | 1418 | ||
1419 | sub trim { | ||
1420 | my ($string) = @_; | ||
1421 | |||
1422 | $string =~ s/(^\s+|\s+$)//g; | ||
1423 | |||
1424 | return $string; | ||
1425 | } | ||
1426 | |||
1427 | sub tabify { | ||
1428 | my ($leading) = @_; | ||
1429 | |||
1430 | my $source_indent = 8; | ||
1431 | my $max_spaces_before_tab = $source_indent - 1; | ||
1432 | my $spaces_to_tab = " " x $source_indent; | ||
1433 | |||
1434 | #convert leading spaces to tabs | ||
1435 | 1 while $leading =~ s@^([\t]*)$spaces_to_tab@$1\t@g; | ||
1436 | #Remove spaces before a tab | ||
1437 | 1 while $leading =~ s@^([\t]*)( {1,$max_spaces_before_tab})\t@$1\t@g; | ||
1438 | |||
1439 | return "$leading"; | ||
1440 | } | ||
1441 | |||
1332 | sub pos_last_openparen { | 1442 | sub pos_last_openparen { |
1333 | my ($line) = @_; | 1443 | my ($line) = @_; |
1334 | 1444 | ||
@@ -1406,7 +1516,6 @@ sub process { | |||
1406 | my %suppress_export; | 1516 | my %suppress_export; |
1407 | my $suppress_statement = 0; | 1517 | my $suppress_statement = 0; |
1408 | 1518 | ||
1409 | my %camelcase = (); | ||
1410 | 1519 | ||
1411 | # Pre-scan the patch sanitizing the lines. | 1520 | # Pre-scan the patch sanitizing the lines. |
1412 | # Pre-scan the patch looking for any __setup documentation. | 1521 | # Pre-scan the patch looking for any __setup documentation. |
@@ -1420,6 +1529,8 @@ sub process { | |||
1420 | $linenr++; | 1529 | $linenr++; |
1421 | $line = $rawline; | 1530 | $line = $rawline; |
1422 | 1531 | ||
1532 | push(@fixed, $rawline) if ($fix); | ||
1533 | |||
1423 | if ($rawline=~/^\+\+\+\s+(\S+)/) { | 1534 | if ($rawline=~/^\+\+\+\s+(\S+)/) { |
1424 | $setup_docs = 0; | 1535 | $setup_docs = 0; |
1425 | if ($1 =~ m@Documentation/kernel-parameters.txt$@) { | 1536 | if ($1 =~ m@Documentation/kernel-parameters.txt$@) { |
@@ -1611,16 +1722,29 @@ sub process { | |||
1611 | "Non-standard signature: $sign_off\n" . $herecurr); | 1722 | "Non-standard signature: $sign_off\n" . $herecurr); |
1612 | } | 1723 | } |
1613 | if (defined $space_before && $space_before ne "") { | 1724 | if (defined $space_before && $space_before ne "") { |
1614 | WARN("BAD_SIGN_OFF", | 1725 | if (WARN("BAD_SIGN_OFF", |
1615 | "Do not use whitespace before $ucfirst_sign_off\n" . $herecurr); | 1726 | "Do not use whitespace before $ucfirst_sign_off\n" . $herecurr) && |
1727 | $fix) { | ||
1728 | $fixed[$linenr - 1] = | ||
1729 | "$ucfirst_sign_off $email"; | ||
1730 | } | ||
1616 | } | 1731 | } |
1617 | if ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) { | 1732 | if ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) { |
1618 | WARN("BAD_SIGN_OFF", | 1733 | if (WARN("BAD_SIGN_OFF", |
1619 | "'$ucfirst_sign_off' is the preferred signature form\n" . $herecurr); | 1734 | "'$ucfirst_sign_off' is the preferred signature form\n" . $herecurr) && |
1735 | $fix) { | ||
1736 | $fixed[$linenr - 1] = | ||
1737 | "$ucfirst_sign_off $email"; | ||
1738 | } | ||
1739 | |||
1620 | } | 1740 | } |
1621 | if (!defined $space_after || $space_after ne " ") { | 1741 | if (!defined $space_after || $space_after ne " ") { |
1622 | WARN("BAD_SIGN_OFF", | 1742 | if (WARN("BAD_SIGN_OFF", |
1623 | "Use a single space after $ucfirst_sign_off\n" . $herecurr); | 1743 | "Use a single space after $ucfirst_sign_off\n" . $herecurr) && |
1744 | $fix) { | ||
1745 | $fixed[$linenr - 1] = | ||
1746 | "$ucfirst_sign_off $email"; | ||
1747 | } | ||
1624 | } | 1748 | } |
1625 | 1749 | ||
1626 | my ($email_name, $email_address, $comment) = parse_email($email); | 1750 | my ($email_name, $email_address, $comment) = parse_email($email); |
@@ -1710,8 +1834,12 @@ sub process { | |||
1710 | 1834 | ||
1711 | } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) { | 1835 | } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) { |
1712 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 1836 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1713 | ERROR("TRAILING_WHITESPACE", | 1837 | if (ERROR("TRAILING_WHITESPACE", |
1714 | "trailing whitespace\n" . $herevet); | 1838 | "trailing whitespace\n" . $herevet) && |
1839 | $fix) { | ||
1840 | $fixed[$linenr - 1] =~ s/^(\+.*?)\s+$/$1/; | ||
1841 | } | ||
1842 | |||
1715 | $rpt_cleaners = 1; | 1843 | $rpt_cleaners = 1; |
1716 | } | 1844 | } |
1717 | 1845 | ||
@@ -1806,8 +1934,12 @@ sub process { | |||
1806 | 1934 | ||
1807 | # check for spaces before a quoted newline | 1935 | # check for spaces before a quoted newline |
1808 | if ($rawline =~ /^.*\".*\s\\n/) { | 1936 | if ($rawline =~ /^.*\".*\s\\n/) { |
1809 | WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE", | 1937 | if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE", |
1810 | "unnecessary whitespace before a quoted newline\n" . $herecurr); | 1938 | "unnecessary whitespace before a quoted newline\n" . $herecurr) && |
1939 | $fix) { | ||
1940 | $fixed[$linenr - 1] =~ s/^(\+.*\".*)\s+\\n/$1\\n/; | ||
1941 | } | ||
1942 | |||
1811 | } | 1943 | } |
1812 | 1944 | ||
1813 | # check for adding lines without a newline. | 1945 | # check for adding lines without a newline. |
@@ -1838,16 +1970,23 @@ sub process { | |||
1838 | if ($rawline =~ /^\+\s* \t\s*\S/ || | 1970 | if ($rawline =~ /^\+\s* \t\s*\S/ || |
1839 | $rawline =~ /^\+\s* \s*/) { | 1971 | $rawline =~ /^\+\s* \s*/) { |
1840 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 1972 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1841 | ERROR("CODE_INDENT", | ||
1842 | "code indent should use tabs where possible\n" . $herevet); | ||
1843 | $rpt_cleaners = 1; | 1973 | $rpt_cleaners = 1; |
1974 | if (ERROR("CODE_INDENT", | ||
1975 | "code indent should use tabs where possible\n" . $herevet) && | ||
1976 | $fix) { | ||
1977 | $fixed[$linenr - 1] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e; | ||
1978 | } | ||
1844 | } | 1979 | } |
1845 | 1980 | ||
1846 | # check for space before tabs. | 1981 | # check for space before tabs. |
1847 | if ($rawline =~ /^\+/ && $rawline =~ / \t/) { | 1982 | if ($rawline =~ /^\+/ && $rawline =~ / \t/) { |
1848 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 1983 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1849 | WARN("SPACE_BEFORE_TAB", | 1984 | if (WARN("SPACE_BEFORE_TAB", |
1850 | "please, no space before tabs\n" . $herevet); | 1985 | "please, no space before tabs\n" . $herevet) && |
1986 | $fix) { | ||
1987 | $fixed[$linenr - 1] =~ | ||
1988 | s/(^\+.*) +\t/$1\t/; | ||
1989 | } | ||
1851 | } | 1990 | } |
1852 | 1991 | ||
1853 | # check for && or || at the start of a line | 1992 | # check for && or || at the start of a line |
@@ -1875,25 +2014,42 @@ sub process { | |||
1875 | 2014 | ||
1876 | if ($newindent ne $goodtabindent && | 2015 | if ($newindent ne $goodtabindent && |
1877 | $newindent ne $goodspaceindent) { | 2016 | $newindent ne $goodspaceindent) { |
1878 | CHK("PARENTHESIS_ALIGNMENT", | 2017 | |
1879 | "Alignment should match open parenthesis\n" . $hereprev); | 2018 | if (CHK("PARENTHESIS_ALIGNMENT", |
2019 | "Alignment should match open parenthesis\n" . $hereprev) && | ||
2020 | $fix && $line =~ /^\+/) { | ||
2021 | $fixed[$linenr - 1] =~ | ||
2022 | s/^\+[ \t]*/\+$goodtabindent/; | ||
2023 | } | ||
1880 | } | 2024 | } |
1881 | } | 2025 | } |
1882 | } | 2026 | } |
1883 | 2027 | ||
1884 | if ($line =~ /^\+.*\*[ \t]*\)[ \t]+/) { | 2028 | if ($line =~ /^\+.*\*[ \t]*\)[ \t]+(?!$Assignment|$Arithmetic)/) { |
1885 | CHK("SPACING", | 2029 | if (CHK("SPACING", |
1886 | "No space is necessary after a cast\n" . $hereprev); | 2030 | "No space is necessary after a cast\n" . $hereprev) && |
2031 | $fix) { | ||
2032 | $fixed[$linenr - 1] =~ | ||
2033 | s/^(\+.*\*[ \t]*\))[ \t]+/$1/; | ||
2034 | } | ||
1887 | } | 2035 | } |
1888 | 2036 | ||
1889 | if ($realfile =~ m@^(drivers/net/|net/)@ && | 2037 | if ($realfile =~ m@^(drivers/net/|net/)@ && |
1890 | $rawline =~ /^\+[ \t]*\/\*[ \t]*$/ && | 2038 | $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ && |
1891 | $prevrawline =~ /^\+[ \t]*$/) { | 2039 | $rawline =~ /^\+[ \t]*\*/) { |
1892 | WARN("NETWORKING_BLOCK_COMMENT_STYLE", | 2040 | WARN("NETWORKING_BLOCK_COMMENT_STYLE", |
1893 | "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev); | 2041 | "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev); |
1894 | } | 2042 | } |
1895 | 2043 | ||
1896 | if ($realfile =~ m@^(drivers/net/|net/)@ && | 2044 | if ($realfile =~ m@^(drivers/net/|net/)@ && |
2045 | $prevrawline =~ /^\+[ \t]*\/\*/ && #starting /* | ||
2046 | $prevrawline !~ /\*\/[ \t]*$/ && #no trailing */ | ||
2047 | $rawline !~ /^\+[ \t]*\*/) { #no leading * | ||
2048 | WARN("NETWORKING_BLOCK_COMMENT_STYLE", | ||
2049 | "networking block comments start with * on subsequent lines\n" . $hereprev); | ||
2050 | } | ||
2051 | |||
2052 | if ($realfile =~ m@^(drivers/net/|net/)@ && | ||
1897 | $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */ | 2053 | $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */ |
1898 | $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/ | 2054 | $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/ |
1899 | $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/ | 2055 | $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/ |
@@ -1907,10 +2063,13 @@ sub process { | |||
1907 | # 1) within comments | 2063 | # 1) within comments |
1908 | # 2) indented preprocessor commands | 2064 | # 2) indented preprocessor commands |
1909 | # 3) hanging labels | 2065 | # 3) hanging labels |
1910 | if ($rawline =~ /^\+ / && $line !~ /\+ *(?:$;|#|$Ident:)/) { | 2066 | if ($rawline =~ /^\+ / && $line !~ /^\+ *(?:$;|#|$Ident:)/) { |
1911 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 2067 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1912 | WARN("LEADING_SPACE", | 2068 | if (WARN("LEADING_SPACE", |
1913 | "please, no spaces at the start of a line\n" . $herevet); | 2069 | "please, no spaces at the start of a line\n" . $herevet) && |
2070 | $fix) { | ||
2071 | $fixed[$linenr - 1] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e; | ||
2072 | } | ||
1914 | } | 2073 | } |
1915 | 2074 | ||
1916 | # check we are in a valid C source file if not then ignore this hunk | 2075 | # check we are in a valid C source file if not then ignore this hunk |
@@ -2200,7 +2359,7 @@ sub process { | |||
2200 | $prev_values = substr($curr_values, -1); | 2359 | $prev_values = substr($curr_values, -1); |
2201 | 2360 | ||
2202 | #ignore lines not being added | 2361 | #ignore lines not being added |
2203 | if ($line=~/^[^\+]/) {next;} | 2362 | next if ($line =~ /^[^\+]/); |
2204 | 2363 | ||
2205 | # TEST: allow direct testing of the type matcher. | 2364 | # TEST: allow direct testing of the type matcher. |
2206 | if ($dbg_type) { | 2365 | if ($dbg_type) { |
@@ -2251,8 +2410,15 @@ sub process { | |||
2251 | 2410 | ||
2252 | # no C99 // comments | 2411 | # no C99 // comments |
2253 | if ($line =~ m{//}) { | 2412 | if ($line =~ m{//}) { |
2254 | ERROR("C99_COMMENTS", | 2413 | if (ERROR("C99_COMMENTS", |
2255 | "do not use C99 // comments\n" . $herecurr); | 2414 | "do not use C99 // comments\n" . $herecurr) && |
2415 | $fix) { | ||
2416 | my $line = $fixed[$linenr - 1]; | ||
2417 | if ($line =~ /\/\/(.*)$/) { | ||
2418 | my $comment = trim($1); | ||
2419 | $fixed[$linenr - 1] =~ s@\/\/(.*)$@/\* $comment \*/@; | ||
2420 | } | ||
2421 | } | ||
2256 | } | 2422 | } |
2257 | # Remove C99 comments. | 2423 | # Remove C99 comments. |
2258 | $line =~ s@//.*@@; | 2424 | $line =~ s@//.*@@; |
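With --fix, the C99_COMMENTS error above now rewrites the offending added line in the fixes file, trimming the comment text and wrapping it in /* */. A hypothetical example of the transformation:

int retries = 3;        // how many attempts before giving up

/* what the --fix output would carry instead */
int retries_fixed = 3;  /* how many attempts before giving up */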
@@ -2351,7 +2517,7 @@ sub process { | |||
2351 | # (char*[ const]) | 2517 | # (char*[ const]) |
2352 | while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) { | 2518 | while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) { |
2353 | #print "AA<$1>\n"; | 2519 | #print "AA<$1>\n"; |
2354 | my ($from, $to) = ($2, $2); | 2520 | my ($ident, $from, $to) = ($1, $2, $2); |
2355 | 2521 | ||
2356 | # Should start with a space. | 2522 | # Should start with a space. |
2357 | $to =~ s/^(\S)/ $1/; | 2523 | $to =~ s/^(\S)/ $1/; |
@@ -2361,15 +2527,22 @@ sub process { | |||
2361 | while ($to =~ s/\*\s+\*/\*\*/) { | 2527 | while ($to =~ s/\*\s+\*/\*\*/) { |
2362 | } | 2528 | } |
2363 | 2529 | ||
2364 | #print "from<$from> to<$to>\n"; | 2530 | ## print "1: from<$from> to<$to> ident<$ident>\n"; |
2365 | if ($from ne $to) { | 2531 | if ($from ne $to) { |
2366 | ERROR("POINTER_LOCATION", | 2532 | if (ERROR("POINTER_LOCATION", |
2367 | "\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr); | 2533 | "\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr) && |
2534 | $fix) { | ||
2535 | my $sub_from = $ident; | ||
2536 | my $sub_to = $ident; | ||
2537 | $sub_to =~ s/\Q$from\E/$to/; | ||
2538 | $fixed[$linenr - 1] =~ | ||
2539 | s@\Q$sub_from\E@$sub_to@; | ||
2540 | } | ||
2368 | } | 2541 | } |
2369 | } | 2542 | } |
2370 | while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) { | 2543 | while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) { |
2371 | #print "BB<$1>\n"; | 2544 | #print "BB<$1>\n"; |
2372 | my ($from, $to, $ident) = ($2, $2, $3); | 2545 | my ($match, $from, $to, $ident) = ($1, $2, $2, $3); |
2373 | 2546 | ||
2374 | # Should start with a space. | 2547 | # Should start with a space. |
2375 | $to =~ s/^(\S)/ $1/; | 2548 | $to =~ s/^(\S)/ $1/; |
@@ -2381,10 +2554,18 @@ sub process { | |||
2381 | # Modifiers should have spaces. | 2554 | # Modifiers should have spaces. |
2382 | $to =~ s/(\b$Modifier$)/$1 /; | 2555 | $to =~ s/(\b$Modifier$)/$1 /; |
2383 | 2556 | ||
2384 | #print "from<$from> to<$to> ident<$ident>\n"; | 2557 | ## print "2: from<$from> to<$to> ident<$ident>\n"; |
2385 | if ($from ne $to && $ident !~ /^$Modifier$/) { | 2558 | if ($from ne $to && $ident !~ /^$Modifier$/) { |
2386 | ERROR("POINTER_LOCATION", | 2559 | if (ERROR("POINTER_LOCATION", |
2387 | "\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr); | 2560 | "\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr) && |
2561 | $fix) { | ||
2562 | |||
2563 | my $sub_from = $match; | ||
2564 | my $sub_to = $match; | ||
2565 | $sub_to =~ s/\Q$from\E/$to/; | ||
2566 | $fixed[$linenr - 1] =~ | ||
2567 | s@\Q$sub_from\E@$sub_to@; | ||
2568 | } | ||
2388 | } | 2569 | } |
2389 | } | 2570 | } |
2390 | 2571 | ||
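Both POINTER_LOCATION checks can now patch the line when --fix is given, substituting the corrected pointer placement back into the captured text. Hypothetical declarations showing what is reported and the form the substitution produces:

char* badly_placed;     /* reported: "char* badly_placed" should be "char *badly_placed" */
char * also_reported;   /* the detached '*' form is reported the same way */
char *preferred_form;   /* the placement the --fix substitution emits */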
@@ -2470,9 +2651,13 @@ sub process { | |||
2470 | } | 2651 | } |
2471 | 2652 | ||
2472 | # missing space after union, struct or enum definition | 2653 | # missing space after union, struct or enum definition |
2473 | if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) { | 2654 | if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident){1,2}[=\{]/) { |
2474 | WARN("SPACING", | 2655 | if (WARN("SPACING", |
2475 | "missing space after $1 definition\n" . $herecurr); | 2656 | "missing space after $1 definition\n" . $herecurr) && |
2657 | $fix) { | ||
2658 | $fixed[$linenr - 1] =~ | ||
2659 | s/^(.\s*(?:typedef\s+)?(?:enum|union|struct)(?:\s+$Ident){1,2})([=\{])/$1 $2/; | ||
2660 | } | ||
2476 | } | 2661 | } |
2477 | 2662 | ||
2478 | # check for spacing round square brackets; allowed: | 2663 | # check for spacing round square brackets; allowed: |
@@ -2484,8 +2669,12 @@ sub process { | |||
2484 | if ($prefix !~ /$Type\s+$/ && | 2669 | if ($prefix !~ /$Type\s+$/ && |
2485 | ($where != 0 || $prefix !~ /^.\s+$/) && | 2670 | ($where != 0 || $prefix !~ /^.\s+$/) && |
2486 | $prefix !~ /[{,]\s+$/) { | 2671 | $prefix !~ /[{,]\s+$/) { |
2487 | ERROR("BRACKET_SPACE", | 2672 | if (ERROR("BRACKET_SPACE", |
2488 | "space prohibited before open square bracket '['\n" . $herecurr); | 2673 | "space prohibited before open square bracket '['\n" . $herecurr) && |
2674 | $fix) { | ||
2675 | $fixed[$linenr - 1] =~ | ||
2676 | s/^(\+.*?)\s+\[/$1\[/; | ||
2677 | } | ||
2489 | } | 2678 | } |
2490 | } | 2679 | } |
2491 | 2680 | ||
@@ -2502,7 +2691,6 @@ sub process { | |||
2502 | __attribute__|format|__extension__| | 2691 | __attribute__|format|__extension__| |
2503 | asm|__asm__)$/x) | 2692 | asm|__asm__)$/x) |
2504 | { | 2693 | { |
2505 | |||
2506 | # cpp #define statements have non-optional spaces, ie | 2694 | # cpp #define statements have non-optional spaces, ie |
2507 | # if there is a space between the name and the open | 2695 | # if there is a space between the name and the open |
2508 | # parenthesis it is simply not a parameter group. | 2696 | # parenthesis it is simply not a parameter group. |
@@ -2516,19 +2704,20 @@ sub process { | |||
2516 | } elsif ($ctx =~ /$Type$/) { | 2704 | } elsif ($ctx =~ /$Type$/) { |
2517 | 2705 | ||
2518 | } else { | 2706 | } else { |
2519 | WARN("SPACING", | 2707 | if (WARN("SPACING", |
2520 | "space prohibited between function name and open parenthesis '('\n" . $herecurr); | 2708 | "space prohibited between function name and open parenthesis '('\n" . $herecurr) && |
2709 | $fix) { | ||
2710 | $fixed[$linenr - 1] =~ | ||
2711 | s/\b$name\s+\(/$name\(/; | ||
2712 | } | ||
2521 | } | 2713 | } |
2522 | } | 2714 | } |
2523 | 2715 | ||
2524 | # check for whitespace before a non-naked semicolon | ||
2525 | if ($line =~ /^\+.*\S\s+;/) { | ||
2526 | WARN("SPACING", | ||
2527 | "space prohibited before semicolon\n" . $herecurr); | ||
2528 | } | ||
2529 | |||
2530 | # Check operator spacing. | 2716 | # Check operator spacing. |
2531 | if (!($line=~/\#\s*include/)) { | 2717 | if (!($line=~/\#\s*include/)) { |
2718 | my $fixed_line = ""; | ||
2719 | my $line_fixed = 0; | ||
2720 | |||
2532 | my $ops = qr{ | 2721 | my $ops = qr{ |
2533 | <<=|>>=|<=|>=|==|!=| | 2722 | <<=|>>=|<=|>=|==|!=| |
2534 | \+=|-=|\*=|\/=|%=|\^=|\|=|&=| | 2723 | \+=|-=|\*=|\/=|%=|\^=|\|=|&=| |
@@ -2537,11 +2726,30 @@ sub process { | |||
2537 | \?|: | 2726 | \?|: |
2538 | }x; | 2727 | }x; |
2539 | my @elements = split(/($ops|;)/, $opline); | 2728 | my @elements = split(/($ops|;)/, $opline); |
2729 | |||
2730 | ## print("element count: <" . $#elements . ">\n"); | ||
2731 | ## foreach my $el (@elements) { | ||
2732 | ## print("el: <$el>\n"); | ||
2733 | ## } | ||
2734 | |||
2735 | my @fix_elements = (); | ||
2540 | my $off = 0; | 2736 | my $off = 0; |
2541 | 2737 | ||
2738 | foreach my $el (@elements) { | ||
2739 | push(@fix_elements, substr($rawline, $off, length($el))); | ||
2740 | $off += length($el); | ||
2741 | } | ||
2742 | |||
2743 | $off = 0; | ||
2744 | |||
2542 | my $blank = copy_spacing($opline); | 2745 | my $blank = copy_spacing($opline); |
2543 | 2746 | ||
2544 | for (my $n = 0; $n < $#elements; $n += 2) { | 2747 | for (my $n = 0; $n < $#elements; $n += 2) { |
2748 | |||
2749 | my $good = $fix_elements[$n] . $fix_elements[$n + 1]; | ||
2750 | |||
2751 | ## print("n: <$n> good: <$good>\n"); | ||
2752 | |||
2545 | $off += length($elements[$n]); | 2753 | $off += length($elements[$n]); |
2546 | 2754 | ||
2547 | # Pick up the preceding and succeeding characters. | 2755 | # Pick up the preceding and succeeding characters. |
@@ -2598,8 +2806,11 @@ sub process { | |||
2598 | } elsif ($op eq ';') { | 2806 | } elsif ($op eq ';') { |
2599 | if ($ctx !~ /.x[WEBC]/ && | 2807 | if ($ctx !~ /.x[WEBC]/ && |
2600 | $cc !~ /^\\/ && $cc !~ /^;/) { | 2808 | $cc !~ /^\\/ && $cc !~ /^;/) { |
2601 | ERROR("SPACING", | 2809 | if (ERROR("SPACING", |
2602 | "space required after that '$op' $at\n" . $hereptr); | 2810 | "space required after that '$op' $at\n" . $hereptr)) { |
2811 | $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " "; | ||
2812 | $line_fixed = 1; | ||
2813 | } | ||
2603 | } | 2814 | } |
2604 | 2815 | ||
2605 | # // is a comment | 2816 | # // is a comment |
@@ -2610,15 +2821,24 @@ sub process { | |||
2610 | # : when part of a bitfield | 2821 | # : when part of a bitfield |
2611 | } elsif ($op eq '->' || $opv eq ':B') { | 2822 | } elsif ($op eq '->' || $opv eq ':B') { |
2612 | if ($ctx =~ /Wx.|.xW/) { | 2823 | if ($ctx =~ /Wx.|.xW/) { |
2613 | ERROR("SPACING", | 2824 | if (ERROR("SPACING", |
2614 | "spaces prohibited around that '$op' $at\n" . $hereptr); | 2825 | "spaces prohibited around that '$op' $at\n" . $hereptr)) { |
2826 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]); | ||
2827 | $line_fixed = 1; | ||
2828 | if (defined $fix_elements[$n + 2]) { | ||
2829 | $fix_elements[$n + 2] =~ s/^\s+//; | ||
2830 | } | ||
2831 | } | ||
2615 | } | 2832 | } |
2616 | 2833 | ||
2617 | # , must have a space on the right. | 2834 | # , must have a space on the right. |
2618 | } elsif ($op eq ',') { | 2835 | } elsif ($op eq ',') { |
2619 | if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) { | 2836 | if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) { |
2620 | ERROR("SPACING", | 2837 | if (ERROR("SPACING", |
2621 | "space required after that '$op' $at\n" . $hereptr); | 2838 | "space required after that '$op' $at\n" . $hereptr)) { |
2839 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " "; | ||
2840 | $line_fixed = 1; | ||
2841 | } | ||
2622 | } | 2842 | } |
2623 | 2843 | ||
2624 | # '*' as part of a type definition -- reported already. | 2844 | # '*' as part of a type definition -- reported already. |
@@ -2632,34 +2852,58 @@ sub process { | |||
2632 | $opv eq '*U' || $opv eq '-U' || | 2852 | $opv eq '*U' || $opv eq '-U' || |
2633 | $opv eq '&U' || $opv eq '&&U') { | 2853 | $opv eq '&U' || $opv eq '&&U') { |
2634 | if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { | 2854 | if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { |
2635 | ERROR("SPACING", | 2855 | if (ERROR("SPACING", |
2636 | "space required before that '$op' $at\n" . $hereptr); | 2856 | "space required before that '$op' $at\n" . $hereptr)) { |
2857 | $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]); | ||
2858 | $line_fixed = 1; | ||
2859 | } | ||
2637 | } | 2860 | } |
2638 | if ($op eq '*' && $cc =~/\s*$Modifier\b/) { | 2861 | if ($op eq '*' && $cc =~/\s*$Modifier\b/) { |
2639 | # A unary '*' may be const | 2862 | # A unary '*' may be const |
2640 | 2863 | ||
2641 | } elsif ($ctx =~ /.xW/) { | 2864 | } elsif ($ctx =~ /.xW/) { |
2642 | ERROR("SPACING", | 2865 | if (ERROR("SPACING", |
2643 | "space prohibited after that '$op' $at\n" . $hereptr); | 2866 | "space prohibited after that '$op' $at\n" . $hereptr)) { |
2867 | $fixed_line =~ s/\s+$//; | ||
2868 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]); | ||
2869 | $line_fixed = 1; | ||
2870 | if (defined $fix_elements[$n + 2]) { | ||
2871 | $fix_elements[$n + 2] =~ s/^\s+//; | ||
2872 | } | ||
2873 | } | ||
2644 | } | 2874 | } |
2645 | 2875 | ||
2646 | # unary ++ and unary -- are allowed no space on one side. | 2876 | # unary ++ and unary -- are allowed no space on one side. |
2647 | } elsif ($op eq '++' or $op eq '--') { | 2877 | } elsif ($op eq '++' or $op eq '--') { |
2648 | if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) { | 2878 | if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) { |
2649 | ERROR("SPACING", | 2879 | if (ERROR("SPACING", |
2650 | "space required one side of that '$op' $at\n" . $hereptr); | 2880 | "space required one side of that '$op' $at\n" . $hereptr)) { |
2881 | $fixed_line =~ s/\s+$//; | ||
2882 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " "; | ||
2883 | $line_fixed = 1; | ||
2884 | } | ||
2651 | } | 2885 | } |
2652 | if ($ctx =~ /Wx[BE]/ || | 2886 | if ($ctx =~ /Wx[BE]/ || |
2653 | ($ctx =~ /Wx./ && $cc =~ /^;/)) { | 2887 | ($ctx =~ /Wx./ && $cc =~ /^;/)) { |
2654 | ERROR("SPACING", | 2888 | if (ERROR("SPACING", |
2655 | "space prohibited before that '$op' $at\n" . $hereptr); | 2889 | "space prohibited before that '$op' $at\n" . $hereptr)) { |
2890 | $fixed_line =~ s/\s+$//; | ||
2891 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]); | ||
2892 | $line_fixed = 1; | ||
2893 | } | ||
2656 | } | 2894 | } |
2657 | if ($ctx =~ /ExW/) { | 2895 | if ($ctx =~ /ExW/) { |
2658 | ERROR("SPACING", | 2896 | if (ERROR("SPACING", |
2659 | "space prohibited after that '$op' $at\n" . $hereptr); | 2897 | "space prohibited after that '$op' $at\n" . $hereptr)) { |
2898 | $fixed_line =~ s/\s+$//; | ||
2899 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]); | ||
2900 | $line_fixed = 1; | ||
2901 | if (defined $fix_elements[$n + 2]) { | ||
2902 | $fix_elements[$n + 2] =~ s/^\s+//; | ||
2903 | } | ||
2904 | } | ||
2660 | } | 2905 | } |
2661 | 2906 | ||
2662 | |||
2663 | # << and >> may either have or not have spaces both sides | 2907 | # << and >> may either have or not have spaces both sides |
2664 | } elsif ($op eq '<<' or $op eq '>>' or | 2908 | } elsif ($op eq '<<' or $op eq '>>' or |
2665 | $op eq '&' or $op eq '^' or $op eq '|' or | 2909 | $op eq '&' or $op eq '^' or $op eq '|' or |
@@ -2668,17 +2912,23 @@ sub process { | |||
2668 | $op eq '%') | 2912 | $op eq '%') |
2669 | { | 2913 | { |
2670 | if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) { | 2914 | if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) { |
2671 | ERROR("SPACING", | 2915 | if (ERROR("SPACING", |
2672 | "need consistent spacing around '$op' $at\n" . | 2916 | "need consistent spacing around '$op' $at\n" . $hereptr)) { |
2673 | $hereptr); | 2917 | $fixed_line =~ s/\s+$//; |
2918 | $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " "; | ||
2919 | $line_fixed = 1; | ||
2920 | } | ||
2674 | } | 2921 | } |
2675 | 2922 | ||
2676 | # A colon needs no spaces before when it is | 2923 | # A colon needs no spaces before when it is |
2677 | # terminating a case value or a label. | 2924 | # terminating a case value or a label. |
2678 | } elsif ($opv eq ':C' || $opv eq ':L') { | 2925 | } elsif ($opv eq ':C' || $opv eq ':L') { |
2679 | if ($ctx =~ /Wx./) { | 2926 | if ($ctx =~ /Wx./) { |
2680 | ERROR("SPACING", | 2927 | if (ERROR("SPACING", |
2681 | "space prohibited before that '$op' $at\n" . $hereptr); | 2928 | "space prohibited before that '$op' $at\n" . $hereptr)) { |
2929 | $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]); | ||
2930 | $line_fixed = 1; | ||
2931 | } | ||
2682 | } | 2932 | } |
2683 | 2933 | ||
2684 | # All the others need spaces both sides. | 2934 | # All the others need spaces both sides. |
@@ -2701,11 +2951,39 @@ sub process { | |||
2701 | } | 2951 | } |
2702 | 2952 | ||
2703 | if ($ok == 0) { | 2953 | if ($ok == 0) { |
2704 | ERROR("SPACING", | 2954 | if (ERROR("SPACING", |
2705 | "spaces required around that '$op' $at\n" . $hereptr); | 2955 | "spaces required around that '$op' $at\n" . $hereptr)) { |
2956 | $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " "; | ||
2957 | $good = $fix_elements[$n] . " " . trim($fix_elements[$n + 1]) . " "; | ||
2958 | $line_fixed = 1; | ||
2959 | } | ||
2706 | } | 2960 | } |
2707 | } | 2961 | } |
2708 | $off += length($elements[$n + 1]); | 2962 | $off += length($elements[$n + 1]); |
2963 | |||
2964 | ## print("n: <$n> GOOD: <$good>\n"); | ||
2965 | |||
2966 | $fixed_line = $fixed_line . $good; | ||
2967 | } | ||
2968 | |||
2969 | if (($#elements % 2) == 0) { | ||
2970 | $fixed_line = $fixed_line . $fix_elements[$#elements]; | ||
2971 | } | ||
2972 | |||
2973 | if ($fix && $line_fixed && $fixed_line ne $fixed[$linenr - 1]) { | ||
2974 | $fixed[$linenr - 1] = $fixed_line; | ||
2975 | } | ||
2976 | |||
2977 | |||
2978 | } | ||
2979 | |||
2980 | # check for whitespace before a non-naked semicolon | ||
2981 | if ($line =~ /^\+.*\S\s+;/) { | ||
2982 | if (WARN("SPACING", | ||
2983 | "space prohibited before semicolon\n" . $herecurr) && | ||
2984 | $fix) { | ||
2985 | 1 while $fixed[$linenr - 1] =~ | ||
2986 | s/^(\+.*\S)\s+;/$1;/; | ||
2709 | } | 2987 | } |
2710 | } | 2988 | } |
2711 | 2989 | ||
@@ -2734,10 +3012,22 @@ sub process { | |||
2734 | #need space before brace following if, while, etc | 3012 | #need space before brace following if, while, etc |
2735 | if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) || | 3013 | if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) || |
2736 | $line =~ /do{/) { | 3014 | $line =~ /do{/) { |
2737 | ERROR("SPACING", | 3015 | if (ERROR("SPACING", |
2738 | "space required before the open brace '{'\n" . $herecurr); | 3016 | "space required before the open brace '{'\n" . $herecurr) && |
3017 | $fix) { | ||
3018 | $fixed[$linenr - 1] =~ | ||
3019 | s/^(\+.*(?:do|\))){/$1 {/; | ||
3020 | } | ||
2739 | } | 3021 | } |
2740 | 3022 | ||
3023 | ## # check for blank lines before declarations | ||
3024 | ## if ($line =~ /^.\t+$Type\s+$Ident(?:\s*=.*)?;/ && | ||
3025 | ## $prevrawline =~ /^.\s*$/) { | ||
3026 | ## WARN("SPACING", | ||
3027 | ## "No blank lines before declarations\n" . $hereprev); | ||
3028 | ## } | ||
3029 | ## | ||
3030 | |||
2741 | # closing brace should have a space following it when it has anything | 3031 | # closing brace should have a space following it when it has anything |
2742 | # on the line | 3032 | # on the line |
2743 | if ($line =~ /}(?!(?:,|;|\)))\S/) { | 3033 | if ($line =~ /}(?!(?:,|;|\)))\S/) { |
@@ -2747,32 +3037,52 @@ sub process { | |||
2747 | 3037 | ||
2748 | # check spacing on square brackets | 3038 | # check spacing on square brackets |
2749 | if ($line =~ /\[\s/ && $line !~ /\[\s*$/) { | 3039 | if ($line =~ /\[\s/ && $line !~ /\[\s*$/) { |
2750 | ERROR("SPACING", | 3040 | if (ERROR("SPACING", |
2751 | "space prohibited after that open square bracket '['\n" . $herecurr); | 3041 | "space prohibited after that open square bracket '['\n" . $herecurr) && |
3042 | $fix) { | ||
3043 | $fixed[$linenr - 1] =~ | ||
3044 | s/\[\s+/\[/; | ||
3045 | } | ||
2752 | } | 3046 | } |
2753 | if ($line =~ /\s\]/) { | 3047 | if ($line =~ /\s\]/) { |
2754 | ERROR("SPACING", | 3048 | if (ERROR("SPACING", |
2755 | "space prohibited before that close square bracket ']'\n" . $herecurr); | 3049 | "space prohibited before that close square bracket ']'\n" . $herecurr) && |
3050 | $fix) { | ||
3051 | $fixed[$linenr - 1] =~ | ||
3052 | s/\s+\]/\]/; | ||
3053 | } | ||
2756 | } | 3054 | } |
2757 | 3055 | ||
2758 | # check spacing on parentheses | 3056 | # check spacing on parentheses |
2759 | if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ && | 3057 | if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ && |
2760 | $line !~ /for\s*\(\s+;/) { | 3058 | $line !~ /for\s*\(\s+;/) { |
2761 | ERROR("SPACING", | 3059 | if (ERROR("SPACING", |
2762 | "space prohibited after that open parenthesis '('\n" . $herecurr); | 3060 | "space prohibited after that open parenthesis '('\n" . $herecurr) && |
3061 | $fix) { | ||
3062 | $fixed[$linenr - 1] =~ | ||
3063 | s/\(\s+/\(/; | ||
3064 | } | ||
2763 | } | 3065 | } |
2764 | if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ && | 3066 | if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ && |
2765 | $line !~ /for\s*\(.*;\s+\)/ && | 3067 | $line !~ /for\s*\(.*;\s+\)/ && |
2766 | $line !~ /:\s+\)/) { | 3068 | $line !~ /:\s+\)/) { |
2767 | ERROR("SPACING", | 3069 | if (ERROR("SPACING", |
2768 | "space prohibited before that close parenthesis ')'\n" . $herecurr); | 3070 | "space prohibited before that close parenthesis ')'\n" . $herecurr) && |
3071 | $fix) { | ||
3072 | $fixed[$linenr - 1] =~ | ||
3073 | s/\s+\)/\)/; | ||
3074 | } | ||
2769 | } | 3075 | } |
2770 | 3076 | ||
2771 | #goto labels aren't indented, allow a single space however | 3077 | #goto labels aren't indented, allow a single space however |
2772 | if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and | 3078 | if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and |
2773 | !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) { | 3079 | !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) { |
2774 | WARN("INDENTED_LABEL", | 3080 | if (WARN("INDENTED_LABEL", |
2775 | "labels should not be indented\n" . $herecurr); | 3081 | "labels should not be indented\n" . $herecurr) && |
3082 | $fix) { | ||
3083 | $fixed[$linenr - 1] =~ | ||
3084 | s/^(.)\s+/$1/; | ||
3085 | } | ||
2776 | } | 3086 | } |
2777 | 3087 | ||
2778 | # Return is not a function. | 3088 | # Return is not a function. |
@@ -2809,8 +3119,13 @@ sub process { | |||
2809 | } | 3119 | } |
2810 | 3120 | ||
2811 | # Need a space before open parenthesis after if, while etc | 3121 | # Need a space before open parenthesis after if, while etc |
2812 | if ($line=~/\b(if|while|for|switch)\(/) { | 3122 | if ($line =~ /\b(if|while|for|switch)\(/) { |
2813 | ERROR("SPACING", "space required before the open parenthesis '('\n" . $herecurr); | 3123 | if (ERROR("SPACING", |
3124 | "space required before the open parenthesis '('\n" . $herecurr) && | ||
3125 | $fix) { | ||
3126 | $fixed[$linenr - 1] =~ | ||
3127 | s/\b(if|while|for|switch)\(/$1 \(/; | ||
3128 | } | ||
2814 | } | 3129 | } |
2815 | 3130 | ||
2816 | # Check for illegal assignment in if conditional -- and check for trailing | 3131 | # Check for illegal assignment in if conditional -- and check for trailing |
@@ -2934,16 +3249,29 @@ sub process { | |||
2934 | } | 3249 | } |
2935 | } | 3250 | } |
2936 | 3251 | ||
2937 | #CamelCase | 3252 | #Specific variable tests |
2938 | while ($line =~ m{($Constant|$Lval)}g) { | 3253 | while ($line =~ m{($Constant|$Lval)}g) { |
2939 | my $var = $1; | 3254 | my $var = $1; |
2940 | if ($var !~ /$Constant/ && | 3255 | |
2941 | $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ && | 3256 | #gcc binary extension |
2942 | $var !~ /"^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ && | 3257 | if ($var =~ /^$Binary$/) { |
2943 | !defined $camelcase{$var}) { | 3258 | WARN("GCC_BINARY_CONSTANT", |
2944 | $camelcase{$var} = 1; | 3259 | "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr); |
2945 | WARN("CAMELCASE", | 3260 | } |
2946 | "Avoid CamelCase: <$var>\n" . $herecurr); | 3261 | |
3262 | #CamelCase | ||
3263 | if ($var !~ /^$Constant$/ && | ||
3264 | $var =~ /[A-Z][a-z]|[a-z][A-Z]/ && | ||
3265 | #Ignore Page<foo> variants | ||
3266 | $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ && | ||
3267 | #Ignore SI style variants like nS, mV and dB (ie: max_uV, regulator_min_uA_show) | ||
3268 | $var !~ /^(?:[a-z_]*?)_?[a-z][A-Z](?:_[a-z_]+)?$/) { | ||
3269 | seed_camelcase_includes() if ($check); | ||
3270 | if (!defined $camelcase{$var}) { | ||
3271 | $camelcase{$var} = 1; | ||
3272 | CHK("CAMELCASE", | ||
3273 | "Avoid CamelCase: <$var>\n" . $herecurr); | ||
3274 | } | ||
2947 | } | 3275 | } |
2948 | } | 3276 | } |
2949 | 3277 | ||
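The reworked per-variable tests above add a warning for gcc's binary-constant extension (matched by the new $Binary pattern) and demote CamelCase from WARN to CHK, skipping Page<foo> helpers and SI-unit style names such as regulator_min_uA_show. Hypothetical declarations showing what each branch reacts to; all names other than regulator_min_uA_show are invented:

int flags = 0b0101;             /* GCC_BINARY_CONSTANT: gcc v4.3+ extension, warned    */
int flags_hex = 0x5;            /* plain hex constant, matched by $Hex, not reported   */
int BuffLen;                    /* CamelCase: now a CHK rather than a WARN             */
int regulator_min_uA_show;      /* SI-style suffix named in the comment above, skipped */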
@@ -3021,7 +3349,7 @@ sub process { | |||
3021 | if ($dstat ne '' && | 3349 | if ($dstat ne '' && |
3022 | $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(), | 3350 | $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(), |
3023 | $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo(); | 3351 | $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo(); |
3024 | $dstat !~ /^[!~-]?(?:$Ident|$Constant)$/ && # 10 // foo() // !foo // ~foo // -foo | 3352 | $dstat !~ /^[!~-]?(?:$Lval|$Constant)$/ && # 10 // foo() // !foo // ~foo // -foo // foo->bar // foo.bar->baz |
3025 | $dstat !~ /^'X'$/ && # character constants | 3353 | $dstat !~ /^'X'$/ && # character constants |
3026 | $dstat !~ /$exceptions/ && | 3354 | $dstat !~ /$exceptions/ && |
3027 | $dstat !~ /^\.$Ident\s*=/ && # .foo = | 3355 | $dstat !~ /^\.$Ident\s*=/ && # .foo = |
@@ -3230,11 +3558,11 @@ sub process { | |||
3230 | } | 3558 | } |
3231 | 3559 | ||
3232 | # check for unnecessary blank lines around braces | 3560 | # check for unnecessary blank lines around braces |
3233 | if (($line =~ /^.\s*}\s*$/ && $prevline =~ /^.\s*$/)) { | 3561 | if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) { |
3234 | CHK("BRACES", | 3562 | CHK("BRACES", |
3235 | "Blank lines aren't necessary before a close brace '}'\n" . $hereprev); | 3563 | "Blank lines aren't necessary before a close brace '}'\n" . $hereprev); |
3236 | } | 3564 | } |
3237 | if (($line =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) { | 3565 | if (($rawline =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) { |
3238 | CHK("BRACES", | 3566 | CHK("BRACES", |
3239 | "Blank lines aren't necessary after an open brace '{'\n" . $hereprev); | 3567 | "Blank lines aren't necessary after an open brace '{'\n" . $hereprev); |
3240 | } | 3568 | } |
@@ -3279,6 +3607,18 @@ sub process { | |||
3279 | } | 3607 | } |
3280 | } | 3608 | } |
3281 | 3609 | ||
3610 | # check for comparisons of jiffies | ||
3611 | if ($line =~ /\bjiffies\s*$Compare|$Compare\s*jiffies\b/) { | ||
3612 | WARN("JIFFIES_COMPARISON", | ||
3613 | "Comparing jiffies is almost always wrong; prefer time_after, time_before and friends\n" . $herecurr); | ||
3614 | } | ||
3615 | |||
3616 | # check for comparisons of get_jiffies_64() | ||
3617 | if ($line =~ /\bget_jiffies_64\s*\(\s*\)\s*$Compare|$Compare\s*get_jiffies_64\s*\(\s*\)/) { | ||
3618 | WARN("JIFFIES_COMPARISON", | ||
3619 | "Comparing get_jiffies_64() is almost always wrong; prefer time_after64, time_before64 and friends\n" . $herecurr); | ||
3620 | } | ||
3621 | |||
3282 | # warn about #ifdefs in C files | 3622 | # warn about #ifdefs in C files |
3283 | # if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) { | 3623 | # if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) { |
3284 | # print "#ifdef in C files should be avoided\n"; | 3624 | # print "#ifdef in C files should be avoided\n"; |
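The two jiffies checks added above target open-coded comparisons that misbehave when the counter wraps. A minimal sketch of the pattern and the preferred helper (the function and variable names are hypothetical):

    #include <linux/types.h>
    #include <linux/jiffies.h>

    /* 'deadline' is assumed to hold a value derived from jiffies. */
    static bool example_timed_out(unsigned long deadline)
    {
            /* Flagged form: 'jiffies > deadline' breaks across a wrap. */
            return time_after(jiffies, deadline);   /* wrap-safe replacement */
    }

For 64-bit timestamps the same applies to get_jiffies_64(), with time_after64()/time_before64() as the replacements.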
@@ -3288,8 +3628,13 @@ sub process { | |||
3288 | 3628 | ||
3289 | # warn about spacing in #ifdefs | 3629 | # warn about spacing in #ifdefs |
3290 | if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) { | 3630 | if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) { |
3291 | ERROR("SPACING", | 3631 | if (ERROR("SPACING", |
3292 | "exactly one space required after that #$1\n" . $herecurr); | 3632 | "exactly one space required after that #$1\n" . $herecurr) && |
3633 | $fix) { | ||
3634 | $fixed[$linenr - 1] =~ | ||
3635 | s/^(.\s*\#\s*(ifdef|ifndef|elif))\s{2,}/$1 /; | ||
3636 | } | ||
3637 | |||
3293 | } | 3638 | } |
3294 | 3639 | ||
3295 | # check for spinlock_t definitions without a comment. | 3640 | # check for spinlock_t definitions without a comment. |
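The #ifdef/#ifndef/#elif spacing error can now also be repaired automatically: with --fix, checkpatch collapses the extra whitespace to a single space and writes the result to a '$filename.EXPERIMENTAL-checkpatch-fixes' file (see the hunk near the end of the script below) rather than touching the input. A hypothetical example of what gets flagged and what --fix produces (CONFIG_EXAMPLE_OPTION is made up):

    #ifdef  CONFIG_EXAMPLE_OPTION   /* flagged: two spaces after #ifdef */
    #ifdef CONFIG_EXAMPLE_OPTION    /* after --fix: exactly one space   */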
@@ -3495,6 +3840,14 @@ sub process { | |||
3495 | "unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr); | 3840 | "unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr); |
3496 | } | 3841 | } |
3497 | 3842 | ||
3843 | # alloc style | ||
3844 | # p = alloc(sizeof(struct foo), ...) should be p = alloc(sizeof(*p), ...) | ||
3845 | if ($^V && $^V ge 5.10.0 && | ||
3846 | $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*([kv][mz]alloc(?:_node)?)\s*\(\s*(sizeof\s*\(\s*struct\s+$Lval\s*\))/) { | ||
3847 | CHK("ALLOC_SIZEOF_STRUCT", | ||
3848 | "Prefer $3(sizeof(*$1)...) over $3($4...)\n" . $herecurr); | ||
3849 | } | ||
3850 | |||
3498 | # check for krealloc arg reuse | 3851 | # check for krealloc arg reuse |
3499 | if ($^V && $^V ge 5.10.0 && | 3852 | if ($^V && $^V ge 5.10.0 && |
3500 | $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*krealloc\s*\(\s*\1\s*,/) { | 3853 | $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*krealloc\s*\(\s*\1\s*,/) { |
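The new ALLOC_SIZEOF_STRUCT check prefers sizing an allocation from the pointer being assigned rather than from a spelled-out struct type, so the size stays correct if the pointer's type ever changes. A minimal sketch (struct foo_ctx and foo_ctx_alloc() are made-up names):

    #include <linux/slab.h>

    struct foo_ctx {
            int id;
    };

    static struct foo_ctx *foo_ctx_alloc(void)
    {
            struct foo_ctx *p;

            /* Flagged:   p = kmalloc(sizeof(struct foo_ctx), GFP_KERNEL); */
            p = kmalloc(sizeof(*p), GFP_KERNEL);    /* preferred form */
            return p;
    }

Per the [kv][mz]alloc pattern in the regex, the same applies to kzalloc(), vmalloc(), vzalloc() and their _node variants.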
@@ -3540,6 +3893,33 @@ sub process { | |||
3540 | "Using yield() is generally wrong. See yield() kernel-doc (sched/core.c)\n" . $herecurr); | 3893 | "Using yield() is generally wrong. See yield() kernel-doc (sched/core.c)\n" . $herecurr); |
3541 | } | 3894 | } |
3542 | 3895 | ||
3896 | # check for comparisons against true and false | ||
3897 | if ($line =~ /\+\s*(.*?)\b(true|false|$Lval)\s*(==|\!=)\s*(true|false|$Lval)\b(.*)$/i) { | ||
3898 | my $lead = $1; | ||
3899 | my $arg = $2; | ||
3900 | my $test = $3; | ||
3901 | my $otype = $4; | ||
3902 | my $trail = $5; | ||
3903 | my $op = "!"; | ||
3904 | |||
3905 | ($arg, $otype) = ($otype, $arg) if ($arg =~ /^(?:true|false)$/i); | ||
3906 | |||
3907 | my $type = lc($otype); | ||
3908 | if ($type =~ /^(?:true|false)$/) { | ||
3909 | if (("$test" eq "==" && "$type" eq "true") || | ||
3910 | ("$test" eq "!=" && "$type" eq "false")) { | ||
3911 | $op = ""; | ||
3912 | } | ||
3913 | |||
3914 | CHK("BOOL_COMPARISON", | ||
3915 | "Using comparison to $otype is error prone\n" . $herecurr); | ||
3916 | |||
3917 | ## maybe suggesting a correct construct would be better | ||
3918 | ## "Using comparison to $otype is error prone. Perhaps use '${lead}${op}${arg}${trail}'\n" . $herecurr); | ||
3919 | |||
3920 | } | ||
3921 | } | ||
3922 | |||
3543 | # check for semaphores initialized locked | 3923 | # check for semaphores initialized locked |
3544 | if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) { | 3924 | if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) { |
3545 | WARN("CONSIDER_COMPLETION", | 3925 | WARN("CONSIDER_COMPLETION", |
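The BOOL_COMPARISON check flags explicit comparisons against true and false, which are error-prone for anything that is merely truthy (non-zero) rather than exactly 1. A minimal sketch of the flagged and preferred forms (device_ready and example_poll() are made up):

    #include <linux/types.h>
    #include <linux/errno.h>

    static bool device_ready;

    static int example_poll(void)
    {
            /* Flagged:   if (device_ready == true)  ...
             *            if (device_ready == false) ...
             */
            if (device_ready)               /* preferred */
                    return 0;
            return -EAGAIN;                 /* instead of testing '== false' */
    }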
@@ -3717,6 +4097,40 @@ sub process { | |||
3717 | print "\n\n"; | 4097 | print "\n\n"; |
3718 | } | 4098 | } |
3719 | 4099 | ||
4100 | if ($clean == 0 && $fix && "@rawlines" ne "@fixed") { | ||
4101 | my $newfile = $filename . ".EXPERIMENTAL-checkpatch-fixes"; | ||
4102 | my $linecount = 0; | ||
4103 | my $f; | ||
4104 | |||
4105 | open($f, '>', $newfile) | ||
4106 | or die "$P: Can't open $newfile for write\n"; | ||
4107 | foreach my $fixed_line (@fixed) { | ||
4108 | $linecount++; | ||
4109 | if ($file) { | ||
4110 | if ($linecount > 3) { | ||
4111 | $fixed_line =~ s/^\+//; | ||
4112 | print $f $fixed_line. "\n"; | ||
4113 | } | ||
4114 | } else { | ||
4115 | print $f $fixed_line . "\n"; | ||
4116 | } | ||
4117 | } | ||
4118 | close($f); | ||
4119 | |||
4120 | if (!$quiet) { | ||
4121 | print << "EOM"; | ||
4122 | Wrote EXPERIMENTAL --fix correction(s) to '$newfile' | ||
4123 | |||
4124 | Do _NOT_ trust the results written to this file. | ||
4125 | Do _NOT_ submit these changes without inspecting them for correctness. | ||
4126 | |||
4127 | This EXPERIMENTAL file is simply a convenience to help rewrite patches. | ||
4128 | No warranties, expressed or implied... | ||
4129 | |||
4130 | EOM | ||
4131 | } | ||
4132 | } | ||
4133 | |||
3720 | if ($clean == 1 && $quiet == 0) { | 4134 | if ($clean == 1 && $quiet == 0) { |
3721 | print "$vname has no obvious style problems and is ready for submission.\n" | 4135 | print "$vname has no obvious style problems and is ready for submission.\n" |
3722 | } | 4136 | } |
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index e66d4d258e1a..bb5d115ca671 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c | |||
@@ -177,5 +177,11 @@ int main(void) | |||
177 | DEVID(mei_cl_device_id); | 177 | DEVID(mei_cl_device_id); |
178 | DEVID_FIELD(mei_cl_device_id, name); | 178 | DEVID_FIELD(mei_cl_device_id, name); |
179 | 179 | ||
180 | DEVID(rio_device_id); | ||
181 | DEVID_FIELD(rio_device_id, did); | ||
182 | DEVID_FIELD(rio_device_id, vid); | ||
183 | DEVID_FIELD(rio_device_id, asm_did); | ||
184 | DEVID_FIELD(rio_device_id, asm_vid); | ||
185 | |||
180 | return 0; | 186 | return 0; |
181 | } | 187 | } |
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 45f9a3377dcd..d9e67b719f08 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -1145,6 +1145,26 @@ static int do_mei_entry(const char *filename, void *symval, | |||
1145 | } | 1145 | } |
1146 | ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry); | 1146 | ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry); |
1147 | 1147 | ||
1148 | /* Looks like: rapidio:vNdNavNadN */ | ||
1149 | static int do_rio_entry(const char *filename, | ||
1150 | void *symval, char *alias) | ||
1151 | { | ||
1152 | DEF_FIELD(symval, rio_device_id, did); | ||
1153 | DEF_FIELD(symval, rio_device_id, vid); | ||
1154 | DEF_FIELD(symval, rio_device_id, asm_did); | ||
1155 | DEF_FIELD(symval, rio_device_id, asm_vid); | ||
1156 | |||
1157 | strcpy(alias, "rapidio:"); | ||
1158 | ADD(alias, "v", vid != RIO_ANY_ID, vid); | ||
1159 | ADD(alias, "d", did != RIO_ANY_ID, did); | ||
1160 | ADD(alias, "av", asm_vid != RIO_ANY_ID, asm_vid); | ||
1161 | ADD(alias, "ad", asm_did != RIO_ANY_ID, asm_did); | ||
1162 | |||
1163 | add_wildcard(alias); | ||
1164 | return 1; | ||
1165 | } | ||
1166 | ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry); | ||
1167 | |||
1148 | /* Does namelen bytes of name exactly match the symbol? */ | 1168 | /* Does namelen bytes of name exactly match the symbol? */ |
1149 | static bool sym_is(const char *name, unsigned namelen, const char *symbol) | 1169 | static bool sym_is(const char *name, unsigned namelen, const char *symbol) |
1150 | { | 1170 | { |
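With do_rio_entry() in place, modpost can generate module aliases for RapidIO id tables, so drivers load automatically on device match. A hypothetical driver-side table that would feed this code (the numeric IDs are invented; the field names follow struct rio_device_id as listed in devicetable-offsets.c above):

    #include <linux/module.h>
    #include <linux/mod_devicetable.h>
    #include <linux/rio.h>

    static const struct rio_device_id example_rio_ids[] = {
            { .did = 0x1234, .vid = 0x5678,
              .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID },
            { }     /* terminating all-zero entry */
    };
    MODULE_DEVICE_TABLE(rapidio, example_rio_ids);

For the entry above, the generated alias follows the rapidio:vNdNavNadN format, with '*' substituted for the RIO_ANY_ID fields.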
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index 721587c9cd84..73e205c892a0 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
@@ -38,9 +38,9 @@ enum si476x_digital_io_output_format { | |||
38 | SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT = 8, | 38 | SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT = 8, |
39 | }; | 39 | }; |
40 | 40 | ||
41 | #define SI476X_DIGITAL_IO_OUTPUT_WIDTH_MASK ((0b111 << SI476X_DIGITAL_IO_SLOT_SIZE_SHIFT) | \ | 41 | #define SI476X_DIGITAL_IO_OUTPUT_WIDTH_MASK ((0x7 << SI476X_DIGITAL_IO_SLOT_SIZE_SHIFT) | \ |
42 | (0b111 << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT)) | 42 | (0x7 << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT)) |
43 | #define SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK (0b1111110) | 43 | #define SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK (0x7e) |
44 | 44 | ||
45 | enum si476x_daudio_formats { | 45 | enum si476x_daudio_formats { |
46 | SI476X_DAUDIO_MODE_I2S = (0x0 << 1), | 46 | SI476X_DAUDIO_MODE_I2S = (0x0 << 1), |
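The si476x change is purely a notation swap away from gcc's 0b binary-literal extension (the same extension the new checkpatch warning above targets); the mask values are unchanged. A compile-time spot check of the equivalence, kept as gcc-only code since it deliberately uses the extension being removed:

    /* gcc-only: 0b literals are the extension being phased out. */
    _Static_assert(0x7  == 0b111,     "3-bit width mask unchanged");
    _Static_assert(0x7e == 0b1111110, "output format mask unchanged");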
diff --git a/sound/sound_core.c b/sound/sound_core.c index 359753fc24e1..45759f4cca75 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c | |||
@@ -292,7 +292,7 @@ retry: | |||
292 | } | 292 | } |
293 | 293 | ||
294 | device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor), | 294 | device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor), |
295 | NULL, s->name+6); | 295 | NULL, "%s", s->name+6); |
296 | return s->unit_minor; | 296 | return s->unit_minor; |
297 | 297 | ||
298 | fail: | 298 | fail: |
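The sound_core fix closes a classic format-string hazard: s->name+6 was passed where device_create() expects a format, so a '%' in a device name would be interpreted as a conversion specifier. The general pattern, sketched with plain printf() standing in for device_create():

    #include <stdio.h>

    static void show_name(const char *untrusted_name)
    {
            /* Risky:  printf(untrusted_name);  -- '%' sequences get expanded */
            printf("%s", untrusted_name);   /* safe: the name is only ever data */
    }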
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile index 12657a5e4bf9..ae5faf9aade2 100644 --- a/tools/testing/selftests/cpu-hotplug/Makefile +++ b/tools/testing/selftests/cpu-hotplug/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | all: | 1 | all: |
2 | 2 | ||
3 | run_tests: | 3 | run_tests: |
4 | @./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" | 4 | @/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" |
5 | 5 | ||
6 | clean: | 6 | clean: |
diff --git a/tools/testing/selftests/kcmp/.gitignore b/tools/testing/selftests/kcmp/.gitignore new file mode 100644 index 000000000000..5a9b3732b2de --- /dev/null +++ b/tools/testing/selftests/kcmp/.gitignore | |||
@@ -0,0 +1,2 @@ | |||
1 | kcmp_test | ||
2 | kcmp-test-file | ||
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile index 56eb5523dbb8..d7d6bbeeff2f 100644 --- a/tools/testing/selftests/kcmp/Makefile +++ b/tools/testing/selftests/kcmp/Makefile | |||
@@ -25,5 +25,4 @@ run_tests: all | |||
25 | @./kcmp_test || echo "kcmp_test: [FAIL]" | 25 | @./kcmp_test || echo "kcmp_test: [FAIL]" |
26 | 26 | ||
27 | clean: | 27 | clean: |
28 | rm -fr ./run_test | 28 | $(RM) kcmp_test kcmp-test-file |
29 | rm -fr ./test-file | ||
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile index 0f49c3f5f58d..350bfeda3aa8 100644 --- a/tools/testing/selftests/memory-hotplug/Makefile +++ b/tools/testing/selftests/memory-hotplug/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | all: | 1 | all: |
2 | 2 | ||
3 | run_tests: | 3 | run_tests: |
4 | @./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" | 4 | @/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" |
5 | 5 | ||
6 | clean: | 6 | clean: |
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore new file mode 100644 index 000000000000..ff1bb16cec4f --- /dev/null +++ b/tools/testing/selftests/vm/.gitignore | |||
@@ -0,0 +1,4 @@ | |||
1 | hugepage-mmap | ||
2 | hugepage-shm | ||
3 | map_hugetlb | ||
4 | thuge-gen | ||
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 436d2e81868b..3f94e1afd6cf 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
@@ -2,13 +2,14 @@ | |||
2 | 2 | ||
3 | CC = $(CROSS_COMPILE)gcc | 3 | CC = $(CROSS_COMPILE)gcc |
4 | CFLAGS = -Wall | 4 | CFLAGS = -Wall |
5 | BINARIES = hugepage-mmap hugepage-shm map_hugetlb thuge-gen hugetlbfstest | ||
5 | 6 | ||
6 | all: hugepage-mmap hugepage-shm map_hugetlb thuge-gen | 7 | all: $(BINARIES) |
7 | %: %.c | 8 | %: %.c |
8 | $(CC) $(CFLAGS) -o $@ $^ | 9 | $(CC) $(CFLAGS) -o $@ $^ |
9 | 10 | ||
10 | run_tests: all | 11 | run_tests: all |
11 | @/bin/sh ./run_vmtests || echo "vmtests: [FAIL]" | 12 | @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) |
12 | 13 | ||
13 | clean: | 14 | clean: |
14 | $(RM) hugepage-mmap hugepage-shm map_hugetlb | 15 | $(RM) $(BINARIES) |
diff --git a/tools/testing/selftests/vm/hugetlbfstest.c b/tools/testing/selftests/vm/hugetlbfstest.c new file mode 100644 index 000000000000..ea40ff8c2391 --- /dev/null +++ b/tools/testing/selftests/vm/hugetlbfstest.c | |||
@@ -0,0 +1,84 @@ | |||
1 | #define _GNU_SOURCE | ||
2 | #include <assert.h> | ||
3 | #include <fcntl.h> | ||
4 | #include <stdio.h> | ||
5 | #include <stdlib.h> | ||
6 | #include <string.h> | ||
7 | #include <sys/mman.h> | ||
8 | #include <sys/stat.h> | ||
9 | #include <sys/types.h> | ||
10 | #include <unistd.h> | ||
11 | |||
12 | typedef unsigned long long u64; | ||
13 | |||
14 | static size_t length = 1 << 24; | ||
15 | |||
16 | static u64 read_rss(void) | ||
17 | { | ||
18 | char buf[4096], *s = buf; | ||
19 | int i, fd; | ||
20 | u64 rss; | ||
21 | |||
22 | fd = open("/proc/self/statm", O_RDONLY); | ||
23 | assert(fd > 2); | ||
24 | memset(buf, 0, sizeof(buf)); | ||
25 | read(fd, buf, sizeof(buf) - 1); | ||
26 | for (i = 0; i < 1; i++) | ||
27 | s = strchr(s, ' ') + 1; | ||
28 | rss = strtoull(s, NULL, 10); | ||
29 | return rss << 12; /* assumes 4k pagesize */ | ||
30 | } | ||
31 | |||
32 | static void do_mmap(int fd, int extra_flags, int unmap) | ||
33 | { | ||
34 | int *p; | ||
35 | int flags = MAP_PRIVATE | MAP_POPULATE | extra_flags; | ||
36 | u64 before, after; | ||
37 | |||
38 | before = read_rss(); | ||
39 | p = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, fd, 0); | ||
40 | assert(p != MAP_FAILED || | ||
41 | !"mmap returned an unexpected error"); | ||
42 | after = read_rss(); | ||
43 | assert(llabs(after - before - length) < 0x40000 || | ||
44 | !"rss didn't grow as expected"); | ||
45 | if (!unmap) | ||
46 | return; | ||
47 | munmap(p, length); | ||
48 | after = read_rss(); | ||
49 | assert(llabs(after - before) < 0x40000 || | ||
50 | !"rss didn't shrink as expected"); | ||
51 | } | ||
52 | |||
53 | static int open_file(const char *path) | ||
54 | { | ||
55 | int fd, err; | ||
56 | |||
57 | unlink(path); | ||
58 | fd = open(path, O_CREAT | O_RDWR | O_TRUNC | O_EXCL | ||
59 | | O_LARGEFILE | O_CLOEXEC, 0600); | ||
60 | assert(fd > 2); | ||
61 | unlink(path); | ||
62 | err = ftruncate(fd, length); | ||
63 | assert(!err); | ||
64 | return fd; | ||
65 | } | ||
66 | |||
67 | int main(void) | ||
68 | { | ||
69 | int hugefd, fd; | ||
70 | |||
71 | fd = open_file("/dev/shm/hugetlbhog"); | ||
72 | hugefd = open_file("/hugepages/hugetlbhog"); | ||
73 | |||
74 | system("echo 100 > /proc/sys/vm/nr_hugepages"); | ||
75 | do_mmap(-1, MAP_ANONYMOUS, 1); | ||
76 | do_mmap(fd, 0, 1); | ||
77 | do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 1); | ||
78 | do_mmap(hugefd, 0, 1); | ||
79 | do_mmap(hugefd, MAP_HUGETLB, 1); | ||
80 | /* Leak the last one to test do_exit() */ | ||
81 | do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 0); | ||
82 | printf("oll korrekt.\n"); | ||
83 | return 0; | ||
84 | } | ||
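read_rss() above converts the second /proc/self/statm field (resident pages) to bytes with a hard-coded shift of 12, i.e. it assumes 4 KiB pages, as its comment notes. A small sketch of a page-size-independent alternative, using the standard sysconf() interface (pages_to_bytes() is a made-up helper, not part of the test):

    #include <unistd.h>

    /* Convert a statm page count to bytes without assuming a 4 KiB page size. */
    static unsigned long long pages_to_bytes(unsigned long long pages)
    {
            return pages * (unsigned long long)sysconf(_SC_PAGESIZE);
    }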
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 4c53cae6c273..c87b6812300d 100644 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests | |||
@@ -4,6 +4,7 @@ | |||
4 | #we need 256M, below is the size in kB | 4 | #we need 256M, below is the size in kB |
5 | needmem=262144 | 5 | needmem=262144 |
6 | mnt=./huge | 6 | mnt=./huge |
7 | exitcode=0 | ||
7 | 8 | ||
8 | #get pagesize and freepages from /proc/meminfo | 9 | #get pagesize and freepages from /proc/meminfo |
9 | while read name size unit; do | 10 | while read name size unit; do |
@@ -41,6 +42,7 @@ echo "--------------------" | |||
41 | ./hugepage-mmap | 42 | ./hugepage-mmap |
42 | if [ $? -ne 0 ]; then | 43 | if [ $? -ne 0 ]; then |
43 | echo "[FAIL]" | 44 | echo "[FAIL]" |
45 | exitcode=1 | ||
44 | else | 46 | else |
45 | echo "[PASS]" | 47 | echo "[PASS]" |
46 | fi | 48 | fi |
@@ -55,6 +57,7 @@ echo "--------------------" | |||
55 | ./hugepage-shm | 57 | ./hugepage-shm |
56 | if [ $? -ne 0 ]; then | 58 | if [ $? -ne 0 ]; then |
57 | echo "[FAIL]" | 59 | echo "[FAIL]" |
60 | exitcode=1 | ||
58 | else | 61 | else |
59 | echo "[PASS]" | 62 | echo "[PASS]" |
60 | fi | 63 | fi |
@@ -67,6 +70,18 @@ echo "--------------------" | |||
67 | ./map_hugetlb | 70 | ./map_hugetlb |
68 | if [ $? -ne 0 ]; then | 71 | if [ $? -ne 0 ]; then |
69 | echo "[FAIL]" | 72 | echo "[FAIL]" |
73 | exitcode=1 | ||
74 | else | ||
75 | echo "[PASS]" | ||
76 | fi | ||
77 | |||
78 | echo "--------------------" | ||
79 | echo "running hugetlbfstest" | ||
80 | echo "--------------------" | ||
81 | ./hugetlbfstest | ||
82 | if [ $? -ne 0 ]; then | ||
83 | echo "[FAIL]" | ||
84 | exitcode=1 | ||
70 | else | 85 | else |
71 | echo "[PASS]" | 86 | echo "[PASS]" |
72 | fi | 87 | fi |
@@ -75,3 +90,4 @@ fi | |||
75 | umount $mnt | 90 | umount $mnt |
76 | rm -rf $mnt | 91 | rm -rf $mnt |
77 | echo $nr_hugepgs > /proc/sys/vm/nr_hugepages | 92 | echo $nr_hugepgs > /proc/sys/vm/nr_hugepages |
93 | exit $exitcode | ||