diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-02 21:08:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-02 21:08:07 -0400 |
commit | d52bd54db8be8999df6df5a776f38c4f8b5e9cea (patch) | |
tree | 0d8f436e959bb975c002ddf12ea1bdc9adadd04f | |
parent | 8cbdd85bda499d028b8f128191f392d701e8e41d (diff) | |
parent | 3bd080e4d8f2351ee3e143f0ec9307cc95ae6639 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
- the rest of ocfs2
- various hotfixes, mainly MM
- quite a bit of misc stuff - drivers, fork, exec, signals, etc.
- printk updates
- firmware
- checkpatch
- nilfs2
- more kexec stuff than usual
- rapidio updates
- w1 things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (111 commits)
ipc: delete "nr_ipc_ns"
kcov: allow more fine-grained coverage instrumentation
init/Kconfig: add clarification for out-of-tree modules
config: add android config fragments
init/Kconfig: ban CONFIG_LOCALVERSION_AUTO with allmodconfig
relay: add global mode support for buffer-only channels
init: allow blacklisting of module_init functions
w1:omap_hdq: fix regression
w1: add helper macro module_w1_family
w1: remove need for ida and use PLATFORM_DEVID_AUTO
rapidio/switches: add driver for IDT gen3 switches
powerpc/fsl_rio: apply changes for RIO spec rev 3
rapidio: modify for rev.3 specification changes
rapidio: change inbound window size type to u64
rapidio/idt_gen2: fix locking warning
rapidio: fix error handling in mbox request/release functions
rapidio/tsi721_dma: advance queue processing from transfer submit call
rapidio/tsi721: add messaging mbox selector parameter
rapidio/tsi721: add PCIe MRRS override parameter
rapidio/tsi721_dma: add channel mask and queue size parameters
...
206 files changed, 5749 insertions, 2012 deletions
@@ -92,6 +92,8 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com> | |||
92 | Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 92 | Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
93 | Leonid I Ananiev <leonid.i.ananiev@intel.com> | 93 | Leonid I Ananiev <leonid.i.ananiev@intel.com> |
94 | Linas Vepstas <linas@austin.ibm.com> | 94 | Linas Vepstas <linas@austin.ibm.com> |
95 | Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> | ||
96 | Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> | ||
95 | Mark Brown <broonie@sirena.org.uk> | 97 | Mark Brown <broonie@sirena.org.uk> |
96 | Matthieu CASTET <castet.matthieu@free.fr> | 98 | Matthieu CASTET <castet.matthieu@free.fr> |
97 | Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com> | 99 | Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com> |
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index 5b21ef76f751..c0727dc36271 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt | |||
@@ -267,7 +267,8 @@ among NILFS2 files can be depicted as follows: | |||
267 | `-- file (ino=yy) | 267 | `-- file (ino=yy) |
268 | ( regular file, directory, or symlink ) | 268 | ( regular file, directory, or symlink ) |
269 | 269 | ||
270 | For detail on the format of each file, please see include/linux/nilfs2_fs.h. | 270 | For detail on the format of each file, please see nilfs2_ondisk.h |
271 | located at include/uapi/linux directory. | ||
271 | 272 | ||
272 | There are no patents or other intellectual property that we protect | 273 | There are no patents or other intellectual property that we protect |
273 | with regard to the design of NILFS2. It is allowed to replicate the | 274 | with regard to the design of NILFS2. It is allowed to replicate the |
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 56af5e43e9c0..81c7f2bb7daf 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt | |||
@@ -248,7 +248,7 @@ Code Seq#(hex) Include File Comments | |||
248 | 'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! | 248 | 'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! |
249 | 'm' 00-1F net/irda/irmod.h conflict! | 249 | 'm' 00-1F net/irda/irmod.h conflict! |
250 | 'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c | 250 | 'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c |
251 | 'n' 80-8F linux/nilfs2_fs.h NILFS2 | 251 | 'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 |
252 | 'n' E0-FF linux/matroxfb.h matroxfb | 252 | 'n' E0-FF linux/matroxfb.h matroxfb |
253 | 'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 | 253 | 'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 |
254 | 'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) | 254 | 'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 642029012059..00e4c2f615a8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -3182,6 +3182,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3182 | Format: <bool> (1/Y/y=enable, 0/N/n=disable) | 3182 | Format: <bool> (1/Y/y=enable, 0/N/n=disable) |
3183 | default: disabled | 3183 | default: disabled |
3184 | 3184 | ||
3185 | printk.devkmsg={on,off,ratelimit} | ||
3186 | Control writing to /dev/kmsg. | ||
3187 | on - unlimited logging to /dev/kmsg from userspace | ||
3188 | off - logging to /dev/kmsg disabled | ||
3189 | ratelimit - ratelimit the logging | ||
3190 | Default: ratelimit | ||
3191 | |||
3185 | printk.time= Show timing data prefixed to each printk message line | 3192 | printk.time= Show timing data prefixed to each printk message line |
3186 | Format: <bool> (1/Y/y=enable, 0/N/n=disable) | 3193 | Format: <bool> (1/Y/y=enable, 0/N/n=disable) |
3187 | 3194 | ||
diff --git a/Documentation/rapidio/mport_cdev.txt b/Documentation/rapidio/mport_cdev.txt index 20c120d4b3b8..6e491a662461 100644 --- a/Documentation/rapidio/mport_cdev.txt +++ b/Documentation/rapidio/mport_cdev.txt | |||
@@ -82,8 +82,7 @@ III. Module parameters | |||
82 | 82 | ||
83 | - 'dbg_level' - This parameter allows to control amount of debug information | 83 | - 'dbg_level' - This parameter allows to control amount of debug information |
84 | generated by this device driver. This parameter is formed by set of | 84 | generated by this device driver. This parameter is formed by set of |
85 | This parameter can be changed bit masks that correspond to the specific | 85 | bit masks that correspond to the specific functional blocks. |
86 | functional block. | ||
87 | For mask definitions see 'drivers/rapidio/devices/rio_mport_cdev.c' | 86 | For mask definitions see 'drivers/rapidio/devices/rio_mport_cdev.c' |
88 | This parameter can be changed dynamically. | 87 | This parameter can be changed dynamically. |
89 | Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level. | 88 | Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level. |
diff --git a/Documentation/rapidio/rio_cm.txt b/Documentation/rapidio/rio_cm.txt new file mode 100644 index 000000000000..27aa401f1126 --- /dev/null +++ b/Documentation/rapidio/rio_cm.txt | |||
@@ -0,0 +1,119 @@ | |||
1 | RapidIO subsystem Channelized Messaging character device driver (rio_cm.c) | ||
2 | ========================================================================== | ||
3 | |||
4 | Version History: | ||
5 | ---------------- | ||
6 | 1.0.0 - Initial driver release. | ||
7 | |||
8 | ========================================================================== | ||
9 | |||
10 | I. Overview | ||
11 | |||
12 | This device driver is the result of collaboration within the RapidIO.org | ||
13 | Software Task Group (STG) between Texas Instruments, Prodrive Technologies, | ||
14 | Nokia Networks, BAE and IDT. Additional input was received from other members | ||
15 | of RapidIO.org. | ||
16 | |||
17 | The objective was to create a character mode driver interface which exposes | ||
18 | messaging capabilities of RapidIO endpoint devices (mports) directly | ||
19 | to applications, in a manner that allows the numerous and varied RapidIO | ||
20 | implementations to interoperate. | ||
21 | |||
22 | This driver (RIO_CM) provides to user-space applications shared access to | ||
23 | RapidIO mailbox messaging resources. | ||
24 | |||
25 | RapidIO specification (Part 2) defines that endpoint devices may have up to four | ||
26 | messaging mailboxes in case of multi-packet message (up to 4KB) and | ||
27 | up to 64 mailboxes if single-packet messages (up to 256 B) are used. In addition | ||
28 | to protocol definition limitations, a particular hardware implementation can | ||
29 | have reduced number of messaging mailboxes. RapidIO aware applications must | ||
30 | therefore share the messaging resources of a RapidIO endpoint. | ||
31 | |||
32 | Main purpose of this device driver is to provide RapidIO mailbox messaging | ||
33 | capability to large number of user-space processes by introducing socket-like | ||
34 | operations using a single messaging mailbox. This allows applications to | ||
35 | use the limited RapidIO messaging hardware resources efficiently. | ||
36 | |||
37 | Most of device driver's operations are supported through 'ioctl' system calls. | ||
38 | |||
39 | When loaded this device driver creates a single file system node named rio_cm | ||
40 | in /dev directory common for all registered RapidIO mport devices. | ||
41 | |||
42 | Following ioctl commands are available to user-space applications: | ||
43 | |||
44 | - RIO_CM_MPORT_GET_LIST : Returns to caller list of local mport devices that | ||
45 | support messaging operations (number of entries up to RIO_MAX_MPORTS). | ||
46 | Each list entry is combination of mport's index in the system and RapidIO | ||
47 | destination ID assigned to the port. | ||
48 | - RIO_CM_EP_GET_LIST_SIZE : Returns number of messaging capable remote endpoints | ||
49 | in a RapidIO network associated with the specified mport device. | ||
50 | - RIO_CM_EP_GET_LIST : Returns list of RapidIO destination IDs for messaging | ||
51 | capable remote endpoints (peers) available in a RapidIO network associated | ||
52 | with the specified mport device. | ||
53 | - RIO_CM_CHAN_CREATE : Creates RapidIO message exchange channel data structure | ||
54 | with channel ID assigned automatically or as requested by a caller. | ||
55 | - RIO_CM_CHAN_BIND : Binds the specified channel data structure to the specified | ||
56 | mport device. | ||
57 | - RIO_CM_CHAN_LISTEN : Enables listening for connection requests on the specified | ||
58 | channel. | ||
59 | - RIO_CM_CHAN_ACCEPT : Accepts a connection request from peer on the specified | ||
60 | channel. If wait timeout for this request is specified by a caller it is | ||
61 | a blocking call. If timeout set to 0 this is non-blocking call - ioctl | ||
62 | handler checks for a pending connection request and if one is not available | ||
63 | exits with -EAGAIN error status immediately. | ||
64 | - RIO_CM_CHAN_CONNECT : Sends a connection request to a remote peer/channel. | ||
65 | - RIO_CM_CHAN_SEND : Sends a data message through the specified channel. | ||
66 | The handler for this request assumes that message buffer specified by | ||
67 | a caller includes the reserved space for a packet header required by | ||
68 | this driver. | ||
69 | - RIO_CM_CHAN_RECEIVE : Receives a data message through a connected channel. | ||
70 | If the channel does not have an incoming message ready to return this ioctl | ||
71 | handler will wait for new message until timeout specified by a caller | ||
72 | expires. If timeout value is set to 0, ioctl handler uses a default value | ||
73 | defined by MAX_SCHEDULE_TIMEOUT. | ||
74 | - RIO_CM_CHAN_CLOSE : Closes a specified channel and frees associated buffers. | ||
75 | If the specified channel is in the CONNECTED state, sends close notification | ||
76 | to the remote peer. | ||
77 | |||
78 | The ioctl command codes and corresponding data structures intended for use by | ||
79 | user-space applications are defined in 'include/uapi/linux/rio_cm_cdev.h'. | ||
80 | |||
81 | II. Hardware Compatibility | ||
82 | |||
83 | This device driver uses standard interfaces defined by kernel RapidIO subsystem | ||
84 | and therefore it can be used with any mport device driver registered by RapidIO | ||
85 | subsystem with limitations set by available mport HW implementation of messaging | ||
86 | mailboxes. | ||
87 | |||
88 | III. Module parameters | ||
89 | |||
90 | - 'dbg_level' - This parameter allows to control amount of debug information | ||
91 | generated by this device driver. This parameter is formed by set of | ||
92 | bit masks that correspond to the specific functional block. | ||
93 | For mask definitions see 'drivers/rapidio/devices/rio_cm.c' | ||
94 | This parameter can be changed dynamically. | ||
95 | Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level. | ||
96 | |||
97 | - 'cmbox' - Number of RapidIO mailbox to use (default value is 1). | ||
98 | This parameter allows to set messaging mailbox number that will be used | ||
99 | within entire RapidIO network. It can be used when default mailbox is | ||
100 | used by other device drivers or is not supported by some nodes in the | ||
101 | RapidIO network. | ||
102 | |||
103 | - 'chstart' - Start channel number for dynamic assignment. Default value - 256. | ||
104 | Allows to exclude channel numbers below this parameter from dynamic | ||
105 | allocation to avoid conflicts with software components that use | ||
106 | reserved predefined channel numbers. | ||
107 | |||
108 | IV. Known problems | ||
109 | |||
110 | None. | ||
111 | |||
112 | V. User-space Applications and API Library | ||
113 | |||
114 | Messaging API library and applications that use this device driver are available | ||
115 | from RapidIO.org. | ||
116 | |||
117 | VI. TODO List | ||
118 | |||
119 | - Add support for system notification messages (reserved channel 0). | ||
diff --git a/Documentation/rapidio/tsi721.txt b/Documentation/rapidio/tsi721.txt index 7c1c7bf48ec0..cd2a2935d51d 100644 --- a/Documentation/rapidio/tsi721.txt +++ b/Documentation/rapidio/tsi721.txt | |||
@@ -25,6 +25,32 @@ fully compatible with RIONET driver (Ethernet over RapidIO messaging services). | |||
25 | This parameter can be changed dynamically. | 25 | This parameter can be changed dynamically. |
26 | Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level. | 26 | Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level. |
27 | 27 | ||
28 | - 'dma_desc_per_channel' - This parameter defines number of hardware buffer | ||
29 | descriptors allocated for each registered Tsi721 DMA channel. | ||
30 | Its default value is 128. | ||
31 | |||
32 | - 'dma_txqueue_sz' - DMA transactions queue size. Defines number of pending | ||
33 | transaction requests that can be accepted by each DMA channel. | ||
34 | Default value is 16. | ||
35 | |||
36 | - 'dma_sel' - DMA channel selection mask. Bitmask that defines which hardware | ||
37 | DMA channels (0 ... 6) will be registered with DmaEngine core. | ||
38 | If bit is set to 1, the corresponding DMA channel will be registered. | ||
39 | DMA channels not selected by this mask will not be used by this device | ||
40 | driver. Default value is 0x7f (use all channels). | ||
41 | |||
42 | - 'pcie_mrrs' - override value for PCIe Maximum Read Request Size (MRRS). | ||
43 | This parameter gives an ability to override MRRS value set during PCIe | ||
44 | configuration process. Tsi721 supports read request sizes up to 4096B. | ||
45 | Value for this parameter must be set as defined by PCIe specification: | ||
46 | 0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B and 5 = 4096B. | ||
47 | Default value is '-1' (= keep platform setting). | ||
48 | |||
49 | - 'mbox_sel' - RIO messaging MBOX selection mask. This is a bitmask that defines | ||
50 | which messaging MBOXes are managed by this device driver. Mask bits 0 - 3 | ||
51 | correspond to MBOX0 - MBOX3. MBOX is under driver's control if the | ||
52 | corresponding bit is set to '1'. Default value is 0x0f (= all). | ||
53 | |||
28 | II. Known problems | 54 | II. Known problems |
29 | 55 | ||
30 | None. | 56 | None. |
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 33204604de6c..ffab8b5caa60 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -764,6 +764,20 @@ send before ratelimiting kicks in. | |||
764 | 764 | ||
765 | ============================================================== | 765 | ============================================================== |
766 | 766 | ||
767 | printk_devkmsg: | ||
768 | |||
769 | Control the logging to /dev/kmsg from userspace: | ||
770 | |||
771 | ratelimit: default, ratelimited | ||
772 | on: unlimited logging to /dev/kmsg from userspace | ||
773 | off: logging to /dev/kmsg disabled | ||
774 | |||
775 | The kernel command line parameter printk.devkmsg= overrides this and is | ||
776 | a one-time setting until next reboot: once set, it cannot be changed by | ||
777 | this sysctl interface anymore. | ||
778 | |||
779 | ============================================================== | ||
780 | |||
767 | randomize_va_space: | 781 | randomize_va_space: |
768 | 782 | ||
769 | This option can be used to select the type of process address | 783 | This option can be used to select the type of process address |
diff --git a/MAINTAINERS b/MAINTAINERS index 10074ff03c57..429fc61bee81 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -778,6 +778,11 @@ W: http://ez.analog.com/community/linux-device-drivers | |||
778 | S: Supported | 778 | S: Supported |
779 | F: drivers/dma/dma-axi-dmac.c | 779 | F: drivers/dma/dma-axi-dmac.c |
780 | 780 | ||
781 | ANDROID CONFIG FRAGMENTS | ||
782 | M: Rob Herring <robh@kernel.org> | ||
783 | S: Supported | ||
784 | F: kernel/configs/android* | ||
785 | |||
781 | ANDROID DRIVERS | 786 | ANDROID DRIVERS |
782 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 787 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
783 | M: Arve Hjønnevåg <arve@android.com> | 788 | M: Arve Hjønnevåg <arve@android.com> |
@@ -2346,7 +2351,10 @@ S: Supported | |||
2346 | F: drivers/media/platform/sti/bdisp | 2351 | F: drivers/media/platform/sti/bdisp |
2347 | 2352 | ||
2348 | BEFS FILE SYSTEM | 2353 | BEFS FILE SYSTEM |
2349 | S: Orphan | 2354 | M: Luis de Bethencourt <luisbg@osg.samsung.com> |
2355 | M: Salah Triki <salah.triki@gmail.com> | ||
2356 | S: Maintained | ||
2357 | T: git git://github.com/luisbg/linux-befs.git | ||
2350 | F: Documentation/filesystems/befs.txt | 2358 | F: Documentation/filesystems/befs.txt |
2351 | F: fs/befs/ | 2359 | F: fs/befs/ |
2352 | 2360 | ||
@@ -8264,8 +8272,9 @@ T: git git://github.com/konis/nilfs2.git | |||
8264 | S: Supported | 8272 | S: Supported |
8265 | F: Documentation/filesystems/nilfs2.txt | 8273 | F: Documentation/filesystems/nilfs2.txt |
8266 | F: fs/nilfs2/ | 8274 | F: fs/nilfs2/ |
8267 | F: include/linux/nilfs2_fs.h | ||
8268 | F: include/trace/events/nilfs2.h | 8275 | F: include/trace/events/nilfs2.h |
8276 | F: include/uapi/linux/nilfs2_api.h | ||
8277 | F: include/uapi/linux/nilfs2_ondisk.h | ||
8269 | 8278 | ||
8270 | NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER | 8279 | NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER |
8271 | M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> | 8280 | M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> |
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 32e920a83ae5..e9e90bfa2b50 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h | |||
@@ -86,33 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8"); | |||
86 | #define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */ | 86 | #define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */ |
87 | #define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ | 87 | #define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ |
88 | #define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */ | 88 | #define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */ |
89 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | ||
90 | |||
91 | #ifndef __ASSEMBLY__ | ||
92 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
93 | static inline void set_restore_sigmask(void) | ||
94 | { | ||
95 | struct thread_info *ti = current_thread_info(); | ||
96 | ti->status |= TS_RESTORE_SIGMASK; | ||
97 | WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); | ||
98 | } | ||
99 | static inline void clear_restore_sigmask(void) | ||
100 | { | ||
101 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
102 | } | ||
103 | static inline bool test_restore_sigmask(void) | ||
104 | { | ||
105 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
106 | } | ||
107 | static inline bool test_and_clear_restore_sigmask(void) | ||
108 | { | ||
109 | struct thread_info *ti = current_thread_info(); | ||
110 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
111 | return false; | ||
112 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
113 | return true; | ||
114 | } | ||
115 | #endif | ||
116 | 89 | ||
117 | #define SET_UNALIGN_CTL(task,value) ({ \ | 90 | #define SET_UNALIGN_CTL(task,value) ({ \ |
118 | __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ | 91 | __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ |
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index f54bdf658cd0..d3398f6ab74c 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h | |||
@@ -137,7 +137,7 @@ | |||
137 | #define __initmv __initdata | 137 | #define __initmv __initdata |
138 | #define ALIAS_MV(x) | 138 | #define ALIAS_MV(x) |
139 | #else | 139 | #else |
140 | #define __initmv __initdata_refok | 140 | #define __initmv __refdata |
141 | 141 | ||
142 | /* GCC actually has a syntax for defining aliases, but is under some | 142 | /* GCC actually has a syntax for defining aliases, but is under some |
143 | delusion that you shouldn't be able to declare it extern somewhere | 143 | delusion that you shouldn't be able to declare it extern somewhere |
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 8be930394750..399e2f223d25 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c | |||
@@ -220,7 +220,7 @@ void __init mem_init(void) | |||
220 | /* | 220 | /* |
221 | * free_initmem: Free all the __init memory. | 221 | * free_initmem: Free all the __init memory. |
222 | */ | 222 | */ |
223 | void __init_refok free_initmem(void) | 223 | void __ref free_initmem(void) |
224 | { | 224 | { |
225 | free_initmem_default(-1); | 225 | free_initmem_default(-1); |
226 | } | 226 | } |
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi index e23f46d15c80..00cb314d5e4d 100644 --- a/arch/arm/boot/dts/keystone.dtsi +++ b/arch/arm/boot/dts/keystone.dtsi | |||
@@ -70,6 +70,14 @@ | |||
70 | cpu_on = <0x84000003>; | 70 | cpu_on = <0x84000003>; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | psci { | ||
74 | compatible = "arm,psci"; | ||
75 | method = "smc"; | ||
76 | cpu_suspend = <0x84000001>; | ||
77 | cpu_off = <0x84000002>; | ||
78 | cpu_on = <0x84000003>; | ||
79 | }; | ||
80 | |||
73 | soc { | 81 | soc { |
74 | #address-cells = <1>; | 82 | #address-cells = <1>; |
75 | #size-cells = <1>; | 83 | #size-cells = <1>; |
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index c2b9b4bdec00..1869af6bac5c 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h | |||
@@ -53,6 +53,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs, | |||
53 | /* Function pointer to optional machine-specific reinitialization */ | 53 | /* Function pointer to optional machine-specific reinitialization */ |
54 | extern void (*kexec_reinit)(void); | 54 | extern void (*kexec_reinit)(void); |
55 | 55 | ||
56 | static inline unsigned long phys_to_boot_phys(phys_addr_t phys) | ||
57 | { | ||
58 | return phys_to_idmap(phys); | ||
59 | } | ||
60 | #define phys_to_boot_phys phys_to_boot_phys | ||
61 | |||
62 | static inline phys_addr_t boot_phys_to_phys(unsigned long entry) | ||
63 | { | ||
64 | return idmap_to_phys(entry); | ||
65 | } | ||
66 | #define boot_phys_to_phys boot_phys_to_phys | ||
67 | |||
68 | static inline unsigned long page_to_boot_pfn(struct page *page) | ||
69 | { | ||
70 | return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT); | ||
71 | } | ||
72 | #define page_to_boot_pfn page_to_boot_pfn | ||
73 | |||
74 | static inline struct page *boot_pfn_to_page(unsigned long boot_pfn) | ||
75 | { | ||
76 | return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT)); | ||
77 | } | ||
78 | #define boot_pfn_to_page boot_pfn_to_page | ||
79 | |||
56 | #endif /* __ASSEMBLY__ */ | 80 | #endif /* __ASSEMBLY__ */ |
57 | 81 | ||
58 | #endif /* CONFIG_KEXEC */ | 82 | #endif /* CONFIG_KEXEC */ |
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 59fd0e24c56b..b18c1ea56bed 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
@@ -57,7 +57,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
57 | for (i = 0; i < image->nr_segments; i++) { | 57 | for (i = 0; i < image->nr_segments; i++) { |
58 | current_segment = &image->segment[i]; | 58 | current_segment = &image->segment[i]; |
59 | 59 | ||
60 | if (!memblock_is_region_memory(current_segment->mem, | 60 | if (!memblock_is_region_memory(idmap_to_phys(current_segment->mem), |
61 | current_segment->memsz)) | 61 | current_segment->memsz)) |
62 | return -EINVAL; | 62 | return -EINVAL; |
63 | 63 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index da2f6c360f6b..df7f2a75e769 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -848,10 +848,29 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) | |||
848 | kernel_data.end = virt_to_phys(_end - 1); | 848 | kernel_data.end = virt_to_phys(_end - 1); |
849 | 849 | ||
850 | for_each_memblock(memory, region) { | 850 | for_each_memblock(memory, region) { |
851 | phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); | ||
852 | phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; | ||
853 | unsigned long boot_alias_start; | ||
854 | |||
855 | /* | ||
856 | * Some systems have a special memory alias which is only | ||
857 | * used for booting. We need to advertise this region to | ||
858 | * kexec-tools so they know where bootable RAM is located. | ||
859 | */ | ||
860 | boot_alias_start = phys_to_idmap(start); | ||
861 | if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { | ||
862 | res = memblock_virt_alloc(sizeof(*res), 0); | ||
863 | res->name = "System RAM (boot alias)"; | ||
864 | res->start = boot_alias_start; | ||
865 | res->end = phys_to_idmap(end); | ||
866 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
867 | request_resource(&iomem_resource, res); | ||
868 | } | ||
869 | |||
851 | res = memblock_virt_alloc(sizeof(*res), 0); | 870 | res = memblock_virt_alloc(sizeof(*res), 0); |
852 | res->name = "System RAM"; | 871 | res->name = "System RAM"; |
853 | res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); | 872 | res->start = start; |
854 | res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; | 873 | res->end = end; |
855 | res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; | 874 | res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; |
856 | 875 | ||
857 | request_resource(&iomem_resource, res); | 876 | request_resource(&iomem_resource, res); |
@@ -1000,9 +1019,25 @@ static void __init reserve_crashkernel(void) | |||
1000 | (unsigned long)(crash_base >> 20), | 1019 | (unsigned long)(crash_base >> 20), |
1001 | (unsigned long)(total_mem >> 20)); | 1020 | (unsigned long)(total_mem >> 20)); |
1002 | 1021 | ||
1022 | /* The crashk resource must always be located in normal mem */ | ||
1003 | crashk_res.start = crash_base; | 1023 | crashk_res.start = crash_base; |
1004 | crashk_res.end = crash_base + crash_size - 1; | 1024 | crashk_res.end = crash_base + crash_size - 1; |
1005 | insert_resource(&iomem_resource, &crashk_res); | 1025 | insert_resource(&iomem_resource, &crashk_res); |
1026 | |||
1027 | if (arm_has_idmap_alias()) { | ||
1028 | /* | ||
1029 | * If we have a special RAM alias for use at boot, we | ||
1030 | * need to advertise to kexec tools where the alias is. | ||
1031 | */ | ||
1032 | static struct resource crashk_boot_res = { | ||
1033 | .name = "Crash kernel (boot alias)", | ||
1034 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | ||
1035 | }; | ||
1036 | |||
1037 | crashk_boot_res.start = phys_to_idmap(crash_base); | ||
1038 | crashk_boot_res.end = crashk_boot_res.start + crash_size - 1; | ||
1039 | insert_resource(&iomem_resource, &crashk_boot_res); | ||
1040 | } | ||
1006 | } | 1041 | } |
1007 | #else | 1042 | #else |
1008 | static inline void reserve_crashkernel(void) {} | 1043 | static inline void reserve_crashkernel(void) {} |
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index 38b0da300dd5..ed9a01484030 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c | |||
@@ -320,11 +320,11 @@ static struct impd1_device impd1_devs[] = { | |||
320 | #define IMPD1_VALID_IRQS 0x00000bffU | 320 | #define IMPD1_VALID_IRQS 0x00000bffU |
321 | 321 | ||
322 | /* | 322 | /* |
323 | * As this module is bool, it is OK to have this as __init_refok() - no | 323 | * As this module is bool, it is OK to have this as __ref() - no |
324 | * probe calls will be done after the initial system bootup, as devices | 324 | * probe calls will be done after the initial system bootup, as devices |
325 | * are discovered as part of the machine startup. | 325 | * are discovered as part of the machine startup. |
326 | */ | 326 | */ |
327 | static int __init_refok impd1_probe(struct lm_device *dev) | 327 | static int __ref impd1_probe(struct lm_device *dev) |
328 | { | 328 | { |
329 | struct impd1_module *impd1; | 329 | struct impd1_module *impd1; |
330 | int irq_base; | 330 | int irq_base; |
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 45a05207b418..6af5430d0d97 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c | |||
@@ -343,7 +343,7 @@ void __init mv78xx0_init_early(void) | |||
343 | DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ); | 343 | DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ); |
344 | } | 344 | } |
345 | 345 | ||
346 | void __init_refok mv78xx0_timer_init(void) | 346 | void __ref mv78xx0_timer_init(void) |
347 | { | 347 | { |
348 | orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR, | 348 | orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR, |
349 | IRQ_MV78XX0_TIMER_1, get_tclk()); | 349 | IRQ_MV78XX0_TIMER_1, get_tclk()); |
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c index 166842de3dc7..b59cd7c3261a 100644 --- a/arch/blackfin/mm/init.c +++ b/arch/blackfin/mm/init.c | |||
@@ -112,7 +112,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
112 | } | 112 | } |
113 | #endif | 113 | #endif |
114 | 114 | ||
115 | void __init_refok free_initmem(void) | 115 | void __ref free_initmem(void) |
116 | { | 116 | { |
117 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU | 117 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU |
118 | free_initmem_default(-1); | 118 | free_initmem_default(-1); |
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 88977e42af0a..192584d5ac2f 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c | |||
@@ -93,7 +93,7 @@ void __init mem_init(void) | |||
93 | * Todo: free pages between __init_begin and __init_end; possibly | 93 | * Todo: free pages between __init_begin and __init_end; possibly |
94 | * some devtree related stuff as well. | 94 | * some devtree related stuff as well. |
95 | */ | 95 | */ |
96 | void __init_refok free_initmem(void) | 96 | void __ref free_initmem(void) |
97 | { | 97 | { |
98 | } | 98 | } |
99 | 99 | ||
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index d1212b84fb83..29bd59790d6c 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h | |||
@@ -121,32 +121,4 @@ struct thread_info { | |||
121 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ | 121 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ |
122 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) | 122 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) |
123 | 123 | ||
124 | #define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */ | ||
125 | |||
126 | #ifndef __ASSEMBLY__ | ||
127 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
128 | static inline void set_restore_sigmask(void) | ||
129 | { | ||
130 | struct thread_info *ti = current_thread_info(); | ||
131 | ti->status |= TS_RESTORE_SIGMASK; | ||
132 | WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); | ||
133 | } | ||
134 | static inline void clear_restore_sigmask(void) | ||
135 | { | ||
136 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
137 | } | ||
138 | static inline bool test_restore_sigmask(void) | ||
139 | { | ||
140 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
141 | } | ||
142 | static inline bool test_and_clear_restore_sigmask(void) | ||
143 | { | ||
144 | struct thread_info *ti = current_thread_info(); | ||
145 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
146 | return false; | ||
147 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
148 | return true; | ||
149 | } | ||
150 | #endif /* !__ASSEMBLY__ */ | ||
151 | |||
152 | #endif /* _ASM_IA64_THREAD_INFO_H */ | 124 | #endif /* _ASM_IA64_THREAD_INFO_H */ |
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index b72cd7a07222..599507bcec91 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c | |||
@@ -163,7 +163,7 @@ void arch_crash_save_vmcoreinfo(void) | |||
163 | #endif | 163 | #endif |
164 | } | 164 | } |
165 | 165 | ||
166 | unsigned long paddr_vmcoreinfo_note(void) | 166 | phys_addr_t paddr_vmcoreinfo_note(void) |
167 | { | 167 | { |
168 | return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note); | 168 | return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note); |
169 | } | 169 | } |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 07a4e32ae96a..eb9220cde76c 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, | |||
1831 | } | 1831 | } |
1832 | 1832 | ||
1833 | /* Caller prevents this from being called after init */ | 1833 | /* Caller prevents this from being called after init */ |
1834 | static void * __init_refok mca_bootmem(void) | 1834 | static void * __ref mca_bootmem(void) |
1835 | { | 1835 | { |
1836 | return __alloc_bootmem(sizeof(struct ia64_mca_cpu), | 1836 | return __alloc_bootmem(sizeof(struct ia64_mca_cpu), |
1837 | KERNEL_STACK_SIZE, 0); | 1837 | KERNEL_STACK_SIZE, 0); |
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 383f387b4eee..e7e8954e9815 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h | |||
@@ -148,33 +148,6 @@ static inline struct thread_info *current_thread_info(void) | |||
148 | */ | 148 | */ |
149 | /* FPU was used by this task this quantum (SMP) */ | 149 | /* FPU was used by this task this quantum (SMP) */ |
150 | #define TS_USEDFPU 0x0001 | 150 | #define TS_USEDFPU 0x0001 |
151 | #define TS_RESTORE_SIGMASK 0x0002 | ||
152 | |||
153 | #ifndef __ASSEMBLY__ | ||
154 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
155 | static inline void set_restore_sigmask(void) | ||
156 | { | ||
157 | struct thread_info *ti = current_thread_info(); | ||
158 | ti->status |= TS_RESTORE_SIGMASK; | ||
159 | WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); | ||
160 | } | ||
161 | static inline void clear_restore_sigmask(void) | ||
162 | { | ||
163 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
164 | } | ||
165 | static inline bool test_restore_sigmask(void) | ||
166 | { | ||
167 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
168 | } | ||
169 | static inline bool test_and_clear_restore_sigmask(void) | ||
170 | { | ||
171 | struct thread_info *ti = current_thread_info(); | ||
172 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
173 | return false; | ||
174 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
175 | return true; | ||
176 | } | ||
177 | #endif | ||
178 | 151 | ||
179 | #endif /* __KERNEL__ */ | 152 | #endif /* __KERNEL__ */ |
180 | #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ | 153 | #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ |
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 77bc7c7e6522..434639f9a3a6 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -414,7 +414,7 @@ void __init *early_get_page(void) | |||
414 | 414 | ||
415 | #endif /* CONFIG_MMU */ | 415 | #endif /* CONFIG_MMU */ |
416 | 416 | ||
417 | void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) | 417 | void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask) |
418 | { | 418 | { |
419 | if (mem_init_done) | 419 | if (mem_init_done) |
420 | return kmalloc(size, mask); | 420 | return kmalloc(size, mask); |
@@ -422,7 +422,7 @@ void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) | |||
422 | return alloc_bootmem(size); | 422 | return alloc_bootmem(size); |
423 | } | 423 | } |
424 | 424 | ||
425 | void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) | 425 | void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) |
426 | { | 426 | { |
427 | void *p; | 427 | void *p; |
428 | 428 | ||
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index eb99fcc76088..cc732fe357ad 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c | |||
@@ -234,7 +234,7 @@ unsigned long iopa(unsigned long addr) | |||
234 | return pa; | 234 | return pa; |
235 | } | 235 | } |
236 | 236 | ||
237 | __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 237 | __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
238 | unsigned long address) | 238 | unsigned long address) |
239 | { | 239 | { |
240 | pte_t *pte; | 240 | pte_t *pte; |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 9b58eb5fd0d5..a5509e7dcad2 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -504,7 +504,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
504 | 504 | ||
505 | void (*free_init_pages_eva)(void *begin, void *end) = NULL; | 505 | void (*free_init_pages_eva)(void *begin, void *end) = NULL; |
506 | 506 | ||
507 | void __init_refok free_initmem(void) | 507 | void __ref free_initmem(void) |
508 | { | 508 | { |
509 | prom_free_prom_memory(); | 509 | prom_free_prom_memory(); |
510 | /* | 510 | /* |
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index a77698ff2b6f..1f6bc9a3036c 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c | |||
@@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq) | |||
268 | return err; | 268 | return err; |
269 | } | 269 | } |
270 | 270 | ||
271 | static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev) | 271 | static void __ref quirk_slc90e66_bridge(struct pci_dev *dev) |
272 | { | 272 | { |
273 | int irq; /* PCI/ISA Bridge interrupt */ | 273 | int irq; /* PCI/ISA Bridge interrupt */ |
274 | u8 reg_64; | 274 | u8 reg_64; |
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index e75c75d249d6..c92fe4234009 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c | |||
@@ -89,7 +89,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
89 | } | 89 | } |
90 | #endif | 90 | #endif |
91 | 91 | ||
92 | void __init_refok free_initmem(void) | 92 | void __ref free_initmem(void) |
93 | { | 93 | { |
94 | free_initmem_default(-1); | 94 | free_initmem_default(-1); |
95 | } | 95 | } |
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index 5b2a95116e8f..fa60b81aee3e 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c | |||
@@ -38,7 +38,7 @@ static unsigned int fixmaps_used __initdata; | |||
38 | * have to convert them into an offset in a page-aligned mapping, but the | 38 | * have to convert them into an offset in a page-aligned mapping, but the |
39 | * caller shouldn't need to know that small detail. | 39 | * caller shouldn't need to know that small detail. |
40 | */ | 40 | */ |
41 | void __iomem *__init_refok | 41 | void __iomem *__ref |
42 | __ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot) | 42 | __ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot) |
43 | { | 43 | { |
44 | phys_addr_t p; | 44 | phys_addr_t p; |
@@ -116,7 +116,7 @@ void iounmap(void *addr) | |||
116 | * the memblock infrastructure. | 116 | * the memblock infrastructure. |
117 | */ | 117 | */ |
118 | 118 | ||
119 | pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm, | 119 | pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm, |
120 | unsigned long address) | 120 | unsigned long address) |
121 | { | 121 | { |
122 | pte_t *pte; | 122 | pte_t *pte; |
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 2563c435a4b1..fc420cedecae 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h | |||
@@ -31,13 +31,13 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) | |||
31 | } | 31 | } |
32 | #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) | 32 | #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) |
33 | 33 | ||
34 | static inline int arch_validate_prot(unsigned long prot) | 34 | static inline bool arch_validate_prot(unsigned long prot) |
35 | { | 35 | { |
36 | if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) | 36 | if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) |
37 | return 0; | 37 | return false; |
38 | if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) | 38 | if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) |
39 | return 0; | 39 | return false; |
40 | return 1; | 40 | return true; |
41 | } | 41 | } |
42 | #define arch_validate_prot(prot) arch_validate_prot(prot) | 42 | #define arch_validate_prot(prot) arch_validate_prot(prot) |
43 | 43 | ||
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index b21bb1f72314..87e4b2d8dcd4 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -138,40 +138,15 @@ static inline struct thread_info *current_thread_info(void) | |||
138 | /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ | 138 | /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ |
139 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ | 139 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ |
140 | #define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ | 140 | #define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ |
141 | #define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ | ||
142 | #define TLF_LAZY_MMU 3 /* tlb_batch is active */ | 141 | #define TLF_LAZY_MMU 3 /* tlb_batch is active */ |
143 | #define TLF_RUNLATCH 4 /* Is the runlatch enabled? */ | 142 | #define TLF_RUNLATCH 4 /* Is the runlatch enabled? */ |
144 | 143 | ||
145 | #define _TLF_NAPPING (1 << TLF_NAPPING) | 144 | #define _TLF_NAPPING (1 << TLF_NAPPING) |
146 | #define _TLF_SLEEPING (1 << TLF_SLEEPING) | 145 | #define _TLF_SLEEPING (1 << TLF_SLEEPING) |
147 | #define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) | ||
148 | #define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU) | 146 | #define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU) |
149 | #define _TLF_RUNLATCH (1 << TLF_RUNLATCH) | 147 | #define _TLF_RUNLATCH (1 << TLF_RUNLATCH) |
150 | 148 | ||
151 | #ifndef __ASSEMBLY__ | 149 | #ifndef __ASSEMBLY__ |
152 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
153 | static inline void set_restore_sigmask(void) | ||
154 | { | ||
155 | struct thread_info *ti = current_thread_info(); | ||
156 | ti->local_flags |= _TLF_RESTORE_SIGMASK; | ||
157 | WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); | ||
158 | } | ||
159 | static inline void clear_restore_sigmask(void) | ||
160 | { | ||
161 | current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; | ||
162 | } | ||
163 | static inline bool test_restore_sigmask(void) | ||
164 | { | ||
165 | return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK; | ||
166 | } | ||
167 | static inline bool test_and_clear_restore_sigmask(void) | ||
168 | { | ||
169 | struct thread_info *ti = current_thread_info(); | ||
170 | if (!(ti->local_flags & _TLF_RESTORE_SIGMASK)) | ||
171 | return false; | ||
172 | ti->local_flags &= ~_TLF_RESTORE_SIGMASK; | ||
173 | return true; | ||
174 | } | ||
175 | 150 | ||
176 | static inline bool test_thread_local_flags(unsigned int flags) | 151 | static inline bool test_thread_local_flags(unsigned int flags) |
177 | { | 152 | { |
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index 60b0b3fc8fc1..a58abe4afbd1 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <asm/setup.h> | 6 | #include <asm/setup.h> |
7 | 7 | ||
8 | 8 | ||
9 | void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) | 9 | void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) |
10 | { | 10 | { |
11 | void *p; | 11 | void *p; |
12 | 12 | ||
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 7f922f557936..0ae0572bc239 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -79,7 +79,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
79 | #endif | 79 | #endif |
80 | } | 80 | } |
81 | 81 | ||
82 | __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 82 | __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
83 | { | 83 | { |
84 | pte_t *pte; | 84 | pte_t *pte; |
85 | 85 | ||
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 3de4a7c85140..6b4e9d181126 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -353,12 +353,12 @@ static int pmac_late_init(void) | |||
353 | machine_late_initcall(powermac, pmac_late_init); | 353 | machine_late_initcall(powermac, pmac_late_init); |
354 | 354 | ||
355 | /* | 355 | /* |
356 | * This is __init_refok because we check for "initializing" before | 356 | * This is __ref because we check for "initializing" before |
357 | * touching any of the __init sensitive things and "initializing" | 357 | * touching any of the __init sensitive things and "initializing" |
358 | * will be false after __init time. This can't be __init because it | 358 | * will be false after __init time. This can't be __init because it |
359 | * can be called whenever a disk is first accessed. | 359 | * can be called whenever a disk is first accessed. |
360 | */ | 360 | */ |
361 | void __init_refok note_bootable_part(dev_t dev, int part, int goodness) | 361 | void __ref note_bootable_part(dev_t dev, int part, int goodness) |
362 | { | 362 | { |
363 | char *p; | 363 | char *p; |
364 | 364 | ||
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index 3f175e8aedb4..57caaf11a83f 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c | |||
@@ -189,7 +189,7 @@ fail_malloc: | |||
189 | return result; | 189 | return result; |
190 | } | 190 | } |
191 | 191 | ||
192 | static int __init_refok ps3_setup_uhc_device( | 192 | static int __ref ps3_setup_uhc_device( |
193 | const struct ps3_repository_device *repo, enum ps3_match_id match_id, | 193 | const struct ps3_repository_device *repo, enum ps3_match_id match_id, |
194 | enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) | 194 | enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) |
195 | { | 195 | { |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index f5bf38b94595..984e816f3faf 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -289,7 +289,7 @@ static void fsl_rio_inbound_mem_init(struct rio_priv *priv) | |||
289 | } | 289 | } |
290 | 290 | ||
291 | int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | 291 | int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, |
292 | u64 rstart, u32 size, u32 flags) | 292 | u64 rstart, u64 size, u32 flags) |
293 | { | 293 | { |
294 | struct rio_priv *priv = mport->priv; | 294 | struct rio_priv *priv = mport->priv; |
295 | u32 base_size; | 295 | u32 base_size; |
@@ -298,7 +298,7 @@ int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
298 | u32 riwar; | 298 | u32 riwar; |
299 | int i; | 299 | int i; |
300 | 300 | ||
301 | if ((size & (size - 1)) != 0) | 301 | if ((size & (size - 1)) != 0 || size > 0x400000000ULL) |
302 | return -EINVAL; | 302 | return -EINVAL; |
303 | 303 | ||
304 | base_size_log = ilog2(size); | 304 | base_size_log = ilog2(size); |
@@ -643,19 +643,11 @@ int fsl_rio_setup(struct platform_device *dev) | |||
643 | port->ops = ops; | 643 | port->ops = ops; |
644 | port->priv = priv; | 644 | port->priv = priv; |
645 | port->phys_efptr = 0x100; | 645 | port->phys_efptr = 0x100; |
646 | port->phys_rmap = 1; | ||
646 | priv->regs_win = rio_regs_win; | 647 | priv->regs_win = rio_regs_win; |
647 | 648 | ||
648 | /* Probe the master port phy type */ | ||
649 | ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20); | 649 | ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20); |
650 | port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; | 650 | |
651 | if (port->phy_type == RIO_PHY_PARALLEL) { | ||
652 | dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n"); | ||
653 | release_resource(&port->iores); | ||
654 | kfree(priv); | ||
655 | kfree(port); | ||
656 | continue; | ||
657 | } | ||
658 | dev_info(&dev->dev, "RapidIO PHY type: Serial\n"); | ||
659 | /* Checking the port training status */ | 651 | /* Checking the port training status */ |
660 | if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { | 652 | if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { |
661 | dev_err(&dev->dev, "Port %d is not ready. " | 653 | dev_err(&dev->dev, "Port %d is not ready. " |
@@ -705,11 +697,9 @@ int fsl_rio_setup(struct platform_device *dev) | |||
705 | ((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET : | 697 | ((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET : |
706 | RIO_INB_ATMU_REGS_PORT2_OFFSET)); | 698 | RIO_INB_ATMU_REGS_PORT2_OFFSET)); |
707 | 699 | ||
708 | 700 | /* Set to receive packets with any dest ID */ | |
709 | /* Set to receive any dist ID for serial RapidIO controller. */ | 701 | out_be32((priv->regs_win + RIO_ISR_AACR + i*0x80), |
710 | if (port->phy_type == RIO_PHY_SERIAL) | 702 | RIO_ISR_AACR_AA); |
711 | out_be32((priv->regs_win | ||
712 | + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA); | ||
713 | 703 | ||
714 | /* Configure maintenance transaction window */ | 704 | /* Configure maintenance transaction window */ |
715 | out_be32(&priv->maint_atmu_regs->rowbar, | 705 | out_be32(&priv->maint_atmu_regs->rowbar, |
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index ed5234ed8d3f..5ebd3f018295 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c | |||
@@ -112,7 +112,7 @@ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp) | |||
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | 114 | ||
115 | int __init_refok msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, | 115 | int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, |
116 | struct device_node *of_node) | 116 | struct device_node *of_node) |
117 | { | 117 | { |
118 | int size; | 118 | int size; |
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index 9fbce49ad3bd..444c26c0f750 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c | |||
@@ -91,7 +91,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
91 | } | 91 | } |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | void __init_refok free_initmem(void) | 94 | void __ref free_initmem(void) |
95 | { | 95 | { |
96 | free_initmem_default(POISON_FREE_INITMEM); | 96 | free_initmem_default(POISON_FREE_INITMEM); |
97 | } | 97 | } |
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index d5462b7bc514..84563e39a5b8 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c | |||
@@ -221,7 +221,7 @@ pcibios_bus_report_status_early(struct pci_channel *hose, | |||
221 | * We can't use pci_find_device() here since we are | 221 | * We can't use pci_find_device() here since we are |
222 | * called from interrupt context. | 222 | * called from interrupt context. |
223 | */ | 223 | */ |
224 | static void __init_refok | 224 | static void __ref |
225 | pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask, | 225 | pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask, |
226 | int warn) | 226 | int warn) |
227 | { | 227 | { |
@@ -256,7 +256,7 @@ pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask, | |||
256 | pcibios_bus_report_status(dev->subordinate, status_mask, warn); | 256 | pcibios_bus_report_status(dev->subordinate, status_mask, warn); |
257 | } | 257 | } |
258 | 258 | ||
259 | void __init_refok pcibios_report_status(unsigned int status_mask, int warn) | 259 | void __ref pcibios_report_status(unsigned int status_mask, int warn) |
260 | { | 260 | { |
261 | struct pci_channel *hose; | 261 | struct pci_channel *hose; |
262 | 262 | ||
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 2afa321157be..6c65dcd470ab 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h | |||
@@ -151,19 +151,10 @@ extern void init_thread_xstate(void); | |||
151 | * ever touches our thread-synchronous status, so we don't | 151 | * ever touches our thread-synchronous status, so we don't |
152 | * have to worry about atomic accesses. | 152 | * have to worry about atomic accesses. |
153 | */ | 153 | */ |
154 | #define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ | ||
155 | #define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ | 154 | #define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ |
156 | 155 | ||
157 | #ifndef __ASSEMBLY__ | 156 | #ifndef __ASSEMBLY__ |
158 | 157 | ||
159 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
160 | static inline void set_restore_sigmask(void) | ||
161 | { | ||
162 | struct thread_info *ti = current_thread_info(); | ||
163 | ti->status |= TS_RESTORE_SIGMASK; | ||
164 | WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); | ||
165 | } | ||
166 | |||
167 | #define TI_FLAG_FAULT_CODE_SHIFT 24 | 158 | #define TI_FLAG_FAULT_CODE_SHIFT 24 |
168 | 159 | ||
169 | /* | 160 | /* |
@@ -182,23 +173,6 @@ static inline unsigned int get_thread_fault_code(void) | |||
182 | return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT; | 173 | return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT; |
183 | } | 174 | } |
184 | 175 | ||
185 | static inline void clear_restore_sigmask(void) | ||
186 | { | ||
187 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
188 | } | ||
189 | static inline bool test_restore_sigmask(void) | ||
190 | { | ||
191 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
192 | } | ||
193 | static inline bool test_and_clear_restore_sigmask(void) | ||
194 | { | ||
195 | struct thread_info *ti = current_thread_info(); | ||
196 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
197 | return false; | ||
198 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
199 | return true; | ||
200 | } | ||
201 | |||
202 | #endif /* !__ASSEMBLY__ */ | 176 | #endif /* !__ASSEMBLY__ */ |
203 | 177 | ||
204 | #endif /* __KERNEL__ */ | 178 | #endif /* __KERNEL__ */ |
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 0c99ec2e7ed8..d09ddfe58fd8 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * have to convert them into an offset in a page-aligned mapping, but the | 34 | * have to convert them into an offset in a page-aligned mapping, but the |
35 | * caller shouldn't need to know that small detail. | 35 | * caller shouldn't need to know that small detail. |
36 | */ | 36 | */ |
37 | void __iomem * __init_refok | 37 | void __iomem * __ref |
38 | __ioremap_caller(phys_addr_t phys_addr, unsigned long size, | 38 | __ioremap_caller(phys_addr_t phys_addr, unsigned long size, |
39 | pgprot_t pgprot, void *caller) | 39 | pgprot_t pgprot, void *caller) |
40 | { | 40 | { |
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index bde59825d06c..3d7b925f6516 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h | |||
@@ -222,32 +222,8 @@ register struct thread_info *current_thread_info_reg asm("g6"); | |||
222 | * | 222 | * |
223 | * Note that there are only 8 bits available. | 223 | * Note that there are only 8 bits available. |
224 | */ | 224 | */ |
225 | #define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ | ||
226 | 225 | ||
227 | #ifndef __ASSEMBLY__ | 226 | #ifndef __ASSEMBLY__ |
228 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
229 | static inline void set_restore_sigmask(void) | ||
230 | { | ||
231 | struct thread_info *ti = current_thread_info(); | ||
232 | ti->status |= TS_RESTORE_SIGMASK; | ||
233 | WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); | ||
234 | } | ||
235 | static inline void clear_restore_sigmask(void) | ||
236 | { | ||
237 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
238 | } | ||
239 | static inline bool test_restore_sigmask(void) | ||
240 | { | ||
241 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
242 | } | ||
243 | static inline bool test_and_clear_restore_sigmask(void) | ||
244 | { | ||
245 | struct thread_info *ti = current_thread_info(); | ||
246 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
247 | return false; | ||
248 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
249 | return true; | ||
250 | } | ||
251 | 227 | ||
252 | #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) | 228 | #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) |
253 | #define test_thread_64bit_stack(__SP) \ | 229 | #define test_thread_64bit_stack(__SP) \ |
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index c1467ac59ce6..b7659b8f1117 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -166,32 +166,5 @@ extern void _cpu_idle(void); | |||
166 | #ifdef __tilegx__ | 166 | #ifdef __tilegx__ |
167 | #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ | 167 | #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ |
168 | #endif | 168 | #endif |
169 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ | ||
170 | |||
171 | #ifndef __ASSEMBLY__ | ||
172 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
173 | static inline void set_restore_sigmask(void) | ||
174 | { | ||
175 | struct thread_info *ti = current_thread_info(); | ||
176 | ti->status |= TS_RESTORE_SIGMASK; | ||
177 | WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); | ||
178 | } | ||
179 | static inline void clear_restore_sigmask(void) | ||
180 | { | ||
181 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
182 | } | ||
183 | static inline bool test_restore_sigmask(void) | ||
184 | { | ||
185 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
186 | } | ||
187 | static inline bool test_and_clear_restore_sigmask(void) | ||
188 | { | ||
189 | struct thread_info *ti = current_thread_info(); | ||
190 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
191 | return false; | ||
192 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
193 | return true; | ||
194 | } | ||
195 | #endif /* !__ASSEMBLY__ */ | ||
196 | 169 | ||
197 | #endif /* _ASM_TILE_THREAD_INFO_H */ | 170 | #endif /* _ASM_TILE_THREAD_INFO_H */ |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 89bff044a6f5..b45ffdda3549 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -219,32 +219,8 @@ static inline unsigned long current_stack_pointer(void) | |||
219 | * have to worry about atomic accesses. | 219 | * have to worry about atomic accesses. |
220 | */ | 220 | */ |
221 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ | 221 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ |
222 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | ||
223 | 222 | ||
224 | #ifndef __ASSEMBLY__ | 223 | #ifndef __ASSEMBLY__ |
225 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
226 | static inline void set_restore_sigmask(void) | ||
227 | { | ||
228 | struct thread_info *ti = current_thread_info(); | ||
229 | ti->status |= TS_RESTORE_SIGMASK; | ||
230 | WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); | ||
231 | } | ||
232 | static inline void clear_restore_sigmask(void) | ||
233 | { | ||
234 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
235 | } | ||
236 | static inline bool test_restore_sigmask(void) | ||
237 | { | ||
238 | return current_thread_info()->status & TS_RESTORE_SIGMASK; | ||
239 | } | ||
240 | static inline bool test_and_clear_restore_sigmask(void) | ||
241 | { | ||
242 | struct thread_info *ti = current_thread_info(); | ||
243 | if (!(ti->status & TS_RESTORE_SIGMASK)) | ||
244 | return false; | ||
245 | ti->status &= ~TS_RESTORE_SIGMASK; | ||
246 | return true; | ||
247 | } | ||
248 | 224 | ||
249 | static inline bool in_ia32_syscall(void) | 225 | static inline bool in_ia32_syscall(void) |
250 | { | 226 | { |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fb4c1b42fc7e..620928903be3 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -208,7 +208,7 @@ static int __meminit save_mr(struct map_range *mr, int nr_range, | |||
208 | * adjust the page_size_mask for small range to go with | 208 | * adjust the page_size_mask for small range to go with |
209 | * big page size instead small one if nearby are ram too. | 209 | * big page size instead small one if nearby are ram too. |
210 | */ | 210 | */ |
211 | static void __init_refok adjust_range_page_size_mask(struct map_range *mr, | 211 | static void __ref adjust_range_page_size_mask(struct map_range *mr, |
212 | int nr_range) | 212 | int nr_range) |
213 | { | 213 | { |
214 | int i; | 214 | int i; |
@@ -396,7 +396,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) | |||
396 | * This runs before bootmem is initialized and gets pages directly from | 396 | * This runs before bootmem is initialized and gets pages directly from |
397 | * the physical memory. To access them they are temporarily mapped. | 397 | * the physical memory. To access them they are temporarily mapped. |
398 | */ | 398 | */ |
399 | unsigned long __init_refok init_memory_mapping(unsigned long start, | 399 | unsigned long __ref init_memory_mapping(unsigned long start, |
400 | unsigned long end) | 400 | unsigned long end) |
401 | { | 401 | { |
402 | struct map_range mr[NR_RANGE_MR]; | 402 | struct map_range mr[NR_RANGE_MR]; |
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 524142117296..5fdacb322ceb 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c | |||
@@ -44,7 +44,7 @@ early_initcall(early_efi_map_fb); | |||
44 | * In case earlyprintk=efi,keep we have the whole framebuffer mapped already | 44 | * In case earlyprintk=efi,keep we have the whole framebuffer mapped already |
45 | * so just return the offset efi_fb + start. | 45 | * so just return the offset efi_fb + start. |
46 | */ | 46 | */ |
47 | static __init_refok void *early_efi_map(unsigned long start, unsigned long len) | 47 | static __ref void *early_efi_map(unsigned long start, unsigned long len) |
48 | { | 48 | { |
49 | unsigned long base; | 49 | unsigned long base; |
50 | 50 | ||
@@ -56,7 +56,7 @@ static __init_refok void *early_efi_map(unsigned long start, unsigned long len) | |||
56 | return early_ioremap(base + start, len); | 56 | return early_ioremap(base + start, len); |
57 | } | 57 | } |
58 | 58 | ||
59 | static __init_refok void early_efi_unmap(void *addr, unsigned long len) | 59 | static __ref void early_efi_unmap(void *addr, unsigned long len) |
60 | { | 60 | { |
61 | if (!efi_fb) | 61 | if (!efi_fb) |
62 | early_iounmap(addr, len); | 62 | early_iounmap(addr, len); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index cd993051aed7..8ffb089b19a5 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -34,9 +34,7 @@ | |||
34 | #include <linux/edd.h> | 34 | #include <linux/edd.h> |
35 | #include <linux/frame.h> | 35 | #include <linux/frame.h> |
36 | 36 | ||
37 | #ifdef CONFIG_KEXEC_CORE | ||
38 | #include <linux/kexec.h> | 37 | #include <linux/kexec.h> |
39 | #endif | ||
40 | 38 | ||
41 | #include <xen/xen.h> | 39 | #include <xen/xen.h> |
42 | #include <xen/events.h> | 40 | #include <xen/events.h> |
@@ -1334,7 +1332,8 @@ static void xen_crash_shutdown(struct pt_regs *regs) | |||
1334 | static int | 1332 | static int |
1335 | xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) | 1333 | xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) |
1336 | { | 1334 | { |
1337 | xen_reboot(SHUTDOWN_crash); | 1335 | if (!kexec_crash_loaded()) |
1336 | xen_reboot(SHUTDOWN_crash); | ||
1338 | return NOTIFY_DONE; | 1337 | return NOTIFY_DONE; |
1339 | } | 1338 | } |
1340 | 1339 | ||
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b108f1358a32..4305ee9db4b2 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -309,7 +309,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) | |||
309 | * During early init (when acpi_gbl_permanent_mmap has not been set yet) this | 309 | * During early init (when acpi_gbl_permanent_mmap has not been set yet) this |
310 | * routine simply calls __acpi_map_table() to get the job done. | 310 | * routine simply calls __acpi_map_table() to get the job done. |
311 | */ | 311 | */ |
312 | void __iomem *__init_refok | 312 | void __iomem *__ref |
313 | acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) | 313 | acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) |
314 | { | 314 | { |
315 | struct acpi_ioremap *map; | 315 | struct acpi_ioremap *map; |
@@ -362,8 +362,7 @@ out: | |||
362 | } | 362 | } |
363 | EXPORT_SYMBOL_GPL(acpi_os_map_iomem); | 363 | EXPORT_SYMBOL_GPL(acpi_os_map_iomem); |
364 | 364 | ||
365 | void *__init_refok | 365 | void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) |
366 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | ||
367 | { | 366 | { |
368 | return (void *)acpi_os_map_iomem(phys, size); | 367 | return (void *)acpi_os_map_iomem(phys, size); |
369 | } | 368 | } |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 773fc3099769..22d1760a4278 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -46,7 +46,8 @@ MODULE_LICENSE("GPL"); | |||
46 | extern struct builtin_fw __start_builtin_fw[]; | 46 | extern struct builtin_fw __start_builtin_fw[]; |
47 | extern struct builtin_fw __end_builtin_fw[]; | 47 | extern struct builtin_fw __end_builtin_fw[]; |
48 | 48 | ||
49 | static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) | 49 | static bool fw_get_builtin_firmware(struct firmware *fw, const char *name, |
50 | void *buf, size_t size) | ||
50 | { | 51 | { |
51 | struct builtin_fw *b_fw; | 52 | struct builtin_fw *b_fw; |
52 | 53 | ||
@@ -54,6 +55,9 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) | |||
54 | if (strcmp(name, b_fw->name) == 0) { | 55 | if (strcmp(name, b_fw->name) == 0) { |
55 | fw->size = b_fw->size; | 56 | fw->size = b_fw->size; |
56 | fw->data = b_fw->data; | 57 | fw->data = b_fw->data; |
58 | |||
59 | if (buf && fw->size <= size) | ||
60 | memcpy(buf, fw->data, fw->size); | ||
57 | return true; | 61 | return true; |
58 | } | 62 | } |
59 | } | 63 | } |
@@ -74,7 +78,9 @@ static bool fw_is_builtin_firmware(const struct firmware *fw) | |||
74 | 78 | ||
75 | #else /* Module case - no builtin firmware support */ | 79 | #else /* Module case - no builtin firmware support */ |
76 | 80 | ||
77 | static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name) | 81 | static inline bool fw_get_builtin_firmware(struct firmware *fw, |
82 | const char *name, void *buf, | ||
83 | size_t size) | ||
78 | { | 84 | { |
79 | return false; | 85 | return false; |
80 | } | 86 | } |
@@ -112,6 +118,7 @@ static inline long firmware_loading_timeout(void) | |||
112 | #define FW_OPT_FALLBACK 0 | 118 | #define FW_OPT_FALLBACK 0 |
113 | #endif | 119 | #endif |
114 | #define FW_OPT_NO_WARN (1U << 3) | 120 | #define FW_OPT_NO_WARN (1U << 3) |
121 | #define FW_OPT_NOCACHE (1U << 4) | ||
115 | 122 | ||
116 | struct firmware_cache { | 123 | struct firmware_cache { |
117 | /* firmware_buf instance will be added into the below list */ | 124 | /* firmware_buf instance will be added into the below list */ |
@@ -143,6 +150,7 @@ struct firmware_buf { | |||
143 | unsigned long status; | 150 | unsigned long status; |
144 | void *data; | 151 | void *data; |
145 | size_t size; | 152 | size_t size; |
153 | size_t allocated_size; | ||
146 | #ifdef CONFIG_FW_LOADER_USER_HELPER | 154 | #ifdef CONFIG_FW_LOADER_USER_HELPER |
147 | bool is_paged_buf; | 155 | bool is_paged_buf; |
148 | bool need_uevent; | 156 | bool need_uevent; |
@@ -178,7 +186,8 @@ static DEFINE_MUTEX(fw_lock); | |||
178 | static struct firmware_cache fw_cache; | 186 | static struct firmware_cache fw_cache; |
179 | 187 | ||
180 | static struct firmware_buf *__allocate_fw_buf(const char *fw_name, | 188 | static struct firmware_buf *__allocate_fw_buf(const char *fw_name, |
181 | struct firmware_cache *fwc) | 189 | struct firmware_cache *fwc, |
190 | void *dbuf, size_t size) | ||
182 | { | 191 | { |
183 | struct firmware_buf *buf; | 192 | struct firmware_buf *buf; |
184 | 193 | ||
@@ -194,6 +203,8 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name, | |||
194 | 203 | ||
195 | kref_init(&buf->ref); | 204 | kref_init(&buf->ref); |
196 | buf->fwc = fwc; | 205 | buf->fwc = fwc; |
206 | buf->data = dbuf; | ||
207 | buf->allocated_size = size; | ||
197 | init_completion(&buf->completion); | 208 | init_completion(&buf->completion); |
198 | #ifdef CONFIG_FW_LOADER_USER_HELPER | 209 | #ifdef CONFIG_FW_LOADER_USER_HELPER |
199 | INIT_LIST_HEAD(&buf->pending_list); | 210 | INIT_LIST_HEAD(&buf->pending_list); |
@@ -217,7 +228,8 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name) | |||
217 | 228 | ||
218 | static int fw_lookup_and_allocate_buf(const char *fw_name, | 229 | static int fw_lookup_and_allocate_buf(const char *fw_name, |
219 | struct firmware_cache *fwc, | 230 | struct firmware_cache *fwc, |
220 | struct firmware_buf **buf) | 231 | struct firmware_buf **buf, void *dbuf, |
232 | size_t size) | ||
221 | { | 233 | { |
222 | struct firmware_buf *tmp; | 234 | struct firmware_buf *tmp; |
223 | 235 | ||
@@ -229,7 +241,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, | |||
229 | *buf = tmp; | 241 | *buf = tmp; |
230 | return 1; | 242 | return 1; |
231 | } | 243 | } |
232 | tmp = __allocate_fw_buf(fw_name, fwc); | 244 | tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size); |
233 | if (tmp) | 245 | if (tmp) |
234 | list_add(&tmp->list, &fwc->head); | 246 | list_add(&tmp->list, &fwc->head); |
235 | spin_unlock(&fwc->lock); | 247 | spin_unlock(&fwc->lock); |
@@ -261,6 +273,7 @@ static void __fw_free_buf(struct kref *ref) | |||
261 | vfree(buf->pages); | 273 | vfree(buf->pages); |
262 | } else | 274 | } else |
263 | #endif | 275 | #endif |
276 | if (!buf->allocated_size) | ||
264 | vfree(buf->data); | 277 | vfree(buf->data); |
265 | kfree_const(buf->fw_id); | 278 | kfree_const(buf->fw_id); |
266 | kfree(buf); | 279 | kfree(buf); |
@@ -301,13 +314,21 @@ static void fw_finish_direct_load(struct device *device, | |||
301 | mutex_unlock(&fw_lock); | 314 | mutex_unlock(&fw_lock); |
302 | } | 315 | } |
303 | 316 | ||
304 | static int fw_get_filesystem_firmware(struct device *device, | 317 | static int |
305 | struct firmware_buf *buf) | 318 | fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf) |
306 | { | 319 | { |
307 | loff_t size; | 320 | loff_t size; |
308 | int i, len; | 321 | int i, len; |
309 | int rc = -ENOENT; | 322 | int rc = -ENOENT; |
310 | char *path; | 323 | char *path; |
324 | enum kernel_read_file_id id = READING_FIRMWARE; | ||
325 | size_t msize = INT_MAX; | ||
326 | |||
327 | /* Already populated data member means we're loading into a buffer */ | ||
328 | if (buf->data) { | ||
329 | id = READING_FIRMWARE_PREALLOC_BUFFER; | ||
330 | msize = buf->allocated_size; | ||
331 | } | ||
311 | 332 | ||
312 | path = __getname(); | 333 | path = __getname(); |
313 | if (!path) | 334 | if (!path) |
@@ -326,8 +347,8 @@ static int fw_get_filesystem_firmware(struct device *device, | |||
326 | } | 347 | } |
327 | 348 | ||
328 | buf->size = 0; | 349 | buf->size = 0; |
329 | rc = kernel_read_file_from_path(path, &buf->data, &size, | 350 | rc = kernel_read_file_from_path(path, &buf->data, &size, msize, |
330 | INT_MAX, READING_FIRMWARE); | 351 | id); |
331 | if (rc) { | 352 | if (rc) { |
332 | if (rc == -ENOENT) | 353 | if (rc == -ENOENT) |
333 | dev_dbg(device, "loading %s failed with error %d\n", | 354 | dev_dbg(device, "loading %s failed with error %d\n", |
@@ -691,6 +712,38 @@ out: | |||
691 | 712 | ||
692 | static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); | 713 | static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); |
693 | 714 | ||
715 | static void firmware_rw_buf(struct firmware_buf *buf, char *buffer, | ||
716 | loff_t offset, size_t count, bool read) | ||
717 | { | ||
718 | if (read) | ||
719 | memcpy(buffer, buf->data + offset, count); | ||
720 | else | ||
721 | memcpy(buf->data + offset, buffer, count); | ||
722 | } | ||
723 | |||
724 | static void firmware_rw(struct firmware_buf *buf, char *buffer, | ||
725 | loff_t offset, size_t count, bool read) | ||
726 | { | ||
727 | while (count) { | ||
728 | void *page_data; | ||
729 | int page_nr = offset >> PAGE_SHIFT; | ||
730 | int page_ofs = offset & (PAGE_SIZE-1); | ||
731 | int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); | ||
732 | |||
733 | page_data = kmap(buf->pages[page_nr]); | ||
734 | |||
735 | if (read) | ||
736 | memcpy(buffer, page_data + page_ofs, page_cnt); | ||
737 | else | ||
738 | memcpy(page_data + page_ofs, buffer, page_cnt); | ||
739 | |||
740 | kunmap(buf->pages[page_nr]); | ||
741 | buffer += page_cnt; | ||
742 | offset += page_cnt; | ||
743 | count -= page_cnt; | ||
744 | } | ||
745 | } | ||
746 | |||
694 | static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, | 747 | static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, |
695 | struct bin_attribute *bin_attr, | 748 | struct bin_attribute *bin_attr, |
696 | char *buffer, loff_t offset, size_t count) | 749 | char *buffer, loff_t offset, size_t count) |
@@ -715,21 +768,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, | |||
715 | 768 | ||
716 | ret_count = count; | 769 | ret_count = count; |
717 | 770 | ||
718 | while (count) { | 771 | if (buf->data) |
719 | void *page_data; | 772 | firmware_rw_buf(buf, buffer, offset, count, true); |
720 | int page_nr = offset >> PAGE_SHIFT; | 773 | else |
721 | int page_ofs = offset & (PAGE_SIZE-1); | 774 | firmware_rw(buf, buffer, offset, count, true); |
722 | int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); | ||
723 | |||
724 | page_data = kmap(buf->pages[page_nr]); | ||
725 | |||
726 | memcpy(buffer, page_data + page_ofs, page_cnt); | ||
727 | 775 | ||
728 | kunmap(buf->pages[page_nr]); | ||
729 | buffer += page_cnt; | ||
730 | offset += page_cnt; | ||
731 | count -= page_cnt; | ||
732 | } | ||
733 | out: | 776 | out: |
734 | mutex_unlock(&fw_lock); | 777 | mutex_unlock(&fw_lock); |
735 | return ret_count; | 778 | return ret_count; |
@@ -804,29 +847,23 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, | |||
804 | goto out; | 847 | goto out; |
805 | } | 848 | } |
806 | 849 | ||
807 | retval = fw_realloc_buffer(fw_priv, offset + count); | 850 | if (buf->data) { |
808 | if (retval) | 851 | if (offset + count > buf->allocated_size) { |
809 | goto out; | 852 | retval = -ENOMEM; |
810 | 853 | goto out; | |
811 | retval = count; | 854 | } |
812 | 855 | firmware_rw_buf(buf, buffer, offset, count, false); | |
813 | while (count) { | 856 | retval = count; |
814 | void *page_data; | 857 | } else { |
815 | int page_nr = offset >> PAGE_SHIFT; | 858 | retval = fw_realloc_buffer(fw_priv, offset + count); |
816 | int page_ofs = offset & (PAGE_SIZE - 1); | 859 | if (retval) |
817 | int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); | 860 | goto out; |
818 | |||
819 | page_data = kmap(buf->pages[page_nr]); | ||
820 | |||
821 | memcpy(page_data + page_ofs, buffer, page_cnt); | ||
822 | 861 | ||
823 | kunmap(buf->pages[page_nr]); | 862 | retval = count; |
824 | buffer += page_cnt; | 863 | firmware_rw(buf, buffer, offset, count, false); |
825 | offset += page_cnt; | ||
826 | count -= page_cnt; | ||
827 | } | 864 | } |
828 | 865 | ||
829 | buf->size = max_t(size_t, offset, buf->size); | 866 | buf->size = max_t(size_t, offset + count, buf->size); |
830 | out: | 867 | out: |
831 | mutex_unlock(&fw_lock); | 868 | mutex_unlock(&fw_lock); |
832 | return retval; | 869 | return retval; |
@@ -894,7 +931,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, | |||
894 | struct firmware_buf *buf = fw_priv->buf; | 931 | struct firmware_buf *buf = fw_priv->buf; |
895 | 932 | ||
896 | /* fall back on userspace loading */ | 933 | /* fall back on userspace loading */ |
897 | buf->is_paged_buf = true; | 934 | if (!buf->data) |
935 | buf->is_paged_buf = true; | ||
898 | 936 | ||
899 | dev_set_uevent_suppress(f_dev, true); | 937 | dev_set_uevent_suppress(f_dev, true); |
900 | 938 | ||
@@ -929,7 +967,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, | |||
929 | 967 | ||
930 | if (is_fw_load_aborted(buf)) | 968 | if (is_fw_load_aborted(buf)) |
931 | retval = -EAGAIN; | 969 | retval = -EAGAIN; |
932 | else if (!buf->data) | 970 | else if (buf->is_paged_buf && !buf->data) |
933 | retval = -ENOMEM; | 971 | retval = -ENOMEM; |
934 | 972 | ||
935 | device_del(f_dev); | 973 | device_del(f_dev); |
@@ -1012,7 +1050,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf) | |||
1012 | */ | 1050 | */ |
1013 | static int | 1051 | static int |
1014 | _request_firmware_prepare(struct firmware **firmware_p, const char *name, | 1052 | _request_firmware_prepare(struct firmware **firmware_p, const char *name, |
1015 | struct device *device) | 1053 | struct device *device, void *dbuf, size_t size) |
1016 | { | 1054 | { |
1017 | struct firmware *firmware; | 1055 | struct firmware *firmware; |
1018 | struct firmware_buf *buf; | 1056 | struct firmware_buf *buf; |
@@ -1025,12 +1063,12 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, | |||
1025 | return -ENOMEM; | 1063 | return -ENOMEM; |
1026 | } | 1064 | } |
1027 | 1065 | ||
1028 | if (fw_get_builtin_firmware(firmware, name)) { | 1066 | if (fw_get_builtin_firmware(firmware, name, dbuf, size)) { |
1029 | dev_dbg(device, "using built-in %s\n", name); | 1067 | dev_dbg(device, "using built-in %s\n", name); |
1030 | return 0; /* assigned */ | 1068 | return 0; /* assigned */ |
1031 | } | 1069 | } |
1032 | 1070 | ||
1033 | ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf); | 1071 | ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size); |
1034 | 1072 | ||
1035 | /* | 1073 | /* |
1036 | * bind with 'buf' now to avoid warning in failure path | 1074 | * bind with 'buf' now to avoid warning in failure path |
@@ -1070,14 +1108,16 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, | |||
1070 | * should be fixed in devres or driver core. | 1108 | * should be fixed in devres or driver core. |
1071 | */ | 1109 | */ |
1072 | /* don't cache firmware handled without uevent */ | 1110 | /* don't cache firmware handled without uevent */ |
1073 | if (device && (opt_flags & FW_OPT_UEVENT)) | 1111 | if (device && (opt_flags & FW_OPT_UEVENT) && |
1112 | !(opt_flags & FW_OPT_NOCACHE)) | ||
1074 | fw_add_devm_name(device, buf->fw_id); | 1113 | fw_add_devm_name(device, buf->fw_id); |
1075 | 1114 | ||
1076 | /* | 1115 | /* |
1077 | * After caching firmware image is started, let it piggyback | 1116 | * After caching firmware image is started, let it piggyback |
1078 | * on request firmware. | 1117 | * on request firmware. |
1079 | */ | 1118 | */ |
1080 | if (buf->fwc->state == FW_LOADER_START_CACHE) { | 1119 | if (!(opt_flags & FW_OPT_NOCACHE) && |
1120 | buf->fwc->state == FW_LOADER_START_CACHE) { | ||
1081 | if (fw_cache_piggyback_on_request(buf->fw_id)) | 1121 | if (fw_cache_piggyback_on_request(buf->fw_id)) |
1082 | kref_get(&buf->ref); | 1122 | kref_get(&buf->ref); |
1083 | } | 1123 | } |
@@ -1091,7 +1131,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, | |||
1091 | /* called from request_firmware() and request_firmware_work_func() */ | 1131 | /* called from request_firmware() and request_firmware_work_func() */ |
1092 | static int | 1132 | static int |
1093 | _request_firmware(const struct firmware **firmware_p, const char *name, | 1133 | _request_firmware(const struct firmware **firmware_p, const char *name, |
1094 | struct device *device, unsigned int opt_flags) | 1134 | struct device *device, void *buf, size_t size, |
1135 | unsigned int opt_flags) | ||
1095 | { | 1136 | { |
1096 | struct firmware *fw = NULL; | 1137 | struct firmware *fw = NULL; |
1097 | long timeout; | 1138 | long timeout; |
@@ -1105,7 +1146,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, | |||
1105 | goto out; | 1146 | goto out; |
1106 | } | 1147 | } |
1107 | 1148 | ||
1108 | ret = _request_firmware_prepare(&fw, name, device); | 1149 | ret = _request_firmware_prepare(&fw, name, device, buf, size); |
1109 | if (ret <= 0) /* error or already assigned */ | 1150 | if (ret <= 0) /* error or already assigned */ |
1110 | goto out; | 1151 | goto out; |
1111 | 1152 | ||
@@ -1184,7 +1225,7 @@ request_firmware(const struct firmware **firmware_p, const char *name, | |||
1184 | 1225 | ||
1185 | /* Need to pin this module until return */ | 1226 | /* Need to pin this module until return */ |
1186 | __module_get(THIS_MODULE); | 1227 | __module_get(THIS_MODULE); |
1187 | ret = _request_firmware(firmware_p, name, device, | 1228 | ret = _request_firmware(firmware_p, name, device, NULL, 0, |
1188 | FW_OPT_UEVENT | FW_OPT_FALLBACK); | 1229 | FW_OPT_UEVENT | FW_OPT_FALLBACK); |
1189 | module_put(THIS_MODULE); | 1230 | module_put(THIS_MODULE); |
1190 | return ret; | 1231 | return ret; |
@@ -1208,7 +1249,7 @@ int request_firmware_direct(const struct firmware **firmware_p, | |||
1208 | int ret; | 1249 | int ret; |
1209 | 1250 | ||
1210 | __module_get(THIS_MODULE); | 1251 | __module_get(THIS_MODULE); |
1211 | ret = _request_firmware(firmware_p, name, device, | 1252 | ret = _request_firmware(firmware_p, name, device, NULL, 0, |
1212 | FW_OPT_UEVENT | FW_OPT_NO_WARN); | 1253 | FW_OPT_UEVENT | FW_OPT_NO_WARN); |
1213 | module_put(THIS_MODULE); | 1254 | module_put(THIS_MODULE); |
1214 | return ret; | 1255 | return ret; |
@@ -1216,6 +1257,36 @@ int request_firmware_direct(const struct firmware **firmware_p, | |||
1216 | EXPORT_SYMBOL_GPL(request_firmware_direct); | 1257 | EXPORT_SYMBOL_GPL(request_firmware_direct); |
1217 | 1258 | ||
1218 | /** | 1259 | /** |
1260 | * request_firmware_into_buf - load firmware into a previously allocated buffer | ||
1261 | * @firmware_p: pointer to firmware image | ||
1262 | * @name: name of firmware file | ||
1263 | * @device: device for which firmware is being loaded and DMA region allocated | ||
1264 | * @buf: address of buffer to load firmware into | ||
1265 | * @size: size of buffer | ||
1266 | * | ||
1267 | * This function works pretty much like request_firmware(), but it doesn't | ||
1268 | * allocate a buffer to hold the firmware data. Instead, the firmware | ||
1269 | * is loaded directly into the buffer pointed to by @buf and the @firmware_p | ||
1270 | * data member is pointed at @buf. | ||
1271 | * | ||
1272 | * This function doesn't cache firmware either. | ||
1273 | */ | ||
1274 | int | ||
1275 | request_firmware_into_buf(const struct firmware **firmware_p, const char *name, | ||
1276 | struct device *device, void *buf, size_t size) | ||
1277 | { | ||
1278 | int ret; | ||
1279 | |||
1280 | __module_get(THIS_MODULE); | ||
1281 | ret = _request_firmware(firmware_p, name, device, buf, size, | ||
1282 | FW_OPT_UEVENT | FW_OPT_FALLBACK | | ||
1283 | FW_OPT_NOCACHE); | ||
1284 | module_put(THIS_MODULE); | ||
1285 | return ret; | ||
1286 | } | ||
1287 | EXPORT_SYMBOL(request_firmware_into_buf); | ||
1288 | |||
1289 | /** | ||
1219 | * release_firmware: - release the resource associated with a firmware image | 1290 | * release_firmware: - release the resource associated with a firmware image |
1220 | * @fw: firmware resource to release | 1291 | * @fw: firmware resource to release |
1221 | **/ | 1292 | **/ |
@@ -1247,7 +1318,7 @@ static void request_firmware_work_func(struct work_struct *work) | |||
1247 | 1318 | ||
1248 | fw_work = container_of(work, struct firmware_work, work); | 1319 | fw_work = container_of(work, struct firmware_work, work); |
1249 | 1320 | ||
1250 | _request_firmware(&fw, fw_work->name, fw_work->device, | 1321 | _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, |
1251 | fw_work->opt_flags); | 1322 | fw_work->opt_flags); |
1252 | fw_work->cont(fw, fw_work->context); | 1323 | fw_work->cont(fw, fw_work->context); |
1253 | put_device(fw_work->device); /* taken in request_firmware_nowait() */ | 1324 | put_device(fw_work->device); /* taken in request_firmware_nowait() */ |
@@ -1380,7 +1451,7 @@ static int uncache_firmware(const char *fw_name) | |||
1380 | 1451 | ||
1381 | pr_debug("%s: %s\n", __func__, fw_name); | 1452 | pr_debug("%s: %s\n", __func__, fw_name); |
1382 | 1453 | ||
1383 | if (fw_get_builtin_firmware(&fw, fw_name)) | 1454 | if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0)) |
1384 | return 0; | 1455 | return 0; |
1385 | 1456 | ||
1386 | buf = fw_lookup_buf(fw_name); | 1457 | buf = fw_lookup_buf(fw_name); |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 29cd96661b30..5548f9686016 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -370,7 +370,7 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) | |||
370 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | 370 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE |
371 | #define page_initialized(page) (page->lru.next) | 371 | #define page_initialized(page) (page->lru.next) |
372 | 372 | ||
373 | static int __init_refok get_nid_for_pfn(unsigned long pfn) | 373 | static int __ref get_nid_for_pfn(unsigned long pfn) |
374 | { | 374 | { |
375 | struct page *page; | 375 | struct page *page; |
376 | 376 | ||
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 0a1aaf8c24c4..2d3d50ab74bf 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/crc32c.h> | 27 | #include <linux/crc32c.h> |
28 | #include <linux/drbd.h> | 28 | #include <linux/drbd.h> |
29 | #include <linux/drbd_limits.h> | 29 | #include <linux/drbd_limits.h> |
30 | #include <linux/dynamic_debug.h> | ||
31 | #include "drbd_int.h" | 30 | #include "drbd_int.h" |
32 | 31 | ||
33 | 32 | ||
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7b54354976a5..4cb8f21ff4ef 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/backing-dev.h> | 41 | #include <linux/backing-dev.h> |
42 | #include <linux/genhd.h> | 42 | #include <linux/genhd.h> |
43 | #include <linux/idr.h> | 43 | #include <linux/idr.h> |
44 | #include <linux/dynamic_debug.h> | ||
44 | #include <net/tcp.h> | 45 | #include <net/tcp.h> |
45 | #include <linux/lru_cache.h> | 46 | #include <linux/lru_cache.h> |
46 | #include <linux/prefetch.h> | 47 | #include <linux/prefetch.h> |
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 89cc700fbc37..97ae60fa1584 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c | |||
@@ -250,7 +250,7 @@ struct clk_lookup_alloc { | |||
250 | char con_id[MAX_CON_ID]; | 250 | char con_id[MAX_CON_ID]; |
251 | }; | 251 | }; |
252 | 252 | ||
253 | static struct clk_lookup * __init_refok | 253 | static struct clk_lookup * __ref |
254 | vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, | 254 | vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, |
255 | va_list ap) | 255 | va_list ap) |
256 | { | 256 | { |
@@ -287,7 +287,7 @@ vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt, | |||
287 | return cl; | 287 | return cl; |
288 | } | 288 | } |
289 | 289 | ||
290 | struct clk_lookup * __init_refok | 290 | struct clk_lookup * __ref |
291 | clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) | 291 | clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) |
292 | { | 292 | { |
293 | struct clk_lookup *cl; | 293 | struct clk_lookup *cl; |
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 40bb8ae5853c..aacf584f2a42 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c | |||
@@ -2338,23 +2338,11 @@ static struct memstick_driver msb_driver = { | |||
2338 | .resume = msb_resume | 2338 | .resume = msb_resume |
2339 | }; | 2339 | }; |
2340 | 2340 | ||
2341 | static int major; | ||
2342 | |||
2343 | static int __init msb_init(void) | 2341 | static int __init msb_init(void) |
2344 | { | 2342 | { |
2345 | int rc = register_blkdev(0, DRIVER_NAME); | 2343 | int rc = memstick_register_driver(&msb_driver); |
2346 | 2344 | if (rc) | |
2347 | if (rc < 0) { | ||
2348 | pr_err("failed to register major (error %d)\n", rc); | ||
2349 | return rc; | ||
2350 | } | ||
2351 | |||
2352 | major = rc; | ||
2353 | rc = memstick_register_driver(&msb_driver); | ||
2354 | if (rc) { | ||
2355 | unregister_blkdev(major, DRIVER_NAME); | ||
2356 | pr_err("failed to register memstick driver (error %d)\n", rc); | 2345 | pr_err("failed to register memstick driver (error %d)\n", rc); |
2357 | } | ||
2358 | 2346 | ||
2359 | return rc; | 2347 | return rc; |
2360 | } | 2348 | } |
@@ -2362,7 +2350,6 @@ static int __init msb_init(void) | |||
2362 | static void __exit msb_exit(void) | 2350 | static void __exit msb_exit(void) |
2363 | { | 2351 | { |
2364 | memstick_unregister_driver(&msb_driver); | 2352 | memstick_unregister_driver(&msb_driver); |
2365 | unregister_blkdev(major, DRIVER_NAME); | ||
2366 | idr_destroy(&msb_disk_idr); | 2353 | idr_destroy(&msb_disk_idr); |
2367 | } | 2354 | } |
2368 | 2355 | ||
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 5f70fee59a94..d6ff5e82377d 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
@@ -1086,7 +1086,7 @@ out: | |||
1086 | return err; | 1086 | return err; |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev, | 1089 | static void __ref pcifront_backend_changed(struct xenbus_device *xdev, |
1090 | enum xenbus_state be_state) | 1090 | enum xenbus_state be_state) |
1091 | { | 1091 | { |
1092 | struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); | 1092 | struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); |
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index b5a10d3c92c7..d6d2f20c4597 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig | |||
@@ -67,6 +67,15 @@ config RAPIDIO_ENUM_BASIC | |||
67 | 67 | ||
68 | endchoice | 68 | endchoice |
69 | 69 | ||
70 | config RAPIDIO_CHMAN | ||
71 | tristate "RapidIO Channelized Messaging driver" | ||
72 | depends on RAPIDIO | ||
73 | help | ||
74 | This option includes RapidIO channelized messaging driver which | ||
75 | provides socket-like interface to allow sharing of single RapidIO | ||
76 | messaging mailbox between multiple user-space applications. | ||
77 | See "Documentation/rapidio/rio_cm.txt" for driver description. | ||
78 | |||
70 | config RAPIDIO_MPORT_CDEV | 79 | config RAPIDIO_MPORT_CDEV |
71 | tristate "RapidIO /dev mport device driver" | 80 | tristate "RapidIO /dev mport device driver" |
72 | depends on RAPIDIO | 81 | depends on RAPIDIO |
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index 6271ada6993f..74dcea45ad49 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile | |||
@@ -5,6 +5,7 @@ obj-$(CONFIG_RAPIDIO) += rapidio.o | |||
5 | rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o | 5 | rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o |
6 | 6 | ||
7 | obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o | 7 | obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o |
8 | obj-$(CONFIG_RAPIDIO_CHMAN) += rio_cm.o | ||
8 | 9 | ||
9 | obj-$(CONFIG_RAPIDIO) += switches/ | 10 | obj-$(CONFIG_RAPIDIO) += switches/ |
10 | obj-$(CONFIG_RAPIDIO) += devices/ | 11 | obj-$(CONFIG_RAPIDIO) += devices/ |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index e165b7ce29d7..436dfe871d32 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
@@ -1813,7 +1813,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, | |||
1813 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { | 1813 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { |
1814 | rdev->efptr = rval & 0xffff; | 1814 | rdev->efptr = rval & 0xffff; |
1815 | rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, | 1815 | rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, |
1816 | hopcount); | 1816 | hopcount, &rdev->phys_rmap); |
1817 | 1817 | ||
1818 | rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, | 1818 | rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, |
1819 | hopcount, RIO_EFB_ERR_MGMNT); | 1819 | hopcount, RIO_EFB_ERR_MGMNT); |
@@ -2242,7 +2242,7 @@ static void mport_mm_open(struct vm_area_struct *vma) | |||
2242 | { | 2242 | { |
2243 | struct rio_mport_mapping *map = vma->vm_private_data; | 2243 | struct rio_mport_mapping *map = vma->vm_private_data; |
2244 | 2244 | ||
2245 | rmcd_debug(MMAP, "0x%pad", &map->phys_addr); | 2245 | rmcd_debug(MMAP, "%pad", &map->phys_addr); |
2246 | kref_get(&map->ref); | 2246 | kref_get(&map->ref); |
2247 | } | 2247 | } |
2248 | 2248 | ||
@@ -2250,7 +2250,7 @@ static void mport_mm_close(struct vm_area_struct *vma) | |||
2250 | { | 2250 | { |
2251 | struct rio_mport_mapping *map = vma->vm_private_data; | 2251 | struct rio_mport_mapping *map = vma->vm_private_data; |
2252 | 2252 | ||
2253 | rmcd_debug(MMAP, "0x%pad", &map->phys_addr); | 2253 | rmcd_debug(MMAP, "%pad", &map->phys_addr); |
2254 | mutex_lock(&map->md->buf_mutex); | 2254 | mutex_lock(&map->md->buf_mutex); |
2255 | kref_put(&map->ref, mport_release_mapping); | 2255 | kref_put(&map->ref, mport_release_mapping); |
2256 | mutex_unlock(&map->md->buf_mutex); | 2256 | mutex_unlock(&map->md->buf_mutex); |
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index b5b455614f8a..32f0f014a067 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -37,11 +37,20 @@ | |||
37 | #include "tsi721.h" | 37 | #include "tsi721.h" |
38 | 38 | ||
39 | #ifdef DEBUG | 39 | #ifdef DEBUG |
40 | u32 dbg_level = DBG_INIT | DBG_EXIT; | 40 | u32 dbg_level; |
41 | module_param(dbg_level, uint, S_IWUSR | S_IRUGO); | 41 | module_param(dbg_level, uint, S_IWUSR | S_IRUGO); |
42 | MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); | 42 | MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | static int pcie_mrrs = -1; | ||
46 | module_param(pcie_mrrs, int, S_IRUGO); | ||
47 | MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)"); | ||
48 | |||
49 | static u8 mbox_sel = 0x0f; | ||
50 | module_param(mbox_sel, byte, S_IRUGO); | ||
51 | MODULE_PARM_DESC(mbox_sel, | ||
52 | "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); | ||
53 | |||
45 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); | 54 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); |
46 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); | 55 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); |
47 | 56 | ||
@@ -1081,7 +1090,7 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) | |||
1081 | * from rstart to lstart. | 1090 | * from rstart to lstart. |
1082 | */ | 1091 | */ |
1083 | static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | 1092 | static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, |
1084 | u64 rstart, u32 size, u32 flags) | 1093 | u64 rstart, u64 size, u32 flags) |
1085 | { | 1094 | { |
1086 | struct tsi721_device *priv = mport->priv; | 1095 | struct tsi721_device *priv = mport->priv; |
1087 | int i, avail = -1; | 1096 | int i, avail = -1; |
@@ -1094,6 +1103,10 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
1094 | struct tsi721_ib_win_mapping *map = NULL; | 1103 | struct tsi721_ib_win_mapping *map = NULL; |
1095 | int ret = -EBUSY; | 1104 | int ret = -EBUSY; |
1096 | 1105 | ||
1106 | /* Max IBW size supported by HW is 16GB */ | ||
1107 | if (size > 0x400000000UL) | ||
1108 | return -EINVAL; | ||
1109 | |||
1097 | if (direct) { | 1110 | if (direct) { |
1098 | /* Calculate minimal acceptable window size and base address */ | 1111 | /* Calculate minimal acceptable window size and base address */ |
1099 | 1112 | ||
@@ -1101,15 +1114,15 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
1101 | ibw_start = lstart & ~(ibw_size - 1); | 1114 | ibw_start = lstart & ~(ibw_size - 1); |
1102 | 1115 | ||
1103 | tsi_debug(IBW, &priv->pdev->dev, | 1116 | tsi_debug(IBW, &priv->pdev->dev, |
1104 | "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx", | 1117 | "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx", |
1105 | rstart, &lstart, size, ibw_start); | 1118 | rstart, &lstart, size, ibw_start); |
1106 | 1119 | ||
1107 | while ((lstart + size) > (ibw_start + ibw_size)) { | 1120 | while ((lstart + size) > (ibw_start + ibw_size)) { |
1108 | ibw_size *= 2; | 1121 | ibw_size *= 2; |
1109 | ibw_start = lstart & ~(ibw_size - 1); | 1122 | ibw_start = lstart & ~(ibw_size - 1); |
1110 | if (ibw_size > 0x80000000) { /* Limit max size to 2GB */ | 1123 | /* Check for crossing IBW max size 16GB */ |
1124 | if (ibw_size > 0x400000000UL) | ||
1111 | return -EBUSY; | 1125 | return -EBUSY; |
1112 | } | ||
1113 | } | 1126 | } |
1114 | 1127 | ||
1115 | loc_start = ibw_start; | 1128 | loc_start = ibw_start; |
@@ -1120,7 +1133,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
1120 | 1133 | ||
1121 | } else { | 1134 | } else { |
1122 | tsi_debug(IBW, &priv->pdev->dev, | 1135 | tsi_debug(IBW, &priv->pdev->dev, |
1123 | "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x", | 1136 | "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx", |
1124 | rstart, &lstart, size); | 1137 | rstart, &lstart, size); |
1125 | 1138 | ||
1126 | if (!is_power_of_2(size) || size < 0x1000 || | 1139 | if (!is_power_of_2(size) || size < 0x1000 || |
@@ -1215,7 +1228,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
1215 | priv->ibwin_cnt--; | 1228 | priv->ibwin_cnt--; |
1216 | 1229 | ||
1217 | tsi_debug(IBW, &priv->pdev->dev, | 1230 | tsi_debug(IBW, &priv->pdev->dev, |
1218 | "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx", | 1231 | "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx", |
1219 | i, ibw_start, &loc_start, ibw_size); | 1232 | i, ibw_start, &loc_start, ibw_size); |
1220 | 1233 | ||
1221 | return 0; | 1234 | return 0; |
@@ -1237,7 +1250,7 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, | |||
1237 | int i; | 1250 | int i; |
1238 | 1251 | ||
1239 | tsi_debug(IBW, &priv->pdev->dev, | 1252 | tsi_debug(IBW, &priv->pdev->dev, |
1240 | "Unmap IBW mapped to PCIe_0x%pad", &lstart); | 1253 | "Unmap IBW mapped to PCIe_%pad", &lstart); |
1241 | 1254 | ||
1242 | /* Search for matching active inbound translation window */ | 1255 | /* Search for matching active inbound translation window */ |
1243 | for (i = 0; i < TSI721_IBWIN_NUM; i++) { | 1256 | for (i = 0; i < TSI721_IBWIN_NUM; i++) { |
@@ -1877,6 +1890,11 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, | |||
1877 | goto out; | 1890 | goto out; |
1878 | } | 1891 | } |
1879 | 1892 | ||
1893 | if ((mbox_sel & (1 << mbox)) == 0) { | ||
1894 | rc = -ENODEV; | ||
1895 | goto out; | ||
1896 | } | ||
1897 | |||
1880 | priv->omsg_ring[mbox].dev_id = dev_id; | 1898 | priv->omsg_ring[mbox].dev_id = dev_id; |
1881 | priv->omsg_ring[mbox].size = entries; | 1899 | priv->omsg_ring[mbox].size = entries; |
1882 | priv->omsg_ring[mbox].sts_rdptr = 0; | 1900 | priv->omsg_ring[mbox].sts_rdptr = 0; |
@@ -2161,6 +2179,11 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, | |||
2161 | goto out; | 2179 | goto out; |
2162 | } | 2180 | } |
2163 | 2181 | ||
2182 | if ((mbox_sel & (1 << mbox)) == 0) { | ||
2183 | rc = -ENODEV; | ||
2184 | goto out; | ||
2185 | } | ||
2186 | |||
2164 | /* Initialize IB Messaging Ring */ | 2187 | /* Initialize IB Messaging Ring */ |
2165 | priv->imsg_ring[mbox].dev_id = dev_id; | 2188 | priv->imsg_ring[mbox].dev_id = dev_id; |
2166 | priv->imsg_ring[mbox].size = entries; | 2189 | priv->imsg_ring[mbox].size = entries; |
@@ -2532,11 +2555,11 @@ static int tsi721_query_mport(struct rio_mport *mport, | |||
2532 | struct tsi721_device *priv = mport->priv; | 2555 | struct tsi721_device *priv = mport->priv; |
2533 | u32 rval; | 2556 | u32 rval; |
2534 | 2557 | ||
2535 | rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0))); | 2558 | rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0)); |
2536 | if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { | 2559 | if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { |
2537 | rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0))); | 2560 | rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0)); |
2538 | attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; | 2561 | attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; |
2539 | rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0))); | 2562 | rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0)); |
2540 | attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; | 2563 | attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; |
2541 | } else | 2564 | } else |
2542 | attr->link_speed = RIO_LINK_DOWN; | 2565 | attr->link_speed = RIO_LINK_DOWN; |
@@ -2650,9 +2673,9 @@ static int tsi721_setup_mport(struct tsi721_device *priv) | |||
2650 | mport->ops = &tsi721_rio_ops; | 2673 | mport->ops = &tsi721_rio_ops; |
2651 | mport->index = 0; | 2674 | mport->index = 0; |
2652 | mport->sys_size = 0; /* small system */ | 2675 | mport->sys_size = 0; /* small system */ |
2653 | mport->phy_type = RIO_PHY_SERIAL; | ||
2654 | mport->priv = (void *)priv; | 2676 | mport->priv = (void *)priv; |
2655 | mport->phys_efptr = 0x100; | 2677 | mport->phys_efptr = 0x100; |
2678 | mport->phys_rmap = 1; | ||
2656 | mport->dev.parent = &pdev->dev; | 2679 | mport->dev.parent = &pdev->dev; |
2657 | mport->dev.release = tsi721_mport_release; | 2680 | mport->dev.release = tsi721_mport_release; |
2658 | 2681 | ||
@@ -2840,6 +2863,16 @@ static int tsi721_probe(struct pci_dev *pdev, | |||
2840 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, | 2863 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, |
2841 | PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); | 2864 | PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); |
2842 | 2865 | ||
2866 | /* Override PCIe Maximum Read Request Size setting if requested */ | ||
2867 | if (pcie_mrrs >= 0) { | ||
2868 | if (pcie_mrrs <= 5) | ||
2869 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, | ||
2870 | PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12); | ||
2871 | else | ||
2872 | tsi_info(&pdev->dev, | ||
2873 | "Invalid MRRS override value %d", pcie_mrrs); | ||
2874 | } | ||
2875 | |||
2843 | /* Adjust PCIe completion timeout. */ | 2876 | /* Adjust PCIe completion timeout. */ |
2844 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); | 2877 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); |
2845 | 2878 | ||
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 5456dbddc929..5941437cbdd1 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h | |||
@@ -661,7 +661,7 @@ enum dma_rtype { | |||
661 | */ | 661 | */ |
662 | #define TSI721_DMA_CHNUM TSI721_DMA_MAXCH | 662 | #define TSI721_DMA_CHNUM TSI721_DMA_MAXCH |
663 | 663 | ||
664 | #define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ | 664 | #define TSI721_DMACH_MAINT 7 /* DMA channel for maint requests */ |
665 | #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ | 665 | #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ |
666 | 666 | ||
667 | #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ | 667 | #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ |
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 155cae1e62de..e2a418598129 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c | |||
@@ -36,18 +36,26 @@ | |||
36 | 36 | ||
37 | #include "tsi721.h" | 37 | #include "tsi721.h" |
38 | 38 | ||
39 | #define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */ | ||
40 | |||
41 | #ifdef CONFIG_PCI_MSI | 39 | #ifdef CONFIG_PCI_MSI |
42 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); | 40 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); |
43 | #endif | 41 | #endif |
44 | static int tsi721_submit_sg(struct tsi721_tx_desc *desc); | 42 | static int tsi721_submit_sg(struct tsi721_tx_desc *desc); |
45 | 43 | ||
46 | static unsigned int dma_desc_per_channel = 128; | 44 | static unsigned int dma_desc_per_channel = 128; |
47 | module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO); | 45 | module_param(dma_desc_per_channel, uint, S_IRUGO); |
48 | MODULE_PARM_DESC(dma_desc_per_channel, | 46 | MODULE_PARM_DESC(dma_desc_per_channel, |
49 | "Number of DMA descriptors per channel (default: 128)"); | 47 | "Number of DMA descriptors per channel (default: 128)"); |
50 | 48 | ||
49 | static unsigned int dma_txqueue_sz = 16; | ||
50 | module_param(dma_txqueue_sz, uint, S_IRUGO); | ||
51 | MODULE_PARM_DESC(dma_txqueue_sz, | ||
52 | "DMA Transactions Queue Size (default: 16)"); | ||
53 | |||
54 | static u8 dma_sel = 0x7f; | ||
55 | module_param(dma_sel, byte, S_IRUGO); | ||
56 | MODULE_PARM_DESC(dma_sel, | ||
57 | "DMA Channel Selection Mask (default: 0x7f = all)"); | ||
58 | |||
51 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) | 59 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) |
52 | { | 60 | { |
53 | return container_of(chan, struct tsi721_bdma_chan, dchan); | 61 | return container_of(chan, struct tsi721_bdma_chan, dchan); |
@@ -718,6 +726,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) | |||
718 | cookie = dma_cookie_assign(txd); | 726 | cookie = dma_cookie_assign(txd); |
719 | desc->status = DMA_IN_PROGRESS; | 727 | desc->status = DMA_IN_PROGRESS; |
720 | list_add_tail(&desc->desc_node, &bdma_chan->queue); | 728 | list_add_tail(&desc->desc_node, &bdma_chan->queue); |
729 | tsi721_advance_work(bdma_chan, NULL); | ||
721 | 730 | ||
722 | spin_unlock_bh(&bdma_chan->lock); | 731 | spin_unlock_bh(&bdma_chan->lock); |
723 | return cookie; | 732 | return cookie; |
@@ -732,7 +741,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
732 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); | 741 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); |
733 | 742 | ||
734 | if (bdma_chan->bd_base) | 743 | if (bdma_chan->bd_base) |
735 | return TSI721_DMA_TX_QUEUE_SZ; | 744 | return dma_txqueue_sz; |
736 | 745 | ||
737 | /* Initialize BDMA channel */ | 746 | /* Initialize BDMA channel */ |
738 | if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { | 747 | if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { |
@@ -742,7 +751,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
742 | } | 751 | } |
743 | 752 | ||
744 | /* Allocate queue of transaction descriptors */ | 753 | /* Allocate queue of transaction descriptors */ |
745 | desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), | 754 | desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc), |
746 | GFP_ATOMIC); | 755 | GFP_ATOMIC); |
747 | if (!desc) { | 756 | if (!desc) { |
748 | tsi_err(&dchan->dev->device, | 757 | tsi_err(&dchan->dev->device, |
@@ -754,7 +763,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
754 | 763 | ||
755 | bdma_chan->tx_desc = desc; | 764 | bdma_chan->tx_desc = desc; |
756 | 765 | ||
757 | for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) { | 766 | for (i = 0; i < dma_txqueue_sz; i++) { |
758 | dma_async_tx_descriptor_init(&desc[i].txd, dchan); | 767 | dma_async_tx_descriptor_init(&desc[i].txd, dchan); |
759 | desc[i].txd.tx_submit = tsi721_tx_submit; | 768 | desc[i].txd.tx_submit = tsi721_tx_submit; |
760 | desc[i].txd.flags = DMA_CTRL_ACK; | 769 | desc[i].txd.flags = DMA_CTRL_ACK; |
@@ -766,7 +775,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
766 | bdma_chan->active = true; | 775 | bdma_chan->active = true; |
767 | tsi721_bdma_interrupt_enable(bdma_chan, 1); | 776 | tsi721_bdma_interrupt_enable(bdma_chan, 1); |
768 | 777 | ||
769 | return TSI721_DMA_TX_QUEUE_SZ; | 778 | return dma_txqueue_sz; |
770 | } | 779 | } |
771 | 780 | ||
772 | static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) | 781 | static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) |
@@ -962,7 +971,7 @@ void tsi721_dma_stop_all(struct tsi721_device *priv) | |||
962 | int i; | 971 | int i; |
963 | 972 | ||
964 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { | 973 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { |
965 | if (i != TSI721_DMACH_MAINT) | 974 | if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) |
966 | tsi721_dma_stop(&priv->bdma[i]); | 975 | tsi721_dma_stop(&priv->bdma[i]); |
967 | } | 976 | } |
968 | } | 977 | } |
@@ -979,7 +988,7 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
979 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { | 988 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { |
980 | struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; | 989 | struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; |
981 | 990 | ||
982 | if (i == TSI721_DMACH_MAINT) | 991 | if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0) |
983 | continue; | 992 | continue; |
984 | 993 | ||
985 | bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); | 994 | bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); |
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index a63a380809d1..23429bdaca84 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
@@ -49,15 +49,6 @@ struct rio_id_table { | |||
49 | static int next_destid = 0; | 49 | static int next_destid = 0; |
50 | static int next_comptag = 1; | 50 | static int next_comptag = 1; |
51 | 51 | ||
52 | static int rio_mport_phys_table[] = { | ||
53 | RIO_EFB_PAR_EP_ID, | ||
54 | RIO_EFB_PAR_EP_REC_ID, | ||
55 | RIO_EFB_SER_EP_ID, | ||
56 | RIO_EFB_SER_EP_REC_ID, | ||
57 | -1, | ||
58 | }; | ||
59 | |||
60 | |||
61 | /** | 52 | /** |
62 | * rio_destid_alloc - Allocate next available destID for given network | 53 | * rio_destid_alloc - Allocate next available destID for given network |
63 | * @net: RIO network | 54 | * @net: RIO network |
@@ -380,10 +371,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
380 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { | 371 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { |
381 | rdev->efptr = result & 0xffff; | 372 | rdev->efptr = result & 0xffff; |
382 | rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, | 373 | rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, |
383 | hopcount); | 374 | hopcount, &rdev->phys_rmap); |
375 | pr_debug("RIO: %s Register Map %d device\n", | ||
376 | __func__, rdev->phys_rmap); | ||
384 | 377 | ||
385 | rdev->em_efptr = rio_mport_get_feature(port, 0, destid, | 378 | rdev->em_efptr = rio_mport_get_feature(port, 0, destid, |
386 | hopcount, RIO_EFB_ERR_MGMNT); | 379 | hopcount, RIO_EFB_ERR_MGMNT); |
380 | if (!rdev->em_efptr) | ||
381 | rdev->em_efptr = rio_mport_get_feature(port, 0, destid, | ||
382 | hopcount, RIO_EFB_ERR_MGMNT_HS); | ||
387 | } | 383 | } |
388 | 384 | ||
389 | rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, | 385 | rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, |
@@ -445,7 +441,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
445 | rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); | 441 | rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); |
446 | } else { | 442 | } else { |
447 | if (do_enum) | 443 | if (do_enum) |
448 | /*Enable Input Output Port (transmitter reviever)*/ | 444 | /*Enable Input Output Port (transmitter receiver)*/ |
449 | rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); | 445 | rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); |
450 | 446 | ||
451 | dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, | 447 | dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, |
@@ -481,10 +477,8 @@ cleanup: | |||
481 | 477 | ||
482 | /** | 478 | /** |
483 | * rio_sport_is_active- Tests if a switch port has an active connection. | 479 | * rio_sport_is_active- Tests if a switch port has an active connection. |
484 | * @port: Master port to send transaction | 480 | * @rdev: RapidIO device object |
485 | * @destid: Associated destination ID for switch | 481 | * @sp: Switch port number |
486 | * @hopcount: Hopcount to reach switch | ||
487 | * @sport: Switch port number | ||
488 | * | 482 | * |
489 | * Reads the port error status CSR for a particular switch port to | 483 | * Reads the port error status CSR for a particular switch port to |
490 | * determine if the port has an active link. Returns | 484 | * determine if the port has an active link. Returns |
@@ -492,31 +486,12 @@ cleanup: | |||
492 | * inactive. | 486 | * inactive. |
493 | */ | 487 | */ |
494 | static int | 488 | static int |
495 | rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) | 489 | rio_sport_is_active(struct rio_dev *rdev, int sp) |
496 | { | 490 | { |
497 | u32 result = 0; | 491 | u32 result = 0; |
498 | u32 ext_ftr_ptr; | ||
499 | 492 | ||
500 | ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0); | 493 | rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp), |
501 | 494 | &result); | |
502 | while (ext_ftr_ptr) { | ||
503 | rio_mport_read_config_32(port, destid, hopcount, | ||
504 | ext_ftr_ptr, &result); | ||
505 | result = RIO_GET_BLOCK_ID(result); | ||
506 | if ((result == RIO_EFB_SER_EP_FREE_ID) || | ||
507 | (result == RIO_EFB_SER_EP_FREE_ID_V13P) || | ||
508 | (result == RIO_EFB_SER_EP_FREC_ID)) | ||
509 | break; | ||
510 | |||
511 | ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, | ||
512 | ext_ftr_ptr); | ||
513 | } | ||
514 | |||
515 | if (ext_ftr_ptr) | ||
516 | rio_mport_read_config_32(port, destid, hopcount, | ||
517 | ext_ftr_ptr + | ||
518 | RIO_PORT_N_ERR_STS_CSR(sport), | ||
519 | &result); | ||
520 | 495 | ||
521 | return result & RIO_PORT_N_ERR_STS_PORT_OK; | 496 | return result & RIO_PORT_N_ERR_STS_PORT_OK; |
522 | } | 497 | } |
@@ -655,9 +630,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
655 | 630 | ||
656 | cur_destid = next_destid; | 631 | cur_destid = next_destid; |
657 | 632 | ||
658 | if (rio_sport_is_active | 633 | if (rio_sport_is_active(rdev, port_num)) { |
659 | (port, RIO_ANY_DESTID(port->sys_size), hopcount, | ||
660 | port_num)) { | ||
661 | pr_debug( | 634 | pr_debug( |
662 | "RIO: scanning device on port %d\n", | 635 | "RIO: scanning device on port %d\n", |
663 | port_num); | 636 | port_num); |
@@ -785,8 +758,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, | |||
785 | if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) | 758 | if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) |
786 | continue; | 759 | continue; |
787 | 760 | ||
788 | if (rio_sport_is_active | 761 | if (rio_sport_is_active(rdev, port_num)) { |
789 | (port, destid, hopcount, port_num)) { | ||
790 | pr_debug( | 762 | pr_debug( |
791 | "RIO: scanning device on port %d\n", | 763 | "RIO: scanning device on port %d\n", |
792 | port_num); | 764 | port_num); |
@@ -831,21 +803,11 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, | |||
831 | static int rio_mport_is_active(struct rio_mport *port) | 803 | static int rio_mport_is_active(struct rio_mport *port) |
832 | { | 804 | { |
833 | u32 result = 0; | 805 | u32 result = 0; |
834 | u32 ext_ftr_ptr; | ||
835 | int *entry = rio_mport_phys_table; | ||
836 | |||
837 | do { | ||
838 | if ((ext_ftr_ptr = | ||
839 | rio_mport_get_feature(port, 1, 0, 0, *entry))) | ||
840 | break; | ||
841 | } while (*++entry >= 0); | ||
842 | |||
843 | if (ext_ftr_ptr) | ||
844 | rio_local_read_config_32(port, | ||
845 | ext_ftr_ptr + | ||
846 | RIO_PORT_N_ERR_STS_CSR(port->index), | ||
847 | &result); | ||
848 | 806 | ||
807 | rio_local_read_config_32(port, | ||
808 | port->phys_efptr + | ||
809 | RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap), | ||
810 | &result); | ||
849 | return result & RIO_PORT_N_ERR_STS_PORT_OK; | 811 | return result & RIO_PORT_N_ERR_STS_PORT_OK; |
850 | } | 812 | } |
851 | 813 | ||
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 0dcaa660cba1..37042858c2db 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
@@ -268,6 +268,12 @@ int rio_request_inb_mbox(struct rio_mport *mport, | |||
268 | mport->inb_msg[mbox].mcback = minb; | 268 | mport->inb_msg[mbox].mcback = minb; |
269 | 269 | ||
270 | rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); | 270 | rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); |
271 | if (rc) { | ||
272 | mport->inb_msg[mbox].mcback = NULL; | ||
273 | mport->inb_msg[mbox].res = NULL; | ||
274 | release_resource(res); | ||
275 | kfree(res); | ||
276 | } | ||
271 | } else | 277 | } else |
272 | rc = -ENOMEM; | 278 | rc = -ENOMEM; |
273 | 279 | ||
@@ -285,13 +291,22 @@ int rio_request_inb_mbox(struct rio_mport *mport, | |||
285 | */ | 291 | */ |
286 | int rio_release_inb_mbox(struct rio_mport *mport, int mbox) | 292 | int rio_release_inb_mbox(struct rio_mport *mport, int mbox) |
287 | { | 293 | { |
288 | if (mport->ops->close_inb_mbox) { | 294 | int rc; |
289 | mport->ops->close_inb_mbox(mport, mbox); | ||
290 | 295 | ||
291 | /* Release the mailbox resource */ | 296 | if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res) |
292 | return release_resource(mport->inb_msg[mbox].res); | 297 | return -EINVAL; |
293 | } else | 298 | |
294 | return -ENOSYS; | 299 | mport->ops->close_inb_mbox(mport, mbox); |
300 | mport->inb_msg[mbox].mcback = NULL; | ||
301 | |||
302 | rc = release_resource(mport->inb_msg[mbox].res); | ||
303 | if (rc) | ||
304 | return rc; | ||
305 | |||
306 | kfree(mport->inb_msg[mbox].res); | ||
307 | mport->inb_msg[mbox].res = NULL; | ||
308 | |||
309 | return 0; | ||
295 | } | 310 | } |
296 | 311 | ||
297 | /** | 312 | /** |
@@ -336,6 +351,12 @@ int rio_request_outb_mbox(struct rio_mport *mport, | |||
336 | mport->outb_msg[mbox].mcback = moutb; | 351 | mport->outb_msg[mbox].mcback = moutb; |
337 | 352 | ||
338 | rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); | 353 | rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); |
354 | if (rc) { | ||
355 | mport->outb_msg[mbox].mcback = NULL; | ||
356 | mport->outb_msg[mbox].res = NULL; | ||
357 | release_resource(res); | ||
358 | kfree(res); | ||
359 | } | ||
339 | } else | 360 | } else |
340 | rc = -ENOMEM; | 361 | rc = -ENOMEM; |
341 | 362 | ||
@@ -353,13 +374,22 @@ int rio_request_outb_mbox(struct rio_mport *mport, | |||
353 | */ | 374 | */ |
354 | int rio_release_outb_mbox(struct rio_mport *mport, int mbox) | 375 | int rio_release_outb_mbox(struct rio_mport *mport, int mbox) |
355 | { | 376 | { |
356 | if (mport->ops->close_outb_mbox) { | 377 | int rc; |
357 | mport->ops->close_outb_mbox(mport, mbox); | ||
358 | 378 | ||
359 | /* Release the mailbox resource */ | 379 | if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res) |
360 | return release_resource(mport->outb_msg[mbox].res); | 380 | return -EINVAL; |
361 | } else | 381 | |
362 | return -ENOSYS; | 382 | mport->ops->close_outb_mbox(mport, mbox); |
383 | mport->outb_msg[mbox].mcback = NULL; | ||
384 | |||
385 | rc = release_resource(mport->outb_msg[mbox].res); | ||
386 | if (rc) | ||
387 | return rc; | ||
388 | |||
389 | kfree(mport->outb_msg[mbox].res); | ||
390 | mport->outb_msg[mbox].res = NULL; | ||
391 | |||
392 | return 0; | ||
363 | } | 393 | } |
364 | 394 | ||
365 | /** | 395 | /** |
@@ -756,10 +786,11 @@ EXPORT_SYMBOL_GPL(rio_unmap_outb_region); | |||
756 | * @local: Indicate a local master port or remote device access | 786 | * @local: Indicate a local master port or remote device access |
757 | * @destid: Destination ID of the device | 787 | * @destid: Destination ID of the device |
758 | * @hopcount: Number of switch hops to the device | 788 | * @hopcount: Number of switch hops to the device |
789 | * @rmap: pointer to location to store register map type info | ||
759 | */ | 790 | */ |
760 | u32 | 791 | u32 |
761 | rio_mport_get_physefb(struct rio_mport *port, int local, | 792 | rio_mport_get_physefb(struct rio_mport *port, int local, |
762 | u16 destid, u8 hopcount) | 793 | u16 destid, u8 hopcount, u32 *rmap) |
763 | { | 794 | { |
764 | u32 ext_ftr_ptr; | 795 | u32 ext_ftr_ptr; |
765 | u32 ftr_header; | 796 | u32 ftr_header; |
@@ -777,14 +808,21 @@ rio_mport_get_physefb(struct rio_mport *port, int local, | |||
777 | ftr_header = RIO_GET_BLOCK_ID(ftr_header); | 808 | ftr_header = RIO_GET_BLOCK_ID(ftr_header); |
778 | switch (ftr_header) { | 809 | switch (ftr_header) { |
779 | 810 | ||
780 | case RIO_EFB_SER_EP_ID_V13P: | ||
781 | case RIO_EFB_SER_EP_REC_ID_V13P: | ||
782 | case RIO_EFB_SER_EP_FREE_ID_V13P: | ||
783 | case RIO_EFB_SER_EP_ID: | 811 | case RIO_EFB_SER_EP_ID: |
784 | case RIO_EFB_SER_EP_REC_ID: | 812 | case RIO_EFB_SER_EP_REC_ID: |
785 | case RIO_EFB_SER_EP_FREE_ID: | 813 | case RIO_EFB_SER_EP_FREE_ID: |
786 | case RIO_EFB_SER_EP_FREC_ID: | 814 | case RIO_EFB_SER_EP_M1_ID: |
815 | case RIO_EFB_SER_EP_SW_M1_ID: | ||
816 | case RIO_EFB_SER_EPF_M1_ID: | ||
817 | case RIO_EFB_SER_EPF_SW_M1_ID: | ||
818 | *rmap = 1; | ||
819 | return ext_ftr_ptr; | ||
787 | 820 | ||
821 | case RIO_EFB_SER_EP_M2_ID: | ||
822 | case RIO_EFB_SER_EP_SW_M2_ID: | ||
823 | case RIO_EFB_SER_EPF_M2_ID: | ||
824 | case RIO_EFB_SER_EPF_SW_M2_ID: | ||
825 | *rmap = 2; | ||
788 | return ext_ftr_ptr; | 826 | return ext_ftr_ptr; |
789 | 827 | ||
790 | default: | 828 | default: |
@@ -843,16 +881,16 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) | |||
843 | u32 regval; | 881 | u32 regval; |
844 | 882 | ||
845 | rio_read_config_32(rdev, | 883 | rio_read_config_32(rdev, |
846 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), | 884 | RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), |
847 | ®val); | 885 | ®val); |
848 | if (lock) | 886 | if (lock) |
849 | regval |= RIO_PORT_N_CTL_LOCKOUT; | 887 | regval |= RIO_PORT_N_CTL_LOCKOUT; |
850 | else | 888 | else |
851 | regval &= ~RIO_PORT_N_CTL_LOCKOUT; | 889 | regval &= ~RIO_PORT_N_CTL_LOCKOUT; |
852 | 890 | ||
853 | rio_write_config_32(rdev, | 891 | rio_write_config_32(rdev, |
854 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), | 892 | RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), |
855 | regval); | 893 | regval); |
856 | return 0; | 894 | return 0; |
857 | } | 895 | } |
858 | EXPORT_SYMBOL_GPL(rio_set_port_lockout); | 896 | EXPORT_SYMBOL_GPL(rio_set_port_lockout); |
@@ -876,6 +914,7 @@ int rio_enable_rx_tx_port(struct rio_mport *port, | |||
876 | #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS | 914 | #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS |
877 | u32 regval; | 915 | u32 regval; |
878 | u32 ext_ftr_ptr; | 916 | u32 ext_ftr_ptr; |
917 | u32 rmap; | ||
879 | 918 | ||
880 | /* | 919 | /* |
881 | * enable rx input tx output port | 920 | * enable rx input tx output port |
@@ -883,34 +922,29 @@ int rio_enable_rx_tx_port(struct rio_mport *port, | |||
883 | pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " | 922 | pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " |
884 | "%d, port_num = %d)\n", local, destid, hopcount, port_num); | 923 | "%d, port_num = %d)\n", local, destid, hopcount, port_num); |
885 | 924 | ||
886 | ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); | 925 | ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, |
926 | hopcount, &rmap); | ||
887 | 927 | ||
888 | if (local) { | 928 | if (local) { |
889 | rio_local_read_config_32(port, ext_ftr_ptr + | 929 | rio_local_read_config_32(port, |
890 | RIO_PORT_N_CTL_CSR(0), | 930 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), |
891 | ®val); | 931 | ®val); |
892 | } else { | 932 | } else { |
893 | if (rio_mport_read_config_32(port, destid, hopcount, | 933 | if (rio_mport_read_config_32(port, destid, hopcount, |
894 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) | 934 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), |
935 | ®val) < 0) | ||
895 | return -EIO; | 936 | return -EIO; |
896 | } | 937 | } |
897 | 938 | ||
898 | if (regval & RIO_PORT_N_CTL_P_TYP_SER) { | 939 | regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX; |
899 | /* serial */ | ||
900 | regval = regval | RIO_PORT_N_CTL_EN_RX_SER | ||
901 | | RIO_PORT_N_CTL_EN_TX_SER; | ||
902 | } else { | ||
903 | /* parallel */ | ||
904 | regval = regval | RIO_PORT_N_CTL_EN_RX_PAR | ||
905 | | RIO_PORT_N_CTL_EN_TX_PAR; | ||
906 | } | ||
907 | 940 | ||
908 | if (local) { | 941 | if (local) { |
909 | rio_local_write_config_32(port, ext_ftr_ptr + | 942 | rio_local_write_config_32(port, |
910 | RIO_PORT_N_CTL_CSR(0), regval); | 943 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval); |
911 | } else { | 944 | } else { |
912 | if (rio_mport_write_config_32(port, destid, hopcount, | 945 | if (rio_mport_write_config_32(port, destid, hopcount, |
913 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) | 946 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), |
947 | regval) < 0) | ||
914 | return -EIO; | 948 | return -EIO; |
915 | } | 949 | } |
916 | #endif | 950 | #endif |
@@ -1012,14 +1046,14 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) | |||
1012 | /* Read from link maintenance response register | 1046 | /* Read from link maintenance response register |
1013 | * to clear valid bit */ | 1047 | * to clear valid bit */ |
1014 | rio_read_config_32(rdev, | 1048 | rio_read_config_32(rdev, |
1015 | rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), | 1049 | RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum), |
1016 | ®val); | 1050 | ®val); |
1017 | udelay(50); | 1051 | udelay(50); |
1018 | } | 1052 | } |
1019 | 1053 | ||
1020 | /* Issue Input-status command */ | 1054 | /* Issue Input-status command */ |
1021 | rio_write_config_32(rdev, | 1055 | rio_write_config_32(rdev, |
1022 | rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum), | 1056 | RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum), |
1023 | RIO_MNT_REQ_CMD_IS); | 1057 | RIO_MNT_REQ_CMD_IS); |
1024 | 1058 | ||
1025 | /* Exit if the response is not expected */ | 1059 | /* Exit if the response is not expected */ |
@@ -1030,7 +1064,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) | |||
1030 | while (checkcount--) { | 1064 | while (checkcount--) { |
1031 | udelay(50); | 1065 | udelay(50); |
1032 | rio_read_config_32(rdev, | 1066 | rio_read_config_32(rdev, |
1033 | rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), | 1067 | RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum), |
1034 | ®val); | 1068 | ®val); |
1035 | if (regval & RIO_PORT_N_MNT_RSP_RVAL) { | 1069 | if (regval & RIO_PORT_N_MNT_RSP_RVAL) { |
1036 | *lnkresp = regval; | 1070 | *lnkresp = regval; |
@@ -1046,6 +1080,13 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) | |||
1046 | * @rdev: Pointer to RIO device control structure | 1080 | * @rdev: Pointer to RIO device control structure |
1047 | * @pnum: Switch port number to clear errors | 1081 | * @pnum: Switch port number to clear errors |
1048 | * @err_status: port error status (if 0 reads register from device) | 1082 | * @err_status: port error status (if 0 reads register from device) |
1083 | * | ||
1084 | * TODO: Currently this routine is not compatible with recovery process | ||
1085 | * specified for idt_gen3 RapidIO switch devices. It has to be reviewed | ||
1086 | * to implement universal recovery process that is compatible full range | ||
1087 | * off available devices. | ||
1088 | * IDT gen3 switch driver now implements HW-specific error handler that | ||
1089 | * issues soft port reset to the port to reset ERR_STOP bits and ackIDs. | ||
1049 | */ | 1090 | */ |
1050 | static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) | 1091 | static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) |
1051 | { | 1092 | { |
@@ -1055,10 +1096,10 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) | |||
1055 | 1096 | ||
1056 | if (err_status == 0) | 1097 | if (err_status == 0) |
1057 | rio_read_config_32(rdev, | 1098 | rio_read_config_32(rdev, |
1058 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), | 1099 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), |
1059 | &err_status); | 1100 | &err_status); |
1060 | 1101 | ||
1061 | if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) { | 1102 | if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) { |
1062 | pr_debug("RIO_EM: servicing Output Error-Stopped state\n"); | 1103 | pr_debug("RIO_EM: servicing Output Error-Stopped state\n"); |
1063 | /* | 1104 | /* |
1064 | * Send a Link-Request/Input-Status control symbol | 1105 | * Send a Link-Request/Input-Status control symbol |
@@ -1073,7 +1114,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) | |||
1073 | far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5; | 1114 | far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5; |
1074 | far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT; | 1115 | far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT; |
1075 | rio_read_config_32(rdev, | 1116 | rio_read_config_32(rdev, |
1076 | rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), | 1117 | RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum), |
1077 | ®val); | 1118 | ®val); |
1078 | pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval); | 1119 | pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval); |
1079 | near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24; | 1120 | near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24; |
@@ -1091,43 +1132,43 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) | |||
1091 | * far inbound. | 1132 | * far inbound. |
1092 | */ | 1133 | */ |
1093 | rio_write_config_32(rdev, | 1134 | rio_write_config_32(rdev, |
1094 | rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), | 1135 | RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum), |
1095 | (near_ackid << 24) | | 1136 | (near_ackid << 24) | |
1096 | (far_ackid << 8) | far_ackid); | 1137 | (far_ackid << 8) | far_ackid); |
1097 | /* Align far outstanding/outbound ackIDs with | 1138 | /* Align far outstanding/outbound ackIDs with |
1098 | * near inbound. | 1139 | * near inbound. |
1099 | */ | 1140 | */ |
1100 | far_ackid++; | 1141 | far_ackid++; |
1101 | if (nextdev) | 1142 | if (!nextdev) { |
1102 | rio_write_config_32(nextdev, | 1143 | pr_debug("RIO_EM: nextdev pointer == NULL\n"); |
1103 | nextdev->phys_efptr + | 1144 | goto rd_err; |
1104 | RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)), | 1145 | } |
1105 | (far_ackid << 24) | | 1146 | |
1106 | (near_ackid << 8) | near_ackid); | 1147 | rio_write_config_32(nextdev, |
1107 | else | 1148 | RIO_DEV_PORT_N_ACK_STS_CSR(nextdev, |
1108 | pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n"); | 1149 | RIO_GET_PORT_NUM(nextdev->swpinfo)), |
1150 | (far_ackid << 24) | | ||
1151 | (near_ackid << 8) | near_ackid); | ||
1109 | } | 1152 | } |
1110 | rd_err: | 1153 | rd_err: |
1111 | rio_read_config_32(rdev, | 1154 | rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), |
1112 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), | 1155 | &err_status); |
1113 | &err_status); | ||
1114 | pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); | 1156 | pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); |
1115 | } | 1157 | } |
1116 | 1158 | ||
1117 | if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) { | 1159 | if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) { |
1118 | pr_debug("RIO_EM: servicing Input Error-Stopped state\n"); | 1160 | pr_debug("RIO_EM: servicing Input Error-Stopped state\n"); |
1119 | rio_get_input_status(nextdev, | 1161 | rio_get_input_status(nextdev, |
1120 | RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); | 1162 | RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); |
1121 | udelay(50); | 1163 | udelay(50); |
1122 | 1164 | ||
1123 | rio_read_config_32(rdev, | 1165 | rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), |
1124 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), | 1166 | &err_status); |
1125 | &err_status); | ||
1126 | pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); | 1167 | pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); |
1127 | } | 1168 | } |
1128 | 1169 | ||
1129 | return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | | 1170 | return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | |
1130 | RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0; | 1171 | RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0; |
1131 | } | 1172 | } |
1132 | 1173 | ||
1133 | /** | 1174 | /** |
@@ -1227,9 +1268,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) | |||
1227 | if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) | 1268 | if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) |
1228 | rdev->rswitch->ops->em_handle(rdev, portnum); | 1269 | rdev->rswitch->ops->em_handle(rdev, portnum); |
1229 | 1270 | ||
1230 | rio_read_config_32(rdev, | 1271 | rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), |
1231 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), | 1272 | &err_status); |
1232 | &err_status); | ||
1233 | pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); | 1273 | pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); |
1234 | 1274 | ||
1235 | if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { | 1275 | if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { |
@@ -1246,8 +1286,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) | |||
1246 | * Depending on the link partner state, two attempts | 1286 | * Depending on the link partner state, two attempts |
1247 | * may be needed for successful recovery. | 1287 | * may be needed for successful recovery. |
1248 | */ | 1288 | */ |
1249 | if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | | 1289 | if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | |
1250 | RIO_PORT_N_ERR_STS_PW_INP_ES)) { | 1290 | RIO_PORT_N_ERR_STS_INP_ES)) { |
1251 | if (rio_clr_err_stopped(rdev, portnum, err_status)) | 1291 | if (rio_clr_err_stopped(rdev, portnum, err_status)) |
1252 | rio_clr_err_stopped(rdev, portnum, 0); | 1292 | rio_clr_err_stopped(rdev, portnum, 0); |
1253 | } | 1293 | } |
@@ -1257,10 +1297,18 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) | |||
1257 | rdev->rswitch->port_ok &= ~(1 << portnum); | 1297 | rdev->rswitch->port_ok &= ~(1 << portnum); |
1258 | rio_set_port_lockout(rdev, portnum, 1); | 1298 | rio_set_port_lockout(rdev, portnum, 1); |
1259 | 1299 | ||
1300 | if (rdev->phys_rmap == 1) { | ||
1260 | rio_write_config_32(rdev, | 1301 | rio_write_config_32(rdev, |
1261 | rdev->phys_efptr + | 1302 | RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum), |
1262 | RIO_PORT_N_ACK_STS_CSR(portnum), | ||
1263 | RIO_PORT_N_ACK_CLEAR); | 1303 | RIO_PORT_N_ACK_CLEAR); |
1304 | } else { | ||
1305 | rio_write_config_32(rdev, | ||
1306 | RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum), | ||
1307 | RIO_PORT_N_OB_ACK_CLEAR); | ||
1308 | rio_write_config_32(rdev, | ||
1309 | RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum), | ||
1310 | 0); | ||
1311 | } | ||
1264 | 1312 | ||
1265 | /* Schedule Extraction Service */ | 1313 | /* Schedule Extraction Service */ |
1266 | pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", | 1314 | pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", |
@@ -1289,9 +1337,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) | |||
1289 | } | 1337 | } |
1290 | 1338 | ||
1291 | /* Clear remaining error bits and Port-Write Pending bit */ | 1339 | /* Clear remaining error bits and Port-Write Pending bit */ |
1292 | rio_write_config_32(rdev, | 1340 | rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), |
1293 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), | 1341 | err_status); |
1294 | err_status); | ||
1295 | 1342 | ||
1296 | return 0; | 1343 | return 0; |
1297 | } | 1344 | } |
@@ -1342,20 +1389,7 @@ EXPORT_SYMBOL_GPL(rio_mport_get_efb); | |||
1342 | * Tell if a device supports a given RapidIO capability. | 1389 | * Tell if a device supports a given RapidIO capability. |
1343 | * Returns the offset of the requested extended feature | 1390 | * Returns the offset of the requested extended feature |
1344 | * block within the device's RIO configuration space or | 1391 | * block within the device's RIO configuration space or |
1345 | * 0 in case the device does not support it. Possible | 1392 | * 0 in case the device does not support it. |
1346 | * values for @ftr: | ||
1347 | * | ||
1348 | * %RIO_EFB_PAR_EP_ID LP/LVDS EP Devices | ||
1349 | * | ||
1350 | * %RIO_EFB_PAR_EP_REC_ID LP/LVDS EP Recovery Devices | ||
1351 | * | ||
1352 | * %RIO_EFB_PAR_EP_FREE_ID LP/LVDS EP Free Devices | ||
1353 | * | ||
1354 | * %RIO_EFB_SER_EP_ID LP/Serial EP Devices | ||
1355 | * | ||
1356 | * %RIO_EFB_SER_EP_REC_ID LP/Serial EP Recovery Devices | ||
1357 | * | ||
1358 | * %RIO_EFB_SER_EP_FREE_ID LP/Serial EP Free Devices | ||
1359 | */ | 1393 | */ |
1360 | u32 | 1394 | u32 |
1361 | rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, | 1395 | rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, |
@@ -1848,7 +1882,9 @@ EXPORT_SYMBOL_GPL(rio_release_dma); | |||
1848 | * Initializes RapidIO capable DMA channel for the specified data transfer. | 1882 | * Initializes RapidIO capable DMA channel for the specified data transfer. |
1849 | * Uses DMA channel private extension to pass information related to remote | 1883 | * Uses DMA channel private extension to pass information related to remote |
1850 | * target RIO device. | 1884 | * target RIO device. |
1851 | * Returns pointer to DMA transaction descriptor or NULL if failed. | 1885 | * |
1886 | * Returns: pointer to DMA transaction descriptor if successful, | ||
1887 | * error-valued pointer or NULL if failed. | ||
1852 | */ | 1888 | */ |
1853 | struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, | 1889 | struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, |
1854 | u16 destid, struct rio_dma_data *data, | 1890 | u16 destid, struct rio_dma_data *data, |
@@ -1883,7 +1919,9 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); | |||
1883 | * Initializes RapidIO capable DMA channel for the specified data transfer. | 1919 | * Initializes RapidIO capable DMA channel for the specified data transfer. |
1884 | * Uses DMA channel private extension to pass information related to remote | 1920 | * Uses DMA channel private extension to pass information related to remote |
1885 | * target RIO device. | 1921 | * target RIO device. |
1886 | * Returns pointer to DMA transaction descriptor or NULL if failed. | 1922 | * |
1923 | * Returns: pointer to DMA transaction descriptor if successful, | ||
1924 | * error-valued pointer or NULL if failed. | ||
1887 | */ | 1925 | */ |
1888 | struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, | 1926 | struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, |
1889 | struct dma_chan *dchan, struct rio_dma_data *data, | 1927 | struct dma_chan *dchan, struct rio_dma_data *data, |
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index 625d09add001..9796b3fee70d 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h | |||
@@ -22,7 +22,7 @@ | |||
22 | extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, | 22 | extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, |
23 | u8 hopcount, int ftr); | 23 | u8 hopcount, int ftr); |
24 | extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, | 24 | extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, |
25 | u16 destid, u8 hopcount); | 25 | u16 destid, u8 hopcount, u32 *rmap); |
26 | extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, | 26 | extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, |
27 | u8 hopcount, u32 from); | 27 | u8 hopcount, u32 from); |
28 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, | 28 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, |
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c new file mode 100644 index 000000000000..cecc15a880de --- /dev/null +++ b/drivers/rapidio/rio_cm.c | |||
@@ -0,0 +1,2366 @@ | |||
1 | /* | ||
2 | * rio_cm - RapidIO Channelized Messaging Driver | ||
3 | * | ||
4 | * Copyright 2013-2016 Integrated Device Technology, Inc. | ||
5 | * Copyright (c) 2015, Prodrive Technologies | ||
6 | * Copyright (c) 2015, RapidIO Trade Association | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, | ||
14 | * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF | ||
15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE | ||
16 | * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS. | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/rio.h> | ||
25 | #include <linux/rio_drv.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/idr.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/cdev.h> | ||
30 | #include <linux/fs.h> | ||
31 | #include <linux/poll.h> | ||
32 | #include <linux/reboot.h> | ||
33 | #include <linux/bitops.h> | ||
34 | #include <linux/printk.h> | ||
35 | #include <linux/rio_cm_cdev.h> | ||
36 | |||
37 | #define DRV_NAME "rio_cm" | ||
38 | #define DRV_VERSION "1.0.0" | ||
39 | #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" | ||
40 | #define DRV_DESC "RapidIO Channelized Messaging Driver" | ||
41 | #define DEV_NAME "rio_cm" | ||
42 | |||
43 | /* Debug output filtering masks */ | ||
44 | enum { | ||
45 | DBG_NONE = 0, | ||
46 | DBG_INIT = BIT(0), /* driver init */ | ||
47 | DBG_EXIT = BIT(1), /* driver exit */ | ||
48 | DBG_MPORT = BIT(2), /* mport add/remove */ | ||
49 | DBG_RDEV = BIT(3), /* RapidIO device add/remove */ | ||
50 | DBG_CHOP = BIT(4), /* channel operations */ | ||
51 | DBG_WAIT = BIT(5), /* waiting for events */ | ||
52 | DBG_TX = BIT(6), /* message TX */ | ||
53 | DBG_TX_EVENT = BIT(7), /* message TX event */ | ||
54 | DBG_RX_DATA = BIT(8), /* inbound data messages */ | ||
55 | DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */ | ||
56 | DBG_ALL = ~0, | ||
57 | }; | ||
58 | |||
59 | #ifdef DEBUG | ||
60 | #define riocm_debug(level, fmt, arg...) \ | ||
61 | do { \ | ||
62 | if (DBG_##level & dbg_level) \ | ||
63 | pr_debug(DRV_NAME ": %s " fmt "\n", \ | ||
64 | __func__, ##arg); \ | ||
65 | } while (0) | ||
66 | #else | ||
67 | #define riocm_debug(level, fmt, arg...) \ | ||
68 | no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) | ||
69 | #endif | ||
70 | |||
71 | #define riocm_warn(fmt, arg...) \ | ||
72 | pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) | ||
73 | |||
74 | #define riocm_error(fmt, arg...) \ | ||
75 | pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) | ||
76 | |||
77 | |||
78 | static int cmbox = 1; | ||
79 | module_param(cmbox, int, S_IRUGO); | ||
80 | MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); | ||
81 | |||
82 | static int chstart = 256; | ||
83 | module_param(chstart, int, S_IRUGO); | ||
84 | MODULE_PARM_DESC(chstart, | ||
85 | "Start channel number for dynamic allocation (default 256)"); | ||
86 | |||
87 | #ifdef DEBUG | ||
88 | static u32 dbg_level = DBG_NONE; | ||
89 | module_param(dbg_level, uint, S_IWUSR | S_IRUGO); | ||
90 | MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); | ||
91 | #endif | ||
92 | |||
93 | MODULE_AUTHOR(DRV_AUTHOR); | ||
94 | MODULE_DESCRIPTION(DRV_DESC); | ||
95 | MODULE_LICENSE("GPL"); | ||
96 | MODULE_VERSION(DRV_VERSION); | ||
97 | |||
98 | #define RIOCM_TX_RING_SIZE 128 | ||
99 | #define RIOCM_RX_RING_SIZE 128 | ||
100 | #define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ | ||
101 | |||
102 | #define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ | ||
103 | #define RIOCM_CHNUM_AUTO 0 | ||
104 | #define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ | ||
105 | |||
106 | enum rio_cm_state { | ||
107 | RIO_CM_IDLE, | ||
108 | RIO_CM_CONNECT, | ||
109 | RIO_CM_CONNECTED, | ||
110 | RIO_CM_DISCONNECT, | ||
111 | RIO_CM_CHAN_BOUND, | ||
112 | RIO_CM_LISTEN, | ||
113 | RIO_CM_DESTROYING, | ||
114 | }; | ||
115 | |||
116 | enum rio_cm_pkt_type { | ||
117 | RIO_CM_SYS = 0xaa, | ||
118 | RIO_CM_CHAN = 0x55, | ||
119 | }; | ||
120 | |||
121 | enum rio_cm_chop { | ||
122 | CM_CONN_REQ, | ||
123 | CM_CONN_ACK, | ||
124 | CM_CONN_CLOSE, | ||
125 | CM_DATA_MSG, | ||
126 | }; | ||
127 | |||
128 | struct rio_ch_base_bhdr { | ||
129 | u32 src_id; | ||
130 | u32 dst_id; | ||
131 | #define RIO_HDR_LETTER_MASK 0xffff0000 | ||
132 | #define RIO_HDR_MBOX_MASK 0x0000ffff | ||
133 | u8 src_mbox; | ||
134 | u8 dst_mbox; | ||
135 | u8 type; | ||
136 | } __attribute__((__packed__)); | ||
137 | |||
138 | struct rio_ch_chan_hdr { | ||
139 | struct rio_ch_base_bhdr bhdr; | ||
140 | u8 ch_op; | ||
141 | u16 dst_ch; | ||
142 | u16 src_ch; | ||
143 | u16 msg_len; | ||
144 | u16 rsrvd; | ||
145 | } __attribute__((__packed__)); | ||
146 | |||
147 | struct tx_req { | ||
148 | struct list_head node; | ||
149 | struct rio_dev *rdev; | ||
150 | void *buffer; | ||
151 | size_t len; | ||
152 | }; | ||
153 | |||
154 | struct cm_dev { | ||
155 | struct list_head list; | ||
156 | struct rio_mport *mport; | ||
157 | void *rx_buf[RIOCM_RX_RING_SIZE]; | ||
158 | int rx_slots; | ||
159 | struct mutex rx_lock; | ||
160 | |||
161 | void *tx_buf[RIOCM_TX_RING_SIZE]; | ||
162 | int tx_slot; | ||
163 | int tx_cnt; | ||
164 | int tx_ack_slot; | ||
165 | struct list_head tx_reqs; | ||
166 | spinlock_t tx_lock; | ||
167 | |||
168 | struct list_head peers; | ||
169 | u32 npeers; | ||
170 | struct workqueue_struct *rx_wq; | ||
171 | struct work_struct rx_work; | ||
172 | }; | ||
173 | |||
174 | struct chan_rx_ring { | ||
175 | void *buf[RIOCM_RX_RING_SIZE]; | ||
176 | int head; | ||
177 | int tail; | ||
178 | int count; | ||
179 | |||
180 | /* Tracking RX buffers reported to upper level */ | ||
181 | void *inuse[RIOCM_RX_RING_SIZE]; | ||
182 | int inuse_cnt; | ||
183 | }; | ||
184 | |||
185 | struct rio_channel { | ||
186 | u16 id; /* local channel ID */ | ||
187 | struct kref ref; /* channel refcount */ | ||
188 | struct file *filp; | ||
189 | struct cm_dev *cmdev; /* associated CM device object */ | ||
190 | struct rio_dev *rdev; /* remote RapidIO device */ | ||
191 | enum rio_cm_state state; | ||
192 | int error; | ||
193 | spinlock_t lock; | ||
194 | void *context; | ||
195 | u32 loc_destid; /* local destID */ | ||
196 | u32 rem_destid; /* remote destID */ | ||
197 | u16 rem_channel; /* remote channel ID */ | ||
198 | struct list_head accept_queue; | ||
199 | struct list_head ch_node; | ||
200 | struct completion comp; | ||
201 | struct completion comp_close; | ||
202 | struct chan_rx_ring rx_ring; | ||
203 | }; | ||
204 | |||
205 | struct cm_peer { | ||
206 | struct list_head node; | ||
207 | struct rio_dev *rdev; | ||
208 | }; | ||
209 | |||
210 | struct rio_cm_work { | ||
211 | struct work_struct work; | ||
212 | struct cm_dev *cm; | ||
213 | void *data; | ||
214 | }; | ||
215 | |||
216 | struct conn_req { | ||
217 | struct list_head node; | ||
218 | u32 destid; /* requester destID */ | ||
219 | u16 chan; /* requester channel ID */ | ||
220 | struct cm_dev *cmdev; | ||
221 | }; | ||
222 | |||
223 | /* | ||
224 | * A channel_dev structure represents a CM_CDEV | ||
225 | * @cdev Character device | ||
226 | * @dev Associated device object | ||
227 | */ | ||
228 | struct channel_dev { | ||
229 | struct cdev cdev; | ||
230 | struct device *dev; | ||
231 | }; | ||
232 | |||
233 | static struct rio_channel *riocm_ch_alloc(u16 ch_num); | ||
234 | static void riocm_ch_free(struct kref *ref); | ||
235 | static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, | ||
236 | void *buffer, size_t len); | ||
237 | static int riocm_ch_close(struct rio_channel *ch); | ||
238 | |||
239 | static DEFINE_SPINLOCK(idr_lock); | ||
240 | static DEFINE_IDR(ch_idr); | ||
241 | |||
242 | static LIST_HEAD(cm_dev_list); | ||
243 | static DECLARE_RWSEM(rdev_sem); | ||
244 | |||
245 | static struct class *dev_class; | ||
246 | static unsigned int dev_major; | ||
247 | static unsigned int dev_minor_base; | ||
248 | static dev_t dev_number; | ||
249 | static struct channel_dev riocm_cdev; | ||
250 | |||
251 | #define is_msg_capable(src_ops, dst_ops) \ | ||
252 | ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ | ||
253 | (dst_ops & RIO_DST_OPS_DATA_MSG)) | ||
254 | #define dev_cm_capable(dev) \ | ||
255 | is_msg_capable(dev->src_ops, dev->dst_ops) | ||
256 | |||
257 | static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) | ||
258 | { | ||
259 | int ret; | ||
260 | |||
261 | spin_lock_bh(&ch->lock); | ||
262 | ret = (ch->state == cmp); | ||
263 | spin_unlock_bh(&ch->lock); | ||
264 | return ret; | ||
265 | } | ||
266 | |||
267 | static int riocm_cmp_exch(struct rio_channel *ch, | ||
268 | enum rio_cm_state cmp, enum rio_cm_state exch) | ||
269 | { | ||
270 | int ret; | ||
271 | |||
272 | spin_lock_bh(&ch->lock); | ||
273 | ret = (ch->state == cmp); | ||
274 | if (ret) | ||
275 | ch->state = exch; | ||
276 | spin_unlock_bh(&ch->lock); | ||
277 | return ret; | ||
278 | } | ||
279 | |||
280 | static enum rio_cm_state riocm_exch(struct rio_channel *ch, | ||
281 | enum rio_cm_state exch) | ||
282 | { | ||
283 | enum rio_cm_state old; | ||
284 | |||
285 | spin_lock_bh(&ch->lock); | ||
286 | old = ch->state; | ||
287 | ch->state = exch; | ||
288 | spin_unlock_bh(&ch->lock); | ||
289 | return old; | ||
290 | } | ||
291 | |||
292 | static struct rio_channel *riocm_get_channel(u16 nr) | ||
293 | { | ||
294 | struct rio_channel *ch; | ||
295 | |||
296 | spin_lock_bh(&idr_lock); | ||
297 | ch = idr_find(&ch_idr, nr); | ||
298 | if (ch) | ||
299 | kref_get(&ch->ref); | ||
300 | spin_unlock_bh(&idr_lock); | ||
301 | return ch; | ||
302 | } | ||
303 | |||
304 | static void riocm_put_channel(struct rio_channel *ch) | ||
305 | { | ||
306 | kref_put(&ch->ref, riocm_ch_free); | ||
307 | } | ||
308 | |||
309 | static void *riocm_rx_get_msg(struct cm_dev *cm) | ||
310 | { | ||
311 | void *msg; | ||
312 | int i; | ||
313 | |||
314 | msg = rio_get_inb_message(cm->mport, cmbox); | ||
315 | if (msg) { | ||
316 | for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { | ||
317 | if (cm->rx_buf[i] == msg) { | ||
318 | cm->rx_buf[i] = NULL; | ||
319 | cm->rx_slots++; | ||
320 | break; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | if (i == RIOCM_RX_RING_SIZE) | ||
325 | riocm_warn("no record for buffer 0x%p", msg); | ||
326 | } | ||
327 | |||
328 | return msg; | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * riocm_rx_fill - fills a ring of receive buffers for given cm device | ||
333 | * @cm: cm_dev object | ||
334 | * @nent: max number of entries to fill | ||
335 | * | ||
336 | * Returns: none | ||
337 | */ | ||
338 | static void riocm_rx_fill(struct cm_dev *cm, int nent) | ||
339 | { | ||
340 | int i; | ||
341 | |||
342 | if (cm->rx_slots == 0) | ||
343 | return; | ||
344 | |||
345 | for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { | ||
346 | if (cm->rx_buf[i] == NULL) { | ||
347 | cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); | ||
348 | if (cm->rx_buf[i] == NULL) | ||
349 | break; | ||
350 | rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); | ||
351 | cm->rx_slots--; | ||
352 | nent--; | ||
353 | } | ||
354 | } | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * riocm_rx_free - frees all receive buffers associated with given cm device | ||
359 | * @cm: cm_dev object | ||
360 | * | ||
361 | * Returns: none | ||
362 | */ | ||
363 | static void riocm_rx_free(struct cm_dev *cm) | ||
364 | { | ||
365 | int i; | ||
366 | |||
367 | for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { | ||
368 | if (cm->rx_buf[i] != NULL) { | ||
369 | kfree(cm->rx_buf[i]); | ||
370 | cm->rx_buf[i] = NULL; | ||
371 | } | ||
372 | } | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * riocm_req_handler - connection request handler | ||
377 | * @cm: cm_dev object | ||
378 | * @req_data: pointer to the request packet | ||
379 | * | ||
380 | * Returns: 0 if success, or | ||
381 | * -EINVAL if channel is not in correct state, | ||
382 | * -ENODEV if cannot find a channel with specified ID, | ||
383 | * -ENOMEM if unable to allocate memory to store the request | ||
384 | */ | ||
385 | static int riocm_req_handler(struct cm_dev *cm, void *req_data) | ||
386 | { | ||
387 | struct rio_channel *ch; | ||
388 | struct conn_req *req; | ||
389 | struct rio_ch_chan_hdr *hh = req_data; | ||
390 | u16 chnum; | ||
391 | |||
392 | chnum = ntohs(hh->dst_ch); | ||
393 | |||
394 | ch = riocm_get_channel(chnum); | ||
395 | |||
396 | if (!ch) | ||
397 | return -ENODEV; | ||
398 | |||
399 | if (ch->state != RIO_CM_LISTEN) { | ||
400 | riocm_debug(RX_CMD, "channel %d is not in listen state", chnum); | ||
401 | riocm_put_channel(ch); | ||
402 | return -EINVAL; | ||
403 | } | ||
404 | |||
405 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
406 | if (!req) { | ||
407 | riocm_put_channel(ch); | ||
408 | return -ENOMEM; | ||
409 | } | ||
410 | |||
411 | req->destid = ntohl(hh->bhdr.src_id); | ||
412 | req->chan = ntohs(hh->src_ch); | ||
413 | req->cmdev = cm; | ||
414 | |||
415 | spin_lock_bh(&ch->lock); | ||
416 | list_add_tail(&req->node, &ch->accept_queue); | ||
417 | spin_unlock_bh(&ch->lock); | ||
418 | complete(&ch->comp); | ||
419 | riocm_put_channel(ch); | ||
420 | |||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * riocm_resp_handler - response to connection request handler | ||
426 | * @resp_data: pointer to the response packet | ||
427 | * | ||
428 | * Returns: 0 if success, or | ||
429 | * -EINVAL if channel is not in correct state, | ||
430 | * -ENODEV if cannot find a channel with specified ID, | ||
431 | */ | ||
432 | static int riocm_resp_handler(void *resp_data) | ||
433 | { | ||
434 | struct rio_channel *ch; | ||
435 | struct rio_ch_chan_hdr *hh = resp_data; | ||
436 | u16 chnum; | ||
437 | |||
438 | chnum = ntohs(hh->dst_ch); | ||
439 | ch = riocm_get_channel(chnum); | ||
440 | if (!ch) | ||
441 | return -ENODEV; | ||
442 | |||
443 | if (ch->state != RIO_CM_CONNECT) { | ||
444 | riocm_put_channel(ch); | ||
445 | return -EINVAL; | ||
446 | } | ||
447 | |||
448 | riocm_exch(ch, RIO_CM_CONNECTED); | ||
449 | ch->rem_channel = ntohs(hh->src_ch); | ||
450 | complete(&ch->comp); | ||
451 | riocm_put_channel(ch); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * riocm_close_handler - channel close request handler | ||
458 | * @req_data: pointer to the request packet | ||
459 | * | ||
460 | * Returns: 0 if success, or | ||
461 | * -ENODEV if cannot find a channel with specified ID, | ||
462 | * + error codes returned by riocm_ch_close. | ||
463 | */ | ||
464 | static int riocm_close_handler(void *data) | ||
465 | { | ||
466 | struct rio_channel *ch; | ||
467 | struct rio_ch_chan_hdr *hh = data; | ||
468 | int ret; | ||
469 | |||
470 | riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); | ||
471 | |||
472 | spin_lock_bh(&idr_lock); | ||
473 | ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); | ||
474 | if (!ch) { | ||
475 | spin_unlock_bh(&idr_lock); | ||
476 | return -ENODEV; | ||
477 | } | ||
478 | idr_remove(&ch_idr, ch->id); | ||
479 | spin_unlock_bh(&idr_lock); | ||
480 | |||
481 | riocm_exch(ch, RIO_CM_DISCONNECT); | ||
482 | |||
483 | ret = riocm_ch_close(ch); | ||
484 | if (ret) | ||
485 | riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); | ||
486 | |||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * rio_cm_handler - function that services request (non-data) packets | ||
492 | * @cm: cm_dev object | ||
493 | * @data: pointer to the packet | ||
494 | */ | ||
495 | static void rio_cm_handler(struct cm_dev *cm, void *data) | ||
496 | { | ||
497 | struct rio_ch_chan_hdr *hdr; | ||
498 | |||
499 | if (!rio_mport_is_running(cm->mport)) | ||
500 | goto out; | ||
501 | |||
502 | hdr = data; | ||
503 | |||
504 | riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", | ||
505 | hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); | ||
506 | |||
507 | switch (hdr->ch_op) { | ||
508 | case CM_CONN_REQ: | ||
509 | riocm_req_handler(cm, data); | ||
510 | break; | ||
511 | case CM_CONN_ACK: | ||
512 | riocm_resp_handler(data); | ||
513 | break; | ||
514 | case CM_CONN_CLOSE: | ||
515 | riocm_close_handler(data); | ||
516 | break; | ||
517 | default: | ||
518 | riocm_error("Invalid packet header"); | ||
519 | break; | ||
520 | } | ||
521 | out: | ||
522 | kfree(data); | ||
523 | } | ||
524 | |||
/*
 * rio_rx_data_handler - received data packet handler
 * @cm: cm_dev object
 * @buf: data packet (ownership transferred: stored in the channel's RX ring
 *       on success, kfree'd here on every failure path)
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EIO if channel is not in CONNECTED state,
 *          -ENOMEM if channel RX queue is full (packet discarded)
 */
static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
{
	struct rio_ch_chan_hdr *hdr;
	struct rio_channel *ch;

	hdr = buf;

	riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));

	ch = riocm_get_channel(ntohs(hdr->dst_ch));
	if (!ch) {
		/* Discard data message for non-existing channel */
		kfree(buf);
		return -ENODEV;
	}

	/* Place pointer to the buffer into channel's RX queue */
	spin_lock(&ch->lock);

	if (ch->state != RIO_CM_CONNECTED) {
		/* Channel is not ready to receive data, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
			    ch->id, ch->state);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -EIO;
	}

	if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
		/* If RX ring is full, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is full", ch->id);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -ENOMEM;
	}

	/* Store the buffer at the head slot and advance head, wrapping
	 * around the fixed-size ring.
	 */
	ch->rx_ring.buf[ch->rx_ring.head] = buf;
	ch->rx_ring.head++;
	ch->rx_ring.count++;
	ch->rx_ring.head %= RIOCM_RX_RING_SIZE;

	/* Wake up a receiver possibly blocked in riocm_ch_receive() */
	complete(&ch->comp);

	spin_unlock(&ch->lock);
	riocm_put_channel(ch);

	return 0;
}
585 | |||
/*
 * rio_ibmsg_handler - inbound message packet handler
 * @work: rx_work member embedded in the cm_dev being serviced
 *
 * Drains all pending inbound messages from the mport, replacing each
 * consumed RX buffer one-for-one, and dispatches every message either to
 * the data-path handler or to the control-packet handler.
 */
static void rio_ibmsg_handler(struct work_struct *work)
{
	struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
	void *data;
	struct rio_ch_chan_hdr *hdr;

	if (!rio_mport_is_running(cm->mport))
		return;

	while (1) {
		/* Fetch one message and refill the RX pool under rx_lock so
		 * buffer accounting stays consistent with concurrent paths.
		 */
		mutex_lock(&cm->rx_lock);
		data = riocm_rx_get_msg(cm);
		if (data)
			riocm_rx_fill(cm, 1);
		mutex_unlock(&cm->rx_lock);

		if (data == NULL)
			break;

		hdr = data;

		if (hdr->bhdr.type != RIO_CM_CHAN) {
			/* For now simply discard packets other than channel */
			riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
				    hdr->bhdr.type);
			kfree(data);
			continue;
		}

		/* Process a channel message; the called handler takes
		 * ownership of (and eventually frees) the buffer.
		 */
		if (hdr->ch_op == CM_DATA_MSG)
			rio_rx_data_handler(cm, data);
		else
			rio_cm_handler(cm, data);
	}
}
625 | |||
626 | static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, | ||
627 | int mbox, int slot) | ||
628 | { | ||
629 | struct cm_dev *cm = dev_id; | ||
630 | |||
631 | if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) | ||
632 | queue_work(cm->rx_wq, &cm->rx_work); | ||
633 | } | ||
634 | |||
/*
 * rio_txcq_handler - TX completion handler
 * @cm: cm_dev object
 * @slot: TX queue slot
 *
 * TX completion handler also ensures that pending request packets are placed
 * into transmit queue as soon as a free slot becomes available. This is done
 * to give higher priority to request packets during high intensity data flow.
 */
static void rio_txcq_handler(struct cm_dev *cm, int slot)
{
	int ack_slot;

	/* ATTN: Add TX completion notification if/when direct buffer
	 * transfer is implemented. At this moment only correct tracking
	 * of tx_count is important.
	 */
	riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
		    cm->mport->id, slot, cm->tx_cnt);

	spin_lock(&cm->tx_lock);
	ack_slot = cm->tx_ack_slot;

	if (ack_slot == slot)
		riocm_debug(TX_EVENT, "slot == ack_slot");

	/* Release every slot between the last acknowledged position and the
	 * reported one; the tx_cnt == ring-size clause handles the full-ring
	 * case where ack_slot == slot would otherwise be ambiguous.
	 */
	while (cm->tx_cnt && ((ack_slot != slot) ||
	       (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {

		cm->tx_buf[ack_slot] = NULL;
		++ack_slot;
		ack_slot &= (RIOCM_TX_RING_SIZE - 1);
		cm->tx_cnt--;
	}

	/* Sanity check; WARN_ON repeats the same condition to also produce
	 * a stack trace in addition to the log message.
	 */
	if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
		riocm_error("tx_cnt %d out of sync", cm->tx_cnt);

	WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));

	cm->tx_ack_slot = ack_slot;

	/*
	 * If there are pending requests, insert them into transmit queue
	 */
	if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
		struct tx_req *req, *_req;
		int rc;

		list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
			list_del(&req->node);
			cm->tx_buf[cm->tx_slot] = req->buffer;
			rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
						  req->buffer, req->len);
			/* Buffer may be freed immediately because the ATTN
			 * notes elsewhere in this file rely on
			 * add_outb_message() copying TX data internally.
			 * NOTE(review): rc is currently ignored — a failed
			 * submission is silently dropped here.
			 */
			kfree(req->buffer);
			kfree(req);

			++cm->tx_cnt;
			++cm->tx_slot;
			cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
			if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
				break;
		}
	}

	spin_unlock(&cm->tx_lock);
}
702 | |||
703 | static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, | ||
704 | int mbox, int slot) | ||
705 | { | ||
706 | struct cm_dev *cm = dev_id; | ||
707 | |||
708 | if (cm && rio_mport_is_running(cm->mport)) | ||
709 | rio_txcq_handler(cm, slot); | ||
710 | } | ||
711 | |||
712 | static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, | ||
713 | void *buffer, size_t len) | ||
714 | { | ||
715 | unsigned long flags; | ||
716 | struct tx_req *treq; | ||
717 | |||
718 | treq = kzalloc(sizeof(*treq), GFP_KERNEL); | ||
719 | if (treq == NULL) | ||
720 | return -ENOMEM; | ||
721 | |||
722 | treq->rdev = rdev; | ||
723 | treq->buffer = buffer; | ||
724 | treq->len = len; | ||
725 | |||
726 | spin_lock_irqsave(&cm->tx_lock, flags); | ||
727 | list_add_tail(&treq->node, &cm->tx_reqs); | ||
728 | spin_unlock_irqrestore(&cm->tx_lock, flags); | ||
729 | return 0; | ||
730 | } | ||
731 | |||
/*
 * riocm_post_send - helper function that places packet into msg TX queue
 * @cm: cm_dev object
 * @rdev: target RapidIO device object (required by outbound msg interface)
 * @buffer: pointer to a packet buffer to send
 * @len: length of data to transfer
 *
 * Returns: 0 if success, or
 *          -ENODEV if the mport is gone,
 *          -EBUSY if the TX ring is full,
 *          + error codes returned by rio_add_outb_message().
 */
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cm->tx_lock, flags);

	if (cm->mport == NULL) {
		rc = -ENODEV;
		goto err_out;
	}

	if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
		riocm_debug(TX, "Tx Queue is full");
		rc = -EBUSY;
		goto err_out;
	}

	/* Track the buffer in the ring; it is released later by the TX
	 * completion handler (rio_txcq_handler).
	 */
	cm->tx_buf[cm->tx_slot] = buffer;
	rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

	riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
		    buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);

	++cm->tx_cnt;
	++cm->tx_slot;
	cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);

err_out:
	spin_unlock_irqrestore(&cm->tx_lock, flags);
	return rc;
}
775 | |||
/*
 * riocm_ch_send - sends a data packet to a remote device
 * @ch_id: local channel ID
 * @buf: pointer to a data buffer to send (including CM header)
 * @len: length of data to transfer (including CM header)
 *
 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
 *
 * Returns: 0 if success, or
 *          -EINVAL if one or more input parameters is/are not valid,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          + error codes returned by HW send routine.
 */
static int riocm_ch_send(u16 ch_id, void *buf, int len)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hdr;
	int ret;

	if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	ch = riocm_get_channel(ch_id);
	if (!ch) {
		riocm_error("%s(%d) ch_%d not found", current->comm,
			    task_pid_nr(current), ch_id);
		return -ENODEV;
	}

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto err_out;
	}

	/*
	 * Fill buffer header section with corresponding channel data
	 */
	hdr = buf;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_DATA_MSG;
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->msg_len = htons((u16)len);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its own
	 * internal transfer buffer (true for all RIONET compatible mport
	 * drivers). Must be reviewed if mport driver uses the buffer directly.
	 */

	ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
	if (ret)
		riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
err_out:
	riocm_put_channel(ch);
	return ret;
}
839 | |||
840 | static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) | ||
841 | { | ||
842 | int i, ret = -EINVAL; | ||
843 | |||
844 | spin_lock_bh(&ch->lock); | ||
845 | |||
846 | for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { | ||
847 | if (ch->rx_ring.inuse[i] == buf) { | ||
848 | ch->rx_ring.inuse[i] = NULL; | ||
849 | ch->rx_ring.inuse_cnt--; | ||
850 | ret = 0; | ||
851 | break; | ||
852 | } | ||
853 | } | ||
854 | |||
855 | spin_unlock_bh(&ch->lock); | ||
856 | |||
857 | if (!ret) | ||
858 | kfree(buf); | ||
859 | |||
860 | return ret; | ||
861 | } | ||
862 | |||
/*
 * riocm_ch_receive - fetch a data packet received for the specified channel
 * @ch: local channel ID
 * @buf: pointer to a packet buffer
 * @timeout: timeout to wait for incoming packet (in jiffies)
 *
 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          -ENOMEM if in-use tracking queue is full (or packet dropped),
 *          -ETIME if wait timeout expired,
 *          -EINTR if wait was interrupted,
 *          -ECONNRESET if channel left CONNECTED state while waiting.
 */
static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
{
	void *rxmsg = NULL;
	int i, ret = 0;
	long wret;

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
		/* If we do not have entries to track buffers given to upper
		 * layer, reject request.
		 */
		ret = -ENOMEM;
		goto out;
	}

	/* Block until rio_rx_data_handler() completes ch->comp, the timeout
	 * expires, or a signal arrives.
	 */
	wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);

	riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;

	if (ret)
		goto out;

	spin_lock_bh(&ch->lock);

	/* Pull the oldest buffered packet off the tail of the RX ring */
	rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
	ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
	ch->rx_ring.count--;
	ch->rx_ring.tail++;
	ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
	ret = -ENOMEM;

	/* Record the buffer as handed out so riocm_ch_free_rxbuf() can
	 * validate and release it later.
	 */
	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
		if (ch->rx_ring.inuse[i] == NULL) {
			ch->rx_ring.inuse[i] = rxmsg;
			ch->rx_ring.inuse_cnt++;
			ret = 0;
			break;
		}
	}

	if (ret) {
		/* We have no entry to store pending message: drop it */
		kfree(rxmsg);
		rxmsg = NULL;
	}

	spin_unlock_bh(&ch->lock);
out:
	*buf = rxmsg;
	return ret;
}
937 | |||
/*
 * riocm_ch_connect - sends a connect request to a remote device
 * @loc_ch: local channel ID
 * @cm: CM device to send connect request
 * @peer: target RapidIO device
 * @rem_ch: remote channel ID
 *
 * Returns: 0 if success, or
 *          -ENODEV if the local channel cannot be found,
 *          -EINVAL if the channel is not in IDLE state,
 *          -ENOMEM if the request packet cannot be allocated,
 *          -ETIME if ACK response timeout expired,
 *          -EINTR if wait for response was interrupted.
 */
static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
			    struct cm_peer *peer, u16 rem_ch)
{
	struct rio_channel *ch = NULL;
	struct rio_ch_chan_hdr *hdr;
	int ret;
	long wret;

	ch = riocm_get_channel(loc_ch);
	if (!ch)
		return -ENODEV;

	/* Atomically move IDLE -> CONNECT; reject any other state */
	if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
		ret = -EINVAL;
		goto conn_done;
	}

	ch->cmdev = cm;
	ch->rdev = peer->rdev;
	ch->context = NULL;
	ch->loc_destid = cm->mport->host_deviceid;
	ch->rem_channel = rem_ch;

	/*
	 * Send connect request to the remote RapidIO device
	 */

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL) {
		ret = -ENOMEM;
		goto conn_done;
	}

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(peer->rdev->destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_REQ;
	hdr->dst_ch = htons(rem_ch);
	hdr->src_ch = htons(loc_ch);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its
	 * internal transfer buffer. Must be reviewed if mport driver uses
	 * this buffer directly.
	 */
	ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));

	/* On success or hard failure hdr can be freed (data was copied);
	 * only on -EBUSY does ownership move to the deferred TX queue.
	 */
	if (ret != -EBUSY) {
		kfree(hdr);
	} else {
		ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
		if (ret)
			kfree(hdr);
	}

	if (ret) {
		/* Roll the channel back to IDLE if the request never left */
		riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
		goto conn_done;
	}

	/* Wait for connect response from the remote device */
	wret = wait_for_completion_interruptible_timeout(&ch->comp,
							 RIOCM_CONNECT_TO * HZ);
	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		/* NOTE(review): a bare -1 (== -EPERM) escapes to callers
		 * when the channel did not reach CONNECTED — confirm whether
		 * a proper errno was intended here.
		 */
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;

conn_done:
	riocm_put_channel(ch);
	return ret;
}
1029 | |||
/*
 * riocm_send_ack - sends a CM_CONN_ACK packet to the connecting peer
 * @ch: channel that accepted an incoming connection request
 *
 * Returns: 0 if the ACK was posted or queued for deferred transmission,
 *          -ENOMEM if packet allocation failed,
 *          + error codes returned by the HW send routine.
 */
static int riocm_send_ack(struct rio_channel *ch)
{
	struct rio_ch_chan_hdr *hdr;
	int ret;

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_ACK;

	/* ATTN: the function call below relies on the fact that underlying
	 * add_outb_message() routine copies TX data into its internal transfer
	 * buffer. Review if switching to direct buffer version.
	 */
	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

	/* On -EBUSY, try to queue the packet; if queued, ownership of hdr
	 * moves to the TX request queue and it must NOT be freed here.
	 */
	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
					      ch->rdev, hdr, sizeof(*hdr)))
		return 0;
	kfree(hdr);

	if (ret)
		riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
			    ch->id, rio_name(ch->rdev), ret);
	return ret;
}
1064 | |||
/*
 * riocm_ch_accept - accept incoming connection request
 * @ch_id: listening channel ID
 * @new_ch_id: output — ID assigned to the newly created channel
 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
 *           request is not available).
 *
 * Returns: pointer to new channel struct if success, or error-valued pointer:
 *          -EINVAL - cannot find specified channel or it is not listening,
 *          -EAGAIN - no connection request available immediately (timeout=0),
 *          -ENOMEM - unable to allocate new channel,
 *          -ETIME - wait timeout expired,
 *          -EINTR - wait was interrupted,
 *          -ECANCELED - channel left LISTEN state while waiting,
 *          -EIO - woken up but accept queue was empty,
 *          -ENODEV - requesting peer device object not found.
 */
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
					   long timeout)
{
	struct rio_channel *ch = NULL;
	struct rio_channel *new_ch = NULL;
	struct conn_req *req;
	struct cm_peer *peer;
	int found = 0;
	int err = 0;
	long wret;

	ch = riocm_get_channel(ch_id);
	if (!ch)
		return ERR_PTR(-EINVAL);

	if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
		err = -EINVAL;
		goto err_put;
	}

	/* Don't sleep if this is a non blocking call */
	if (!timeout) {
		if (!try_wait_for_completion(&ch->comp)) {
			err = -EAGAIN;
			goto err_put;
		}
	} else {
		riocm_debug(WAIT, "on %d", ch->id);

		wret = wait_for_completion_interruptible_timeout(&ch->comp,
								 timeout);
		if (!wret) {
			err = -ETIME;
			goto err_put;
		} else if (wret == -ERESTARTSYS) {
			err = -EINTR;
			goto err_put;
		}
	}

	/* Re-check state and queue under the lock: the wakeup may have come
	 * from channel teardown rather than a connection request.
	 */
	spin_lock_bh(&ch->lock);

	if (ch->state != RIO_CM_LISTEN) {
		err = -ECANCELED;
	} else if (list_empty(&ch->accept_queue)) {
		riocm_debug(WAIT, "on %d accept_queue is empty on completion",
			    ch->id);
		err = -EIO;
	}

	spin_unlock_bh(&ch->lock);

	if (err) {
		riocm_debug(WAIT, "on %d returns %d", ch->id, err);
		goto err_put;
	}

	/* Create new channel for this connection */
	new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);

	if (IS_ERR(new_ch)) {
		riocm_error("failed to get channel for new req (%ld)",
			    PTR_ERR(new_ch));
		err = -ENOMEM;
		goto err_put;
	}

	spin_lock_bh(&ch->lock);

	/* Take the oldest pending request and seed the new channel from it */
	req = list_first_entry(&ch->accept_queue, struct conn_req, node);
	list_del(&req->node);
	new_ch->cmdev = ch->cmdev;
	new_ch->loc_destid = ch->loc_destid;
	new_ch->rem_destid = req->destid;
	new_ch->rem_channel = req->chan;

	spin_unlock_bh(&ch->lock);
	riocm_put_channel(ch);
	kfree(req);

	down_read(&rdev_sem);
	/* Find requester's device object */
	list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
		if (peer->rdev->destid == new_ch->rem_destid) {
			riocm_debug(RX_CMD, "found matching device(%s)",
				    rio_name(peer->rdev));
			found = 1;
			break;
		}
	}
	up_read(&rdev_sem);

	if (!found) {
		/* If peer device object not found, simply ignore the request */
		err = -ENODEV;
		goto err_nodev;
	}

	new_ch->rdev = peer->rdev;
	new_ch->state = RIO_CM_CONNECTED;
	spin_lock_init(&new_ch->lock);

	/* Acknowledge the connection request. */
	riocm_send_ack(new_ch);

	*new_ch_id = new_ch->id;
	return new_ch;
err_put:
	/* Listening channel reference is dropped; new_ch (if any) is torn
	 * down below. Note that err_nodev is reached only after ch was
	 * already put above.
	 */
	riocm_put_channel(ch);
err_nodev:
	if (new_ch) {
		spin_lock_bh(&idr_lock);
		idr_remove(&ch_idr, new_ch->id);
		spin_unlock_bh(&idr_lock);
		riocm_put_channel(new_ch);
	}
	*new_ch_id = 0;
	return ERR_PTR(err);
}
1199 | |||
1200 | /* | ||
1201 | * riocm_ch_listen - puts a channel into LISTEN state | ||
1202 | * @ch_id: channel ID | ||
1203 | * | ||
1204 | * Returns: 0 if success, or | ||
1205 | * -EINVAL if the specified channel does not exists or | ||
1206 | * is not in CHAN_BOUND state. | ||
1207 | */ | ||
1208 | static int riocm_ch_listen(u16 ch_id) | ||
1209 | { | ||
1210 | struct rio_channel *ch = NULL; | ||
1211 | int ret = 0; | ||
1212 | |||
1213 | riocm_debug(CHOP, "(ch_%d)", ch_id); | ||
1214 | |||
1215 | ch = riocm_get_channel(ch_id); | ||
1216 | if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) | ||
1217 | ret = -EINVAL; | ||
1218 | riocm_put_channel(ch); | ||
1219 | return ret; | ||
1220 | } | ||
1221 | |||
/*
 * riocm_ch_bind - associate a channel object and an mport device
 * @ch_id: channel ID
 * @mport_id: local mport device ID
 * @context: pointer to the additional caller's context
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find specified mport,
 *          -EINVAL if the specified channel does not exist or
 *                  is not in IDLE state.
 */
static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
{
	struct rio_channel *ch = NULL;
	struct cm_dev *cm;
	int rc = -ENODEV;

	riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);

	/* Find matching cm_dev object */
	down_read(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if ((cm->mport->id == mport_id) &&
		     rio_mport_is_running(cm->mport)) {
			rc = 0;
			break;
		}
	}

	if (rc)
		goto exit;

	ch = riocm_get_channel(ch_id);
	if (!ch) {
		rc = -EINVAL;
		goto exit;
	}

	/* Binding is only valid for a freshly created (IDLE) channel */
	spin_lock_bh(&ch->lock);
	if (ch->state != RIO_CM_IDLE) {
		spin_unlock_bh(&ch->lock);
		rc = -EINVAL;
		goto err_put;
	}

	ch->cmdev = cm;
	ch->loc_destid = cm->mport->host_deviceid;
	ch->context = context;
	ch->state = RIO_CM_CHAN_BOUND;
	spin_unlock_bh(&ch->lock);
err_put:
	riocm_put_channel(ch);
exit:
	/* rdev_sem is held across the whole bind so cm cannot disappear */
	up_read(&rdev_sem);
	return rc;
}
1278 | |||
/*
 * riocm_ch_alloc - channel object allocation helper routine
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Return value: pointer to newly created channel object,
 *               or error-valued pointer (-ENOMEM, or -EBUSY when the
 *               requested ID is taken / range exhausted)
 */
static struct rio_channel *riocm_ch_alloc(u16 ch_num)
{
	int id;
	int start, end;
	struct rio_channel *ch;

	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return ERR_PTR(-ENOMEM);

	if (ch_num) {
		/* If requested, try to obtain the specified channel ID */
		start = ch_num;
		end = ch_num + 1;
	} else {
		/* Obtain channel ID from the dynamic allocation range */
		start = chstart;
		end = RIOCM_MAX_CHNUM + 1;
	}

	/* Preload outside the spinlock so the allocation under the lock
	 * can use GFP_NOWAIT without failing spuriously.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&idr_lock);
	id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
	spin_unlock_bh(&idr_lock);
	idr_preload_end();

	if (id < 0) {
		kfree(ch);
		/* Map "no free ID in range" onto -EBUSY for callers */
		return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
	}

	ch->id = (u16)id;
	ch->state = RIO_CM_IDLE;
	spin_lock_init(&ch->lock);
	INIT_LIST_HEAD(&ch->accept_queue);
	INIT_LIST_HEAD(&ch->ch_node);
	init_completion(&ch->comp);
	init_completion(&ch->comp_close);
	kref_init(&ch->ref);
	ch->rx_ring.head = 0;
	ch->rx_ring.tail = 0;
	ch->rx_ring.count = 0;
	ch->rx_ring.inuse_cnt = 0;

	return ch;
}
1332 | |||
1333 | /* | ||
1334 | * riocm_ch_create - creates a new channel object and allocates ID for it | ||
1335 | * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) | ||
1336 | * | ||
1337 | * Allocates and initializes a new channel object. If the parameter ch_num > 0 | ||
1338 | * and is within the valid range, riocm_ch_create tries to allocate the | ||
1339 | * specified ID for the new channel. If ch_num = 0, channel ID will be assigned | ||
1340 | * automatically from the range (chstart ... RIOCM_MAX_CHNUM). | ||
1341 | * Module parameter 'chstart' defines start of an ID range available for dynamic | ||
1342 | * allocation. Range below 'chstart' is reserved for pre-defined ID numbers. | ||
1343 | * Available channel numbers are limited by 16-bit size of channel numbers used | ||
1344 | * in the packet header. | ||
1345 | * | ||
1346 | * Return value: PTR to rio_channel structure if successful (with channel number | ||
1347 | * updated via pointer) or error-valued pointer if error. | ||
1348 | */ | ||
1349 | static struct rio_channel *riocm_ch_create(u16 *ch_num) | ||
1350 | { | ||
1351 | struct rio_channel *ch = NULL; | ||
1352 | |||
1353 | ch = riocm_ch_alloc(*ch_num); | ||
1354 | |||
1355 | if (IS_ERR(ch)) | ||
1356 | riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)", | ||
1357 | *ch_num, PTR_ERR(ch)); | ||
1358 | else | ||
1359 | *ch_num = ch->id; | ||
1360 | |||
1361 | return ch; | ||
1362 | } | ||
1363 | |||
/*
 * riocm_ch_free - channel object release routine
 * @ref: pointer to a channel's kref structure
 *
 * Runs when the last reference is dropped: frees all RX buffers still
 * tracked by the channel (both handed-out and queued) and signals the
 * closer waiting on comp_close that teardown may proceed.
 */
static void riocm_ch_free(struct kref *ref)
{
	struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
	int i;

	riocm_debug(CHOP, "(ch_%d)", ch->id);

	/* Release buffers that were handed to the upper layer */
	if (ch->rx_ring.inuse_cnt) {
		for (i = 0;
		     i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
			if (ch->rx_ring.inuse[i] != NULL) {
				kfree(ch->rx_ring.inuse[i]);
				ch->rx_ring.inuse_cnt--;
			}
		}
	}

	/* Release packets still queued in the RX ring */
	if (ch->rx_ring.count)
		for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
			if (ch->rx_ring.buf[i] != NULL) {
				kfree(ch->rx_ring.buf[i]);
				ch->rx_ring.count--;
			}
		}

	/* Let riocm_ch_close() know resources are gone; it frees ch itself */
	complete(&ch->comp_close);
}
1395 | |||
/*
 * riocm_send_close - sends a CM_CONN_CLOSE notification to the remote peer
 * @ch: channel being closed
 *
 * Returns: 0 if the CLOSE packet was posted or queued for transmission,
 *          -ENOMEM if packet allocation failed,
 *          + error codes returned by the HW send routine.
 */
static int riocm_send_close(struct rio_channel *ch)
{
	struct rio_ch_chan_hdr *hdr;
	int ret;

	/*
	 * Send CH_CLOSE notification to the remote RapidIO device
	 */

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_CLOSE;
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);

	/* ATTN: the function call below relies on the fact that underlying
	 * add_outb_message() routine copies TX data into its internal transfer
	 * buffer. Needs to be reviewed if switched to direct buffer mode.
	 */
	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

	/* On -EBUSY, queue the packet for deferred TX; if queued, ownership
	 * of hdr moves to the request queue and it must NOT be freed here.
	 */
	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
					      hdr, sizeof(*hdr)))
		return 0;
	kfree(hdr);

	if (ret)
		riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);

	return ret;
}
1434 | |||
/*
 * riocm_ch_close - closes a channel object with specified ID (by local request)
 * @ch: channel to be closed
 *
 * Returns: 0 if the channel's resources were fully released,
 *          -ETIMEDOUT if waiting for release timed out,
 *          -EINTR if the wait was interrupted by a signal.
 */
static int riocm_ch_close(struct rio_channel *ch)
{
	unsigned long tmo = msecs_to_jiffies(3000);
	enum rio_cm_state state;
	long wret;
	int ret = 0;

	riocm_debug(CHOP, "ch_%d by %s(%d)",
		    ch->id, current->comm, task_pid_nr(current));

	/* Move to DESTROYING; only a CONNECTED peer needs a CLOSE packet */
	state = riocm_exch(ch, RIO_CM_DESTROYING);
	if (state == RIO_CM_CONNECTED)
		riocm_send_close(ch);

	/* Wake up everyone blocked on this channel's completion */
	complete_all(&ch->comp);

	/* Drop our reference, then wait for riocm_ch_free() (the kref
	 * release) to signal comp_close once all other holders are done.
	 */
	riocm_put_channel(ch);
	wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);

	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

	if (wret == 0) {
		/* Timeout on wait occurred */
		riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
			    current->comm, task_pid_nr(current), ch->id);
		ret = -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal */
		riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
			    current->comm, task_pid_nr(current), ch->id);
		ret = -EINTR;
	}

	if (!ret) {
		riocm_debug(CHOP, "ch_%d resources released", ch->id);
		/* Safe to free only after comp_close fired: no refs remain */
		kfree(ch);
	} else {
		/* Deliberately leak ch here — freeing it while another
		 * holder may still drop a reference would be use-after-free.
		 */
		riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
	}

	return ret;
}
1481 | |||
1482 | /* | ||
1483 | * riocm_cdev_open() - Open character device | ||
1484 | */ | ||
1485 | static int riocm_cdev_open(struct inode *inode, struct file *filp) | ||
1486 | { | ||
1487 | riocm_debug(INIT, "by %s(%d) filp=%p ", | ||
1488 | current->comm, task_pid_nr(current), filp); | ||
1489 | |||
1490 | if (list_empty(&cm_dev_list)) | ||
1491 | return -ENODEV; | ||
1492 | |||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
/*
 * riocm_cdev_release() - Release character device
 *
 * Called on the last close of the device node for this file object.  Any
 * channels that user space created through this filp but never closed are
 * detached from the idr under the spinlock, collected on a private list,
 * and closed afterwards with the lock dropped (riocm_ch_close() can sleep).
 */
static int riocm_cdev_release(struct inode *inode, struct file *filp)
{
	struct rio_channel *ch, *_c;
	unsigned int i;
	LIST_HEAD(list);

	riocm_debug(EXIT, "by %s(%d) filp=%p",
		    current->comm, task_pid_nr(current), filp);

	/* Check if there are channels associated with this file descriptor */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch && ch->filp == filp) {
			riocm_debug(EXIT, "ch_%d not released by %s(%d)",
				    ch->id, current->comm,
				    task_pid_nr(current));
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	/* Close the orphaned channels outside of the idr lock */
	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}

	return 0;
}
1530 | |||
1531 | /* | ||
1532 | * cm_ep_get_list_size() - Reports number of endpoints in the network | ||
1533 | */ | ||
1534 | static int cm_ep_get_list_size(void __user *arg) | ||
1535 | { | ||
1536 | u32 __user *p = arg; | ||
1537 | u32 mport_id; | ||
1538 | u32 count = 0; | ||
1539 | struct cm_dev *cm; | ||
1540 | |||
1541 | if (get_user(mport_id, p)) | ||
1542 | return -EFAULT; | ||
1543 | if (mport_id >= RIO_MAX_MPORTS) | ||
1544 | return -EINVAL; | ||
1545 | |||
1546 | /* Find a matching cm_dev object */ | ||
1547 | down_read(&rdev_sem); | ||
1548 | list_for_each_entry(cm, &cm_dev_list, list) { | ||
1549 | if (cm->mport->id == mport_id) { | ||
1550 | count = cm->npeers; | ||
1551 | up_read(&rdev_sem); | ||
1552 | if (copy_to_user(arg, &count, sizeof(u32))) | ||
1553 | return -EFAULT; | ||
1554 | return 0; | ||
1555 | } | ||
1556 | } | ||
1557 | up_read(&rdev_sem); | ||
1558 | |||
1559 | return -ENODEV; | ||
1560 | } | ||
1561 | |||
1562 | /* | ||
1563 | * cm_ep_get_list() - Returns list of attached endpoints | ||
1564 | */ | ||
1565 | static int cm_ep_get_list(void __user *arg) | ||
1566 | { | ||
1567 | struct cm_dev *cm; | ||
1568 | struct cm_peer *peer; | ||
1569 | u32 info[2]; | ||
1570 | void *buf; | ||
1571 | u32 nent; | ||
1572 | u32 *entry_ptr; | ||
1573 | u32 i = 0; | ||
1574 | int ret = 0; | ||
1575 | |||
1576 | if (copy_from_user(&info, arg, sizeof(info))) | ||
1577 | return -EFAULT; | ||
1578 | |||
1579 | if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT) | ||
1580 | return -EINVAL; | ||
1581 | |||
1582 | /* Find a matching cm_dev object */ | ||
1583 | down_read(&rdev_sem); | ||
1584 | list_for_each_entry(cm, &cm_dev_list, list) | ||
1585 | if (cm->mport->id == (u8)info[1]) | ||
1586 | goto found; | ||
1587 | |||
1588 | up_read(&rdev_sem); | ||
1589 | return -ENODEV; | ||
1590 | |||
1591 | found: | ||
1592 | nent = min(info[0], cm->npeers); | ||
1593 | buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); | ||
1594 | if (!buf) { | ||
1595 | up_read(&rdev_sem); | ||
1596 | return -ENOMEM; | ||
1597 | } | ||
1598 | |||
1599 | entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32)); | ||
1600 | |||
1601 | list_for_each_entry(peer, &cm->peers, node) { | ||
1602 | *entry_ptr = (u32)peer->rdev->destid; | ||
1603 | entry_ptr++; | ||
1604 | if (++i == nent) | ||
1605 | break; | ||
1606 | } | ||
1607 | up_read(&rdev_sem); | ||
1608 | |||
1609 | ((u32 *)buf)[0] = i; /* report an updated number of entries */ | ||
1610 | ((u32 *)buf)[1] = info[1]; /* put back an mport ID */ | ||
1611 | if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2))) | ||
1612 | ret = -EFAULT; | ||
1613 | |||
1614 | kfree(buf); | ||
1615 | return ret; | ||
1616 | } | ||
1617 | |||
1618 | /* | ||
1619 | * cm_mport_get_list() - Returns list of available local mport devices | ||
1620 | */ | ||
1621 | static int cm_mport_get_list(void __user *arg) | ||
1622 | { | ||
1623 | int ret = 0; | ||
1624 | u32 entries; | ||
1625 | void *buf; | ||
1626 | struct cm_dev *cm; | ||
1627 | u32 *entry_ptr; | ||
1628 | int count = 0; | ||
1629 | |||
1630 | if (copy_from_user(&entries, arg, sizeof(entries))) | ||
1631 | return -EFAULT; | ||
1632 | if (entries == 0 || entries > RIO_MAX_MPORTS) | ||
1633 | return -EINVAL; | ||
1634 | buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); | ||
1635 | if (!buf) | ||
1636 | return -ENOMEM; | ||
1637 | |||
1638 | /* Scan all registered cm_dev objects */ | ||
1639 | entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32)); | ||
1640 | down_read(&rdev_sem); | ||
1641 | list_for_each_entry(cm, &cm_dev_list, list) { | ||
1642 | if (count++ < entries) { | ||
1643 | *entry_ptr = (cm->mport->id << 16) | | ||
1644 | cm->mport->host_deviceid; | ||
1645 | entry_ptr++; | ||
1646 | } | ||
1647 | } | ||
1648 | up_read(&rdev_sem); | ||
1649 | |||
1650 | *((u32 *)buf) = count; /* report a real number of entries */ | ||
1651 | if (copy_to_user(arg, buf, sizeof(u32) * (count + 1))) | ||
1652 | ret = -EFAULT; | ||
1653 | |||
1654 | kfree(buf); | ||
1655 | return ret; | ||
1656 | } | ||
1657 | |||
1658 | /* | ||
1659 | * cm_chan_create() - Create a message exchange channel | ||
1660 | */ | ||
1661 | static int cm_chan_create(struct file *filp, void __user *arg) | ||
1662 | { | ||
1663 | u16 __user *p = arg; | ||
1664 | u16 ch_num; | ||
1665 | struct rio_channel *ch; | ||
1666 | |||
1667 | if (get_user(ch_num, p)) | ||
1668 | return -EFAULT; | ||
1669 | |||
1670 | riocm_debug(CHOP, "ch_%d requested by %s(%d)", | ||
1671 | ch_num, current->comm, task_pid_nr(current)); | ||
1672 | ch = riocm_ch_create(&ch_num); | ||
1673 | if (IS_ERR(ch)) | ||
1674 | return PTR_ERR(ch); | ||
1675 | |||
1676 | ch->filp = filp; | ||
1677 | riocm_debug(CHOP, "ch_%d created by %s(%d)", | ||
1678 | ch_num, current->comm, task_pid_nr(current)); | ||
1679 | return put_user(ch_num, p); | ||
1680 | } | ||
1681 | |||
/*
 * cm_chan_close() - Close channel
 * @filp: Pointer to file object
 * @arg: Channel to close
 *
 * Only the file object that owns the channel may close it; a mismatching
 * @filp yields -EINVAL.  A channel number no longer present in the idr is
 * treated as already closed and reports success.  The idr entry is removed
 * under the spinlock; the (possibly sleeping) teardown in riocm_ch_close()
 * runs after the lock is dropped.
 */
static int cm_chan_close(struct file *filp, void __user *arg)
{
	u16 __user *p = arg;
	u16 ch_num;
	struct rio_channel *ch;

	if (get_user(ch_num, p))
		return -EFAULT;

	riocm_debug(CHOP, "ch_%d by %s(%d)",
		    ch_num, current->comm, task_pid_nr(current));

	spin_lock_bh(&idr_lock);
	ch = idr_find(&ch_idr, ch_num);
	if (!ch) {
		spin_unlock_bh(&idr_lock);
		return 0;	/* already gone: not an error */
	}
	if (ch->filp != filp) {
		spin_unlock_bh(&idr_lock);
		return -EINVAL;	/* owned by a different file object */
	}
	idr_remove(&ch_idr, ch->id);
	spin_unlock_bh(&idr_lock);

	return riocm_ch_close(ch);
}
1714 | |||
1715 | /* | ||
1716 | * cm_chan_bind() - Bind channel | ||
1717 | * @arg: Channel number | ||
1718 | */ | ||
1719 | static int cm_chan_bind(void __user *arg) | ||
1720 | { | ||
1721 | struct rio_cm_channel chan; | ||
1722 | |||
1723 | if (copy_from_user(&chan, arg, sizeof(chan))) | ||
1724 | return -EFAULT; | ||
1725 | if (chan.mport_id >= RIO_MAX_MPORTS) | ||
1726 | return -EINVAL; | ||
1727 | |||
1728 | return riocm_ch_bind(chan.id, chan.mport_id, NULL); | ||
1729 | } | ||
1730 | |||
1731 | /* | ||
1732 | * cm_chan_listen() - Listen on channel | ||
1733 | * @arg: Channel number | ||
1734 | */ | ||
1735 | static int cm_chan_listen(void __user *arg) | ||
1736 | { | ||
1737 | u16 __user *p = arg; | ||
1738 | u16 ch_num; | ||
1739 | |||
1740 | if (get_user(ch_num, p)) | ||
1741 | return -EFAULT; | ||
1742 | |||
1743 | return riocm_ch_listen(ch_num); | ||
1744 | } | ||
1745 | |||
1746 | /* | ||
1747 | * cm_chan_accept() - Accept incoming connection | ||
1748 | * @filp: Pointer to file object | ||
1749 | * @arg: Channel number | ||
1750 | */ | ||
1751 | static int cm_chan_accept(struct file *filp, void __user *arg) | ||
1752 | { | ||
1753 | struct rio_cm_accept param; | ||
1754 | long accept_to; | ||
1755 | struct rio_channel *ch; | ||
1756 | |||
1757 | if (copy_from_user(¶m, arg, sizeof(param))) | ||
1758 | return -EFAULT; | ||
1759 | |||
1760 | riocm_debug(CHOP, "on ch_%d by %s(%d)", | ||
1761 | param.ch_num, current->comm, task_pid_nr(current)); | ||
1762 | |||
1763 | accept_to = param.wait_to ? | ||
1764 | msecs_to_jiffies(param.wait_to) : 0; | ||
1765 | |||
1766 | ch = riocm_ch_accept(param.ch_num, ¶m.ch_num, accept_to); | ||
1767 | if (IS_ERR(ch)) | ||
1768 | return PTR_ERR(ch); | ||
1769 | ch->filp = filp; | ||
1770 | |||
1771 | riocm_debug(CHOP, "new ch_%d for %s(%d)", | ||
1772 | ch->id, current->comm, task_pid_nr(current)); | ||
1773 | |||
1774 | if (copy_to_user(arg, ¶m, sizeof(param))) | ||
1775 | return -EFAULT; | ||
1776 | return 0; | ||
1777 | } | ||
1778 | |||
/*
 * cm_chan_connect() - Connect on channel
 * @arg: Channel information
 *
 * Validates the requested mport and remote destination ID, resolves the
 * peer device under rdev_sem, then initiates the connection handshake.
 *
 * NOTE(review): rdev_sem is released before riocm_ch_connect() uses the
 * 'cm' and 'peer' pointers; a concurrent riocm_remove_dev() or
 * riocm_remove_mport() could free them in that window — confirm the
 * intended lifetime guarantees before relying on this path under
 * hot-unplug.
 */
static int cm_chan_connect(void __user *arg)
{
	struct rio_cm_channel chan;
	struct cm_dev *cm;
	struct cm_peer *peer;
	int ret = -ENODEV;

	if (copy_from_user(&chan, arg, sizeof(chan)))
		return -EFAULT;
	if (chan.mport_id >= RIO_MAX_MPORTS)
		return -EINVAL;

	down_read(&rdev_sem);

	/* Find matching cm_dev object */
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport->id == chan.mport_id) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	/* Reject destination IDs at/above the broadcast ID for this mport */
	if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Find corresponding RapidIO endpoint device object */
	ret = -ENODEV;

	list_for_each_entry(peer, &cm->peers, node) {
		if (peer->rdev->destid == chan.remote_destid) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	up_read(&rdev_sem);

	return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
err_out:
	up_read(&rdev_sem);
	return ret;
}
1833 | |||
1834 | /* | ||
1835 | * cm_chan_msg_send() - Send a message through channel | ||
1836 | * @arg: Outbound message information | ||
1837 | */ | ||
1838 | static int cm_chan_msg_send(void __user *arg) | ||
1839 | { | ||
1840 | struct rio_cm_msg msg; | ||
1841 | void *buf; | ||
1842 | int ret = 0; | ||
1843 | |||
1844 | if (copy_from_user(&msg, arg, sizeof(msg))) | ||
1845 | return -EFAULT; | ||
1846 | if (msg.size > RIO_MAX_MSG_SIZE) | ||
1847 | return -EINVAL; | ||
1848 | |||
1849 | buf = kmalloc(msg.size, GFP_KERNEL); | ||
1850 | if (!buf) | ||
1851 | return -ENOMEM; | ||
1852 | |||
1853 | if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) { | ||
1854 | ret = -EFAULT; | ||
1855 | goto out; | ||
1856 | } | ||
1857 | |||
1858 | ret = riocm_ch_send(msg.ch_num, buf, msg.size); | ||
1859 | out: | ||
1860 | kfree(buf); | ||
1861 | return ret; | ||
1862 | } | ||
1863 | |||
/*
 * cm_chan_msg_rcv() - Receive a message through channel
 * @arg: Inbound message information
 *
 * Blocks (up to msg.rxto ms, or indefinitely when rxto == 0) for the next
 * inbound message on the channel, then copies at most
 * min(msg.size, RIO_MAX_MSG_SIZE) bytes into the user buffer.  A channel
 * reference is held across the wait so the channel cannot vanish.
 */
static int cm_chan_msg_rcv(void __user *arg)
{
	struct rio_cm_msg msg;
	struct rio_channel *ch;
	void *buf;
	long rxto;
	int ret = 0, msg_size;

	if (copy_from_user(&msg, arg, sizeof(msg)))
		return -EFAULT;

	/* Channel number 0 and zero-length receive buffers are rejected */
	if (msg.ch_num == 0 || msg.size == 0)
		return -EINVAL;

	ch = riocm_get_channel(msg.ch_num);
	if (!ch)
		return -ENODEV;

	/* rxto == 0 means "wait forever" */
	rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;

	ret = riocm_ch_receive(ch, &buf, rxto);
	if (ret)
		goto out;

	/* Never copy more than one maximum-size message */
	msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));

	if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
		ret = -EFAULT;

	riocm_ch_free_rxbuf(ch, buf);
out:
	riocm_put_channel(ch);
	return ret;
}
1902 | |||
1903 | /* | ||
1904 | * riocm_cdev_ioctl() - IOCTL requests handler | ||
1905 | */ | ||
1906 | static long | ||
1907 | riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
1908 | { | ||
1909 | switch (cmd) { | ||
1910 | case RIO_CM_EP_GET_LIST_SIZE: | ||
1911 | return cm_ep_get_list_size((void __user *)arg); | ||
1912 | case RIO_CM_EP_GET_LIST: | ||
1913 | return cm_ep_get_list((void __user *)arg); | ||
1914 | case RIO_CM_CHAN_CREATE: | ||
1915 | return cm_chan_create(filp, (void __user *)arg); | ||
1916 | case RIO_CM_CHAN_CLOSE: | ||
1917 | return cm_chan_close(filp, (void __user *)arg); | ||
1918 | case RIO_CM_CHAN_BIND: | ||
1919 | return cm_chan_bind((void __user *)arg); | ||
1920 | case RIO_CM_CHAN_LISTEN: | ||
1921 | return cm_chan_listen((void __user *)arg); | ||
1922 | case RIO_CM_CHAN_ACCEPT: | ||
1923 | return cm_chan_accept(filp, (void __user *)arg); | ||
1924 | case RIO_CM_CHAN_CONNECT: | ||
1925 | return cm_chan_connect((void __user *)arg); | ||
1926 | case RIO_CM_CHAN_SEND: | ||
1927 | return cm_chan_msg_send((void __user *)arg); | ||
1928 | case RIO_CM_CHAN_RECEIVE: | ||
1929 | return cm_chan_msg_rcv((void __user *)arg); | ||
1930 | case RIO_CM_MPORT_GET_LIST: | ||
1931 | return cm_mport_get_list((void __user *)arg); | ||
1932 | default: | ||
1933 | break; | ||
1934 | } | ||
1935 | |||
1936 | return -EINVAL; | ||
1937 | } | ||
1938 | |||
/* Character device entry points: no read/write — all I/O goes via ioctl */
static const struct file_operations riocm_cdev_fops = {
	.owner = THIS_MODULE,
	.open = riocm_cdev_open,
	.release = riocm_cdev_release,
	.unlocked_ioctl = riocm_cdev_ioctl,
};
1945 | |||
1946 | /* | ||
1947 | * riocm_add_dev - add new remote RapidIO device into channel management core | ||
1948 | * @dev: device object associated with RapidIO device | ||
1949 | * @sif: subsystem interface | ||
1950 | * | ||
1951 | * Adds the specified RapidIO device (if applicable) into peers list of | ||
1952 | * the corresponding channel management device (cm_dev). | ||
1953 | */ | ||
1954 | static int riocm_add_dev(struct device *dev, struct subsys_interface *sif) | ||
1955 | { | ||
1956 | struct cm_peer *peer; | ||
1957 | struct rio_dev *rdev = to_rio_dev(dev); | ||
1958 | struct cm_dev *cm; | ||
1959 | |||
1960 | /* Check if the remote device has capabilities required to support CM */ | ||
1961 | if (!dev_cm_capable(rdev)) | ||
1962 | return 0; | ||
1963 | |||
1964 | riocm_debug(RDEV, "(%s)", rio_name(rdev)); | ||
1965 | |||
1966 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); | ||
1967 | if (!peer) | ||
1968 | return -ENOMEM; | ||
1969 | |||
1970 | /* Find a corresponding cm_dev object */ | ||
1971 | down_write(&rdev_sem); | ||
1972 | list_for_each_entry(cm, &cm_dev_list, list) { | ||
1973 | if (cm->mport == rdev->net->hport) | ||
1974 | goto found; | ||
1975 | } | ||
1976 | |||
1977 | up_write(&rdev_sem); | ||
1978 | kfree(peer); | ||
1979 | return -ENODEV; | ||
1980 | |||
1981 | found: | ||
1982 | peer->rdev = rdev; | ||
1983 | list_add_tail(&peer->node, &cm->peers); | ||
1984 | cm->npeers++; | ||
1985 | |||
1986 | up_write(&rdev_sem); | ||
1987 | return 0; | ||
1988 | } | ||
1989 | |||
/*
 * riocm_remove_dev - remove remote RapidIO device from channel management core
 * @dev: device object associated with RapidIO device
 * @sif: subsystem interface
 *
 * Removes the specified RapidIO device (if applicable) from peers list of
 * the corresponding channel management device (cm_dev).  Any channels bound
 * to the departing device are pulled out of the idr under the spinlock and
 * closed afterwards, since riocm_ch_close() may sleep.
 */
static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	struct cm_dev *cm;
	struct cm_peer *peer;
	struct rio_channel *ch, *_c;
	unsigned int i;
	bool found = false;
	LIST_HEAD(list);

	/* Check if the remote device has capabilities required to support CM */
	if (!dev_cm_capable(rdev))
		return;

	riocm_debug(RDEV, "(%s)", rio_name(rdev));

	/* Find matching cm_dev object */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == rdev->net->hport) {
			found = true;
			break;
		}
	}

	if (!found) {
		up_write(&rdev_sem);
		return;
	}

	/* Remove remote device from the list of peers */
	found = false;	/* reused: now tracks whether the peer entry existed */
	list_for_each_entry(peer, &cm->peers, node) {
		if (peer->rdev == rdev) {
			riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
			found = true;
			list_del(&peer->node);
			cm->npeers--;
			kfree(peer);
			break;
		}
	}

	up_write(&rdev_sem);

	/* Device was not a known peer: no channels to tear down */
	if (!found)
		return;

	/*
	 * Release channels associated with this peer
	 */

	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch && ch->rdev == rdev) {
			/* State change skipped for a device in SHUTDOWN —
			 * presumably the peer can no longer be notified;
			 * TODO(review) confirm intent.
			 */
			if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
				riocm_exch(ch, RIO_CM_DISCONNECT);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	/* Close the collected channels outside of the spinlock */
	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}
}
2068 | |||
2069 | /* | ||
2070 | * riocm_cdev_add() - Create rio_cm char device | ||
2071 | * @devno: device number assigned to device (MAJ + MIN) | ||
2072 | */ | ||
2073 | static int riocm_cdev_add(dev_t devno) | ||
2074 | { | ||
2075 | int ret; | ||
2076 | |||
2077 | cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops); | ||
2078 | riocm_cdev.cdev.owner = THIS_MODULE; | ||
2079 | ret = cdev_add(&riocm_cdev.cdev, devno, 1); | ||
2080 | if (ret < 0) { | ||
2081 | riocm_error("Cannot register a device with error %d", ret); | ||
2082 | return ret; | ||
2083 | } | ||
2084 | |||
2085 | riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME); | ||
2086 | if (IS_ERR(riocm_cdev.dev)) { | ||
2087 | cdev_del(&riocm_cdev.cdev); | ||
2088 | return PTR_ERR(riocm_cdev.dev); | ||
2089 | } | ||
2090 | |||
2091 | riocm_debug(MPORT, "Added %s cdev(%d:%d)", | ||
2092 | DEV_NAME, MAJOR(devno), MINOR(devno)); | ||
2093 | |||
2094 | return 0; | ||
2095 | } | ||
2096 | |||
2097 | /* | ||
2098 | * riocm_add_mport - add new local mport device into channel management core | ||
2099 | * @dev: device object associated with mport | ||
2100 | * @class_intf: class interface | ||
2101 | * | ||
2102 | * When a new mport device is added, CM immediately reserves inbound and | ||
2103 | * outbound RapidIO mailboxes that will be used. | ||
2104 | */ | ||
2105 | static int riocm_add_mport(struct device *dev, | ||
2106 | struct class_interface *class_intf) | ||
2107 | { | ||
2108 | int rc; | ||
2109 | int i; | ||
2110 | struct cm_dev *cm; | ||
2111 | struct rio_mport *mport = to_rio_mport(dev); | ||
2112 | |||
2113 | riocm_debug(MPORT, "add mport %s", mport->name); | ||
2114 | |||
2115 | cm = kzalloc(sizeof(*cm), GFP_KERNEL); | ||
2116 | if (!cm) | ||
2117 | return -ENOMEM; | ||
2118 | |||
2119 | cm->mport = mport; | ||
2120 | |||
2121 | rc = rio_request_outb_mbox(mport, cm, cmbox, | ||
2122 | RIOCM_TX_RING_SIZE, riocm_outb_msg_event); | ||
2123 | if (rc) { | ||
2124 | riocm_error("failed to allocate OBMBOX_%d on %s", | ||
2125 | cmbox, mport->name); | ||
2126 | kfree(cm); | ||
2127 | return -ENODEV; | ||
2128 | } | ||
2129 | |||
2130 | rc = rio_request_inb_mbox(mport, cm, cmbox, | ||
2131 | RIOCM_RX_RING_SIZE, riocm_inb_msg_event); | ||
2132 | if (rc) { | ||
2133 | riocm_error("failed to allocate IBMBOX_%d on %s", | ||
2134 | cmbox, mport->name); | ||
2135 | rio_release_outb_mbox(mport, cmbox); | ||
2136 | kfree(cm); | ||
2137 | return -ENODEV; | ||
2138 | } | ||
2139 | |||
2140 | /* | ||
2141 | * Allocate and register inbound messaging buffers to be ready | ||
2142 | * to receive channel and system management requests | ||
2143 | */ | ||
2144 | for (i = 0; i < RIOCM_RX_RING_SIZE; i++) | ||
2145 | cm->rx_buf[i] = NULL; | ||
2146 | |||
2147 | cm->rx_slots = RIOCM_RX_RING_SIZE; | ||
2148 | mutex_init(&cm->rx_lock); | ||
2149 | riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); | ||
2150 | cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); | ||
2151 | INIT_WORK(&cm->rx_work, rio_ibmsg_handler); | ||
2152 | |||
2153 | cm->tx_slot = 0; | ||
2154 | cm->tx_cnt = 0; | ||
2155 | cm->tx_ack_slot = 0; | ||
2156 | spin_lock_init(&cm->tx_lock); | ||
2157 | |||
2158 | INIT_LIST_HEAD(&cm->peers); | ||
2159 | cm->npeers = 0; | ||
2160 | INIT_LIST_HEAD(&cm->tx_reqs); | ||
2161 | |||
2162 | down_write(&rdev_sem); | ||
2163 | list_add_tail(&cm->list, &cm_dev_list); | ||
2164 | up_write(&rdev_sem); | ||
2165 | |||
2166 | return 0; | ||
2167 | } | ||
2168 | |||
/*
 * riocm_remove_mport - remove local mport device from channel management core
 * @dev: device object associated with mport
 * @class_intf: class interface
 *
 * Removes a local mport device from the list of registered devices that provide
 * channel management services. Returns an error if the specified mport is not
 * registered with the CM core.
 *
 * Teardown order: unlink the cm_dev from the global list, drain/destroy the
 * rx workqueue, close all channels bound to this mport, release both
 * mailboxes, free remaining peer entries, then free the rx buffers and the
 * cm_dev itself.
 */
static void riocm_remove_mport(struct device *dev,
			       struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct cm_dev *cm;
	struct cm_peer *peer, *temp;
	struct rio_channel *ch, *_c;
	unsigned int i;
	bool found = false;
	LIST_HEAD(list);

	riocm_debug(MPORT, "%s", mport->name);

	/* Find a matching cm_dev object and detach it from the global list */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == mport) {
			list_del(&cm->list);
			found = true;
			break;
		}
	}
	up_write(&rdev_sem);
	if (!found)
		return;

	/* Drain any in-flight inbound message processing before teardown */
	flush_workqueue(cm->rx_wq);
	destroy_workqueue(cm->rx_wq);

	/* Release channels bound to this mport */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->cmdev == cm) {
			riocm_debug(RDEV, "%s drop ch_%d",
				    mport->name, ch->id);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	/* riocm_ch_close() may sleep, so run it with the idr lock dropped */
	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}

	rio_release_inb_mbox(mport, cmbox);
	rio_release_outb_mbox(mport, cmbox);

	/* Remove and free peer entries */
	if (!list_empty(&cm->peers))
		riocm_debug(RDEV, "ATTN: peer list not empty");
	list_for_each_entry_safe(peer, temp, &cm->peers, node) {
		riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
		list_del(&peer->node);
		kfree(peer);
	}

	riocm_rx_free(cm);
	kfree(cm);
	riocm_debug(MPORT, "%s done", mport->name);
}
2242 | |||
2243 | static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, | ||
2244 | void *unused) | ||
2245 | { | ||
2246 | struct rio_channel *ch; | ||
2247 | unsigned int i; | ||
2248 | |||
2249 | riocm_debug(EXIT, "."); | ||
2250 | |||
2251 | spin_lock_bh(&idr_lock); | ||
2252 | idr_for_each_entry(&ch_idr, ch, i) { | ||
2253 | riocm_debug(EXIT, "close ch %d", ch->id); | ||
2254 | if (ch->state == RIO_CM_CONNECTED) | ||
2255 | riocm_send_close(ch); | ||
2256 | } | ||
2257 | spin_unlock_bh(&idr_lock); | ||
2258 | |||
2259 | return NOTIFY_DONE; | ||
2260 | } | ||
2261 | |||
/*
 * riocm_interface handles addition/removal of remote RapidIO devices
 * (per-device callbacks invoked by the RapidIO bus core).
 */
static struct subsys_interface riocm_interface = {
	.name = "rio_cm",
	.subsys = &rio_bus_type,
	.add_dev = riocm_add_dev,
	.remove_dev = riocm_remove_dev,
};

/*
 * rio_mport_interface handles addition/removal local mport devices
 */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = riocm_add_mport,
	.remove_dev = riocm_remove_mport,
};

/* Reboot/shutdown hook: notifies connected peers via rio_cm_shutdown() */
static struct notifier_block rio_cm_notifier = {
	.notifier_call = rio_cm_shutdown,
};
2284 | |||
/*
 * riocm_init() - Driver initialization
 *
 * Registration order: device class and chrdev region first, then the mport
 * class interface (registration invokes riocm_add_mport() for mports that
 * already exist), then the RapidIO bus interface, the reboot notifier and
 * finally the user-space character device.  The error path unwinds in
 * reverse order via the goto chain.
 */
static int __init riocm_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		riocm_error("Cannot create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
	if (ret) {
		class_destroy(dev_class);
		return ret;
	}

	dev_major = MAJOR(dev_number);
	dev_minor_base = MINOR(dev_number);
	riocm_debug(INIT, "Registered class with %d major", dev_major);

	/*
	 * Register as rapidio_port class interface to get notifications about
	 * mport additions and removals.
	 */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		riocm_error("class_interface_register error: %d", ret);
		goto err_reg;
	}

	/*
	 * Register as RapidIO bus interface to get notifications about
	 * addition/removal of remote RapidIO devices.
	 */
	ret = subsys_interface_register(&riocm_interface);
	if (ret) {
		riocm_error("subsys_interface_register error: %d", ret);
		goto err_cl;
	}

	ret = register_reboot_notifier(&rio_cm_notifier);
	if (ret) {
		riocm_error("failed to register reboot notifier (err=%d)", ret);
		goto err_sif;
	}

	ret = riocm_cdev_add(dev_number);
	if (ret) {
		unregister_reboot_notifier(&rio_cm_notifier);
		ret = -ENODEV;
		goto err_sif;
	}

	return 0;
err_sif:
	subsys_interface_unregister(&riocm_interface);
err_cl:
	class_interface_unregister(&rio_mport_interface);
err_reg:
	unregister_chrdev_region(dev_number, 1);
	class_destroy(dev_class);
	return ret;
}
2349 | |||
/*
 * riocm_exit() - Driver teardown (reverse of riocm_init() plus cdev removal).
 *
 * Unregistering the subsys/class interfaces triggers the remove callbacks
 * for every device/mport still present, which closes remaining channels
 * before the idr is destroyed.
 */
static void __exit riocm_exit(void)
{
	riocm_debug(EXIT, "enter");
	unregister_reboot_notifier(&rio_cm_notifier);
	subsys_interface_unregister(&riocm_interface);
	class_interface_unregister(&rio_mport_interface);
	idr_destroy(&ch_idr);

	device_unregister(riocm_cdev.dev);
	cdev_del(&(riocm_cdev.cdev));

	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, 1);
}
2364 | |||
/* late_initcall: runs after device/subsystem initcalls have completed */
late_initcall(riocm_init);
module_exit(riocm_exit);
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig index 345841562f95..92767fd3b541 100644 --- a/drivers/rapidio/switches/Kconfig +++ b/drivers/rapidio/switches/Kconfig | |||
@@ -22,3 +22,9 @@ config RAPIDIO_CPS_GEN2 | |||
22 | default n | 22 | default n |
23 | ---help--- | 23 | ---help--- |
24 | Includes support for ITD CPS Gen.2 serial RapidIO switches. | 24 | Includes support for ITD CPS Gen.2 serial RapidIO switches. |
25 | |||
26 | config RAPIDIO_RXS_GEN3 | ||
27 | tristate "IDT RXS Gen.3 SRIO switch support" | ||
28 | default n | ||
29 | ---help--- | ||
30 | Includes support for IDT RXS Gen.3 serial RapidIO switches. |
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index 051cc6b38188..6bdd54c4e733 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile | |||
@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o | |||
6 | obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o | 6 | obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o |
7 | obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o | 7 | obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o |
8 | obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o | 8 | obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o |
9 | obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o | ||
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 9f7fe21580bb..e67b923b1ca6 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
@@ -436,10 +436,11 @@ static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
436 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); | 436 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); |
437 | } | 437 | } |
438 | 438 | ||
439 | spin_unlock(&rdev->rswitch->lock); | ||
440 | |||
439 | /* Create device-specific sysfs attributes */ | 441 | /* Create device-specific sysfs attributes */ |
440 | idtg2_sysfs(rdev, true); | 442 | idtg2_sysfs(rdev, true); |
441 | 443 | ||
442 | spin_unlock(&rdev->rswitch->lock); | ||
443 | return 0; | 444 | return 0; |
444 | } | 445 | } |
445 | 446 | ||
@@ -452,11 +453,9 @@ static void idtg2_remove(struct rio_dev *rdev) | |||
452 | return; | 453 | return; |
453 | } | 454 | } |
454 | rdev->rswitch->ops = NULL; | 455 | rdev->rswitch->ops = NULL; |
455 | 456 | spin_unlock(&rdev->rswitch->lock); | |
456 | /* Remove device-specific sysfs attributes */ | 457 | /* Remove device-specific sysfs attributes */ |
457 | idtg2_sysfs(rdev, false); | 458 | idtg2_sysfs(rdev, false); |
458 | |||
459 | spin_unlock(&rdev->rswitch->lock); | ||
460 | } | 459 | } |
461 | 460 | ||
462 | static struct rio_device_id idtg2_id_table[] = { | 461 | static struct rio_device_id idtg2_id_table[] = { |
diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c new file mode 100644 index 000000000000..c5923a547bed --- /dev/null +++ b/drivers/rapidio/switches/idt_gen3.c | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * IDT RXS Gen.3 Serial RapidIO switch family support | ||
3 | * | ||
4 | * Copyright 2016 Integrated Device Technology, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/stat.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/rio.h> | ||
15 | #include <linux/rio_drv.h> | ||
16 | #include <linux/rio_ids.h> | ||
17 | #include <linux/delay.h> | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | #include "../rio.h" | ||
21 | |||
22 | #define RIO_EM_PW_STAT 0x40020 | ||
23 | #define RIO_PW_CTL 0x40204 | ||
24 | #define RIO_PW_CTL_PW_TMR 0xffffff00 | ||
25 | #define RIO_PW_ROUTE 0x40208 | ||
26 | |||
27 | #define RIO_EM_DEV_INT_EN 0x40030 | ||
28 | |||
29 | #define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100) | ||
30 | #define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000 | ||
31 | |||
32 | #define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100) | ||
33 | #define RIO_PLM_SPx_PW_EN_OK2U 0x40000000 | ||
34 | #define RIO_PLM_SPx_PW_EN_LINIT 0x10000000 | ||
35 | |||
36 | #define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) | ||
37 | #define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ | ||
38 | (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) | ||
39 | |||
40 | static int | ||
41 | idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | ||
42 | u16 table, u16 route_destid, u8 route_port) | ||
43 | { | ||
44 | u32 rval; | ||
45 | u32 entry = route_port; | ||
46 | int err = 0; | ||
47 | |||
48 | pr_debug("RIO: %s t=0x%x did_%x to p_%x\n", | ||
49 | __func__, table, route_destid, entry); | ||
50 | |||
51 | if (route_destid > 0xFF) | ||
52 | return -EINVAL; | ||
53 | |||
54 | if (route_port == RIO_INVALID_ROUTE) | ||
55 | entry = RIO_RT_ENTRY_DROP_PKT; | ||
56 | |||
57 | if (table == RIO_GLOBAL_TABLE) { | ||
58 | /* Use broadcast register to update all per-port tables */ | ||
59 | err = rio_mport_write_config_32(mport, destid, hopcount, | ||
60 | RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid), | ||
61 | entry); | ||
62 | return err; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Verify that specified port/table number is valid | ||
67 | */ | ||
68 | err = rio_mport_read_config_32(mport, destid, hopcount, | ||
69 | RIO_SWP_INFO_CAR, &rval); | ||
70 | if (err) | ||
71 | return err; | ||
72 | |||
73 | if (table >= RIO_GET_TOTAL_PORTS(rval)) | ||
74 | return -EINVAL; | ||
75 | |||
76 | err = rio_mport_write_config_32(mport, destid, hopcount, | ||
77 | RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), | ||
78 | entry); | ||
79 | return err; | ||
80 | } | ||
81 | |||
82 | static int | ||
83 | idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | ||
84 | u16 table, u16 route_destid, u8 *route_port) | ||
85 | { | ||
86 | u32 rval; | ||
87 | int err; | ||
88 | |||
89 | if (route_destid > 0xFF) | ||
90 | return -EINVAL; | ||
91 | |||
92 | err = rio_mport_read_config_32(mport, destid, hopcount, | ||
93 | RIO_SWP_INFO_CAR, &rval); | ||
94 | if (err) | ||
95 | return err; | ||
96 | |||
97 | /* | ||
98 | * This switch device does not have the dedicated global routing table. | ||
99 | * It is substituted by reading routing table of the ingress port of | ||
100 | * maintenance read requests. | ||
101 | */ | ||
102 | if (table == RIO_GLOBAL_TABLE) | ||
103 | table = RIO_GET_PORT_NUM(rval); | ||
104 | else if (table >= RIO_GET_TOTAL_PORTS(rval)) | ||
105 | return -EINVAL; | ||
106 | |||
107 | err = rio_mport_read_config_32(mport, destid, hopcount, | ||
108 | RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), | ||
109 | &rval); | ||
110 | if (err) | ||
111 | return err; | ||
112 | |||
113 | if (rval == RIO_RT_ENTRY_DROP_PKT) | ||
114 | *route_port = RIO_INVALID_ROUTE; | ||
115 | else | ||
116 | *route_port = (u8)rval; | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static int | ||
122 | idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, | ||
123 | u16 table) | ||
124 | { | ||
125 | u32 i; | ||
126 | u32 rval; | ||
127 | int err; | ||
128 | |||
129 | if (table == RIO_GLOBAL_TABLE) { | ||
130 | for (i = 0; i <= 0xff; i++) { | ||
131 | err = rio_mport_write_config_32(mport, destid, hopcount, | ||
132 | RIO_BC_L2_Gn_ENTRYx_CSR(0, i), | ||
133 | RIO_RT_ENTRY_DROP_PKT); | ||
134 | if (err) | ||
135 | break; | ||
136 | } | ||
137 | |||
138 | return err; | ||
139 | } | ||
140 | |||
141 | err = rio_mport_read_config_32(mport, destid, hopcount, | ||
142 | RIO_SWP_INFO_CAR, &rval); | ||
143 | if (err) | ||
144 | return err; | ||
145 | |||
146 | if (table >= RIO_GET_TOTAL_PORTS(rval)) | ||
147 | return -EINVAL; | ||
148 | |||
149 | for (i = 0; i <= 0xff; i++) { | ||
150 | err = rio_mport_write_config_32(mport, destid, hopcount, | ||
151 | RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), | ||
152 | RIO_RT_ENTRY_DROP_PKT); | ||
153 | if (err) | ||
154 | break; | ||
155 | } | ||
156 | |||
157 | return err; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * This routine performs device-specific initialization only. | ||
162 | * All standard EM configuration should be performed at upper level. | ||
163 | */ | ||
164 | static int | ||
165 | idtg3_em_init(struct rio_dev *rdev) | ||
166 | { | ||
167 | int i, tmp; | ||
168 | u32 rval; | ||
169 | |||
170 | pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); | ||
171 | |||
172 | /* Disable assertion of interrupt signal */ | ||
173 | rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0); | ||
174 | |||
175 | /* Disable port-write event notifications during initialization */ | ||
176 | rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, | ||
177 | RIO_EM_PW_TX_CTRL_PW_DIS); | ||
178 | |||
179 | /* Configure Port-Write notifications for hot-swap events */ | ||
180 | tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); | ||
181 | for (i = 0; i < tmp; i++) { | ||
182 | |||
183 | rio_read_config_32(rdev, | ||
184 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i), | ||
185 | &rval); | ||
186 | if (rval & RIO_PORT_N_ERR_STS_PORT_UA) | ||
187 | continue; | ||
188 | |||
189 | /* Clear events signaled before enabling notification */ | ||
190 | rio_write_config_32(rdev, | ||
191 | rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0); | ||
192 | |||
193 | /* Enable event notifications */ | ||
194 | rio_write_config_32(rdev, | ||
195 | rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i), | ||
196 | RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK); | ||
197 | /* Enable port-write generation on events */ | ||
198 | rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i), | ||
199 | RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT); | ||
200 | |||
201 | } | ||
202 | |||
203 | /* Set Port-Write destination port */ | ||
204 | tmp = RIO_GET_PORT_NUM(rdev->swpinfo); | ||
205 | rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp); | ||
206 | |||
207 | |||
208 | /* Enable sending port-write event notifications */ | ||
209 | rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); | ||
210 | |||
211 | /* set TVAL = ~50us */ | ||
212 | rio_write_config_32(rdev, | ||
213 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | |||
218 | /* | ||
219 | * idtg3_em_handler - device-specific error handler | ||
220 | * | ||
221 | * If the link is down (PORT_UNINIT) does nothing - this is considered | ||
222 | * as link partner removal from the port. | ||
223 | * | ||
224 | * If the link is up (PORT_OK) - situation is handled as *new* device insertion. | ||
225 | * In this case ERR_STOP bits are cleared by issuing soft reset command to the | ||
226 | * reporting port. Inbound and outbound ackIDs are cleared by the reset as well. | ||
227 | * This way the port is synchronized with freshly inserted device (assuming it | ||
228 | * was reset/powered-up on insertion). | ||
229 | * | ||
230 | * TODO: This is not sufficient in a situation when a link between two devices | ||
231 | * was down and up again (e.g. cable disconnect). For that situation full ackID | ||
232 | * realignment process has to be implemented. | ||
233 | */ | ||
234 | static int | ||
235 | idtg3_em_handler(struct rio_dev *rdev, u8 pnum) | ||
236 | { | ||
237 | u32 err_status; | ||
238 | u32 rval; | ||
239 | |||
240 | rio_read_config_32(rdev, | ||
241 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), | ||
242 | &err_status); | ||
243 | |||
244 | /* Do nothing for device/link removal */ | ||
245 | if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) | ||
246 | return 0; | ||
247 | |||
248 | /* When link is OK we have a device insertion. | ||
249 | * Request port soft reset to clear errors if they present. | ||
250 | * Inbound and outbound ackIDs will be 0 after reset. | ||
251 | */ | ||
252 | if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | | ||
253 | RIO_PORT_N_ERR_STS_INP_ES)) { | ||
254 | rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval); | ||
255 | rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), | ||
256 | rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST); | ||
257 | udelay(10); | ||
258 | rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval); | ||
259 | msleep(500); | ||
260 | } | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | static struct rio_switch_ops idtg3_switch_ops = { | ||
266 | .owner = THIS_MODULE, | ||
267 | .add_entry = idtg3_route_add_entry, | ||
268 | .get_entry = idtg3_route_get_entry, | ||
269 | .clr_table = idtg3_route_clr_table, | ||
270 | .em_init = idtg3_em_init, | ||
271 | .em_handle = idtg3_em_handler, | ||
272 | }; | ||
273 | |||
274 | static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id) | ||
275 | { | ||
276 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | ||
277 | |||
278 | spin_lock(&rdev->rswitch->lock); | ||
279 | |||
280 | if (rdev->rswitch->ops) { | ||
281 | spin_unlock(&rdev->rswitch->lock); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | |||
285 | rdev->rswitch->ops = &idtg3_switch_ops; | ||
286 | |||
287 | if (rdev->do_enum) { | ||
288 | /* Disable hierarchical routing support: Existing fabric | ||
289 | * enumeration/discovery process (see rio-scan.c) uses 8-bit | ||
290 | * flat destination ID routing only. | ||
291 | */ | ||
292 | rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0); | ||
293 | } | ||
294 | |||
295 | spin_unlock(&rdev->rswitch->lock); | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static void idtg3_remove(struct rio_dev *rdev) | ||
301 | { | ||
302 | pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); | ||
303 | spin_lock(&rdev->rswitch->lock); | ||
304 | if (rdev->rswitch->ops == &idtg3_switch_ops) | ||
305 | rdev->rswitch->ops = NULL; | ||
306 | spin_unlock(&rdev->rswitch->lock); | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Gen3 switches repeat sending PW messages until a corresponding event flag | ||
311 | * is cleared. Use shutdown notification to disable generation of port-write | ||
312 | * messages if their destination node is shut down. | ||
313 | */ | ||
314 | static void idtg3_shutdown(struct rio_dev *rdev) | ||
315 | { | ||
316 | int i; | ||
317 | u32 rval; | ||
318 | u16 destid; | ||
319 | |||
320 | /* Currently the enumerator node acts also as PW handler */ | ||
321 | if (!rdev->do_enum) | ||
322 | return; | ||
323 | |||
324 | pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev)); | ||
325 | |||
326 | rio_read_config_32(rdev, RIO_PW_ROUTE, &rval); | ||
327 | i = RIO_GET_PORT_NUM(rdev->swpinfo); | ||
328 | |||
329 | /* Check port-write destination port */ | ||
330 | if (!((1 << i) & rval)) | ||
331 | return; | ||
332 | |||
333 | /* Disable sending port-write event notifications if PW destID | ||
334 | * matches to one of the enumerator node | ||
335 | */ | ||
336 | rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval); | ||
337 | |||
338 | if (rval & RIO_EM_PW_TGT_DEVID_DEV16) | ||
339 | destid = rval >> 16; | ||
340 | else | ||
341 | destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16); | ||
342 | |||
343 | if (rdev->net->hport->host_deviceid == destid) { | ||
344 | rio_write_config_32(rdev, | ||
345 | rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); | ||
346 | pr_debug("RIO: %s(%s) PW transmission disabled\n", | ||
347 | __func__, rio_name(rdev)); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | static struct rio_device_id idtg3_id_table[] = { | ||
352 | {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)}, | ||
353 | {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)}, | ||
354 | { 0, } /* terminate list */ | ||
355 | }; | ||
356 | |||
357 | static struct rio_driver idtg3_driver = { | ||
358 | .name = "idt_gen3", | ||
359 | .id_table = idtg3_id_table, | ||
360 | .probe = idtg3_probe, | ||
361 | .remove = idtg3_remove, | ||
362 | .shutdown = idtg3_shutdown, | ||
363 | }; | ||
364 | |||
365 | static int __init idtg3_init(void) | ||
366 | { | ||
367 | return rio_register_driver(&idtg3_driver); | ||
368 | } | ||
369 | |||
370 | static void __exit idtg3_exit(void) | ||
371 | { | ||
372 | pr_debug("RIO: %s\n", __func__); | ||
373 | rio_unregister_driver(&idtg3_driver); | ||
374 | pr_debug("RIO: %s done\n", __func__); | ||
375 | } | ||
376 | |||
377 | device_initcall(idtg3_init); | ||
378 | module_exit(idtg3_exit); | ||
379 | |||
380 | MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver"); | ||
381 | MODULE_AUTHOR("Integrated Device Technology, Inc."); | ||
382 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 42c8b014fe15..2700d15f7584 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c | |||
@@ -175,12 +175,10 @@ tsi57x_em_init(struct rio_dev *rdev) | |||
175 | 175 | ||
176 | /* Clear all pending interrupts */ | 176 | /* Clear all pending interrupts */ |
177 | rio_read_config_32(rdev, | 177 | rio_read_config_32(rdev, |
178 | rdev->phys_efptr + | 178 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), |
179 | RIO_PORT_N_ERR_STS_CSR(portnum), | ||
180 | ®val); | 179 | ®val); |
181 | rio_write_config_32(rdev, | 180 | rio_write_config_32(rdev, |
182 | rdev->phys_efptr + | 181 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), |
183 | RIO_PORT_N_ERR_STS_CSR(portnum), | ||
184 | regval & 0x07120214); | 182 | regval & 0x07120214); |
185 | 183 | ||
186 | rio_read_config_32(rdev, | 184 | rio_read_config_32(rdev, |
@@ -198,7 +196,7 @@ tsi57x_em_init(struct rio_dev *rdev) | |||
198 | 196 | ||
199 | /* Skip next (odd) port if the current port is in x4 mode */ | 197 | /* Skip next (odd) port if the current port is in x4 mode */ |
200 | rio_read_config_32(rdev, | 198 | rio_read_config_32(rdev, |
201 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), | 199 | RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), |
202 | ®val); | 200 | ®val); |
203 | if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) | 201 | if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) |
204 | portnum++; | 202 | portnum++; |
@@ -221,23 +219,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) | |||
221 | u32 regval; | 219 | u32 regval; |
222 | 220 | ||
223 | rio_read_config_32(rdev, | 221 | rio_read_config_32(rdev, |
224 | rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), | 222 | RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), |
225 | &err_status); | 223 | &err_status); |
226 | 224 | ||
227 | if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) && | 225 | if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) && |
228 | (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | | 226 | (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | |
229 | RIO_PORT_N_ERR_STS_PW_INP_ES))) { | 227 | RIO_PORT_N_ERR_STS_INP_ES))) { |
230 | /* Remove any queued packets by locking/unlocking port */ | 228 | /* Remove any queued packets by locking/unlocking port */ |
231 | rio_read_config_32(rdev, | 229 | rio_read_config_32(rdev, |
232 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), | 230 | RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), |
233 | ®val); | 231 | ®val); |
234 | if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { | 232 | if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { |
235 | rio_write_config_32(rdev, | 233 | rio_write_config_32(rdev, |
236 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), | 234 | RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), |
237 | regval | RIO_PORT_N_CTL_LOCKOUT); | 235 | regval | RIO_PORT_N_CTL_LOCKOUT); |
238 | udelay(50); | 236 | udelay(50); |
239 | rio_write_config_32(rdev, | 237 | rio_write_config_32(rdev, |
240 | rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), | 238 | RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), |
241 | regval); | 239 | regval); |
242 | } | 240 | } |
243 | 241 | ||
@@ -245,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) | |||
245 | * valid bit | 243 | * valid bit |
246 | */ | 244 | */ |
247 | rio_read_config_32(rdev, | 245 | rio_read_config_32(rdev, |
248 | rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), | 246 | RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum), |
249 | ®val); | 247 | ®val); |
250 | 248 | ||
251 | /* Send a Packet-Not-Accepted/Link-Request-Input-Status control | 249 | /* Send a Packet-Not-Accepted/Link-Request-Input-Status control |
@@ -259,8 +257,8 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) | |||
259 | while (checkcount--) { | 257 | while (checkcount--) { |
260 | udelay(50); | 258 | udelay(50); |
261 | rio_read_config_32(rdev, | 259 | rio_read_config_32(rdev, |
262 | rdev->phys_efptr + | 260 | RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, |
263 | RIO_PORT_N_MNT_RSP_CSR(portnum), | 261 | portnum), |
264 | ®val); | 262 | ®val); |
265 | if (regval & RIO_PORT_N_MNT_RSP_RVAL) | 263 | if (regval & RIO_PORT_N_MNT_RSP_RVAL) |
266 | goto exit_es; | 264 | goto exit_es; |
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c index 8fe41caac38e..e2d7d039ce3b 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.c +++ b/drivers/video/fbdev/bfin_adv7393fb.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * TODO: Code Cleanup | 10 | * TODO: Code Cleanup |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define DRIVER_NAME "bfin-adv7393" | ||
14 | |||
13 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt | 15 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt |
14 | 16 | ||
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
diff --git a/drivers/video/fbdev/bfin_adv7393fb.h b/drivers/video/fbdev/bfin_adv7393fb.h index cd591b5152a5..afd0380e19e1 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.h +++ b/drivers/video/fbdev/bfin_adv7393fb.h | |||
@@ -59,8 +59,6 @@ enum { | |||
59 | BLANK_OFF, | 59 | BLANK_OFF, |
60 | }; | 60 | }; |
61 | 61 | ||
62 | #define DRIVER_NAME "bfin-adv7393" | ||
63 | |||
64 | struct adv7393fb_modes { | 62 | struct adv7393fb_modes { |
65 | const s8 name[25]; /* Full name */ | 63 | const s8 name[25]; /* Full name */ |
66 | u16 xres; /* Active Horizonzal Pixels */ | 64 | u16 xres; /* Active Horizonzal Pixels */ |
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c index 10fbfd8ab963..b6bc4a0bda2a 100644 --- a/drivers/video/logo/logo.c +++ b/drivers/video/logo/logo.c | |||
@@ -36,11 +36,11 @@ static int __init fb_logo_late_init(void) | |||
36 | 36 | ||
37 | late_initcall(fb_logo_late_init); | 37 | late_initcall(fb_logo_late_init); |
38 | 38 | ||
39 | /* logo's are marked __initdata. Use __init_refok to tell | 39 | /* logo's are marked __initdata. Use __ref to tell |
40 | * modpost that it is intended that this function uses data | 40 | * modpost that it is intended that this function uses data |
41 | * marked __initdata. | 41 | * marked __initdata. |
42 | */ | 42 | */ |
43 | const struct linux_logo * __init_refok fb_find_logo(int depth) | 43 | const struct linux_logo * __ref fb_find_logo(int depth) |
44 | { | 44 | { |
45 | const struct linux_logo *logo = NULL; | 45 | const struct linux_logo *logo = NULL; |
46 | 46 | ||
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index a2eec97d5064..bb09de633939 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) | |||
390 | goto out; | 390 | goto out; |
391 | } | 391 | } |
392 | 392 | ||
393 | hdq_data->hdq_irqstatus = 0; | ||
394 | |||
395 | if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { | 393 | if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { |
396 | hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, | 394 | hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, |
397 | OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, | 395 | OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, |
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c index d488961a8c90..51f2f66d6555 100644 --- a/drivers/w1/slaves/w1_ds2406.c +++ b/drivers/w1/slaves/w1_ds2406.c | |||
@@ -153,16 +153,4 @@ static struct w1_family w1_family_12 = { | |||
153 | .fid = W1_FAMILY_DS2406, | 153 | .fid = W1_FAMILY_DS2406, |
154 | .fops = &w1_f12_fops, | 154 | .fops = &w1_f12_fops, |
155 | }; | 155 | }; |
156 | 156 | module_w1_family(w1_family_12); | |
157 | static int __init w1_f12_init(void) | ||
158 | { | ||
159 | return w1_register_family(&w1_family_12); | ||
160 | } | ||
161 | |||
162 | static void __exit w1_f12_exit(void) | ||
163 | { | ||
164 | w1_unregister_family(&w1_family_12); | ||
165 | } | ||
166 | |||
167 | module_init(w1_f12_init); | ||
168 | module_exit(w1_f12_exit); | ||
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index 7dfa0e11688a..aec5958e66e9 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c | |||
@@ -351,16 +351,4 @@ static struct w1_family w1_family_29 = { | |||
351 | .fid = W1_FAMILY_DS2408, | 351 | .fid = W1_FAMILY_DS2408, |
352 | .fops = &w1_f29_fops, | 352 | .fops = &w1_f29_fops, |
353 | }; | 353 | }; |
354 | 354 | module_w1_family(w1_family_29); | |
355 | static int __init w1_f29_init(void) | ||
356 | { | ||
357 | return w1_register_family(&w1_family_29); | ||
358 | } | ||
359 | |||
360 | static void __exit w1_f29_exit(void) | ||
361 | { | ||
362 | w1_unregister_family(&w1_family_29); | ||
363 | } | ||
364 | |||
365 | module_init(w1_f29_init); | ||
366 | module_exit(w1_f29_exit); | ||
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c index ee28fc1ff390..f2e1c51533b9 100644 --- a/drivers/w1/slaves/w1_ds2413.c +++ b/drivers/w1/slaves/w1_ds2413.c | |||
@@ -135,16 +135,4 @@ static struct w1_family w1_family_3a = { | |||
135 | .fid = W1_FAMILY_DS2413, | 135 | .fid = W1_FAMILY_DS2413, |
136 | .fops = &w1_f3a_fops, | 136 | .fops = &w1_f3a_fops, |
137 | }; | 137 | }; |
138 | 138 | module_w1_family(w1_family_3a); | |
139 | static int __init w1_f3a_init(void) | ||
140 | { | ||
141 | return w1_register_family(&w1_family_3a); | ||
142 | } | ||
143 | |||
144 | static void __exit w1_f3a_exit(void) | ||
145 | { | ||
146 | w1_unregister_family(&w1_family_3a); | ||
147 | } | ||
148 | |||
149 | module_init(w1_f3a_init); | ||
150 | module_exit(w1_f3a_exit); | ||
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c index 7e41b7d91fb5..4ab54fd9dde2 100644 --- a/drivers/w1/slaves/w1_ds2423.c +++ b/drivers/w1/slaves/w1_ds2423.c | |||
@@ -138,19 +138,7 @@ static struct w1_family w1_family_1d = { | |||
138 | .fid = W1_COUNTER_DS2423, | 138 | .fid = W1_COUNTER_DS2423, |
139 | .fops = &w1_f1d_fops, | 139 | .fops = &w1_f1d_fops, |
140 | }; | 140 | }; |
141 | 141 | module_w1_family(w1_family_1d); | |
142 | static int __init w1_f1d_init(void) | ||
143 | { | ||
144 | return w1_register_family(&w1_family_1d); | ||
145 | } | ||
146 | |||
147 | static void __exit w1_f1d_exit(void) | ||
148 | { | ||
149 | w1_unregister_family(&w1_family_1d); | ||
150 | } | ||
151 | |||
152 | module_init(w1_f1d_init); | ||
153 | module_exit(w1_f1d_exit); | ||
154 | 142 | ||
155 | MODULE_LICENSE("GPL"); | 143 | MODULE_LICENSE("GPL"); |
156 | MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>"); | 144 | MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>"); |
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c index 9c4ff9d28adc..80572cb63ba8 100644 --- a/drivers/w1/slaves/w1_ds2431.c +++ b/drivers/w1/slaves/w1_ds2431.c | |||
@@ -288,19 +288,7 @@ static struct w1_family w1_family_2d = { | |||
288 | .fid = W1_EEPROM_DS2431, | 288 | .fid = W1_EEPROM_DS2431, |
289 | .fops = &w1_f2d_fops, | 289 | .fops = &w1_f2d_fops, |
290 | }; | 290 | }; |
291 | 291 | module_w1_family(w1_family_2d); | |
292 | static int __init w1_f2d_init(void) | ||
293 | { | ||
294 | return w1_register_family(&w1_family_2d); | ||
295 | } | ||
296 | |||
297 | static void __exit w1_f2d_fini(void) | ||
298 | { | ||
299 | w1_unregister_family(&w1_family_2d); | ||
300 | } | ||
301 | |||
302 | module_init(w1_f2d_init); | ||
303 | module_exit(w1_f2d_fini); | ||
304 | 292 | ||
305 | MODULE_LICENSE("GPL"); | 293 | MODULE_LICENSE("GPL"); |
306 | MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>"); | 294 | MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>"); |
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c index 72319a968a9e..6cf378c89ecb 100644 --- a/drivers/w1/slaves/w1_ds2433.c +++ b/drivers/w1/slaves/w1_ds2433.c | |||
@@ -305,16 +305,4 @@ static struct w1_family w1_family_23 = { | |||
305 | .fid = W1_EEPROM_DS2433, | 305 | .fid = W1_EEPROM_DS2433, |
306 | .fops = &w1_f23_fops, | 306 | .fops = &w1_f23_fops, |
307 | }; | 307 | }; |
308 | 308 | module_w1_family(w1_family_23); | |
309 | static int __init w1_f23_init(void) | ||
310 | { | ||
311 | return w1_register_family(&w1_family_23); | ||
312 | } | ||
313 | |||
314 | static void __exit w1_f23_fini(void) | ||
315 | { | ||
316 | w1_unregister_family(&w1_family_23); | ||
317 | } | ||
318 | |||
319 | module_init(w1_f23_init); | ||
320 | module_exit(w1_f23_fini); | ||
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c index d9079d48d112..ffa37f773b3b 100644 --- a/drivers/w1/slaves/w1_ds2760.c +++ b/drivers/w1/slaves/w1_ds2760.c | |||
@@ -121,25 +121,14 @@ static const struct attribute_group *w1_ds2760_groups[] = { | |||
121 | NULL, | 121 | NULL, |
122 | }; | 122 | }; |
123 | 123 | ||
124 | static DEFINE_IDA(bat_ida); | ||
125 | |||
126 | static int w1_ds2760_add_slave(struct w1_slave *sl) | 124 | static int w1_ds2760_add_slave(struct w1_slave *sl) |
127 | { | 125 | { |
128 | int ret; | 126 | int ret; |
129 | int id; | ||
130 | struct platform_device *pdev; | 127 | struct platform_device *pdev; |
131 | 128 | ||
132 | id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); | 129 | pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO); |
133 | if (id < 0) { | 130 | if (!pdev) |
134 | ret = id; | 131 | return -ENOMEM; |
135 | goto noid; | ||
136 | } | ||
137 | |||
138 | pdev = platform_device_alloc("ds2760-battery", id); | ||
139 | if (!pdev) { | ||
140 | ret = -ENOMEM; | ||
141 | goto pdev_alloc_failed; | ||
142 | } | ||
143 | pdev->dev.parent = &sl->dev; | 132 | pdev->dev.parent = &sl->dev; |
144 | 133 | ||
145 | ret = platform_device_add(pdev); | 134 | ret = platform_device_add(pdev); |
@@ -148,24 +137,19 @@ static int w1_ds2760_add_slave(struct w1_slave *sl) | |||
148 | 137 | ||
149 | dev_set_drvdata(&sl->dev, pdev); | 138 | dev_set_drvdata(&sl->dev, pdev); |
150 | 139 | ||
151 | goto success; | 140 | return 0; |
152 | 141 | ||
153 | pdev_add_failed: | 142 | pdev_add_failed: |
154 | platform_device_put(pdev); | 143 | platform_device_put(pdev); |
155 | pdev_alloc_failed: | 144 | |
156 | ida_simple_remove(&bat_ida, id); | ||
157 | noid: | ||
158 | success: | ||
159 | return ret; | 145 | return ret; |
160 | } | 146 | } |
161 | 147 | ||
162 | static void w1_ds2760_remove_slave(struct w1_slave *sl) | 148 | static void w1_ds2760_remove_slave(struct w1_slave *sl) |
163 | { | 149 | { |
164 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); | 150 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); |
165 | int id = pdev->id; | ||
166 | 151 | ||
167 | platform_device_unregister(pdev); | 152 | platform_device_unregister(pdev); |
168 | ida_simple_remove(&bat_ida, id); | ||
169 | } | 153 | } |
170 | 154 | ||
171 | static struct w1_family_ops w1_ds2760_fops = { | 155 | static struct w1_family_ops w1_ds2760_fops = { |
@@ -178,28 +162,13 @@ static struct w1_family w1_ds2760_family = { | |||
178 | .fid = W1_FAMILY_DS2760, | 162 | .fid = W1_FAMILY_DS2760, |
179 | .fops = &w1_ds2760_fops, | 163 | .fops = &w1_ds2760_fops, |
180 | }; | 164 | }; |
181 | 165 | module_w1_family(w1_ds2760_family); | |
182 | static int __init w1_ds2760_init(void) | ||
183 | { | ||
184 | pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n"); | ||
185 | ida_init(&bat_ida); | ||
186 | return w1_register_family(&w1_ds2760_family); | ||
187 | } | ||
188 | |||
189 | static void __exit w1_ds2760_exit(void) | ||
190 | { | ||
191 | w1_unregister_family(&w1_ds2760_family); | ||
192 | ida_destroy(&bat_ida); | ||
193 | } | ||
194 | 166 | ||
195 | EXPORT_SYMBOL(w1_ds2760_read); | 167 | EXPORT_SYMBOL(w1_ds2760_read); |
196 | EXPORT_SYMBOL(w1_ds2760_write); | 168 | EXPORT_SYMBOL(w1_ds2760_write); |
197 | EXPORT_SYMBOL(w1_ds2760_store_eeprom); | 169 | EXPORT_SYMBOL(w1_ds2760_store_eeprom); |
198 | EXPORT_SYMBOL(w1_ds2760_recall_eeprom); | 170 | EXPORT_SYMBOL(w1_ds2760_recall_eeprom); |
199 | 171 | ||
200 | module_init(w1_ds2760_init); | ||
201 | module_exit(w1_ds2760_exit); | ||
202 | |||
203 | MODULE_LICENSE("GPL"); | 172 | MODULE_LICENSE("GPL"); |
204 | MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>"); | 173 | MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>"); |
205 | MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip"); | 174 | MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip"); |
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c index 50e85f7929d4..f5c2aa429a92 100644 --- a/drivers/w1/slaves/w1_ds2780.c +++ b/drivers/w1/slaves/w1_ds2780.c | |||
@@ -113,25 +113,14 @@ static const struct attribute_group *w1_ds2780_groups[] = { | |||
113 | NULL, | 113 | NULL, |
114 | }; | 114 | }; |
115 | 115 | ||
116 | static DEFINE_IDA(bat_ida); | ||
117 | |||
118 | static int w1_ds2780_add_slave(struct w1_slave *sl) | 116 | static int w1_ds2780_add_slave(struct w1_slave *sl) |
119 | { | 117 | { |
120 | int ret; | 118 | int ret; |
121 | int id; | ||
122 | struct platform_device *pdev; | 119 | struct platform_device *pdev; |
123 | 120 | ||
124 | id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); | 121 | pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO); |
125 | if (id < 0) { | 122 | if (!pdev) |
126 | ret = id; | 123 | return -ENOMEM; |
127 | goto noid; | ||
128 | } | ||
129 | |||
130 | pdev = platform_device_alloc("ds2780-battery", id); | ||
131 | if (!pdev) { | ||
132 | ret = -ENOMEM; | ||
133 | goto pdev_alloc_failed; | ||
134 | } | ||
135 | pdev->dev.parent = &sl->dev; | 124 | pdev->dev.parent = &sl->dev; |
136 | 125 | ||
137 | ret = platform_device_add(pdev); | 126 | ret = platform_device_add(pdev); |
@@ -144,19 +133,15 @@ static int w1_ds2780_add_slave(struct w1_slave *sl) | |||
144 | 133 | ||
145 | pdev_add_failed: | 134 | pdev_add_failed: |
146 | platform_device_put(pdev); | 135 | platform_device_put(pdev); |
147 | pdev_alloc_failed: | 136 | |
148 | ida_simple_remove(&bat_ida, id); | ||
149 | noid: | ||
150 | return ret; | 137 | return ret; |
151 | } | 138 | } |
152 | 139 | ||
153 | static void w1_ds2780_remove_slave(struct w1_slave *sl) | 140 | static void w1_ds2780_remove_slave(struct w1_slave *sl) |
154 | { | 141 | { |
155 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); | 142 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); |
156 | int id = pdev->id; | ||
157 | 143 | ||
158 | platform_device_unregister(pdev); | 144 | platform_device_unregister(pdev); |
159 | ida_simple_remove(&bat_ida, id); | ||
160 | } | 145 | } |
161 | 146 | ||
162 | static struct w1_family_ops w1_ds2780_fops = { | 147 | static struct w1_family_ops w1_ds2780_fops = { |
@@ -169,21 +154,7 @@ static struct w1_family w1_ds2780_family = { | |||
169 | .fid = W1_FAMILY_DS2780, | 154 | .fid = W1_FAMILY_DS2780, |
170 | .fops = &w1_ds2780_fops, | 155 | .fops = &w1_ds2780_fops, |
171 | }; | 156 | }; |
172 | 157 | module_w1_family(w1_ds2780_family); | |
173 | static int __init w1_ds2780_init(void) | ||
174 | { | ||
175 | ida_init(&bat_ida); | ||
176 | return w1_register_family(&w1_ds2780_family); | ||
177 | } | ||
178 | |||
179 | static void __exit w1_ds2780_exit(void) | ||
180 | { | ||
181 | w1_unregister_family(&w1_ds2780_family); | ||
182 | ida_destroy(&bat_ida); | ||
183 | } | ||
184 | |||
185 | module_init(w1_ds2780_init); | ||
186 | module_exit(w1_ds2780_exit); | ||
187 | 158 | ||
188 | MODULE_LICENSE("GPL"); | 159 | MODULE_LICENSE("GPL"); |
189 | MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>"); | 160 | MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>"); |
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c index 1eb98fb1688d..9c03e014cf9e 100644 --- a/drivers/w1/slaves/w1_ds2781.c +++ b/drivers/w1/slaves/w1_ds2781.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/idr.h> | ||
21 | 20 | ||
22 | #include "../w1.h" | 21 | #include "../w1.h" |
23 | #include "../w1_int.h" | 22 | #include "../w1_int.h" |
@@ -111,25 +110,14 @@ static const struct attribute_group *w1_ds2781_groups[] = { | |||
111 | NULL, | 110 | NULL, |
112 | }; | 111 | }; |
113 | 112 | ||
114 | static DEFINE_IDA(bat_ida); | ||
115 | |||
116 | static int w1_ds2781_add_slave(struct w1_slave *sl) | 113 | static int w1_ds2781_add_slave(struct w1_slave *sl) |
117 | { | 114 | { |
118 | int ret; | 115 | int ret; |
119 | int id; | ||
120 | struct platform_device *pdev; | 116 | struct platform_device *pdev; |
121 | 117 | ||
122 | id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); | 118 | pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO); |
123 | if (id < 0) { | 119 | if (!pdev) |
124 | ret = id; | 120 | return -ENOMEM; |
125 | goto noid; | ||
126 | } | ||
127 | |||
128 | pdev = platform_device_alloc("ds2781-battery", id); | ||
129 | if (!pdev) { | ||
130 | ret = -ENOMEM; | ||
131 | goto pdev_alloc_failed; | ||
132 | } | ||
133 | pdev->dev.parent = &sl->dev; | 121 | pdev->dev.parent = &sl->dev; |
134 | 122 | ||
135 | ret = platform_device_add(pdev); | 123 | ret = platform_device_add(pdev); |
@@ -142,19 +130,15 @@ static int w1_ds2781_add_slave(struct w1_slave *sl) | |||
142 | 130 | ||
143 | pdev_add_failed: | 131 | pdev_add_failed: |
144 | platform_device_put(pdev); | 132 | platform_device_put(pdev); |
145 | pdev_alloc_failed: | 133 | |
146 | ida_simple_remove(&bat_ida, id); | ||
147 | noid: | ||
148 | return ret; | 134 | return ret; |
149 | } | 135 | } |
150 | 136 | ||
151 | static void w1_ds2781_remove_slave(struct w1_slave *sl) | 137 | static void w1_ds2781_remove_slave(struct w1_slave *sl) |
152 | { | 138 | { |
153 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); | 139 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); |
154 | int id = pdev->id; | ||
155 | 140 | ||
156 | platform_device_unregister(pdev); | 141 | platform_device_unregister(pdev); |
157 | ida_simple_remove(&bat_ida, id); | ||
158 | } | 142 | } |
159 | 143 | ||
160 | static struct w1_family_ops w1_ds2781_fops = { | 144 | static struct w1_family_ops w1_ds2781_fops = { |
@@ -167,21 +151,7 @@ static struct w1_family w1_ds2781_family = { | |||
167 | .fid = W1_FAMILY_DS2781, | 151 | .fid = W1_FAMILY_DS2781, |
168 | .fops = &w1_ds2781_fops, | 152 | .fops = &w1_ds2781_fops, |
169 | }; | 153 | }; |
170 | 154 | module_w1_family(w1_ds2781_family); | |
171 | static int __init w1_ds2781_init(void) | ||
172 | { | ||
173 | ida_init(&bat_ida); | ||
174 | return w1_register_family(&w1_ds2781_family); | ||
175 | } | ||
176 | |||
177 | static void __exit w1_ds2781_exit(void) | ||
178 | { | ||
179 | w1_unregister_family(&w1_ds2781_family); | ||
180 | ida_destroy(&bat_ida); | ||
181 | } | ||
182 | |||
183 | module_init(w1_ds2781_init); | ||
184 | module_exit(w1_ds2781_exit); | ||
185 | 155 | ||
186 | MODULE_LICENSE("GPL"); | 156 | MODULE_LICENSE("GPL"); |
187 | MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>"); | 157 | MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>"); |
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index 365d6dff21de..5e348d38ec5c 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c | |||
@@ -427,16 +427,4 @@ static struct w1_family w1_family_1C = { | |||
427 | .fid = W1_FAMILY_DS28E04, | 427 | .fid = W1_FAMILY_DS28E04, |
428 | .fops = &w1_f1C_fops, | 428 | .fops = &w1_f1C_fops, |
429 | }; | 429 | }; |
430 | 430 | module_w1_family(w1_family_1C); | |
431 | static int __init w1_f1C_init(void) | ||
432 | { | ||
433 | return w1_register_family(&w1_family_1C); | ||
434 | } | ||
435 | |||
436 | static void __exit w1_f1C_fini(void) | ||
437 | { | ||
438 | w1_unregister_family(&w1_family_1C); | ||
439 | } | ||
440 | |||
441 | module_init(w1_f1C_init); | ||
442 | module_exit(w1_f1C_fini); | ||
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index ed5dcb80a1f7..10a7a0767187 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h | |||
@@ -88,4 +88,16 @@ struct w1_family * w1_family_registered(u8); | |||
88 | void w1_unregister_family(struct w1_family *); | 88 | void w1_unregister_family(struct w1_family *); |
89 | int w1_register_family(struct w1_family *); | 89 | int w1_register_family(struct w1_family *); |
90 | 90 | ||
91 | /** | ||
92 | * module_w1_driver() - Helper macro for registering a 1-Wire families | ||
93 | * @__w1_family: w1_family struct | ||
94 | * | ||
95 | * Helper macro for 1-Wire families which do not do anything special in module | ||
96 | * init/exit. This eliminates a lot of boilerplate. Each module may only | ||
97 | * use this macro once, and calling it replaces module_init() and module_exit() | ||
98 | */ | ||
99 | #define module_w1_family(__w1_family) \ | ||
100 | module_driver(__w1_family, w1_register_family, \ | ||
101 | w1_unregister_family) | ||
102 | |||
91 | #endif /* __W1_FAMILY_H */ | 103 | #endif /* __W1_FAMILY_H */ |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a7a28110dc80..7f6aff3f72eb 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -605,28 +605,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, | |||
605 | * Do the same thing for the memory mapping - between | 605 | * Do the same thing for the memory mapping - between |
606 | * elf_bss and last_bss is the bss section. | 606 | * elf_bss and last_bss is the bss section. |
607 | */ | 607 | */ |
608 | k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; | 608 | k = load_addr + eppnt->p_vaddr + eppnt->p_memsz; |
609 | if (k > last_bss) | 609 | if (k > last_bss) |
610 | last_bss = k; | 610 | last_bss = k; |
611 | } | 611 | } |
612 | } | 612 | } |
613 | 613 | ||
614 | /* | ||
615 | * Now fill out the bss section: first pad the last page from | ||
616 | * the file up to the page boundary, and zero it from elf_bss | ||
617 | * up to the end of the page. | ||
618 | */ | ||
619 | if (padzero(elf_bss)) { | ||
620 | error = -EFAULT; | ||
621 | goto out; | ||
622 | } | ||
623 | /* | ||
624 | * Next, align both the file and mem bss up to the page size, | ||
625 | * since this is where elf_bss was just zeroed up to, and where | ||
626 | * last_bss will end after the vm_brk() below. | ||
627 | */ | ||
628 | elf_bss = ELF_PAGEALIGN(elf_bss); | ||
629 | last_bss = ELF_PAGEALIGN(last_bss); | ||
630 | /* Finally, if there is still more bss to allocate, do it. */ | ||
614 | if (last_bss > elf_bss) { | 631 | if (last_bss > elf_bss) { |
615 | /* | ||
616 | * Now fill out the bss section. First pad the last page up | ||
617 | * to the page boundary, and then perform a mmap to make sure | ||
618 | * that there are zero-mapped pages up to and including the | ||
619 | * last bss page. | ||
620 | */ | ||
621 | if (padzero(elf_bss)) { | ||
622 | error = -EFAULT; | ||
623 | goto out; | ||
624 | } | ||
625 | |||
626 | /* What we have mapped so far */ | ||
627 | elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); | ||
628 | |||
629 | /* Map the last of the bss segment */ | ||
630 | error = vm_brk(elf_bss, last_bss - elf_bss); | 632 | error = vm_brk(elf_bss, last_bss - elf_bss); |
631 | if (error) | 633 | if (error) |
632 | goto out; | 634 | goto out; |
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c index 490538536cb4..dd2d3f0cd55d 100644 --- a/fs/binfmt_em86.c +++ b/fs/binfmt_em86.c | |||
@@ -24,7 +24,8 @@ | |||
24 | 24 | ||
25 | static int load_em86(struct linux_binprm *bprm) | 25 | static int load_em86(struct linux_binprm *bprm) |
26 | { | 26 | { |
27 | char *interp, *i_name, *i_arg; | 27 | const char *i_name, *i_arg; |
28 | char *interp; | ||
28 | struct file * file; | 29 | struct file * file; |
29 | int retval; | 30 | int retval; |
30 | struct elfhdr elf_ex; | 31 | struct elfhdr elf_ex; |
@@ -866,7 +866,8 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size, | |||
866 | goto out; | 866 | goto out; |
867 | } | 867 | } |
868 | 868 | ||
869 | *buf = vmalloc(i_size); | 869 | if (id != READING_FIRMWARE_PREALLOC_BUFFER) |
870 | *buf = vmalloc(i_size); | ||
870 | if (!*buf) { | 871 | if (!*buf) { |
871 | ret = -ENOMEM; | 872 | ret = -ENOMEM; |
872 | goto out; | 873 | goto out; |
@@ -897,8 +898,10 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size, | |||
897 | 898 | ||
898 | out_free: | 899 | out_free: |
899 | if (ret < 0) { | 900 | if (ret < 0) { |
900 | vfree(*buf); | 901 | if (id != READING_FIRMWARE_PREALLOC_BUFFER) { |
901 | *buf = NULL; | 902 | vfree(*buf); |
903 | *buf = NULL; | ||
904 | } | ||
902 | } | 905 | } |
903 | 906 | ||
904 | out: | 907 | out: |
diff --git a/fs/inode.c b/fs/inode.c index 9cef4e16aeda..ad445542c285 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink); | |||
345 | void address_space_init_once(struct address_space *mapping) | 345 | void address_space_init_once(struct address_space *mapping) |
346 | { | 346 | { |
347 | memset(mapping, 0, sizeof(*mapping)); | 347 | memset(mapping, 0, sizeof(*mapping)); |
348 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | 348 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT); |
349 | spin_lock_init(&mapping->tree_lock); | 349 | spin_lock_init(&mapping->tree_lock); |
350 | init_rwsem(&mapping->i_mmap_rwsem); | 350 | init_rwsem(&mapping->i_mmap_rwsem); |
351 | INIT_LIST_HEAD(&mapping->private_list); | 351 | INIT_LIST_HEAD(&mapping->private_list); |
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 1a85d94f5b25..2c90e285d7c6 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
@@ -622,10 +622,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode, | |||
622 | lock = nilfs_mdt_bgl_lock(inode, group); | 622 | lock = nilfs_mdt_bgl_lock(inode, group); |
623 | 623 | ||
624 | if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) | 624 | if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) |
625 | nilfs_warning(inode->i_sb, __func__, | 625 | nilfs_msg(inode->i_sb, KERN_WARNING, |
626 | "entry number %llu already freed: ino=%lu", | 626 | "%s (ino=%lu): entry number %llu already freed", |
627 | (unsigned long long)req->pr_entry_nr, | 627 | __func__, inode->i_ino, |
628 | (unsigned long)inode->i_ino); | 628 | (unsigned long long)req->pr_entry_nr); |
629 | else | 629 | else |
630 | nilfs_palloc_group_desc_add_entries(desc, lock, 1); | 630 | nilfs_palloc_group_desc_add_entries(desc, lock, 1); |
631 | 631 | ||
@@ -663,10 +663,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, | |||
663 | lock = nilfs_mdt_bgl_lock(inode, group); | 663 | lock = nilfs_mdt_bgl_lock(inode, group); |
664 | 664 | ||
665 | if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) | 665 | if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) |
666 | nilfs_warning(inode->i_sb, __func__, | 666 | nilfs_msg(inode->i_sb, KERN_WARNING, |
667 | "entry number %llu already freed: ino=%lu", | 667 | "%s (ino=%lu): entry number %llu already freed", |
668 | (unsigned long long)req->pr_entry_nr, | 668 | __func__, inode->i_ino, |
669 | (unsigned long)inode->i_ino); | 669 | (unsigned long long)req->pr_entry_nr); |
670 | else | 670 | else |
671 | nilfs_palloc_group_desc_add_entries(desc, lock, 1); | 671 | nilfs_palloc_group_desc_add_entries(desc, lock, 1); |
672 | 672 | ||
@@ -772,10 +772,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) | |||
772 | do { | 772 | do { |
773 | if (!nilfs_clear_bit_atomic(lock, group_offset, | 773 | if (!nilfs_clear_bit_atomic(lock, group_offset, |
774 | bitmap)) { | 774 | bitmap)) { |
775 | nilfs_warning(inode->i_sb, __func__, | 775 | nilfs_msg(inode->i_sb, KERN_WARNING, |
776 | "entry number %llu already freed: ino=%lu", | 776 | "%s (ino=%lu): entry number %llu already freed", |
777 | (unsigned long long)entry_nrs[j], | 777 | __func__, inode->i_ino, |
778 | (unsigned long)inode->i_ino); | 778 | (unsigned long long)entry_nrs[j]); |
779 | } else { | 779 | } else { |
780 | n++; | 780 | n++; |
781 | } | 781 | } |
@@ -816,12 +816,11 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) | |||
816 | for (k = 0; k < nempties; k++) { | 816 | for (k = 0; k < nempties; k++) { |
817 | ret = nilfs_palloc_delete_entry_block(inode, | 817 | ret = nilfs_palloc_delete_entry_block(inode, |
818 | last_nrs[k]); | 818 | last_nrs[k]); |
819 | if (ret && ret != -ENOENT) { | 819 | if (ret && ret != -ENOENT) |
820 | nilfs_warning(inode->i_sb, __func__, | 820 | nilfs_msg(inode->i_sb, KERN_WARNING, |
821 | "failed to delete block of entry %llu: ino=%lu, err=%d", | 821 | "error %d deleting block that object (entry=%llu, ino=%lu) belongs to", |
822 | (unsigned long long)last_nrs[k], | 822 | ret, (unsigned long long)last_nrs[k], |
823 | (unsigned long)inode->i_ino, ret); | 823 | inode->i_ino); |
824 | } | ||
825 | } | 824 | } |
826 | 825 | ||
827 | desc_kaddr = kmap_atomic(desc_bh->b_page); | 826 | desc_kaddr = kmap_atomic(desc_bh->b_page); |
@@ -835,12 +834,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) | |||
835 | 834 | ||
836 | if (nfree == nilfs_palloc_entries_per_group(inode)) { | 835 | if (nfree == nilfs_palloc_entries_per_group(inode)) { |
837 | ret = nilfs_palloc_delete_bitmap_block(inode, group); | 836 | ret = nilfs_palloc_delete_bitmap_block(inode, group); |
838 | if (ret && ret != -ENOENT) { | 837 | if (ret && ret != -ENOENT) |
839 | nilfs_warning(inode->i_sb, __func__, | 838 | nilfs_msg(inode->i_sb, KERN_WARNING, |
840 | "failed to delete bitmap block of group %lu: ino=%lu, err=%d", | 839 | "error %d deleting bitmap block of group=%lu, ino=%lu", |
841 | group, | 840 | ret, group, inode->i_ino); |
842 | (unsigned long)inode->i_ino, ret); | ||
843 | } | ||
844 | } | 841 | } |
845 | } | 842 | } |
846 | return 0; | 843 | return 0; |
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index f2a7877e0c8c..01fb1831ca25 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c | |||
@@ -41,8 +41,8 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, | |||
41 | struct inode *inode = bmap->b_inode; | 41 | struct inode *inode = bmap->b_inode; |
42 | 42 | ||
43 | if (err == -EINVAL) { | 43 | if (err == -EINVAL) { |
44 | nilfs_error(inode->i_sb, fname, | 44 | __nilfs_error(inode->i_sb, fname, |
45 | "broken bmap (inode number=%lu)", inode->i_ino); | 45 | "broken bmap (inode number=%lu)", inode->i_ino); |
46 | err = -EIO; | 46 | err = -EIO; |
47 | } | 47 | } |
48 | return err; | 48 | return err; |
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index b6a4c8f93ac8..2b6ffbe5997a 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
24 | #include <linux/buffer_head.h> | 24 | #include <linux/buffer_head.h> |
25 | #include <linux/nilfs2_fs.h> | 25 | #include <linux/nilfs2_ondisk.h> /* nilfs_binfo, nilfs_inode, etc */ |
26 | #include "alloc.h" | 26 | #include "alloc.h" |
27 | #include "dat.h" | 27 | #include "dat.h" |
28 | 28 | ||
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 4cca998ec7a0..d5c23da43513 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -41,7 +41,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) | |||
41 | struct inode *inode = NILFS_BTNC_I(btnc); | 41 | struct inode *inode = NILFS_BTNC_I(btnc); |
42 | struct buffer_head *bh; | 42 | struct buffer_head *bh; |
43 | 43 | ||
44 | bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node); | 44 | bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); |
45 | if (unlikely(!bh)) | 45 | if (unlikely(!bh)) |
46 | return NULL; | 46 | return NULL; |
47 | 47 | ||
@@ -70,7 +70,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, | |||
70 | struct page *page; | 70 | struct page *page; |
71 | int err; | 71 | int err; |
72 | 72 | ||
73 | bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node); | 73 | bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); |
74 | if (unlikely(!bh)) | 74 | if (unlikely(!bh)) |
75 | return -ENOMEM; | 75 | return -ENOMEM; |
76 | 76 | ||
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 982d1e3df3a5..2e315f9f2e51 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
@@ -339,12 +339,14 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, | |||
339 | * nilfs_btree_node_broken - verify consistency of btree node | 339 | * nilfs_btree_node_broken - verify consistency of btree node |
340 | * @node: btree node block to be examined | 340 | * @node: btree node block to be examined |
341 | * @size: node size (in bytes) | 341 | * @size: node size (in bytes) |
342 | * @inode: host inode of btree | ||
342 | * @blocknr: block number | 343 | * @blocknr: block number |
343 | * | 344 | * |
344 | * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. | 345 | * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. |
345 | */ | 346 | */ |
346 | static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, | 347 | static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, |
347 | size_t size, sector_t blocknr) | 348 | size_t size, struct inode *inode, |
349 | sector_t blocknr) | ||
348 | { | 350 | { |
349 | int level, flags, nchildren; | 351 | int level, flags, nchildren; |
350 | int ret = 0; | 352 | int ret = 0; |
@@ -358,9 +360,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, | |||
358 | (flags & NILFS_BTREE_NODE_ROOT) || | 360 | (flags & NILFS_BTREE_NODE_ROOT) || |
359 | nchildren < 0 || | 361 | nchildren < 0 || |
360 | nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { | 362 | nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { |
361 | printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): " | 363 | nilfs_msg(inode->i_sb, KERN_CRIT, |
362 | "level = %d, flags = 0x%x, nchildren = %d\n", | 364 | "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", |
363 | (unsigned long long)blocknr, level, flags, nchildren); | 365 | inode->i_ino, (unsigned long long)blocknr, level, |
366 | flags, nchildren); | ||
364 | ret = 1; | 367 | ret = 1; |
365 | } | 368 | } |
366 | return ret; | 369 | return ret; |
@@ -369,12 +372,12 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, | |||
369 | /** | 372 | /** |
370 | * nilfs_btree_root_broken - verify consistency of btree root node | 373 | * nilfs_btree_root_broken - verify consistency of btree root node |
371 | * @node: btree root node to be examined | 374 | * @node: btree root node to be examined |
372 | * @ino: inode number | 375 | * @inode: host inode of btree |
373 | * | 376 | * |
374 | * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. | 377 | * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. |
375 | */ | 378 | */ |
376 | static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, | 379 | static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, |
377 | unsigned long ino) | 380 | struct inode *inode) |
378 | { | 381 | { |
379 | int level, flags, nchildren; | 382 | int level, flags, nchildren; |
380 | int ret = 0; | 383 | int ret = 0; |
@@ -387,8 +390,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, | |||
387 | level >= NILFS_BTREE_LEVEL_MAX || | 390 | level >= NILFS_BTREE_LEVEL_MAX || |
388 | nchildren < 0 || | 391 | nchildren < 0 || |
389 | nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { | 392 | nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { |
390 | pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", | 393 | nilfs_msg(inode->i_sb, KERN_CRIT, |
391 | ino, level, flags, nchildren); | 394 | "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", |
395 | inode->i_ino, level, flags, nchildren); | ||
392 | ret = 1; | 396 | ret = 1; |
393 | } | 397 | } |
394 | return ret; | 398 | return ret; |
@@ -396,13 +400,15 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, | |||
396 | 400 | ||
397 | int nilfs_btree_broken_node_block(struct buffer_head *bh) | 401 | int nilfs_btree_broken_node_block(struct buffer_head *bh) |
398 | { | 402 | { |
403 | struct inode *inode; | ||
399 | int ret; | 404 | int ret; |
400 | 405 | ||
401 | if (buffer_nilfs_checked(bh)) | 406 | if (buffer_nilfs_checked(bh)) |
402 | return 0; | 407 | return 0; |
403 | 408 | ||
409 | inode = bh->b_page->mapping->host; | ||
404 | ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, | 410 | ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, |
405 | bh->b_size, bh->b_blocknr); | 411 | bh->b_size, inode, bh->b_blocknr); |
406 | if (likely(!ret)) | 412 | if (likely(!ret)) |
407 | set_buffer_nilfs_checked(bh); | 413 | set_buffer_nilfs_checked(bh); |
408 | return ret; | 414 | return ret; |
@@ -448,13 +454,15 @@ nilfs_btree_get_node(const struct nilfs_bmap *btree, | |||
448 | return node; | 454 | return node; |
449 | } | 455 | } |
450 | 456 | ||
451 | static int | 457 | static int nilfs_btree_bad_node(const struct nilfs_bmap *btree, |
452 | nilfs_btree_bad_node(struct nilfs_btree_node *node, int level) | 458 | struct nilfs_btree_node *node, int level) |
453 | { | 459 | { |
454 | if (unlikely(nilfs_btree_node_get_level(node) != level)) { | 460 | if (unlikely(nilfs_btree_node_get_level(node) != level)) { |
455 | dump_stack(); | 461 | dump_stack(); |
456 | printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n", | 462 | nilfs_msg(btree->b_inode->i_sb, KERN_CRIT, |
457 | nilfs_btree_node_get_level(node), level); | 463 | "btree level mismatch (ino=%lu): %d != %d", |
464 | btree->b_inode->i_ino, | ||
465 | nilfs_btree_node_get_level(node), level); | ||
458 | return 1; | 466 | return 1; |
459 | } | 467 | } |
460 | return 0; | 468 | return 0; |
@@ -509,6 +517,9 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, | |||
509 | 517 | ||
510 | out_no_wait: | 518 | out_no_wait: |
511 | if (!buffer_uptodate(bh)) { | 519 | if (!buffer_uptodate(bh)) { |
520 | nilfs_msg(btree->b_inode->i_sb, KERN_ERR, | ||
521 | "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)", | ||
522 | btree->b_inode->i_ino, (unsigned long long)ptr); | ||
512 | brelse(bh); | 523 | brelse(bh); |
513 | return -EIO; | 524 | return -EIO; |
514 | } | 525 | } |
@@ -568,7 +579,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, | |||
568 | return ret; | 579 | return ret; |
569 | 580 | ||
570 | node = nilfs_btree_get_nonroot_node(path, level); | 581 | node = nilfs_btree_get_nonroot_node(path, level); |
571 | if (nilfs_btree_bad_node(node, level)) | 582 | if (nilfs_btree_bad_node(btree, node, level)) |
572 | return -EINVAL; | 583 | return -EINVAL; |
573 | if (!found) | 584 | if (!found) |
574 | found = nilfs_btree_node_lookup(node, key, &index); | 585 | found = nilfs_btree_node_lookup(node, key, &index); |
@@ -616,7 +627,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, | |||
616 | if (ret < 0) | 627 | if (ret < 0) |
617 | return ret; | 628 | return ret; |
618 | node = nilfs_btree_get_nonroot_node(path, level); | 629 | node = nilfs_btree_get_nonroot_node(path, level); |
619 | if (nilfs_btree_bad_node(node, level)) | 630 | if (nilfs_btree_bad_node(btree, node, level)) |
620 | return -EINVAL; | 631 | return -EINVAL; |
621 | index = nilfs_btree_node_get_nchildren(node) - 1; | 632 | index = nilfs_btree_node_get_nchildren(node) - 1; |
622 | ptr = nilfs_btree_node_get_ptr(node, index, ncmax); | 633 | ptr = nilfs_btree_node_get_ptr(node, index, ncmax); |
@@ -2072,8 +2083,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree, | |||
2072 | ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); | 2083 | ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); |
2073 | if (ret < 0) { | 2084 | if (ret < 0) { |
2074 | if (unlikely(ret == -ENOENT)) | 2085 | if (unlikely(ret == -ENOENT)) |
2075 | printk(KERN_CRIT "%s: key = %llu, level == %d\n", | 2086 | nilfs_msg(btree->b_inode->i_sb, KERN_CRIT, |
2076 | __func__, (unsigned long long)key, level); | 2087 | "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d", |
2088 | btree->b_inode->i_ino, | ||
2089 | (unsigned long long)key, level); | ||
2077 | goto out; | 2090 | goto out; |
2078 | } | 2091 | } |
2079 | 2092 | ||
@@ -2110,12 +2123,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, | |||
2110 | if (level < NILFS_BTREE_LEVEL_NODE_MIN || | 2123 | if (level < NILFS_BTREE_LEVEL_NODE_MIN || |
2111 | level >= NILFS_BTREE_LEVEL_MAX) { | 2124 | level >= NILFS_BTREE_LEVEL_MAX) { |
2112 | dump_stack(); | 2125 | dump_stack(); |
2113 | printk(KERN_WARNING | 2126 | nilfs_msg(btree->b_inode->i_sb, KERN_WARNING, |
2114 | "%s: invalid btree level: %d (key=%llu, ino=%lu, " | 2127 | "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)", |
2115 | "blocknr=%llu)\n", | 2128 | level, (unsigned long long)key, |
2116 | __func__, level, (unsigned long long)key, | 2129 | btree->b_inode->i_ino, |
2117 | NILFS_BMAP_I(btree)->vfs_inode.i_ino, | 2130 | (unsigned long long)bh->b_blocknr); |
2118 | (unsigned long long)bh->b_blocknr); | ||
2119 | return; | 2131 | return; |
2120 | } | 2132 | } |
2121 | 2133 | ||
@@ -2394,8 +2406,7 @@ int nilfs_btree_init(struct nilfs_bmap *bmap) | |||
2394 | 2406 | ||
2395 | __nilfs_btree_init(bmap); | 2407 | __nilfs_btree_init(bmap); |
2396 | 2408 | ||
2397 | if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), | 2409 | if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode)) |
2398 | bmap->b_inode->i_ino)) | ||
2399 | ret = -EIO; | 2410 | ret = -EIO; |
2400 | return ret; | 2411 | return ret; |
2401 | } | 2412 | } |
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index df1a25faa83b..2184e47fa4bf 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/nilfs2_fs.h> | 25 | #include <linux/nilfs2_ondisk.h> /* nilfs_btree_node */ |
26 | #include "btnode.h" | 26 | #include "btnode.h" |
27 | #include "bmap.h" | 27 | #include "bmap.h" |
28 | 28 | ||
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index 8a3d3b65af3f..a15a1601e931 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/buffer_head.h> | 22 | #include <linux/buffer_head.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/nilfs2_fs.h> | ||
25 | #include "mdt.h" | 24 | #include "mdt.h" |
26 | #include "cpfile.h" | 25 | #include "cpfile.h" |
27 | 26 | ||
@@ -332,9 +331,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, | |||
332 | int ret, ncps, nicps, nss, count, i; | 331 | int ret, ncps, nicps, nss, count, i; |
333 | 332 | ||
334 | if (unlikely(start == 0 || start > end)) { | 333 | if (unlikely(start == 0 || start > end)) { |
335 | printk(KERN_ERR "%s: invalid range of checkpoint numbers: " | 334 | nilfs_msg(cpfile->i_sb, KERN_ERR, |
336 | "[%llu, %llu)\n", __func__, | 335 | "cannot delete checkpoints: invalid range [%llu, %llu)", |
337 | (unsigned long long)start, (unsigned long long)end); | 336 | (unsigned long long)start, (unsigned long long)end); |
338 | return -EINVAL; | 337 | return -EINVAL; |
339 | } | 338 | } |
340 | 339 | ||
@@ -386,9 +385,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, | |||
386 | cpfile, cno); | 385 | cpfile, cno); |
387 | if (ret == 0) | 386 | if (ret == 0) |
388 | continue; | 387 | continue; |
389 | printk(KERN_ERR | 388 | nilfs_msg(cpfile->i_sb, KERN_ERR, |
390 | "%s: cannot delete block\n", | 389 | "error %d deleting checkpoint block", |
391 | __func__); | 390 | ret); |
392 | break; | 391 | break; |
393 | } | 392 | } |
394 | } | 393 | } |
@@ -991,14 +990,12 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, | |||
991 | int err; | 990 | int err; |
992 | 991 | ||
993 | if (cpsize > sb->s_blocksize) { | 992 | if (cpsize > sb->s_blocksize) { |
994 | printk(KERN_ERR | 993 | nilfs_msg(sb, KERN_ERR, |
995 | "NILFS: too large checkpoint size: %zu bytes.\n", | 994 | "too large checkpoint size: %zu bytes", cpsize); |
996 | cpsize); | ||
997 | return -EINVAL; | 995 | return -EINVAL; |
998 | } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) { | 996 | } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) { |
999 | printk(KERN_ERR | 997 | nilfs_msg(sb, KERN_ERR, |
1000 | "NILFS: too small checkpoint size: %zu bytes.\n", | 998 | "too small checkpoint size: %zu bytes", cpsize); |
1001 | cpsize); | ||
1002 | return -EINVAL; | 999 | return -EINVAL; |
1003 | } | 1000 | } |
1004 | 1001 | ||
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h index 0249744ae234..6eca972f9673 100644 --- a/fs/nilfs2/cpfile.h +++ b/fs/nilfs2/cpfile.h | |||
@@ -21,7 +21,8 @@ | |||
21 | 21 | ||
22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/nilfs2_fs.h> | 24 | #include <linux/nilfs2_api.h> /* nilfs_cpstat */ |
25 | #include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */ | ||
25 | 26 | ||
26 | 27 | ||
27 | int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int, | 28 | int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int, |
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 7367610ea807..dffedb2f8817 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c | |||
@@ -349,10 +349,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) | |||
349 | kaddr = kmap_atomic(entry_bh->b_page); | 349 | kaddr = kmap_atomic(entry_bh->b_page); |
350 | entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); | 350 | entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); |
351 | if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { | 351 | if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { |
352 | printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__, | 352 | nilfs_msg(dat->i_sb, KERN_CRIT, |
353 | (unsigned long long)vblocknr, | 353 | "%s: invalid vblocknr = %llu, [%llu, %llu)", |
354 | (unsigned long long)le64_to_cpu(entry->de_start), | 354 | __func__, (unsigned long long)vblocknr, |
355 | (unsigned long long)le64_to_cpu(entry->de_end)); | 355 | (unsigned long long)le64_to_cpu(entry->de_start), |
356 | (unsigned long long)le64_to_cpu(entry->de_end)); | ||
356 | kunmap_atomic(kaddr); | 357 | kunmap_atomic(kaddr); |
357 | brelse(entry_bh); | 358 | brelse(entry_bh); |
358 | return -EINVAL; | 359 | return -EINVAL; |
@@ -479,14 +480,12 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size, | |||
479 | int err; | 480 | int err; |
480 | 481 | ||
481 | if (entry_size > sb->s_blocksize) { | 482 | if (entry_size > sb->s_blocksize) { |
482 | printk(KERN_ERR | 483 | nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes", |
483 | "NILFS: too large DAT entry size: %zu bytes.\n", | 484 | entry_size); |
484 | entry_size); | ||
485 | return -EINVAL; | 485 | return -EINVAL; |
486 | } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) { | 486 | } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) { |
487 | printk(KERN_ERR | 487 | nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes", |
488 | "NILFS: too small DAT entry size: %zu bytes.\n", | 488 | entry_size); |
489 | entry_size); | ||
490 | return -EINVAL; | 489 | return -EINVAL; |
491 | } | 490 | } |
492 | 491 | ||
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h index abbfdabcabea..57dc6cf466d0 100644 --- a/fs/nilfs2/dat.h +++ b/fs/nilfs2/dat.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */ | ||
25 | 26 | ||
26 | 27 | ||
27 | struct nilfs_palloc_req; | 28 | struct nilfs_palloc_req; |
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index e506f4f7120a..908ebbf0ac7e 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c | |||
@@ -42,6 +42,28 @@ | |||
42 | #include "nilfs.h" | 42 | #include "nilfs.h" |
43 | #include "page.h" | 43 | #include "page.h" |
44 | 44 | ||
45 | static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) | ||
46 | { | ||
47 | unsigned int len = le16_to_cpu(dlen); | ||
48 | |||
49 | #if (PAGE_SIZE >= 65536) | ||
50 | if (len == NILFS_MAX_REC_LEN) | ||
51 | return 1 << 16; | ||
52 | #endif | ||
53 | return len; | ||
54 | } | ||
55 | |||
56 | static inline __le16 nilfs_rec_len_to_disk(unsigned int len) | ||
57 | { | ||
58 | #if (PAGE_SIZE >= 65536) | ||
59 | if (len == (1 << 16)) | ||
60 | return cpu_to_le16(NILFS_MAX_REC_LEN); | ||
61 | |||
62 | BUG_ON(len > (1 << 16)); | ||
63 | #endif | ||
64 | return cpu_to_le16(len); | ||
65 | } | ||
66 | |||
45 | /* | 67 | /* |
46 | * nilfs uses block-sized chunks. Arguably, sector-sized ones would be | 68 | * nilfs uses block-sized chunks. Arguably, sector-sized ones would be |
47 | * more robust, but we have what we have | 69 | * more robust, but we have what we have |
@@ -140,10 +162,9 @@ out: | |||
140 | /* Too bad, we had an error */ | 162 | /* Too bad, we had an error */ |
141 | 163 | ||
142 | Ebadsize: | 164 | Ebadsize: |
143 | nilfs_error(sb, "nilfs_check_page", | 165 | nilfs_error(sb, |
144 | "size of directory #%lu is not a multiple of chunk size", | 166 | "size of directory #%lu is not a multiple of chunk size", |
145 | dir->i_ino | 167 | dir->i_ino); |
146 | ); | ||
147 | goto fail; | 168 | goto fail; |
148 | Eshort: | 169 | Eshort: |
149 | error = "rec_len is smaller than minimal"; | 170 | error = "rec_len is smaller than minimal"; |
@@ -157,19 +178,18 @@ Enamelen: | |||
157 | Espan: | 178 | Espan: |
158 | error = "directory entry across blocks"; | 179 | error = "directory entry across blocks"; |
159 | bad_entry: | 180 | bad_entry: |
160 | nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - " | 181 | nilfs_error(sb, |
161 | "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", | 182 | "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d", |
162 | dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, | 183 | dir->i_ino, error, (page->index << PAGE_SHIFT) + offs, |
163 | (unsigned long) le64_to_cpu(p->inode), | 184 | (unsigned long)le64_to_cpu(p->inode), |
164 | rec_len, p->name_len); | 185 | rec_len, p->name_len); |
165 | goto fail; | 186 | goto fail; |
166 | Eend: | 187 | Eend: |
167 | p = (struct nilfs_dir_entry *)(kaddr + offs); | 188 | p = (struct nilfs_dir_entry *)(kaddr + offs); |
168 | nilfs_error(sb, "nilfs_check_page", | 189 | nilfs_error(sb, |
169 | "entry in directory #%lu spans the page boundary" | 190 | "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu", |
170 | "offset=%lu, inode=%lu", | 191 | dir->i_ino, (page->index << PAGE_SHIFT) + offs, |
171 | dir->i_ino, (page->index<<PAGE_SHIFT)+offs, | 192 | (unsigned long)le64_to_cpu(p->inode)); |
172 | (unsigned long) le64_to_cpu(p->inode)); | ||
173 | fail: | 193 | fail: |
174 | SetPageError(page); | 194 | SetPageError(page); |
175 | return false; | 195 | return false; |
@@ -267,8 +287,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) | |||
267 | struct page *page = nilfs_get_page(inode, n); | 287 | struct page *page = nilfs_get_page(inode, n); |
268 | 288 | ||
269 | if (IS_ERR(page)) { | 289 | if (IS_ERR(page)) { |
270 | nilfs_error(sb, __func__, "bad page in #%lu", | 290 | nilfs_error(sb, "bad page in #%lu", inode->i_ino); |
271 | inode->i_ino); | ||
272 | ctx->pos += PAGE_SIZE - offset; | 291 | ctx->pos += PAGE_SIZE - offset; |
273 | return -EIO; | 292 | return -EIO; |
274 | } | 293 | } |
@@ -278,8 +297,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) | |||
278 | NILFS_DIR_REC_LEN(1); | 297 | NILFS_DIR_REC_LEN(1); |
279 | for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) { | 298 | for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) { |
280 | if (de->rec_len == 0) { | 299 | if (de->rec_len == 0) { |
281 | nilfs_error(sb, __func__, | 300 | nilfs_error(sb, "zero-length directory entry"); |
282 | "zero-length directory entry"); | ||
283 | nilfs_put_page(page); | 301 | nilfs_put_page(page); |
284 | return -EIO; | 302 | return -EIO; |
285 | } | 303 | } |
@@ -345,7 +363,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, | |||
345 | kaddr += nilfs_last_byte(dir, n) - reclen; | 363 | kaddr += nilfs_last_byte(dir, n) - reclen; |
346 | while ((char *) de <= kaddr) { | 364 | while ((char *) de <= kaddr) { |
347 | if (de->rec_len == 0) { | 365 | if (de->rec_len == 0) { |
348 | nilfs_error(dir->i_sb, __func__, | 366 | nilfs_error(dir->i_sb, |
349 | "zero-length directory entry"); | 367 | "zero-length directory entry"); |
350 | nilfs_put_page(page); | 368 | nilfs_put_page(page); |
351 | goto out; | 369 | goto out; |
@@ -360,7 +378,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, | |||
360 | n = 0; | 378 | n = 0; |
361 | /* next page is past the blocks we've got */ | 379 | /* next page is past the blocks we've got */ |
362 | if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { | 380 | if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { |
363 | nilfs_error(dir->i_sb, __func__, | 381 | nilfs_error(dir->i_sb, |
364 | "dir %lu size %lld exceeds block count %llu", | 382 | "dir %lu size %lld exceeds block count %llu", |
365 | dir->i_ino, dir->i_size, | 383 | dir->i_ino, dir->i_size, |
366 | (unsigned long long)dir->i_blocks); | 384 | (unsigned long long)dir->i_blocks); |
@@ -469,7 +487,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode) | |||
469 | goto got_it; | 487 | goto got_it; |
470 | } | 488 | } |
471 | if (de->rec_len == 0) { | 489 | if (de->rec_len == 0) { |
472 | nilfs_error(dir->i_sb, __func__, | 490 | nilfs_error(dir->i_sb, |
473 | "zero-length directory entry"); | 491 | "zero-length directory entry"); |
474 | err = -EIO; | 492 | err = -EIO; |
475 | goto out_unlock; | 493 | goto out_unlock; |
@@ -541,7 +559,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) | |||
541 | 559 | ||
542 | while ((char *)de < (char *)dir) { | 560 | while ((char *)de < (char *)dir) { |
543 | if (de->rec_len == 0) { | 561 | if (de->rec_len == 0) { |
544 | nilfs_error(inode->i_sb, __func__, | 562 | nilfs_error(inode->i_sb, |
545 | "zero-length directory entry"); | 563 | "zero-length directory entry"); |
546 | err = -EIO; | 564 | err = -EIO; |
547 | goto out; | 565 | goto out; |
@@ -628,7 +646,7 @@ int nilfs_empty_dir(struct inode *inode) | |||
628 | 646 | ||
629 | while ((char *)de <= kaddr) { | 647 | while ((char *)de <= kaddr) { |
630 | if (de->rec_len == 0) { | 648 | if (de->rec_len == 0) { |
631 | nilfs_error(inode->i_sb, __func__, | 649 | nilfs_error(inode->i_sb, |
632 | "zero-length directory entry (kaddr=%p, de=%p)", | 650 | "zero-length directory entry (kaddr=%p, de=%p)", |
633 | kaddr, de); | 651 | kaddr, de); |
634 | goto not_empty; | 652 | goto not_empty; |
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 251a44928405..96e3ed0d9652 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c | |||
@@ -337,14 +337,16 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap, | |||
337 | 337 | ||
338 | key = nilfs_bmap_data_get_key(bmap, *bh); | 338 | key = nilfs_bmap_data_get_key(bmap, *bh); |
339 | if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { | 339 | if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { |
340 | printk(KERN_CRIT "%s: invalid key: %llu\n", __func__, | 340 | nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT, |
341 | (unsigned long long)key); | 341 | "%s (ino=%lu): invalid key: %llu", __func__, |
342 | bmap->b_inode->i_ino, (unsigned long long)key); | ||
342 | return -EINVAL; | 343 | return -EINVAL; |
343 | } | 344 | } |
344 | ptr = nilfs_direct_get_ptr(bmap, key); | 345 | ptr = nilfs_direct_get_ptr(bmap, key); |
345 | if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { | 346 | if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { |
346 | printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__, | 347 | nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT, |
347 | (unsigned long long)ptr); | 348 | "%s (ino=%lu): invalid pointer: %llu", __func__, |
349 | bmap->b_inode->i_ino, (unsigned long long)ptr); | ||
348 | return -EINVAL; | 350 | return -EINVAL; |
349 | } | 351 | } |
350 | 352 | ||
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index 3015a6e78724..cfe85e848bba 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h | |||
@@ -24,16 +24,6 @@ | |||
24 | #include "bmap.h" | 24 | #include "bmap.h" |
25 | 25 | ||
26 | 26 | ||
27 | /** | ||
28 | * struct nilfs_direct_node - direct node | ||
29 | * @dn_flags: flags | ||
30 | * @dn_pad: padding | ||
31 | */ | ||
32 | struct nilfs_direct_node { | ||
33 | __u8 dn_flags; | ||
34 | __u8 pad[7]; | ||
35 | }; | ||
36 | |||
37 | #define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1) | 27 | #define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1) |
38 | #define NILFS_DIRECT_KEY_MIN 0 | 28 | #define NILFS_DIRECT_KEY_MIN 0 |
39 | #define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1) | 29 | #define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1) |
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index e9148f94d696..853a831dcde0 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -148,8 +148,15 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, | |||
148 | int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) | 148 | int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) |
149 | { | 149 | { |
150 | wait_on_buffer(bh); | 150 | wait_on_buffer(bh); |
151 | if (!buffer_uptodate(bh)) | 151 | if (!buffer_uptodate(bh)) { |
152 | struct inode *inode = bh->b_page->mapping->host; | ||
153 | |||
154 | nilfs_msg(inode->i_sb, KERN_ERR, | ||
155 | "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)", | ||
156 | buffer_nilfs_node(bh) ? "node" : "data", | ||
157 | inode->i_ino, (unsigned long long)bh->b_blocknr); | ||
152 | return -EIO; | 158 | return -EIO; |
159 | } | ||
153 | if (buffer_dirty(bh)) | 160 | if (buffer_dirty(bh)) |
154 | return -EEXIST; | 161 | return -EEXIST; |
155 | 162 | ||
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index 1d2b1805327a..b8fa45c20c63 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c | |||
@@ -145,15 +145,14 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, | |||
145 | int err; | 145 | int err; |
146 | 146 | ||
147 | if (unlikely(!NILFS_VALID_INODE(sb, ino))) { | 147 | if (unlikely(!NILFS_VALID_INODE(sb, ino))) { |
148 | nilfs_error(sb, __func__, "bad inode number: %lu", | 148 | nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino); |
149 | (unsigned long) ino); | ||
150 | return -EINVAL; | 149 | return -EINVAL; |
151 | } | 150 | } |
152 | 151 | ||
153 | err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); | 152 | err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); |
154 | if (unlikely(err)) | 153 | if (unlikely(err)) |
155 | nilfs_warning(sb, __func__, "unable to read inode: %lu", | 154 | nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu", |
156 | (unsigned long) ino); | 155 | err, (unsigned long)ino); |
157 | return err; | 156 | return err; |
158 | } | 157 | } |
159 | 158 | ||
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 23ad2f091e76..188b94fe0ec5 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/buffer_head.h> | 25 | #include <linux/buffer_head.h> |
26 | #include <linux/nilfs2_fs.h> | ||
27 | #include "mdt.h" | 26 | #include "mdt.h" |
28 | #include "alloc.h" | 27 | #include "alloc.h" |
29 | 28 | ||
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index a0ebdb17e912..af04f553d7c9 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -112,13 +112,10 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, | |||
112 | * However, the page having this block must | 112 | * However, the page having this block must |
113 | * be locked in this case. | 113 | * be locked in this case. |
114 | */ | 114 | */ |
115 | printk(KERN_WARNING | 115 | nilfs_msg(inode->i_sb, KERN_WARNING, |
116 | "nilfs_get_block: a race condition " | 116 | "%s (ino=%lu): a race condition while inserting a data block at offset=%llu", |
117 | "while inserting a data block. " | 117 | __func__, inode->i_ino, |
118 | "(inode number=%lu, file block " | 118 | (unsigned long long)blkoff); |
119 | "offset=%llu)\n", | ||
120 | inode->i_ino, | ||
121 | (unsigned long long)blkoff); | ||
122 | err = 0; | 119 | err = 0; |
123 | } | 120 | } |
124 | nilfs_transaction_abort(inode->i_sb); | 121 | nilfs_transaction_abort(inode->i_sb); |
@@ -359,7 +356,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) | |||
359 | 356 | ||
360 | root = NILFS_I(dir)->i_root; | 357 | root = NILFS_I(dir)->i_root; |
361 | ii = NILFS_I(inode); | 358 | ii = NILFS_I(inode); |
362 | ii->i_state = 1 << NILFS_I_NEW; | 359 | ii->i_state = BIT(NILFS_I_NEW); |
363 | ii->i_root = root; | 360 | ii->i_root = root; |
364 | 361 | ||
365 | err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh); | 362 | err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh); |
@@ -558,7 +555,7 @@ static int nilfs_iget_set(struct inode *inode, void *opaque) | |||
558 | 555 | ||
559 | inode->i_ino = args->ino; | 556 | inode->i_ino = args->ino; |
560 | if (args->for_gc) { | 557 | if (args->for_gc) { |
561 | NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE; | 558 | NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE); |
562 | NILFS_I(inode)->i_cno = args->cno; | 559 | NILFS_I(inode)->i_cno = args->cno; |
563 | NILFS_I(inode)->i_root = NULL; | 560 | NILFS_I(inode)->i_root = NULL; |
564 | } else { | 561 | } else { |
@@ -726,9 +723,9 @@ repeat: | |||
726 | goto repeat; | 723 | goto repeat; |
727 | 724 | ||
728 | failed: | 725 | failed: |
729 | nilfs_warning(ii->vfs_inode.i_sb, __func__, | 726 | nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING, |
730 | "failed to truncate bmap (ino=%lu, err=%d)", | 727 | "error %d truncating bmap (ino=%lu)", ret, |
731 | ii->vfs_inode.i_ino, ret); | 728 | ii->vfs_inode.i_ino); |
732 | } | 729 | } |
733 | 730 | ||
734 | void nilfs_truncate(struct inode *inode) | 731 | void nilfs_truncate(struct inode *inode) |
@@ -939,9 +936,9 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) | |||
939 | * This will happen when somebody is freeing | 936 | * This will happen when somebody is freeing |
940 | * this inode. | 937 | * this inode. |
941 | */ | 938 | */ |
942 | nilfs_warning(inode->i_sb, __func__, | 939 | nilfs_msg(inode->i_sb, KERN_WARNING, |
943 | "cannot get inode (ino=%lu)", | 940 | "cannot set file dirty (ino=%lu): the file is being freed", |
944 | inode->i_ino); | 941 | inode->i_ino); |
945 | spin_unlock(&nilfs->ns_inode_lock); | 942 | spin_unlock(&nilfs->ns_inode_lock); |
946 | return -EINVAL; /* | 943 | return -EINVAL; /* |
947 | * NILFS_I_DIRTY may remain for | 944 | * NILFS_I_DIRTY may remain for |
@@ -962,8 +959,9 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags) | |||
962 | 959 | ||
963 | err = nilfs_load_inode_block(inode, &ibh); | 960 | err = nilfs_load_inode_block(inode, &ibh); |
964 | if (unlikely(err)) { | 961 | if (unlikely(err)) { |
965 | nilfs_warning(inode->i_sb, __func__, | 962 | nilfs_msg(inode->i_sb, KERN_WARNING, |
966 | "failed to reget inode block."); | 963 | "cannot mark inode dirty (ino=%lu): error %d loading inode block", |
964 | inode->i_ino, err); | ||
967 | return err; | 965 | return err; |
968 | } | 966 | } |
969 | nilfs_update_inode(inode, ibh, flags); | 967 | nilfs_update_inode(inode, ibh, flags); |
@@ -989,8 +987,8 @@ void nilfs_dirty_inode(struct inode *inode, int flags) | |||
989 | struct nilfs_mdt_info *mdi = NILFS_MDT(inode); | 987 | struct nilfs_mdt_info *mdi = NILFS_MDT(inode); |
990 | 988 | ||
991 | if (is_bad_inode(inode)) { | 989 | if (is_bad_inode(inode)) { |
992 | nilfs_warning(inode->i_sb, __func__, | 990 | nilfs_msg(inode->i_sb, KERN_WARNING, |
993 | "tried to mark bad_inode dirty. ignored."); | 991 | "tried to mark bad_inode dirty. ignored."); |
994 | dump_stack(); | 992 | dump_stack(); |
995 | return; | 993 | return; |
996 | } | 994 | } |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 358b57e2cdf9..f1d7989459fd 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/compat.h> /* compat_ptr() */ | 25 | #include <linux/compat.h> /* compat_ptr() */ |
26 | #include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */ | 26 | #include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */ |
27 | #include <linux/buffer_head.h> | 27 | #include <linux/buffer_head.h> |
28 | #include <linux/nilfs2_fs.h> | ||
29 | #include "nilfs.h" | 28 | #include "nilfs.h" |
30 | #include "segment.h" | 29 | #include "segment.h" |
31 | #include "bmap.h" | 30 | #include "bmap.h" |
@@ -584,27 +583,25 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, | |||
584 | 583 | ||
585 | if (unlikely(ret < 0)) { | 584 | if (unlikely(ret < 0)) { |
586 | if (ret == -ENOENT) | 585 | if (ret == -ENOENT) |
587 | printk(KERN_CRIT | 586 | nilfs_msg(inode->i_sb, KERN_CRIT, |
588 | "%s: invalid virtual block address (%s): " | 587 | "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", |
589 | "ino=%llu, cno=%llu, offset=%llu, " | 588 | __func__, vdesc->vd_flags ? "node" : "data", |
590 | "blocknr=%llu, vblocknr=%llu\n", | 589 | (unsigned long long)vdesc->vd_ino, |
591 | __func__, vdesc->vd_flags ? "node" : "data", | 590 | (unsigned long long)vdesc->vd_cno, |
592 | (unsigned long long)vdesc->vd_ino, | 591 | (unsigned long long)vdesc->vd_offset, |
593 | (unsigned long long)vdesc->vd_cno, | 592 | (unsigned long long)vdesc->vd_blocknr, |
594 | (unsigned long long)vdesc->vd_offset, | 593 | (unsigned long long)vdesc->vd_vblocknr); |
595 | (unsigned long long)vdesc->vd_blocknr, | ||
596 | (unsigned long long)vdesc->vd_vblocknr); | ||
597 | return ret; | 594 | return ret; |
598 | } | 595 | } |
599 | if (unlikely(!list_empty(&bh->b_assoc_buffers))) { | 596 | if (unlikely(!list_empty(&bh->b_assoc_buffers))) { |
600 | printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, " | 597 | nilfs_msg(inode->i_sb, KERN_CRIT, |
601 | "cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n", | 598 | "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", |
602 | __func__, vdesc->vd_flags ? "node" : "data", | 599 | __func__, vdesc->vd_flags ? "node" : "data", |
603 | (unsigned long long)vdesc->vd_ino, | 600 | (unsigned long long)vdesc->vd_ino, |
604 | (unsigned long long)vdesc->vd_cno, | 601 | (unsigned long long)vdesc->vd_cno, |
605 | (unsigned long long)vdesc->vd_offset, | 602 | (unsigned long long)vdesc->vd_offset, |
606 | (unsigned long long)vdesc->vd_blocknr, | 603 | (unsigned long long)vdesc->vd_blocknr, |
607 | (unsigned long long)vdesc->vd_vblocknr); | 604 | (unsigned long long)vdesc->vd_vblocknr); |
608 | brelse(bh); | 605 | brelse(bh); |
609 | return -EEXIST; | 606 | return -EEXIST; |
610 | } | 607 | } |
@@ -854,8 +851,8 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, | |||
854 | return 0; | 851 | return 0; |
855 | 852 | ||
856 | failed: | 853 | failed: |
857 | printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n", | 854 | nilfs_msg(nilfs->ns_sb, KERN_ERR, "error %d preparing GC: %s", ret, |
858 | msg, ret); | 855 | msg); |
859 | return ret; | 856 | return ret; |
860 | } | 857 | } |
861 | 858 | ||
@@ -963,10 +960,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, | |||
963 | } | 960 | } |
964 | 961 | ||
965 | ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); | 962 | ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); |
966 | if (ret < 0) | 963 | if (ret < 0) { |
967 | printk(KERN_ERR "NILFS: GC failed during preparation: " | 964 | nilfs_msg(inode->i_sb, KERN_ERR, |
968 | "cannot read source blocks: err=%d\n", ret); | 965 | "error %d preparing GC: cannot read source blocks", |
969 | else { | 966 | ret); |
967 | } else { | ||
970 | if (nilfs_sb_need_update(nilfs)) | 968 | if (nilfs_sb_need_update(nilfs)) |
971 | set_nilfs_discontinued(nilfs); | 969 | set_nilfs_discontinued(nilfs); |
972 | ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); | 970 | ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); |
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 0d7b71fbeff8..d56d3a5bea88 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -207,8 +207,12 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, | |||
207 | 207 | ||
208 | out_no_wait: | 208 | out_no_wait: |
209 | err = -EIO; | 209 | err = -EIO; |
210 | if (!buffer_uptodate(first_bh)) | 210 | if (!buffer_uptodate(first_bh)) { |
211 | nilfs_msg(inode->i_sb, KERN_ERR, | ||
212 | "I/O error reading meta-data file (ino=%lu, block-offset=%lu)", | ||
213 | inode->i_ino, block); | ||
211 | goto failed_bh; | 214 | goto failed_bh; |
215 | } | ||
212 | out: | 216 | out: |
213 | *out_bh = first_bh; | 217 | *out_bh = first_bh; |
214 | return 0; | 218 | return 0; |
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1ec8ae5995a5..dbcf1dc93a51 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
@@ -283,9 +283,9 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) | |||
283 | goto out; | 283 | goto out; |
284 | 284 | ||
285 | if (!inode->i_nlink) { | 285 | if (!inode->i_nlink) { |
286 | nilfs_warning(inode->i_sb, __func__, | 286 | nilfs_msg(inode->i_sb, KERN_WARNING, |
287 | "deleting nonexistent file (%lu), %d", | 287 | "deleting nonexistent file (ino=%lu), %d", |
288 | inode->i_ino, inode->i_nlink); | 288 | inode->i_ino, inode->i_nlink); |
289 | set_nlink(inode, 1); | 289 | set_nlink(inode, 1); |
290 | } | 290 | } |
291 | err = nilfs_delete_entry(de, page); | 291 | err = nilfs_delete_entry(de, page); |
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index b1d48bc0532d..33f8c8fc96e8 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h | |||
@@ -23,7 +23,8 @@ | |||
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/blkdev.h> | 25 | #include <linux/blkdev.h> |
26 | #include <linux/nilfs2_fs.h> | 26 | #include <linux/nilfs2_api.h> |
27 | #include <linux/nilfs2_ondisk.h> | ||
27 | #include "the_nilfs.h" | 28 | #include "the_nilfs.h" |
28 | #include "bmap.h" | 29 | #include "bmap.h" |
29 | 30 | ||
@@ -119,20 +120,19 @@ enum { | |||
119 | /* | 120 | /* |
120 | * Macros to check inode numbers | 121 | * Macros to check inode numbers |
121 | */ | 122 | */ |
122 | #define NILFS_MDT_INO_BITS \ | 123 | #define NILFS_MDT_INO_BITS \ |
123 | ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \ | 124 | (BIT(NILFS_DAT_INO) | BIT(NILFS_CPFILE_INO) | \ |
124 | 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \ | 125 | BIT(NILFS_SUFILE_INO) | BIT(NILFS_IFILE_INO) | \ |
125 | 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO)) | 126 | BIT(NILFS_ATIME_INO) | BIT(NILFS_SKETCH_INO)) |
126 | 127 | ||
127 | #define NILFS_SYS_INO_BITS \ | 128 | #define NILFS_SYS_INO_BITS (BIT(NILFS_ROOT_INO) | NILFS_MDT_INO_BITS) |
128 | ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS) | ||
129 | 129 | ||
130 | #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino) | 130 | #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino) |
131 | 131 | ||
132 | #define NILFS_MDT_INODE(sb, ino) \ | 132 | #define NILFS_MDT_INODE(sb, ino) \ |
133 | ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino)))) | 133 | ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino))) |
134 | #define NILFS_VALID_INODE(sb, ino) \ | 134 | #define NILFS_VALID_INODE(sb, ino) \ |
135 | ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino)))) | 135 | ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino))) |
136 | 136 | ||
137 | /** | 137 | /** |
138 | * struct nilfs_transaction_info: context information for synchronization | 138 | * struct nilfs_transaction_info: context information for synchronization |
@@ -299,10 +299,36 @@ static inline int nilfs_mark_inode_dirty_sync(struct inode *inode) | |||
299 | /* super.c */ | 299 | /* super.c */ |
300 | extern struct inode *nilfs_alloc_inode(struct super_block *); | 300 | extern struct inode *nilfs_alloc_inode(struct super_block *); |
301 | extern void nilfs_destroy_inode(struct inode *); | 301 | extern void nilfs_destroy_inode(struct inode *); |
302 | |||
302 | extern __printf(3, 4) | 303 | extern __printf(3, 4) |
303 | void nilfs_error(struct super_block *, const char *, const char *, ...); | 304 | void __nilfs_msg(struct super_block *sb, const char *level, |
305 | const char *fmt, ...); | ||
304 | extern __printf(3, 4) | 306 | extern __printf(3, 4) |
305 | void nilfs_warning(struct super_block *, const char *, const char *, ...); | 307 | void __nilfs_error(struct super_block *sb, const char *function, |
308 | const char *fmt, ...); | ||
309 | |||
310 | #ifdef CONFIG_PRINTK | ||
311 | |||
312 | #define nilfs_msg(sb, level, fmt, ...) \ | ||
313 | __nilfs_msg(sb, level, fmt, ##__VA_ARGS__) | ||
314 | #define nilfs_error(sb, fmt, ...) \ | ||
315 | __nilfs_error(sb, __func__, fmt, ##__VA_ARGS__) | ||
316 | |||
317 | #else | ||
318 | |||
319 | #define nilfs_msg(sb, level, fmt, ...) \ | ||
320 | do { \ | ||
321 | no_printk(fmt, ##__VA_ARGS__); \ | ||
322 | (void)(sb); \ | ||
323 | } while (0) | ||
324 | #define nilfs_error(sb, fmt, ...) \ | ||
325 | do { \ | ||
326 | no_printk(fmt, ##__VA_ARGS__); \ | ||
327 | __nilfs_error(sb, "", " "); \ | ||
328 | } while (0) | ||
329 | |||
330 | #endif /* CONFIG_PRINTK */ | ||
331 | |||
306 | extern struct nilfs_super_block * | 332 | extern struct nilfs_super_block * |
307 | nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); | 333 | nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); |
308 | extern int nilfs_store_magic_and_option(struct super_block *, | 334 | extern int nilfs_store_magic_and_option(struct super_block *, |
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index d97ba5f11b77..f11a3ad2df0c 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c | |||
@@ -30,9 +30,9 @@ | |||
30 | #include "mdt.h" | 30 | #include "mdt.h" |
31 | 31 | ||
32 | 32 | ||
33 | #define NILFS_BUFFER_INHERENT_BITS \ | 33 | #define NILFS_BUFFER_INHERENT_BITS \ |
34 | ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \ | 34 | (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \ |
35 | (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked)) | 35 | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked)) |
36 | 36 | ||
37 | static struct buffer_head * | 37 | static struct buffer_head * |
38 | __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, | 38 | __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, |
@@ -85,9 +85,9 @@ void nilfs_forget_buffer(struct buffer_head *bh) | |||
85 | { | 85 | { |
86 | struct page *page = bh->b_page; | 86 | struct page *page = bh->b_page; |
87 | const unsigned long clear_bits = | 87 | const unsigned long clear_bits = |
88 | (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | | 88 | (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | |
89 | 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | | 89 | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | |
90 | 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); | 90 | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected)); |
91 | 91 | ||
92 | lock_buffer(bh); | 92 | lock_buffer(bh); |
93 | set_mask_bits(&bh->b_state, clear_bits, 0); | 93 | set_mask_bits(&bh->b_state, clear_bits, 0); |
@@ -124,17 +124,17 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) | |||
124 | dbh->b_bdev = sbh->b_bdev; | 124 | dbh->b_bdev = sbh->b_bdev; |
125 | 125 | ||
126 | bh = dbh; | 126 | bh = dbh; |
127 | bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped)); | 127 | bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped)); |
128 | while ((bh = bh->b_this_page) != dbh) { | 128 | while ((bh = bh->b_this_page) != dbh) { |
129 | lock_buffer(bh); | 129 | lock_buffer(bh); |
130 | bits &= bh->b_state; | 130 | bits &= bh->b_state; |
131 | unlock_buffer(bh); | 131 | unlock_buffer(bh); |
132 | } | 132 | } |
133 | if (bits & (1UL << BH_Uptodate)) | 133 | if (bits & BIT(BH_Uptodate)) |
134 | SetPageUptodate(dpage); | 134 | SetPageUptodate(dpage); |
135 | else | 135 | else |
136 | ClearPageUptodate(dpage); | 136 | ClearPageUptodate(dpage); |
137 | if (bits & (1UL << BH_Mapped)) | 137 | if (bits & BIT(BH_Mapped)) |
138 | SetPageMappedToDisk(dpage); | 138 | SetPageMappedToDisk(dpage); |
139 | else | 139 | else |
140 | ClearPageMappedToDisk(dpage); | 140 | ClearPageMappedToDisk(dpage); |
@@ -215,7 +215,7 @@ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty) | |||
215 | create_empty_buffers(dst, sbh->b_size, 0); | 215 | create_empty_buffers(dst, sbh->b_size, 0); |
216 | 216 | ||
217 | if (copy_dirty) | 217 | if (copy_dirty) |
218 | mask |= (1UL << BH_Dirty); | 218 | mask |= BIT(BH_Dirty); |
219 | 219 | ||
220 | dbh = dbufs = page_buffers(dst); | 220 | dbh = dbufs = page_buffers(dst); |
221 | do { | 221 | do { |
@@ -403,11 +403,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) | |||
403 | 403 | ||
404 | BUG_ON(!PageLocked(page)); | 404 | BUG_ON(!PageLocked(page)); |
405 | 405 | ||
406 | if (!silent) { | 406 | if (!silent) |
407 | nilfs_warning(sb, __func__, | 407 | nilfs_msg(sb, KERN_WARNING, |
408 | "discard page: offset %lld, ino %lu", | 408 | "discard dirty page: offset=%lld, ino=%lu", |
409 | page_offset(page), inode->i_ino); | 409 | page_offset(page), inode->i_ino); |
410 | } | ||
411 | 410 | ||
412 | ClearPageUptodate(page); | 411 | ClearPageUptodate(page); |
413 | ClearPageMappedToDisk(page); | 412 | ClearPageMappedToDisk(page); |
@@ -415,18 +414,18 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) | |||
415 | if (page_has_buffers(page)) { | 414 | if (page_has_buffers(page)) { |
416 | struct buffer_head *bh, *head; | 415 | struct buffer_head *bh, *head; |
417 | const unsigned long clear_bits = | 416 | const unsigned long clear_bits = |
418 | (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | | 417 | (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | |
419 | 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | | 418 | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | |
420 | 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); | 419 | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected)); |
421 | 420 | ||
422 | bh = head = page_buffers(page); | 421 | bh = head = page_buffers(page); |
423 | do { | 422 | do { |
424 | lock_buffer(bh); | 423 | lock_buffer(bh); |
425 | if (!silent) { | 424 | if (!silent) |
426 | nilfs_warning(sb, __func__, | 425 | nilfs_msg(sb, KERN_WARNING, |
427 | "discard block %llu, size %zu", | 426 | "discard dirty block: blocknr=%llu, size=%zu", |
428 | (u64)bh->b_blocknr, bh->b_size); | 427 | (u64)bh->b_blocknr, bh->b_size); |
429 | } | 428 | |
430 | set_mask_bits(&bh->b_state, clear_bits, 0); | 429 | set_mask_bits(&bh->b_state, clear_bits, 0); |
431 | unlock_buffer(bh); | 430 | unlock_buffer(bh); |
432 | } while (bh = bh->b_this_page, bh != head); | 431 | } while (bh = bh->b_this_page, bh != head); |
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index d893dc912b62..5139efed1888 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c | |||
@@ -54,38 +54,37 @@ struct nilfs_recovery_block { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | 56 | ||
57 | static int nilfs_warn_segment_error(int err) | 57 | static int nilfs_warn_segment_error(struct super_block *sb, int err) |
58 | { | 58 | { |
59 | const char *msg = NULL; | ||
60 | |||
59 | switch (err) { | 61 | switch (err) { |
60 | case NILFS_SEG_FAIL_IO: | 62 | case NILFS_SEG_FAIL_IO: |
61 | printk(KERN_WARNING | 63 | nilfs_msg(sb, KERN_ERR, "I/O error reading segment"); |
62 | "NILFS warning: I/O error on loading last segment\n"); | ||
63 | return -EIO; | 64 | return -EIO; |
64 | case NILFS_SEG_FAIL_MAGIC: | 65 | case NILFS_SEG_FAIL_MAGIC: |
65 | printk(KERN_WARNING | 66 | msg = "Magic number mismatch"; |
66 | "NILFS warning: Segment magic number invalid\n"); | ||
67 | break; | 67 | break; |
68 | case NILFS_SEG_FAIL_SEQ: | 68 | case NILFS_SEG_FAIL_SEQ: |
69 | printk(KERN_WARNING | 69 | msg = "Sequence number mismatch"; |
70 | "NILFS warning: Sequence number mismatch\n"); | ||
71 | break; | 70 | break; |
72 | case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: | 71 | case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: |
73 | printk(KERN_WARNING | 72 | msg = "Checksum error in super root"; |
74 | "NILFS warning: Checksum error in super root\n"); | ||
75 | break; | 73 | break; |
76 | case NILFS_SEG_FAIL_CHECKSUM_FULL: | 74 | case NILFS_SEG_FAIL_CHECKSUM_FULL: |
77 | printk(KERN_WARNING | 75 | msg = "Checksum error in segment payload"; |
78 | "NILFS warning: Checksum error in segment payload\n"); | ||
79 | break; | 76 | break; |
80 | case NILFS_SEG_FAIL_CONSISTENCY: | 77 | case NILFS_SEG_FAIL_CONSISTENCY: |
81 | printk(KERN_WARNING | 78 | msg = "Inconsistency found"; |
82 | "NILFS warning: Inconsistent segment\n"); | ||
83 | break; | 79 | break; |
84 | case NILFS_SEG_NO_SUPER_ROOT: | 80 | case NILFS_SEG_NO_SUPER_ROOT: |
85 | printk(KERN_WARNING | 81 | msg = "No super root in the last segment"; |
86 | "NILFS warning: No super root in the last segment\n"); | ||
87 | break; | 82 | break; |
83 | default: | ||
84 | nilfs_msg(sb, KERN_ERR, "unrecognized segment error %d", err); | ||
85 | return -EINVAL; | ||
88 | } | 86 | } |
87 | nilfs_msg(sb, KERN_WARNING, "invalid segment: %s", msg); | ||
89 | return -EINVAL; | 88 | return -EINVAL; |
90 | } | 89 | } |
91 | 90 | ||
@@ -178,7 +177,7 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, | |||
178 | brelse(bh_sr); | 177 | brelse(bh_sr); |
179 | 178 | ||
180 | failed: | 179 | failed: |
181 | return nilfs_warn_segment_error(ret); | 180 | return nilfs_warn_segment_error(nilfs->ns_sb, ret); |
182 | } | 181 | } |
183 | 182 | ||
184 | /** | 183 | /** |
@@ -553,11 +552,10 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, | |||
553 | put_page(page); | 552 | put_page(page); |
554 | 553 | ||
555 | failed_inode: | 554 | failed_inode: |
556 | printk(KERN_WARNING | 555 | nilfs_msg(sb, KERN_WARNING, |
557 | "NILFS warning: error recovering data block " | 556 | "error %d recovering data block (ino=%lu, block-offset=%llu)", |
558 | "(err=%d, ino=%lu, block-offset=%llu)\n", | 557 | err, (unsigned long)rb->ino, |
559 | err, (unsigned long)rb->ino, | 558 | (unsigned long long)rb->blkoff); |
560 | (unsigned long long)rb->blkoff); | ||
561 | if (!err2) | 559 | if (!err2) |
562 | err2 = err; | 560 | err2 = err; |
563 | next: | 561 | next: |
@@ -680,8 +678,8 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, | |||
680 | } | 678 | } |
681 | 679 | ||
682 | if (nsalvaged_blocks) { | 680 | if (nsalvaged_blocks) { |
683 | printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n", | 681 | nilfs_msg(sb, KERN_INFO, "salvaged %lu blocks", |
684 | sb->s_id, nsalvaged_blocks); | 682 | nsalvaged_blocks); |
685 | ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE; | 683 | ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE; |
686 | } | 684 | } |
687 | out: | 685 | out: |
@@ -692,10 +690,9 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, | |||
692 | confused: | 690 | confused: |
693 | err = -EINVAL; | 691 | err = -EINVAL; |
694 | failed: | 692 | failed: |
695 | printk(KERN_ERR | 693 | nilfs_msg(sb, KERN_ERR, |
696 | "NILFS (device %s): Error roll-forwarding " | 694 | "error %d roll-forwarding partial segment at blocknr = %llu", |
697 | "(err=%d, pseg block=%llu). ", | 695 | err, (unsigned long long)pseg_start); |
698 | sb->s_id, err, (unsigned long long)pseg_start); | ||
699 | goto out; | 696 | goto out; |
700 | } | 697 | } |
701 | 698 | ||
@@ -715,9 +712,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, | |||
715 | set_buffer_dirty(bh); | 712 | set_buffer_dirty(bh); |
716 | err = sync_dirty_buffer(bh); | 713 | err = sync_dirty_buffer(bh); |
717 | if (unlikely(err)) | 714 | if (unlikely(err)) |
718 | printk(KERN_WARNING | 715 | nilfs_msg(nilfs->ns_sb, KERN_WARNING, |
719 | "NILFS warning: buffer sync write failed during " | 716 | "buffer sync write failed during post-cleaning of recovery."); |
720 | "post-cleaning of recovery.\n"); | ||
721 | brelse(bh); | 717 | brelse(bh); |
722 | } | 718 | } |
723 | 719 | ||
@@ -752,8 +748,8 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, | |||
752 | 748 | ||
753 | err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root); | 749 | err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root); |
754 | if (unlikely(err)) { | 750 | if (unlikely(err)) { |
755 | printk(KERN_ERR | 751 | nilfs_msg(sb, KERN_ERR, |
756 | "NILFS: error loading the latest checkpoint.\n"); | 752 | "error %d loading the latest checkpoint", err); |
757 | return err; | 753 | return err; |
758 | } | 754 | } |
759 | 755 | ||
@@ -764,8 +760,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, | |||
764 | if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) { | 760 | if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) { |
765 | err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri); | 761 | err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri); |
766 | if (unlikely(err)) { | 762 | if (unlikely(err)) { |
767 | printk(KERN_ERR "NILFS: Error preparing segments for " | 763 | nilfs_msg(sb, KERN_ERR, |
768 | "recovery.\n"); | 764 | "error %d preparing segment for recovery", |
765 | err); | ||
769 | goto failed; | 766 | goto failed; |
770 | } | 767 | } |
771 | 768 | ||
@@ -778,8 +775,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, | |||
778 | nilfs_detach_log_writer(sb); | 775 | nilfs_detach_log_writer(sb); |
779 | 776 | ||
780 | if (unlikely(err)) { | 777 | if (unlikely(err)) { |
781 | printk(KERN_ERR "NILFS: Oops! recovery failed. " | 778 | nilfs_msg(sb, KERN_ERR, |
782 | "(err=%d)\n", err); | 779 | "error %d writing segment for recovery", |
780 | err); | ||
783 | goto failed; | 781 | goto failed; |
784 | } | 782 | } |
785 | 783 | ||
@@ -961,5 +959,5 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, | |||
961 | failed: | 959 | failed: |
962 | brelse(bh_sum); | 960 | brelse(bh_sum); |
963 | nilfs_dispose_segment_list(&segments); | 961 | nilfs_dispose_segment_list(&segments); |
964 | return (ret < 0) ? ret : nilfs_warn_segment_error(ret); | 962 | return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret); |
965 | } | 963 | } |
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index a962d7d83447..6f87b2ac1aeb 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -514,7 +514,11 @@ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) | |||
514 | } while (--segbuf->sb_nbio > 0); | 514 | } while (--segbuf->sb_nbio > 0); |
515 | 515 | ||
516 | if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { | 516 | if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { |
517 | printk(KERN_ERR "NILFS: IO error writing segment\n"); | 517 | nilfs_msg(segbuf->sb_super, KERN_ERR, |
518 | "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu", | ||
519 | (unsigned long long)segbuf->sb_pseg_start, | ||
520 | segbuf->sb_sum.nblocks, | ||
521 | (unsigned long long)segbuf->sb_segnum); | ||
518 | err = -EIO; | 522 | err = -EIO; |
519 | } | 523 | } |
520 | return err; | 524 | return err; |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index e78b68a81aec..bedcae2c28e6 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -150,7 +150,8 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int); | |||
150 | #define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a) | 150 | #define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a) |
151 | #define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a) | 151 | #define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a) |
152 | 152 | ||
153 | static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti) | 153 | static int nilfs_prepare_segment_lock(struct super_block *sb, |
154 | struct nilfs_transaction_info *ti) | ||
154 | { | 155 | { |
155 | struct nilfs_transaction_info *cur_ti = current->journal_info; | 156 | struct nilfs_transaction_info *cur_ti = current->journal_info; |
156 | void *save = NULL; | 157 | void *save = NULL; |
@@ -164,8 +165,7 @@ static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti) | |||
164 | * it is saved and will be restored on | 165 | * it is saved and will be restored on |
165 | * nilfs_transaction_commit(). | 166 | * nilfs_transaction_commit(). |
166 | */ | 167 | */ |
167 | printk(KERN_WARNING | 168 | nilfs_msg(sb, KERN_WARNING, "journal info from a different FS"); |
168 | "NILFS warning: journal info from a different FS\n"); | ||
169 | save = current->journal_info; | 169 | save = current->journal_info; |
170 | } | 170 | } |
171 | if (!ti) { | 171 | if (!ti) { |
@@ -215,7 +215,7 @@ int nilfs_transaction_begin(struct super_block *sb, | |||
215 | int vacancy_check) | 215 | int vacancy_check) |
216 | { | 216 | { |
217 | struct the_nilfs *nilfs; | 217 | struct the_nilfs *nilfs; |
218 | int ret = nilfs_prepare_segment_lock(ti); | 218 | int ret = nilfs_prepare_segment_lock(sb, ti); |
219 | struct nilfs_transaction_info *trace_ti; | 219 | struct nilfs_transaction_info *trace_ti; |
220 | 220 | ||
221 | if (unlikely(ret < 0)) | 221 | if (unlikely(ret < 0)) |
@@ -373,7 +373,7 @@ static void nilfs_transaction_lock(struct super_block *sb, | |||
373 | nilfs_segctor_do_immediate_flush(sci); | 373 | nilfs_segctor_do_immediate_flush(sci); |
374 | 374 | ||
375 | up_write(&nilfs->ns_segctor_sem); | 375 | up_write(&nilfs->ns_segctor_sem); |
376 | yield(); | 376 | cond_resched(); |
377 | } | 377 | } |
378 | if (gcflag) | 378 | if (gcflag) |
379 | ti->ti_flags |= NILFS_TI_GC; | 379 | ti->ti_flags |= NILFS_TI_GC; |
@@ -1858,11 +1858,11 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) | |||
1858 | */ | 1858 | */ |
1859 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, | 1859 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
1860 | b_assoc_buffers) { | 1860 | b_assoc_buffers) { |
1861 | const unsigned long set_bits = (1 << BH_Uptodate); | 1861 | const unsigned long set_bits = BIT(BH_Uptodate); |
1862 | const unsigned long clear_bits = | 1862 | const unsigned long clear_bits = |
1863 | (1 << BH_Dirty | 1 << BH_Async_Write | | 1863 | (BIT(BH_Dirty) | BIT(BH_Async_Write) | |
1864 | 1 << BH_Delay | 1 << BH_NILFS_Volatile | | 1864 | BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | |
1865 | 1 << BH_NILFS_Redirected); | 1865 | BIT(BH_NILFS_Redirected)); |
1866 | 1866 | ||
1867 | set_mask_bits(&bh->b_state, clear_bits, set_bits); | 1867 | set_mask_bits(&bh->b_state, clear_bits, set_bits); |
1868 | if (bh == segbuf->sb_super_root) { | 1868 | if (bh == segbuf->sb_super_root) { |
@@ -1951,8 +1951,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, | |||
1951 | err = nilfs_ifile_get_inode_block( | 1951 | err = nilfs_ifile_get_inode_block( |
1952 | ifile, ii->vfs_inode.i_ino, &ibh); | 1952 | ifile, ii->vfs_inode.i_ino, &ibh); |
1953 | if (unlikely(err)) { | 1953 | if (unlikely(err)) { |
1954 | nilfs_warning(sci->sc_super, __func__, | 1954 | nilfs_msg(sci->sc_super, KERN_WARNING, |
1955 | "failed to get inode block."); | 1955 | "log writer: error %d getting inode block (ino=%lu)", |
1956 | err, ii->vfs_inode.i_ino); | ||
1956 | return err; | 1957 | return err; |
1957 | } | 1958 | } |
1958 | mark_buffer_dirty(ibh); | 1959 | mark_buffer_dirty(ibh); |
@@ -2131,10 +2132,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci) | |||
2131 | static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) | 2132 | static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) |
2132 | { | 2133 | { |
2133 | spin_lock(&sci->sc_state_lock); | 2134 | spin_lock(&sci->sc_state_lock); |
2134 | if (!(sci->sc_flush_request & (1 << bn))) { | 2135 | if (!(sci->sc_flush_request & BIT(bn))) { |
2135 | unsigned long prev_req = sci->sc_flush_request; | 2136 | unsigned long prev_req = sci->sc_flush_request; |
2136 | 2137 | ||
2137 | sci->sc_flush_request |= (1 << bn); | 2138 | sci->sc_flush_request |= BIT(bn); |
2138 | if (!prev_req) | 2139 | if (!prev_req) |
2139 | wake_up(&sci->sc_wait_daemon); | 2140 | wake_up(&sci->sc_wait_daemon); |
2140 | } | 2141 | } |
@@ -2318,7 +2319,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, | |||
2318 | } | 2319 | } |
2319 | 2320 | ||
2320 | #define FLUSH_FILE_BIT (0x1) /* data file only */ | 2321 | #define FLUSH_FILE_BIT (0x1) /* data file only */ |
2321 | #define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */ | 2322 | #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */ |
2322 | 2323 | ||
2323 | /** | 2324 | /** |
2324 | * nilfs_segctor_accept - record accepted sequence count of log-write requests | 2325 | * nilfs_segctor_accept - record accepted sequence count of log-write requests |
@@ -2458,8 +2459,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, | |||
2458 | if (likely(!err)) | 2459 | if (likely(!err)) |
2459 | break; | 2460 | break; |
2460 | 2461 | ||
2461 | nilfs_warning(sb, __func__, | 2462 | nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err); |
2462 | "segment construction failed. (err=%d)", err); | ||
2463 | set_current_state(TASK_INTERRUPTIBLE); | 2463 | set_current_state(TASK_INTERRUPTIBLE); |
2464 | schedule_timeout(sci->sc_interval); | 2464 | schedule_timeout(sci->sc_interval); |
2465 | } | 2465 | } |
@@ -2467,9 +2467,9 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, | |||
2467 | int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, | 2467 | int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, |
2468 | sci->sc_nfreesegs); | 2468 | sci->sc_nfreesegs); |
2469 | if (ret) { | 2469 | if (ret) { |
2470 | printk(KERN_WARNING | 2470 | nilfs_msg(sb, KERN_WARNING, |
2471 | "NILFS warning: error %d on discard request, " | 2471 | "error %d on discard request, turning discards off for the device", |
2472 | "turning discards off for the device\n", ret); | 2472 | ret); |
2473 | nilfs_clear_opt(nilfs, DISCARD); | 2473 | nilfs_clear_opt(nilfs, DISCARD); |
2474 | } | 2474 | } |
2475 | } | 2475 | } |
@@ -2551,10 +2551,9 @@ static int nilfs_segctor_thread(void *arg) | |||
2551 | /* start sync. */ | 2551 | /* start sync. */ |
2552 | sci->sc_task = current; | 2552 | sci->sc_task = current; |
2553 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ | 2553 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ |
2554 | printk(KERN_INFO | 2554 | nilfs_msg(sci->sc_super, KERN_INFO, |
2555 | "segctord starting. Construction interval = %lu seconds, " | 2555 | "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", |
2556 | "CP frequency < %lu seconds\n", | 2556 | sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); |
2557 | sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); | ||
2558 | 2557 | ||
2559 | spin_lock(&sci->sc_state_lock); | 2558 | spin_lock(&sci->sc_state_lock); |
2560 | loop: | 2559 | loop: |
@@ -2628,8 +2627,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) | |||
2628 | if (IS_ERR(t)) { | 2627 | if (IS_ERR(t)) { |
2629 | int err = PTR_ERR(t); | 2628 | int err = PTR_ERR(t); |
2630 | 2629 | ||
2631 | printk(KERN_ERR "NILFS: error %d creating segctord thread\n", | 2630 | nilfs_msg(sci->sc_super, KERN_ERR, |
2632 | err); | 2631 | "error %d creating segctord thread", err); |
2633 | return err; | 2632 | return err; |
2634 | } | 2633 | } |
2635 | wait_event(sci->sc_wait_task, sci->sc_task != NULL); | 2634 | wait_event(sci->sc_wait_task, sci->sc_task != NULL); |
@@ -2739,14 +2738,14 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) | |||
2739 | nilfs_segctor_write_out(sci); | 2738 | nilfs_segctor_write_out(sci); |
2740 | 2739 | ||
2741 | if (!list_empty(&sci->sc_dirty_files)) { | 2740 | if (!list_empty(&sci->sc_dirty_files)) { |
2742 | nilfs_warning(sci->sc_super, __func__, | 2741 | nilfs_msg(sci->sc_super, KERN_WARNING, |
2743 | "dirty file(s) after the final construction"); | 2742 | "disposed unprocessed dirty file(s) when stopping log writer"); |
2744 | nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); | 2743 | nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); |
2745 | } | 2744 | } |
2746 | 2745 | ||
2747 | if (!list_empty(&sci->sc_iput_queue)) { | 2746 | if (!list_empty(&sci->sc_iput_queue)) { |
2748 | nilfs_warning(sci->sc_super, __func__, | 2747 | nilfs_msg(sci->sc_super, KERN_WARNING, |
2749 | "iput queue is not empty"); | 2748 | "disposed unprocessed inode(s) in iput queue when stopping log writer"); |
2750 | nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); | 2749 | nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); |
2751 | } | 2750 | } |
2752 | 2751 | ||
@@ -2822,8 +2821,8 @@ void nilfs_detach_log_writer(struct super_block *sb) | |||
2822 | spin_lock(&nilfs->ns_inode_lock); | 2821 | spin_lock(&nilfs->ns_inode_lock); |
2823 | if (!list_empty(&nilfs->ns_dirty_files)) { | 2822 | if (!list_empty(&nilfs->ns_dirty_files)) { |
2824 | list_splice_init(&nilfs->ns_dirty_files, &garbage_list); | 2823 | list_splice_init(&nilfs->ns_dirty_files, &garbage_list); |
2825 | nilfs_warning(sb, __func__, | 2824 | nilfs_msg(sb, KERN_WARNING, |
2826 | "Hit dirty file after stopped log writer"); | 2825 | "disposed unprocessed dirty file(s) when detaching log writer"); |
2827 | } | 2826 | } |
2828 | spin_unlock(&nilfs->ns_inode_lock); | 2827 | spin_unlock(&nilfs->ns_inode_lock); |
2829 | up_write(&nilfs->ns_segctor_sem); | 2828 | up_write(&nilfs->ns_segctor_sem); |
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 6565c10b7b76..1060949d7dd2 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
24 | #include <linux/buffer_head.h> | 24 | #include <linux/buffer_head.h> |
25 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
26 | #include <linux/nilfs2_fs.h> | ||
27 | #include "nilfs.h" | 26 | #include "nilfs.h" |
28 | 27 | ||
29 | struct nilfs_root; | 28 | struct nilfs_root; |
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 1963595a1580..1541a1e9221a 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/nilfs2_fs.h> | ||
26 | #include "mdt.h" | 25 | #include "mdt.h" |
27 | #include "sufile.h" | 26 | #include "sufile.h" |
28 | 27 | ||
@@ -181,9 +180,9 @@ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, | |||
181 | down_write(&NILFS_MDT(sufile)->mi_sem); | 180 | down_write(&NILFS_MDT(sufile)->mi_sem); |
182 | for (seg = segnumv; seg < segnumv + nsegs; seg++) { | 181 | for (seg = segnumv; seg < segnumv + nsegs; seg++) { |
183 | if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { | 182 | if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { |
184 | printk(KERN_WARNING | 183 | nilfs_msg(sufile->i_sb, KERN_WARNING, |
185 | "%s: invalid segment number: %llu\n", __func__, | 184 | "%s: invalid segment number: %llu", |
186 | (unsigned long long)*seg); | 185 | __func__, (unsigned long long)*seg); |
187 | nerr++; | 186 | nerr++; |
188 | } | 187 | } |
189 | } | 188 | } |
@@ -240,8 +239,9 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, | |||
240 | int ret; | 239 | int ret; |
241 | 240 | ||
242 | if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { | 241 | if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { |
243 | printk(KERN_WARNING "%s: invalid segment number: %llu\n", | 242 | nilfs_msg(sufile->i_sb, KERN_WARNING, |
244 | __func__, (unsigned long long)segnum); | 243 | "%s: invalid segment number: %llu", |
244 | __func__, (unsigned long long)segnum); | ||
245 | return -EINVAL; | 245 | return -EINVAL; |
246 | } | 246 | } |
247 | down_write(&NILFS_MDT(sufile)->mi_sem); | 247 | down_write(&NILFS_MDT(sufile)->mi_sem); |
@@ -419,8 +419,9 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, | |||
419 | kaddr = kmap_atomic(su_bh->b_page); | 419 | kaddr = kmap_atomic(su_bh->b_page); |
420 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); | 420 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); |
421 | if (unlikely(!nilfs_segment_usage_clean(su))) { | 421 | if (unlikely(!nilfs_segment_usage_clean(su))) { |
422 | printk(KERN_WARNING "%s: segment %llu must be clean\n", | 422 | nilfs_msg(sufile->i_sb, KERN_WARNING, |
423 | __func__, (unsigned long long)segnum); | 423 | "%s: segment %llu must be clean", __func__, |
424 | (unsigned long long)segnum); | ||
424 | kunmap_atomic(kaddr); | 425 | kunmap_atomic(kaddr); |
425 | return; | 426 | return; |
426 | } | 427 | } |
@@ -444,7 +445,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, | |||
444 | 445 | ||
445 | kaddr = kmap_atomic(su_bh->b_page); | 446 | kaddr = kmap_atomic(su_bh->b_page); |
446 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); | 447 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); |
447 | if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) && | 448 | if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) && |
448 | su->su_nblocks == cpu_to_le32(0)) { | 449 | su->su_nblocks == cpu_to_le32(0)) { |
449 | kunmap_atomic(kaddr); | 450 | kunmap_atomic(kaddr); |
450 | return; | 451 | return; |
@@ -455,7 +456,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, | |||
455 | /* make the segment garbage */ | 456 | /* make the segment garbage */ |
456 | su->su_lastmod = cpu_to_le64(0); | 457 | su->su_lastmod = cpu_to_le64(0); |
457 | su->su_nblocks = cpu_to_le32(0); | 458 | su->su_nblocks = cpu_to_le32(0); |
458 | su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY); | 459 | su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)); |
459 | kunmap_atomic(kaddr); | 460 | kunmap_atomic(kaddr); |
460 | 461 | ||
461 | nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); | 462 | nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); |
@@ -476,8 +477,9 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, | |||
476 | kaddr = kmap_atomic(su_bh->b_page); | 477 | kaddr = kmap_atomic(su_bh->b_page); |
477 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); | 478 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); |
478 | if (nilfs_segment_usage_clean(su)) { | 479 | if (nilfs_segment_usage_clean(su)) { |
479 | printk(KERN_WARNING "%s: segment %llu is already clean\n", | 480 | nilfs_msg(sufile->i_sb, KERN_WARNING, |
480 | __func__, (unsigned long long)segnum); | 481 | "%s: segment %llu is already clean", |
482 | __func__, (unsigned long long)segnum); | ||
481 | kunmap_atomic(kaddr); | 483 | kunmap_atomic(kaddr); |
482 | return; | 484 | return; |
483 | } | 485 | } |
@@ -692,7 +694,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, | |||
692 | su2 = su; | 694 | su2 = su; |
693 | for (j = 0; j < n; j++, su = (void *)su + susz) { | 695 | for (j = 0; j < n; j++, su = (void *)su + susz) { |
694 | if ((le32_to_cpu(su->su_flags) & | 696 | if ((le32_to_cpu(su->su_flags) & |
695 | ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) || | 697 | ~BIT(NILFS_SEGMENT_USAGE_ERROR)) || |
696 | nilfs_segment_is_active(nilfs, segnum + j)) { | 698 | nilfs_segment_is_active(nilfs, segnum + j)) { |
697 | ret = -EBUSY; | 699 | ret = -EBUSY; |
698 | kunmap_atomic(kaddr); | 700 | kunmap_atomic(kaddr); |
@@ -859,10 +861,10 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, | |||
859 | si->sui_lastmod = le64_to_cpu(su->su_lastmod); | 861 | si->sui_lastmod = le64_to_cpu(su->su_lastmod); |
860 | si->sui_nblocks = le32_to_cpu(su->su_nblocks); | 862 | si->sui_nblocks = le32_to_cpu(su->su_nblocks); |
861 | si->sui_flags = le32_to_cpu(su->su_flags) & | 863 | si->sui_flags = le32_to_cpu(su->su_flags) & |
862 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); | 864 | ~BIT(NILFS_SEGMENT_USAGE_ACTIVE); |
863 | if (nilfs_segment_is_active(nilfs, segnum + j)) | 865 | if (nilfs_segment_is_active(nilfs, segnum + j)) |
864 | si->sui_flags |= | 866 | si->sui_flags |= |
865 | (1UL << NILFS_SEGMENT_USAGE_ACTIVE); | 867 | BIT(NILFS_SEGMENT_USAGE_ACTIVE); |
866 | } | 868 | } |
867 | kunmap_atomic(kaddr); | 869 | kunmap_atomic(kaddr); |
868 | brelse(su_bh); | 870 | brelse(su_bh); |
@@ -950,7 +952,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, | |||
950 | * disk. | 952 | * disk. |
951 | */ | 953 | */ |
952 | sup->sup_sui.sui_flags &= | 954 | sup->sup_sui.sui_flags &= |
953 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); | 955 | ~BIT(NILFS_SEGMENT_USAGE_ACTIVE); |
954 | 956 | ||
955 | cleansi = nilfs_suinfo_clean(&sup->sup_sui); | 957 | cleansi = nilfs_suinfo_clean(&sup->sup_sui); |
956 | cleansu = nilfs_segment_usage_clean(su); | 958 | cleansu = nilfs_segment_usage_clean(su); |
@@ -1175,14 +1177,12 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize, | |||
1175 | int err; | 1177 | int err; |
1176 | 1178 | ||
1177 | if (susize > sb->s_blocksize) { | 1179 | if (susize > sb->s_blocksize) { |
1178 | printk(KERN_ERR | 1180 | nilfs_msg(sb, KERN_ERR, |
1179 | "NILFS: too large segment usage size: %zu bytes.\n", | 1181 | "too large segment usage size: %zu bytes", susize); |
1180 | susize); | ||
1181 | return -EINVAL; | 1182 | return -EINVAL; |
1182 | } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) { | 1183 | } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) { |
1183 | printk(KERN_ERR | 1184 | nilfs_msg(sb, KERN_ERR, |
1184 | "NILFS: too small segment usage size: %zu bytes.\n", | 1185 | "too small segment usage size: %zu bytes", susize); |
1185 | susize); | ||
1186 | return -EINVAL; | 1186 | return -EINVAL; |
1187 | } | 1187 | } |
1188 | 1188 | ||
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 46e89872294c..158a9190c8ec 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/nilfs2_fs.h> | ||
25 | #include "mdt.h" | 24 | #include "mdt.h" |
26 | 25 | ||
27 | 26 | ||
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 666107a18a22..c95d369e90aa 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -71,6 +71,22 @@ struct kmem_cache *nilfs_btree_path_cache; | |||
71 | static int nilfs_setup_super(struct super_block *sb, int is_mount); | 71 | static int nilfs_setup_super(struct super_block *sb, int is_mount); |
72 | static int nilfs_remount(struct super_block *sb, int *flags, char *data); | 72 | static int nilfs_remount(struct super_block *sb, int *flags, char *data); |
73 | 73 | ||
74 | void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt, | ||
75 | ...) | ||
76 | { | ||
77 | struct va_format vaf; | ||
78 | va_list args; | ||
79 | |||
80 | va_start(args, fmt); | ||
81 | vaf.fmt = fmt; | ||
82 | vaf.va = &args; | ||
83 | if (sb) | ||
84 | printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf); | ||
85 | else | ||
86 | printk("%sNILFS: %pV\n", level, &vaf); | ||
87 | va_end(args); | ||
88 | } | ||
89 | |||
74 | static void nilfs_set_error(struct super_block *sb) | 90 | static void nilfs_set_error(struct super_block *sb) |
75 | { | 91 | { |
76 | struct the_nilfs *nilfs = sb->s_fs_info; | 92 | struct the_nilfs *nilfs = sb->s_fs_info; |
@@ -91,19 +107,20 @@ static void nilfs_set_error(struct super_block *sb) | |||
91 | } | 107 | } |
92 | 108 | ||
93 | /** | 109 | /** |
94 | * nilfs_error() - report failure condition on a filesystem | 110 | * __nilfs_error() - report failure condition on a filesystem |
111 | * | ||
112 | * __nilfs_error() sets an ERROR_FS flag on the superblock as well as | ||
113 | * reporting an error message. This function should be called when | ||
114 | * NILFS detects incoherences or defects of meta data on disk. | ||
95 | * | 115 | * |
96 | * nilfs_error() sets an ERROR_FS flag on the superblock as well as | 116 | * This implements the body of nilfs_error() macro. Normally, |
97 | * reporting an error message. It should be called when NILFS detects | 117 | * nilfs_error() should be used. As for sustainable errors such as a |
98 | * incoherences or defects of meta data on disk. As for sustainable | 118 | * single-shot I/O error, nilfs_msg() should be used instead. |
99 | * errors such as a single-shot I/O error, nilfs_warning() or the printk() | ||
100 | * function should be used instead. | ||
101 | * | 119 | * |
102 | * The segment constructor must not call this function because it can | 120 | * Callers should not add a trailing newline since this will do it. |
103 | * kill itself. | ||
104 | */ | 121 | */ |
105 | void nilfs_error(struct super_block *sb, const char *function, | 122 | void __nilfs_error(struct super_block *sb, const char *function, |
106 | const char *fmt, ...) | 123 | const char *fmt, ...) |
107 | { | 124 | { |
108 | struct the_nilfs *nilfs = sb->s_fs_info; | 125 | struct the_nilfs *nilfs = sb->s_fs_info; |
109 | struct va_format vaf; | 126 | struct va_format vaf; |
@@ -133,24 +150,6 @@ void nilfs_error(struct super_block *sb, const char *function, | |||
133 | sb->s_id); | 150 | sb->s_id); |
134 | } | 151 | } |
135 | 152 | ||
136 | void nilfs_warning(struct super_block *sb, const char *function, | ||
137 | const char *fmt, ...) | ||
138 | { | ||
139 | struct va_format vaf; | ||
140 | va_list args; | ||
141 | |||
142 | va_start(args, fmt); | ||
143 | |||
144 | vaf.fmt = fmt; | ||
145 | vaf.va = &args; | ||
146 | |||
147 | printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n", | ||
148 | sb->s_id, function, &vaf); | ||
149 | |||
150 | va_end(args); | ||
151 | } | ||
152 | |||
153 | |||
154 | struct inode *nilfs_alloc_inode(struct super_block *sb) | 153 | struct inode *nilfs_alloc_inode(struct super_block *sb) |
155 | { | 154 | { |
156 | struct nilfs_inode_info *ii; | 155 | struct nilfs_inode_info *ii; |
@@ -196,8 +195,8 @@ static int nilfs_sync_super(struct super_block *sb, int flag) | |||
196 | } | 195 | } |
197 | 196 | ||
198 | if (unlikely(err)) { | 197 | if (unlikely(err)) { |
199 | printk(KERN_ERR | 198 | nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d", |
200 | "NILFS: unable to write superblock (err=%d)\n", err); | 199 | err); |
201 | if (err == -EIO && nilfs->ns_sbh[1]) { | 200 | if (err == -EIO && nilfs->ns_sbh[1]) { |
202 | /* | 201 | /* |
203 | * sbp[0] points to newer log than sbp[1], | 202 | * sbp[0] points to newer log than sbp[1], |
@@ -267,8 +266,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, | |||
267 | sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { | 266 | sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { |
268 | memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); | 267 | memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); |
269 | } else { | 268 | } else { |
270 | printk(KERN_CRIT "NILFS: superblock broke on dev %s\n", | 269 | nilfs_msg(sb, KERN_CRIT, "superblock broke"); |
271 | sb->s_id); | ||
272 | return NULL; | 270 | return NULL; |
273 | } | 271 | } |
274 | } else if (sbp[1] && | 272 | } else if (sbp[1] && |
@@ -378,9 +376,9 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) | |||
378 | offset = sb2off & (nilfs->ns_blocksize - 1); | 376 | offset = sb2off & (nilfs->ns_blocksize - 1); |
379 | nsbh = sb_getblk(sb, newblocknr); | 377 | nsbh = sb_getblk(sb, newblocknr); |
380 | if (!nsbh) { | 378 | if (!nsbh) { |
381 | printk(KERN_WARNING | 379 | nilfs_msg(sb, KERN_WARNING, |
382 | "NILFS warning: unable to move secondary superblock " | 380 | "unable to move secondary superblock to block %llu", |
383 | "to block %llu\n", (unsigned long long)newblocknr); | 381 | (unsigned long long)newblocknr); |
384 | ret = -EIO; | 382 | ret = -EIO; |
385 | goto out; | 383 | goto out; |
386 | } | 384 | } |
@@ -543,10 +541,9 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, | |||
543 | up_read(&nilfs->ns_segctor_sem); | 541 | up_read(&nilfs->ns_segctor_sem); |
544 | if (unlikely(err)) { | 542 | if (unlikely(err)) { |
545 | if (err == -ENOENT || err == -EINVAL) { | 543 | if (err == -ENOENT || err == -EINVAL) { |
546 | printk(KERN_ERR | 544 | nilfs_msg(sb, KERN_ERR, |
547 | "NILFS: Invalid checkpoint " | 545 | "Invalid checkpoint (checkpoint number=%llu)", |
548 | "(checkpoint number=%llu)\n", | 546 | (unsigned long long)cno); |
549 | (unsigned long long)cno); | ||
550 | err = -EINVAL; | 547 | err = -EINVAL; |
551 | } | 548 | } |
552 | goto failed; | 549 | goto failed; |
@@ -642,9 +639,8 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
642 | err = nilfs_ifile_count_free_inodes(root->ifile, | 639 | err = nilfs_ifile_count_free_inodes(root->ifile, |
643 | &nmaxinodes, &nfreeinodes); | 640 | &nmaxinodes, &nfreeinodes); |
644 | if (unlikely(err)) { | 641 | if (unlikely(err)) { |
645 | printk(KERN_WARNING | 642 | nilfs_msg(sb, KERN_WARNING, |
646 | "NILFS warning: fail to count free inodes: err %d.\n", | 643 | "failed to count free inodes: err=%d", err); |
647 | err); | ||
648 | if (err == -ERANGE) { | 644 | if (err == -ERANGE) { |
649 | /* | 645 | /* |
650 | * If nilfs_palloc_count_max_entries() returns | 646 | * If nilfs_palloc_count_max_entries() returns |
@@ -776,9 +772,9 @@ static int parse_options(char *options, struct super_block *sb, int is_remount) | |||
776 | break; | 772 | break; |
777 | case Opt_snapshot: | 773 | case Opt_snapshot: |
778 | if (is_remount) { | 774 | if (is_remount) { |
779 | printk(KERN_ERR | 775 | nilfs_msg(sb, KERN_ERR, |
780 | "NILFS: \"%s\" option is invalid " | 776 | "\"%s\" option is invalid for remount", |
781 | "for remount.\n", p); | 777 | p); |
782 | return 0; | 778 | return 0; |
783 | } | 779 | } |
784 | break; | 780 | break; |
@@ -792,8 +788,8 @@ static int parse_options(char *options, struct super_block *sb, int is_remount) | |||
792 | nilfs_clear_opt(nilfs, DISCARD); | 788 | nilfs_clear_opt(nilfs, DISCARD); |
793 | break; | 789 | break; |
794 | default: | 790 | default: |
795 | printk(KERN_ERR | 791 | nilfs_msg(sb, KERN_ERR, |
796 | "NILFS: Unrecognized mount option \"%s\"\n", p); | 792 | "unrecognized mount option \"%s\"", p); |
797 | return 0; | 793 | return 0; |
798 | } | 794 | } |
799 | } | 795 | } |
@@ -829,12 +825,10 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount) | |||
829 | mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); | 825 | mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); |
830 | 826 | ||
831 | if (nilfs->ns_mount_state & NILFS_ERROR_FS) { | 827 | if (nilfs->ns_mount_state & NILFS_ERROR_FS) { |
832 | printk(KERN_WARNING | 828 | nilfs_msg(sb, KERN_WARNING, "mounting fs with errors"); |
833 | "NILFS warning: mounting fs with errors\n"); | ||
834 | #if 0 | 829 | #if 0 |
835 | } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { | 830 | } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { |
836 | printk(KERN_WARNING | 831 | nilfs_msg(sb, KERN_WARNING, "maximal mount count reached"); |
837 | "NILFS warning: maximal mount count reached\n"); | ||
838 | #endif | 832 | #endif |
839 | } | 833 | } |
840 | if (!max_mnt_count) | 834 | if (!max_mnt_count) |
@@ -897,17 +891,17 @@ int nilfs_check_feature_compatibility(struct super_block *sb, | |||
897 | features = le64_to_cpu(sbp->s_feature_incompat) & | 891 | features = le64_to_cpu(sbp->s_feature_incompat) & |
898 | ~NILFS_FEATURE_INCOMPAT_SUPP; | 892 | ~NILFS_FEATURE_INCOMPAT_SUPP; |
899 | if (features) { | 893 | if (features) { |
900 | printk(KERN_ERR "NILFS: couldn't mount because of unsupported " | 894 | nilfs_msg(sb, KERN_ERR, |
901 | "optional features (%llx)\n", | 895 | "couldn't mount because of unsupported optional features (%llx)", |
902 | (unsigned long long)features); | 896 | (unsigned long long)features); |
903 | return -EINVAL; | 897 | return -EINVAL; |
904 | } | 898 | } |
905 | features = le64_to_cpu(sbp->s_feature_compat_ro) & | 899 | features = le64_to_cpu(sbp->s_feature_compat_ro) & |
906 | ~NILFS_FEATURE_COMPAT_RO_SUPP; | 900 | ~NILFS_FEATURE_COMPAT_RO_SUPP; |
907 | if (!(sb->s_flags & MS_RDONLY) && features) { | 901 | if (!(sb->s_flags & MS_RDONLY) && features) { |
908 | printk(KERN_ERR "NILFS: couldn't mount RDWR because of " | 902 | nilfs_msg(sb, KERN_ERR, |
909 | "unsupported optional features (%llx)\n", | 903 | "couldn't mount RDWR because of unsupported optional features (%llx)", |
910 | (unsigned long long)features); | 904 | (unsigned long long)features); |
911 | return -EINVAL; | 905 | return -EINVAL; |
912 | } | 906 | } |
913 | return 0; | 907 | return 0; |
@@ -923,13 +917,13 @@ static int nilfs_get_root_dentry(struct super_block *sb, | |||
923 | 917 | ||
924 | inode = nilfs_iget(sb, root, NILFS_ROOT_INO); | 918 | inode = nilfs_iget(sb, root, NILFS_ROOT_INO); |
925 | if (IS_ERR(inode)) { | 919 | if (IS_ERR(inode)) { |
926 | printk(KERN_ERR "NILFS: get root inode failed\n"); | ||
927 | ret = PTR_ERR(inode); | 920 | ret = PTR_ERR(inode); |
921 | nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret); | ||
928 | goto out; | 922 | goto out; |
929 | } | 923 | } |
930 | if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { | 924 | if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { |
931 | iput(inode); | 925 | iput(inode); |
932 | printk(KERN_ERR "NILFS: corrupt root inode.\n"); | 926 | nilfs_msg(sb, KERN_ERR, "corrupt root inode"); |
933 | ret = -EINVAL; | 927 | ret = -EINVAL; |
934 | goto out; | 928 | goto out; |
935 | } | 929 | } |
@@ -957,7 +951,7 @@ static int nilfs_get_root_dentry(struct super_block *sb, | |||
957 | return ret; | 951 | return ret; |
958 | 952 | ||
959 | failed_dentry: | 953 | failed_dentry: |
960 | printk(KERN_ERR "NILFS: get root dentry failed\n"); | 954 | nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret); |
961 | goto out; | 955 | goto out; |
962 | } | 956 | } |
963 | 957 | ||
@@ -977,18 +971,18 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, | |||
977 | ret = (ret == -ENOENT) ? -EINVAL : ret; | 971 | ret = (ret == -ENOENT) ? -EINVAL : ret; |
978 | goto out; | 972 | goto out; |
979 | } else if (!ret) { | 973 | } else if (!ret) { |
980 | printk(KERN_ERR "NILFS: The specified checkpoint is " | 974 | nilfs_msg(s, KERN_ERR, |
981 | "not a snapshot (checkpoint number=%llu).\n", | 975 | "The specified checkpoint is not a snapshot (checkpoint number=%llu)", |
982 | (unsigned long long)cno); | 976 | (unsigned long long)cno); |
983 | ret = -EINVAL; | 977 | ret = -EINVAL; |
984 | goto out; | 978 | goto out; |
985 | } | 979 | } |
986 | 980 | ||
987 | ret = nilfs_attach_checkpoint(s, cno, false, &root); | 981 | ret = nilfs_attach_checkpoint(s, cno, false, &root); |
988 | if (ret) { | 982 | if (ret) { |
989 | printk(KERN_ERR "NILFS: error loading snapshot " | 983 | nilfs_msg(s, KERN_ERR, |
990 | "(checkpoint number=%llu).\n", | 984 | "error %d while loading snapshot (checkpoint number=%llu)", |
991 | (unsigned long long)cno); | 985 | ret, (unsigned long long)cno); |
992 | goto out; | 986 | goto out; |
993 | } | 987 | } |
994 | ret = nilfs_get_root_dentry(s, root, root_dentry); | 988 | ret = nilfs_get_root_dentry(s, root, root_dentry); |
@@ -1058,7 +1052,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) | |||
1058 | __u64 cno; | 1052 | __u64 cno; |
1059 | int err; | 1053 | int err; |
1060 | 1054 | ||
1061 | nilfs = alloc_nilfs(sb->s_bdev); | 1055 | nilfs = alloc_nilfs(sb); |
1062 | if (!nilfs) | 1056 | if (!nilfs) |
1063 | return -ENOMEM; | 1057 | return -ENOMEM; |
1064 | 1058 | ||
@@ -1083,8 +1077,9 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) | |||
1083 | cno = nilfs_last_cno(nilfs); | 1077 | cno = nilfs_last_cno(nilfs); |
1084 | err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); | 1078 | err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); |
1085 | if (err) { | 1079 | if (err) { |
1086 | printk(KERN_ERR "NILFS: error loading last checkpoint " | 1080 | nilfs_msg(sb, KERN_ERR, |
1087 | "(checkpoint number=%llu).\n", (unsigned long long)cno); | 1081 | "error %d while loading last checkpoint (checkpoint number=%llu)", |
1082 | err, (unsigned long long)cno); | ||
1088 | goto failed_unload; | 1083 | goto failed_unload; |
1089 | } | 1084 | } |
1090 | 1085 | ||
@@ -1144,9 +1139,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) | |||
1144 | err = -EINVAL; | 1139 | err = -EINVAL; |
1145 | 1140 | ||
1146 | if (!nilfs_valid_fs(nilfs)) { | 1141 | if (!nilfs_valid_fs(nilfs)) { |
1147 | printk(KERN_WARNING "NILFS (device %s): couldn't " | 1142 | nilfs_msg(sb, KERN_WARNING, |
1148 | "remount because the filesystem is in an " | 1143 | "couldn't remount because the filesystem is in an incomplete recovery state"); |
1149 | "incomplete recovery state.\n", sb->s_id); | ||
1150 | goto restore_opts; | 1144 | goto restore_opts; |
1151 | } | 1145 | } |
1152 | 1146 | ||
@@ -1178,10 +1172,9 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) | |||
1178 | ~NILFS_FEATURE_COMPAT_RO_SUPP; | 1172 | ~NILFS_FEATURE_COMPAT_RO_SUPP; |
1179 | up_read(&nilfs->ns_sem); | 1173 | up_read(&nilfs->ns_sem); |
1180 | if (features) { | 1174 | if (features) { |
1181 | printk(KERN_WARNING "NILFS (device %s): couldn't " | 1175 | nilfs_msg(sb, KERN_WARNING, |
1182 | "remount RDWR because of unsupported optional " | 1176 | "couldn't remount RDWR because of unsupported optional features (%llx)", |
1183 | "features (%llx)\n", | 1177 | (unsigned long long)features); |
1184 | sb->s_id, (unsigned long long)features); | ||
1185 | err = -EROFS; | 1178 | err = -EROFS; |
1186 | goto restore_opts; | 1179 | goto restore_opts; |
1187 | } | 1180 | } |
@@ -1212,6 +1205,38 @@ struct nilfs_super_data { | |||
1212 | int flags; | 1205 | int flags; |
1213 | }; | 1206 | }; |
1214 | 1207 | ||
1208 | static int nilfs_parse_snapshot_option(const char *option, | ||
1209 | const substring_t *arg, | ||
1210 | struct nilfs_super_data *sd) | ||
1211 | { | ||
1212 | unsigned long long val; | ||
1213 | const char *msg = NULL; | ||
1214 | int err; | ||
1215 | |||
1216 | if (!(sd->flags & MS_RDONLY)) { | ||
1217 | msg = "read-only option is not specified"; | ||
1218 | goto parse_error; | ||
1219 | } | ||
1220 | |||
1221 | err = kstrtoull(arg->from, 0, &val); | ||
1222 | if (err) { | ||
1223 | if (err == -ERANGE) | ||
1224 | msg = "too large checkpoint number"; | ||
1225 | else | ||
1226 | msg = "malformed argument"; | ||
1227 | goto parse_error; | ||
1228 | } else if (val == 0) { | ||
1229 | msg = "invalid checkpoint number 0"; | ||
1230 | goto parse_error; | ||
1231 | } | ||
1232 | sd->cno = val; | ||
1233 | return 0; | ||
1234 | |||
1235 | parse_error: | ||
1236 | nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg); | ||
1237 | return 1; | ||
1238 | } | ||
1239 | |||
1215 | /** | 1240 | /** |
1216 | * nilfs_identify - pre-read mount options needed to identify mount instance | 1241 | * nilfs_identify - pre-read mount options needed to identify mount instance |
1217 | * @data: mount options | 1242 | * @data: mount options |
@@ -1228,24 +1253,9 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd) | |||
1228 | p = strsep(&options, ","); | 1253 | p = strsep(&options, ","); |
1229 | if (p != NULL && *p) { | 1254 | if (p != NULL && *p) { |
1230 | token = match_token(p, tokens, args); | 1255 | token = match_token(p, tokens, args); |
1231 | if (token == Opt_snapshot) { | 1256 | if (token == Opt_snapshot) |
1232 | if (!(sd->flags & MS_RDONLY)) { | 1257 | ret = nilfs_parse_snapshot_option(p, &args[0], |
1233 | ret++; | 1258 | sd); |
1234 | } else { | ||
1235 | sd->cno = simple_strtoull(args[0].from, | ||
1236 | NULL, 0); | ||
1237 | /* | ||
1238 | * No need to see the end pointer; | ||
1239 | * match_token() has done syntax | ||
1240 | * checking. | ||
1241 | */ | ||
1242 | if (sd->cno == 0) | ||
1243 | ret++; | ||
1244 | } | ||
1245 | } | ||
1246 | if (ret) | ||
1247 | printk(KERN_ERR | ||
1248 | "NILFS: invalid mount option: %s\n", p); | ||
1249 | } | 1259 | } |
1250 | if (!options) | 1260 | if (!options) |
1251 | break; | 1261 | break; |
@@ -1326,10 +1336,10 @@ nilfs_mount(struct file_system_type *fs_type, int flags, | |||
1326 | } else if (!sd.cno) { | 1336 | } else if (!sd.cno) { |
1327 | if (nilfs_tree_is_busy(s->s_root)) { | 1337 | if (nilfs_tree_is_busy(s->s_root)) { |
1328 | if ((flags ^ s->s_flags) & MS_RDONLY) { | 1338 | if ((flags ^ s->s_flags) & MS_RDONLY) { |
1329 | printk(KERN_ERR "NILFS: the device already " | 1339 | nilfs_msg(s, KERN_ERR, |
1330 | "has a %s mount.\n", | 1340 | "the device already has a %s mount.", |
1331 | (s->s_flags & MS_RDONLY) ? | 1341 | (s->s_flags & MS_RDONLY) ? |
1332 | "read-only" : "read/write"); | 1342 | "read-only" : "read/write"); |
1333 | err = -EBUSY; | 1343 | err = -EBUSY; |
1334 | goto failed_super; | 1344 | goto failed_super; |
1335 | } | 1345 | } |
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 8ffa42b704d8..490303e3d517 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c | |||
@@ -272,8 +272,8 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr, | |||
272 | err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); | 272 | err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); |
273 | up_read(&nilfs->ns_segctor_sem); | 273 | up_read(&nilfs->ns_segctor_sem); |
274 | if (err < 0) { | 274 | if (err < 0) { |
275 | printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n", | 275 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
276 | err); | 276 | "unable to get checkpoint stat: err=%d", err); |
277 | return err; | 277 | return err; |
278 | } | 278 | } |
279 | 279 | ||
@@ -295,8 +295,8 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr, | |||
295 | err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); | 295 | err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); |
296 | up_read(&nilfs->ns_segctor_sem); | 296 | up_read(&nilfs->ns_segctor_sem); |
297 | if (err < 0) { | 297 | if (err < 0) { |
298 | printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n", | 298 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
299 | err); | 299 | "unable to get checkpoint stat: err=%d", err); |
300 | return err; | 300 | return err; |
301 | } | 301 | } |
302 | 302 | ||
@@ -326,9 +326,9 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr, | |||
326 | { | 326 | { |
327 | __u64 cno; | 327 | __u64 cno; |
328 | 328 | ||
329 | down_read(&nilfs->ns_sem); | 329 | down_read(&nilfs->ns_segctor_sem); |
330 | cno = nilfs->ns_cno; | 330 | cno = nilfs->ns_cno; |
331 | up_read(&nilfs->ns_sem); | 331 | up_read(&nilfs->ns_segctor_sem); |
332 | 332 | ||
333 | return snprintf(buf, PAGE_SIZE, "%llu\n", cno); | 333 | return snprintf(buf, PAGE_SIZE, "%llu\n", cno); |
334 | } | 334 | } |
@@ -414,8 +414,8 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr, | |||
414 | err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); | 414 | err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); |
415 | up_read(&nilfs->ns_segctor_sem); | 415 | up_read(&nilfs->ns_segctor_sem); |
416 | if (err < 0) { | 416 | if (err < 0) { |
417 | printk(KERN_ERR "NILFS: unable to get segment stat: err=%d\n", | 417 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
418 | err); | 418 | "unable to get segment stat: err=%d", err); |
419 | return err; | 419 | return err; |
420 | } | 420 | } |
421 | 421 | ||
@@ -511,9 +511,9 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr, | |||
511 | { | 511 | { |
512 | u64 seg_seq; | 512 | u64 seg_seq; |
513 | 513 | ||
514 | down_read(&nilfs->ns_sem); | 514 | down_read(&nilfs->ns_segctor_sem); |
515 | seg_seq = nilfs->ns_seg_seq; | 515 | seg_seq = nilfs->ns_seg_seq; |
516 | up_read(&nilfs->ns_sem); | 516 | up_read(&nilfs->ns_segctor_sem); |
517 | 517 | ||
518 | return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq); | 518 | return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq); |
519 | } | 519 | } |
@@ -525,9 +525,9 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr, | |||
525 | { | 525 | { |
526 | __u64 segnum; | 526 | __u64 segnum; |
527 | 527 | ||
528 | down_read(&nilfs->ns_sem); | 528 | down_read(&nilfs->ns_segctor_sem); |
529 | segnum = nilfs->ns_segnum; | 529 | segnum = nilfs->ns_segnum; |
530 | up_read(&nilfs->ns_sem); | 530 | up_read(&nilfs->ns_segctor_sem); |
531 | 531 | ||
532 | return snprintf(buf, PAGE_SIZE, "%llu\n", segnum); | 532 | return snprintf(buf, PAGE_SIZE, "%llu\n", segnum); |
533 | } | 533 | } |
@@ -539,9 +539,9 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr, | |||
539 | { | 539 | { |
540 | __u64 nextnum; | 540 | __u64 nextnum; |
541 | 541 | ||
542 | down_read(&nilfs->ns_sem); | 542 | down_read(&nilfs->ns_segctor_sem); |
543 | nextnum = nilfs->ns_nextnum; | 543 | nextnum = nilfs->ns_nextnum; |
544 | up_read(&nilfs->ns_sem); | 544 | up_read(&nilfs->ns_segctor_sem); |
545 | 545 | ||
546 | return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum); | 546 | return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum); |
547 | } | 547 | } |
@@ -553,9 +553,9 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr, | |||
553 | { | 553 | { |
554 | unsigned long pseg_offset; | 554 | unsigned long pseg_offset; |
555 | 555 | ||
556 | down_read(&nilfs->ns_sem); | 556 | down_read(&nilfs->ns_segctor_sem); |
557 | pseg_offset = nilfs->ns_pseg_offset; | 557 | pseg_offset = nilfs->ns_pseg_offset; |
558 | up_read(&nilfs->ns_sem); | 558 | up_read(&nilfs->ns_segctor_sem); |
559 | 559 | ||
560 | return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset); | 560 | return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset); |
561 | } | 561 | } |
@@ -567,9 +567,9 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr, | |||
567 | { | 567 | { |
568 | __u64 cno; | 568 | __u64 cno; |
569 | 569 | ||
570 | down_read(&nilfs->ns_sem); | 570 | down_read(&nilfs->ns_segctor_sem); |
571 | cno = nilfs->ns_cno; | 571 | cno = nilfs->ns_cno; |
572 | up_read(&nilfs->ns_sem); | 572 | up_read(&nilfs->ns_segctor_sem); |
573 | 573 | ||
574 | return snprintf(buf, PAGE_SIZE, "%llu\n", cno); | 574 | return snprintf(buf, PAGE_SIZE, "%llu\n", cno); |
575 | } | 575 | } |
@@ -581,9 +581,9 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr, | |||
581 | { | 581 | { |
582 | time_t ctime; | 582 | time_t ctime; |
583 | 583 | ||
584 | down_read(&nilfs->ns_sem); | 584 | down_read(&nilfs->ns_segctor_sem); |
585 | ctime = nilfs->ns_ctime; | 585 | ctime = nilfs->ns_ctime; |
586 | up_read(&nilfs->ns_sem); | 586 | up_read(&nilfs->ns_segctor_sem); |
587 | 587 | ||
588 | return NILFS_SHOW_TIME(ctime, buf); | 588 | return NILFS_SHOW_TIME(ctime, buf); |
589 | } | 589 | } |
@@ -595,9 +595,9 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr, | |||
595 | { | 595 | { |
596 | time_t ctime; | 596 | time_t ctime; |
597 | 597 | ||
598 | down_read(&nilfs->ns_sem); | 598 | down_read(&nilfs->ns_segctor_sem); |
599 | ctime = nilfs->ns_ctime; | 599 | ctime = nilfs->ns_ctime; |
600 | up_read(&nilfs->ns_sem); | 600 | up_read(&nilfs->ns_segctor_sem); |
601 | 601 | ||
602 | return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime); | 602 | return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime); |
603 | } | 603 | } |
@@ -609,9 +609,9 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr, | |||
609 | { | 609 | { |
610 | time_t nongc_ctime; | 610 | time_t nongc_ctime; |
611 | 611 | ||
612 | down_read(&nilfs->ns_sem); | 612 | down_read(&nilfs->ns_segctor_sem); |
613 | nongc_ctime = nilfs->ns_nongc_ctime; | 613 | nongc_ctime = nilfs->ns_nongc_ctime; |
614 | up_read(&nilfs->ns_sem); | 614 | up_read(&nilfs->ns_segctor_sem); |
615 | 615 | ||
616 | return NILFS_SHOW_TIME(nongc_ctime, buf); | 616 | return NILFS_SHOW_TIME(nongc_ctime, buf); |
617 | } | 617 | } |
@@ -623,9 +623,9 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr, | |||
623 | { | 623 | { |
624 | time_t nongc_ctime; | 624 | time_t nongc_ctime; |
625 | 625 | ||
626 | down_read(&nilfs->ns_sem); | 626 | down_read(&nilfs->ns_segctor_sem); |
627 | nongc_ctime = nilfs->ns_nongc_ctime; | 627 | nongc_ctime = nilfs->ns_nongc_ctime; |
628 | up_read(&nilfs->ns_sem); | 628 | up_read(&nilfs->ns_segctor_sem); |
629 | 629 | ||
630 | return snprintf(buf, PAGE_SIZE, "%llu\n", | 630 | return snprintf(buf, PAGE_SIZE, "%llu\n", |
631 | (unsigned long long)nongc_ctime); | 631 | (unsigned long long)nongc_ctime); |
@@ -638,9 +638,9 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr, | |||
638 | { | 638 | { |
639 | u32 ndirtyblks; | 639 | u32 ndirtyblks; |
640 | 640 | ||
641 | down_read(&nilfs->ns_sem); | 641 | down_read(&nilfs->ns_segctor_sem); |
642 | ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks); | 642 | ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks); |
643 | up_read(&nilfs->ns_sem); | 643 | up_read(&nilfs->ns_segctor_sem); |
644 | 644 | ||
645 | return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks); | 645 | return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks); |
646 | } | 646 | } |
@@ -789,14 +789,15 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr, | |||
789 | 789 | ||
790 | err = kstrtouint(skip_spaces(buf), 0, &val); | 790 | err = kstrtouint(skip_spaces(buf), 0, &val); |
791 | if (err) { | 791 | if (err) { |
792 | printk(KERN_ERR "NILFS: unable to convert string: err=%d\n", | 792 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
793 | err); | 793 | "unable to convert string: err=%d", err); |
794 | return err; | 794 | return err; |
795 | } | 795 | } |
796 | 796 | ||
797 | if (val < NILFS_SB_FREQ) { | 797 | if (val < NILFS_SB_FREQ) { |
798 | val = NILFS_SB_FREQ; | 798 | val = NILFS_SB_FREQ; |
799 | printk(KERN_WARNING "NILFS: superblock update frequency cannot be lesser than 10 seconds\n"); | 799 | nilfs_msg(nilfs->ns_sb, KERN_WARNING, |
800 | "superblock update frequency cannot be lesser than 10 seconds"); | ||
800 | } | 801 | } |
801 | 802 | ||
802 | down_write(&nilfs->ns_sem); | 803 | down_write(&nilfs->ns_sem); |
@@ -999,7 +1000,8 @@ int nilfs_sysfs_create_device_group(struct super_block *sb) | |||
999 | nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL); | 1000 | nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL); |
1000 | if (unlikely(!nilfs->ns_dev_subgroups)) { | 1001 | if (unlikely(!nilfs->ns_dev_subgroups)) { |
1001 | err = -ENOMEM; | 1002 | err = -ENOMEM; |
1002 | printk(KERN_ERR "NILFS: unable to allocate memory for device group\n"); | 1003 | nilfs_msg(sb, KERN_ERR, |
1004 | "unable to allocate memory for device group"); | ||
1003 | goto failed_create_device_group; | 1005 | goto failed_create_device_group; |
1004 | } | 1006 | } |
1005 | 1007 | ||
@@ -1109,15 +1111,15 @@ int __init nilfs_sysfs_init(void) | |||
1109 | nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj); | 1111 | nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj); |
1110 | if (!nilfs_kset) { | 1112 | if (!nilfs_kset) { |
1111 | err = -ENOMEM; | 1113 | err = -ENOMEM; |
1112 | printk(KERN_ERR "NILFS: unable to create sysfs entry: err %d\n", | 1114 | nilfs_msg(NULL, KERN_ERR, |
1113 | err); | 1115 | "unable to create sysfs entry: err=%d", err); |
1114 | goto failed_sysfs_init; | 1116 | goto failed_sysfs_init; |
1115 | } | 1117 | } |
1116 | 1118 | ||
1117 | err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group); | 1119 | err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group); |
1118 | if (unlikely(err)) { | 1120 | if (unlikely(err)) { |
1119 | printk(KERN_ERR "NILFS: unable to create feature group: err %d\n", | 1121 | nilfs_msg(NULL, KERN_ERR, |
1120 | err); | 1122 | "unable to create feature group: err=%d", err); |
1121 | goto cleanup_sysfs_init; | 1123 | goto cleanup_sysfs_init; |
1122 | } | 1124 | } |
1123 | 1125 | ||
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index e9fd241b9a0a..2dd75bf619ad 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -56,12 +56,12 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs, | |||
56 | 56 | ||
57 | /** | 57 | /** |
58 | * alloc_nilfs - allocate a nilfs object | 58 | * alloc_nilfs - allocate a nilfs object |
59 | * @bdev: block device to which the_nilfs is related | 59 | * @sb: super block instance |
60 | * | 60 | * |
61 | * Return Value: On success, pointer to the_nilfs is returned. | 61 | * Return Value: On success, pointer to the_nilfs is returned. |
62 | * On error, NULL is returned. | 62 | * On error, NULL is returned. |
63 | */ | 63 | */ |
64 | struct the_nilfs *alloc_nilfs(struct block_device *bdev) | 64 | struct the_nilfs *alloc_nilfs(struct super_block *sb) |
65 | { | 65 | { |
66 | struct the_nilfs *nilfs; | 66 | struct the_nilfs *nilfs; |
67 | 67 | ||
@@ -69,7 +69,8 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev) | |||
69 | if (!nilfs) | 69 | if (!nilfs) |
70 | return NULL; | 70 | return NULL; |
71 | 71 | ||
72 | nilfs->ns_bdev = bdev; | 72 | nilfs->ns_sb = sb; |
73 | nilfs->ns_bdev = sb->s_bdev; | ||
73 | atomic_set(&nilfs->ns_ndirtyblks, 0); | 74 | atomic_set(&nilfs->ns_ndirtyblks, 0); |
74 | init_rwsem(&nilfs->ns_sem); | 75 | init_rwsem(&nilfs->ns_sem); |
75 | mutex_init(&nilfs->ns_snapshot_mount_mutex); | 76 | mutex_init(&nilfs->ns_snapshot_mount_mutex); |
@@ -191,7 +192,10 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, | |||
191 | nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); | 192 | nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); |
192 | nilfs->ns_cno = nilfs->ns_last_cno + 1; | 193 | nilfs->ns_cno = nilfs->ns_last_cno + 1; |
193 | if (nilfs->ns_segnum >= nilfs->ns_nsegments) { | 194 | if (nilfs->ns_segnum >= nilfs->ns_nsegments) { |
194 | printk(KERN_ERR "NILFS invalid last segment number.\n"); | 195 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
196 | "pointed segment number is out of range: segnum=%llu, nsegments=%lu", | ||
197 | (unsigned long long)nilfs->ns_segnum, | ||
198 | nilfs->ns_nsegments); | ||
195 | ret = -EINVAL; | 199 | ret = -EINVAL; |
196 | } | 200 | } |
197 | return ret; | 201 | return ret; |
@@ -215,12 +219,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
215 | int err; | 219 | int err; |
216 | 220 | ||
217 | if (!valid_fs) { | 221 | if (!valid_fs) { |
218 | printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n"); | 222 | nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs"); |
219 | if (s_flags & MS_RDONLY) { | 223 | if (s_flags & MS_RDONLY) { |
220 | printk(KERN_INFO "NILFS: INFO: recovery " | 224 | nilfs_msg(sb, KERN_INFO, |
221 | "required for readonly filesystem.\n"); | 225 | "recovery required for readonly filesystem"); |
222 | printk(KERN_INFO "NILFS: write access will " | 226 | nilfs_msg(sb, KERN_INFO, |
223 | "be enabled during recovery.\n"); | 227 | "write access will be enabled during recovery"); |
224 | } | 228 | } |
225 | } | 229 | } |
226 | 230 | ||
@@ -235,13 +239,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
235 | goto scan_error; | 239 | goto scan_error; |
236 | 240 | ||
237 | if (!nilfs_valid_sb(sbp[1])) { | 241 | if (!nilfs_valid_sb(sbp[1])) { |
238 | printk(KERN_WARNING | 242 | nilfs_msg(sb, KERN_WARNING, |
239 | "NILFS warning: unable to fall back to spare" | 243 | "unable to fall back to spare super block"); |
240 | "super block\n"); | ||
241 | goto scan_error; | 244 | goto scan_error; |
242 | } | 245 | } |
243 | printk(KERN_INFO | 246 | nilfs_msg(sb, KERN_INFO, |
244 | "NILFS: try rollback from an earlier position\n"); | 247 | "trying rollback from an earlier position"); |
245 | 248 | ||
246 | /* | 249 | /* |
247 | * restore super block with its spare and reconfigure | 250 | * restore super block with its spare and reconfigure |
@@ -254,10 +257,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
254 | /* verify consistency between two super blocks */ | 257 | /* verify consistency between two super blocks */ |
255 | blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); | 258 | blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); |
256 | if (blocksize != nilfs->ns_blocksize) { | 259 | if (blocksize != nilfs->ns_blocksize) { |
257 | printk(KERN_WARNING | 260 | nilfs_msg(sb, KERN_WARNING, |
258 | "NILFS warning: blocksize differs between " | 261 | "blocksize differs between two super blocks (%d != %d)", |
259 | "two super blocks (%d != %d)\n", | 262 | blocksize, nilfs->ns_blocksize); |
260 | blocksize, nilfs->ns_blocksize); | ||
261 | goto scan_error; | 263 | goto scan_error; |
262 | } | 264 | } |
263 | 265 | ||
@@ -276,7 +278,8 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
276 | 278 | ||
277 | err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root); | 279 | err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root); |
278 | if (unlikely(err)) { | 280 | if (unlikely(err)) { |
279 | printk(KERN_ERR "NILFS: error loading super root.\n"); | 281 | nilfs_msg(sb, KERN_ERR, "error %d while loading super root", |
282 | err); | ||
280 | goto failed; | 283 | goto failed; |
281 | } | 284 | } |
282 | 285 | ||
@@ -287,30 +290,29 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
287 | __u64 features; | 290 | __u64 features; |
288 | 291 | ||
289 | if (nilfs_test_opt(nilfs, NORECOVERY)) { | 292 | if (nilfs_test_opt(nilfs, NORECOVERY)) { |
290 | printk(KERN_INFO "NILFS: norecovery option specified. " | 293 | nilfs_msg(sb, KERN_INFO, |
291 | "skipping roll-forward recovery\n"); | 294 | "norecovery option specified, skipping roll-forward recovery"); |
292 | goto skip_recovery; | 295 | goto skip_recovery; |
293 | } | 296 | } |
294 | features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & | 297 | features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & |
295 | ~NILFS_FEATURE_COMPAT_RO_SUPP; | 298 | ~NILFS_FEATURE_COMPAT_RO_SUPP; |
296 | if (features) { | 299 | if (features) { |
297 | printk(KERN_ERR "NILFS: couldn't proceed with " | 300 | nilfs_msg(sb, KERN_ERR, |
298 | "recovery because of unsupported optional " | 301 | "couldn't proceed with recovery because of unsupported optional features (%llx)", |
299 | "features (%llx)\n", | 302 | (unsigned long long)features); |
300 | (unsigned long long)features); | ||
301 | err = -EROFS; | 303 | err = -EROFS; |
302 | goto failed_unload; | 304 | goto failed_unload; |
303 | } | 305 | } |
304 | if (really_read_only) { | 306 | if (really_read_only) { |
305 | printk(KERN_ERR "NILFS: write access " | 307 | nilfs_msg(sb, KERN_ERR, |
306 | "unavailable, cannot proceed.\n"); | 308 | "write access unavailable, cannot proceed"); |
307 | err = -EROFS; | 309 | err = -EROFS; |
308 | goto failed_unload; | 310 | goto failed_unload; |
309 | } | 311 | } |
310 | sb->s_flags &= ~MS_RDONLY; | 312 | sb->s_flags &= ~MS_RDONLY; |
311 | } else if (nilfs_test_opt(nilfs, NORECOVERY)) { | 313 | } else if (nilfs_test_opt(nilfs, NORECOVERY)) { |
312 | printk(KERN_ERR "NILFS: recovery cancelled because norecovery " | 314 | nilfs_msg(sb, KERN_ERR, |
313 | "option was specified for a read/write mount\n"); | 315 | "recovery cancelled because norecovery option was specified for a read/write mount"); |
314 | err = -EINVAL; | 316 | err = -EINVAL; |
315 | goto failed_unload; | 317 | goto failed_unload; |
316 | } | 318 | } |
@@ -325,11 +327,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
325 | up_write(&nilfs->ns_sem); | 327 | up_write(&nilfs->ns_sem); |
326 | 328 | ||
327 | if (err) { | 329 | if (err) { |
328 | printk(KERN_ERR "NILFS: failed to update super block. " | 330 | nilfs_msg(sb, KERN_ERR, |
329 | "recovery unfinished.\n"); | 331 | "error %d updating super block. recovery unfinished.", |
332 | err); | ||
330 | goto failed_unload; | 333 | goto failed_unload; |
331 | } | 334 | } |
332 | printk(KERN_INFO "NILFS: recovery complete.\n"); | 335 | nilfs_msg(sb, KERN_INFO, "recovery complete"); |
333 | 336 | ||
334 | skip_recovery: | 337 | skip_recovery: |
335 | nilfs_clear_recovery_info(&ri); | 338 | nilfs_clear_recovery_info(&ri); |
@@ -337,7 +340,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) | |||
337 | return 0; | 340 | return 0; |
338 | 341 | ||
339 | scan_error: | 342 | scan_error: |
340 | printk(KERN_ERR "NILFS: error searching super root.\n"); | 343 | nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err); |
341 | goto failed; | 344 | goto failed; |
342 | 345 | ||
343 | failed_unload: | 346 | failed_unload: |
@@ -384,12 +387,11 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
384 | struct nilfs_super_block *sbp) | 387 | struct nilfs_super_block *sbp) |
385 | { | 388 | { |
386 | if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) { | 389 | if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) { |
387 | printk(KERN_ERR "NILFS: unsupported revision " | 390 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
388 | "(superblock rev.=%d.%d, current rev.=%d.%d). " | 391 | "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).", |
389 | "Please check the version of mkfs.nilfs.\n", | 392 | le32_to_cpu(sbp->s_rev_level), |
390 | le32_to_cpu(sbp->s_rev_level), | 393 | le16_to_cpu(sbp->s_minor_rev_level), |
391 | le16_to_cpu(sbp->s_minor_rev_level), | 394 | NILFS_CURRENT_REV, NILFS_MINOR_REV); |
392 | NILFS_CURRENT_REV, NILFS_MINOR_REV); | ||
393 | return -EINVAL; | 395 | return -EINVAL; |
394 | } | 396 | } |
395 | nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes); | 397 | nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes); |
@@ -398,12 +400,14 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
398 | 400 | ||
399 | nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); | 401 | nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); |
400 | if (nilfs->ns_inode_size > nilfs->ns_blocksize) { | 402 | if (nilfs->ns_inode_size > nilfs->ns_blocksize) { |
401 | printk(KERN_ERR "NILFS: too large inode size: %d bytes.\n", | 403 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
402 | nilfs->ns_inode_size); | 404 | "too large inode size: %d bytes", |
405 | nilfs->ns_inode_size); | ||
403 | return -EINVAL; | 406 | return -EINVAL; |
404 | } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) { | 407 | } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) { |
405 | printk(KERN_ERR "NILFS: too small inode size: %d bytes.\n", | 408 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
406 | nilfs->ns_inode_size); | 409 | "too small inode size: %d bytes", |
410 | nilfs->ns_inode_size); | ||
407 | return -EINVAL; | 411 | return -EINVAL; |
408 | } | 412 | } |
409 | 413 | ||
@@ -411,7 +415,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
411 | 415 | ||
412 | nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); | 416 | nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); |
413 | if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { | 417 | if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { |
414 | printk(KERN_ERR "NILFS: too short segment.\n"); | 418 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
419 | "too short segment: %lu blocks", | ||
420 | nilfs->ns_blocks_per_segment); | ||
415 | return -EINVAL; | 421 | return -EINVAL; |
416 | } | 422 | } |
417 | 423 | ||
@@ -420,7 +426,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
420 | le32_to_cpu(sbp->s_r_segments_percentage); | 426 | le32_to_cpu(sbp->s_r_segments_percentage); |
421 | if (nilfs->ns_r_segments_percentage < 1 || | 427 | if (nilfs->ns_r_segments_percentage < 1 || |
422 | nilfs->ns_r_segments_percentage > 99) { | 428 | nilfs->ns_r_segments_percentage > 99) { |
423 | printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n"); | 429 | nilfs_msg(nilfs->ns_sb, KERN_ERR, |
430 | "invalid reserved segments percentage: %lu", | ||
431 | nilfs->ns_r_segments_percentage); | ||
424 | return -EINVAL; | 432 | return -EINVAL; |
425 | } | 433 | } |
426 | 434 | ||
@@ -504,16 +512,16 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, | |||
504 | 512 | ||
505 | if (!sbp[0]) { | 513 | if (!sbp[0]) { |
506 | if (!sbp[1]) { | 514 | if (!sbp[1]) { |
507 | printk(KERN_ERR "NILFS: unable to read superblock\n"); | 515 | nilfs_msg(sb, KERN_ERR, "unable to read superblock"); |
508 | return -EIO; | 516 | return -EIO; |
509 | } | 517 | } |
510 | printk(KERN_WARNING | 518 | nilfs_msg(sb, KERN_WARNING, |
511 | "NILFS warning: unable to read primary superblock " | 519 | "unable to read primary superblock (blocksize = %d)", |
512 | "(blocksize = %d)\n", blocksize); | 520 | blocksize); |
513 | } else if (!sbp[1]) { | 521 | } else if (!sbp[1]) { |
514 | printk(KERN_WARNING | 522 | nilfs_msg(sb, KERN_WARNING, |
515 | "NILFS warning: unable to read secondary superblock " | 523 | "unable to read secondary superblock (blocksize = %d)", |
516 | "(blocksize = %d)\n", blocksize); | 524 | blocksize); |
517 | } | 525 | } |
518 | 526 | ||
519 | /* | 527 | /* |
@@ -535,14 +543,14 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, | |||
535 | } | 543 | } |
536 | if (!valid[swp]) { | 544 | if (!valid[swp]) { |
537 | nilfs_release_super_block(nilfs); | 545 | nilfs_release_super_block(nilfs); |
538 | printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n", | 546 | nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device"); |
539 | sb->s_id); | ||
540 | return -EINVAL; | 547 | return -EINVAL; |
541 | } | 548 | } |
542 | 549 | ||
543 | if (!valid[!swp]) | 550 | if (!valid[!swp]) |
544 | printk(KERN_WARNING "NILFS warning: broken superblock. " | 551 | nilfs_msg(sb, KERN_WARNING, |
545 | "using spare superblock (blocksize = %d).\n", blocksize); | 552 | "broken superblock, retrying with spare superblock (blocksize = %d)", |
553 | blocksize); | ||
546 | if (swp) | 554 | if (swp) |
547 | nilfs_swap_super_block(nilfs); | 555 | nilfs_swap_super_block(nilfs); |
548 | 556 | ||
@@ -576,7 +584,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) | |||
576 | 584 | ||
577 | blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); | 585 | blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); |
578 | if (!blocksize) { | 586 | if (!blocksize) { |
579 | printk(KERN_ERR "NILFS: unable to set blocksize\n"); | 587 | nilfs_msg(sb, KERN_ERR, "unable to set blocksize"); |
580 | err = -EINVAL; | 588 | err = -EINVAL; |
581 | goto out; | 589 | goto out; |
582 | } | 590 | } |
@@ -595,8 +603,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) | |||
595 | blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); | 603 | blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); |
596 | if (blocksize < NILFS_MIN_BLOCK_SIZE || | 604 | if (blocksize < NILFS_MIN_BLOCK_SIZE || |
597 | blocksize > NILFS_MAX_BLOCK_SIZE) { | 605 | blocksize > NILFS_MAX_BLOCK_SIZE) { |
598 | printk(KERN_ERR "NILFS: couldn't mount because of unsupported " | 606 | nilfs_msg(sb, KERN_ERR, |
599 | "filesystem blocksize %d\n", blocksize); | 607 | "couldn't mount because of unsupported filesystem blocksize %d", |
608 | blocksize); | ||
600 | err = -EINVAL; | 609 | err = -EINVAL; |
601 | goto failed_sbh; | 610 | goto failed_sbh; |
602 | } | 611 | } |
@@ -604,10 +613,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) | |||
604 | int hw_blocksize = bdev_logical_block_size(sb->s_bdev); | 613 | int hw_blocksize = bdev_logical_block_size(sb->s_bdev); |
605 | 614 | ||
606 | if (blocksize < hw_blocksize) { | 615 | if (blocksize < hw_blocksize) { |
607 | printk(KERN_ERR | 616 | nilfs_msg(sb, KERN_ERR, |
608 | "NILFS: blocksize %d too small for device " | 617 | "blocksize %d too small for device (sector-size = %d)", |
609 | "(sector-size = %d).\n", | 618 | blocksize, hw_blocksize); |
610 | blocksize, hw_blocksize); | ||
611 | err = -EINVAL; | 619 | err = -EINVAL; |
612 | goto failed_sbh; | 620 | goto failed_sbh; |
613 | } | 621 | } |
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 79369fd6b13b..b305c6f033e7 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h | |||
@@ -43,6 +43,7 @@ enum { | |||
43 | * struct the_nilfs - struct to supervise multiple nilfs mount points | 43 | * struct the_nilfs - struct to supervise multiple nilfs mount points |
44 | * @ns_flags: flags | 44 | * @ns_flags: flags |
45 | * @ns_flushed_device: flag indicating if all volatile data was flushed | 45 | * @ns_flushed_device: flag indicating if all volatile data was flushed |
46 | * @ns_sb: back pointer to super block instance | ||
46 | * @ns_bdev: block device | 47 | * @ns_bdev: block device |
47 | * @ns_sem: semaphore for shared states | 48 | * @ns_sem: semaphore for shared states |
48 | * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts | 49 | * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts |
@@ -102,6 +103,7 @@ struct the_nilfs { | |||
102 | unsigned long ns_flags; | 103 | unsigned long ns_flags; |
103 | int ns_flushed_device; | 104 | int ns_flushed_device; |
104 | 105 | ||
106 | struct super_block *ns_sb; | ||
105 | struct block_device *ns_bdev; | 107 | struct block_device *ns_bdev; |
106 | struct rw_semaphore ns_sem; | 108 | struct rw_semaphore ns_sem; |
107 | struct mutex ns_snapshot_mount_mutex; | 109 | struct mutex ns_snapshot_mount_mutex; |
@@ -120,11 +122,8 @@ struct the_nilfs { | |||
120 | unsigned int ns_sb_update_freq; | 122 | unsigned int ns_sb_update_freq; |
121 | 123 | ||
122 | /* | 124 | /* |
123 | * Following fields are dedicated to a writable FS-instance. | 125 | * The following fields are updated by a writable FS-instance. |
124 | * Except for the period seeking checkpoint, code outside the segment | 126 | * These fields are protected by ns_segctor_sem outside load_nilfs(). |
125 | * constructor must lock a segment semaphore while accessing these | ||
126 | * fields. | ||
127 | * The writable FS-instance is sole during a lifetime of the_nilfs. | ||
128 | */ | 127 | */ |
129 | u64 ns_seg_seq; | 128 | u64 ns_seg_seq; |
130 | __u64 ns_segnum; | 129 | __u64 ns_segnum; |
@@ -281,7 +280,7 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs) | |||
281 | } | 280 | } |
282 | 281 | ||
283 | void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64); | 282 | void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64); |
284 | struct the_nilfs *alloc_nilfs(struct block_device *bdev); | 283 | struct the_nilfs *alloc_nilfs(struct super_block *sb); |
285 | void destroy_nilfs(struct the_nilfs *nilfs); | 284 | void destroy_nilfs(struct the_nilfs *nilfs); |
286 | int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data); | 285 | int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data); |
287 | int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb); | 286 | int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb); |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 460c0cedab3a..7dabbc31060e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -6106,6 +6106,43 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, | |||
6106 | } | 6106 | } |
6107 | } | 6107 | } |
6108 | 6108 | ||
6109 | /* | ||
6110 | * Try to flush truncate logs if we can free enough clusters from it. | ||
6111 | * As for return value, "< 0" means error, "0" no space and "1" means | ||
6112 | * we have freed enough spaces and let the caller try to allocate again. | ||
6113 | */ | ||
6114 | int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, | ||
6115 | unsigned int needed) | ||
6116 | { | ||
6117 | tid_t target; | ||
6118 | int ret = 0; | ||
6119 | unsigned int truncated_clusters; | ||
6120 | |||
6121 | inode_lock(osb->osb_tl_inode); | ||
6122 | truncated_clusters = osb->truncated_clusters; | ||
6123 | inode_unlock(osb->osb_tl_inode); | ||
6124 | |||
6125 | /* | ||
6126 | * Check whether we can succeed in allocating if we free | ||
6127 | * the truncate log. | ||
6128 | */ | ||
6129 | if (truncated_clusters < needed) | ||
6130 | goto out; | ||
6131 | |||
6132 | ret = ocfs2_flush_truncate_log(osb); | ||
6133 | if (ret) { | ||
6134 | mlog_errno(ret); | ||
6135 | goto out; | ||
6136 | } | ||
6137 | |||
6138 | if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) { | ||
6139 | jbd2_log_wait_commit(osb->journal->j_journal, target); | ||
6140 | ret = 1; | ||
6141 | } | ||
6142 | out: | ||
6143 | return ret; | ||
6144 | } | ||
6145 | |||
6109 | static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, | 6146 | static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, |
6110 | int slot_num, | 6147 | int slot_num, |
6111 | struct inode **tl_inode, | 6148 | struct inode **tl_inode, |
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index f3dc1b0dfffc..4a5152ec88a3 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h | |||
@@ -188,6 +188,8 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
188 | u64 start_blk, | 188 | u64 start_blk, |
189 | unsigned int num_clusters); | 189 | unsigned int num_clusters); |
190 | int __ocfs2_flush_truncate_log(struct ocfs2_super *osb); | 190 | int __ocfs2_flush_truncate_log(struct ocfs2_super *osb); |
191 | int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, | ||
192 | unsigned int needed); | ||
191 | 193 | ||
192 | /* | 194 | /* |
193 | * Process local structure which describes the block unlinks done | 195 | * Process local structure which describes the block unlinks done |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index af2adfcb0f6f..98d36548153d 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -1645,43 +1645,6 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, | |||
1645 | return ret; | 1645 | return ret; |
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | /* | ||
1649 | * Try to flush truncate logs if we can free enough clusters from it. | ||
1650 | * As for return value, "< 0" means error, "0" no space and "1" means | ||
1651 | * we have freed enough spaces and let the caller try to allocate again. | ||
1652 | */ | ||
1653 | static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, | ||
1654 | unsigned int needed) | ||
1655 | { | ||
1656 | tid_t target; | ||
1657 | int ret = 0; | ||
1658 | unsigned int truncated_clusters; | ||
1659 | |||
1660 | inode_lock(osb->osb_tl_inode); | ||
1661 | truncated_clusters = osb->truncated_clusters; | ||
1662 | inode_unlock(osb->osb_tl_inode); | ||
1663 | |||
1664 | /* | ||
1665 | * Check whether we can succeed in allocating if we free | ||
1666 | * the truncate log. | ||
1667 | */ | ||
1668 | if (truncated_clusters < needed) | ||
1669 | goto out; | ||
1670 | |||
1671 | ret = ocfs2_flush_truncate_log(osb); | ||
1672 | if (ret) { | ||
1673 | mlog_errno(ret); | ||
1674 | goto out; | ||
1675 | } | ||
1676 | |||
1677 | if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) { | ||
1678 | jbd2_log_wait_commit(osb->journal->j_journal, target); | ||
1679 | ret = 1; | ||
1680 | } | ||
1681 | out: | ||
1682 | return ret; | ||
1683 | } | ||
1684 | |||
1685 | int ocfs2_write_begin_nolock(struct address_space *mapping, | 1648 | int ocfs2_write_begin_nolock(struct address_space *mapping, |
1686 | loff_t pos, unsigned len, ocfs2_write_type_t type, | 1649 | loff_t pos, unsigned len, ocfs2_write_type_t type, |
1687 | struct page **pagep, void **fsdata, | 1650 | struct page **pagep, void **fsdata, |
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 8107d0d0c3f6..e9f3705c4c9f 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -1004,6 +1004,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, | |||
1004 | int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | 1004 | int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, |
1005 | u8 nodenum, u8 *real_master); | 1005 | u8 nodenum, u8 *real_master); |
1006 | 1006 | ||
1007 | void __dlm_do_purge_lockres(struct dlm_ctxt *dlm, | ||
1008 | struct dlm_lock_resource *res); | ||
1007 | 1009 | ||
1008 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | 1010 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, |
1009 | struct dlm_lock_resource *res, | 1011 | struct dlm_lock_resource *res, |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 13719d3f35f8..6ea06f8a7d29 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2276,9 +2276,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
2276 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", | 2276 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", |
2277 | dlm->name, namelen, lockname, res->owner, r); | 2277 | dlm->name, namelen, lockname, res->owner, r); |
2278 | dlm_print_one_lock_resource(res); | 2278 | dlm_print_one_lock_resource(res); |
2279 | BUG(); | 2279 | if (r == -ENOMEM) |
2280 | } | 2280 | BUG(); |
2281 | return ret ? ret : r; | 2281 | } else |
2282 | ret = r; | ||
2283 | |||
2284 | return ret; | ||
2282 | } | 2285 | } |
2283 | 2286 | ||
2284 | int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, | 2287 | int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, |
@@ -2416,48 +2419,26 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data, | |||
2416 | } | 2419 | } |
2417 | 2420 | ||
2418 | spin_lock(&res->spinlock); | 2421 | spin_lock(&res->spinlock); |
2419 | BUG_ON(!(res->state & DLM_LOCK_RES_DROPPING_REF)); | 2422 | if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { |
2420 | if (!list_empty(&res->purge)) { | 2423 | spin_unlock(&res->spinlock); |
2421 | mlog(0, "%s: Removing res %.*s from purgelist\n", | 2424 | spin_unlock(&dlm->spinlock); |
2422 | dlm->name, res->lockname.len, res->lockname.name); | 2425 | mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done " |
2423 | list_del_init(&res->purge); | 2426 | "but it is already derefed!\n", dlm->name, |
2424 | dlm_lockres_put(res); | 2427 | res->lockname.len, res->lockname.name, node); |
2425 | dlm->purge_count--; | 2428 | ret = 0; |
2426 | } | 2429 | goto done; |
2427 | |||
2428 | if (!__dlm_lockres_unused(res)) { | ||
2429 | mlog(ML_ERROR, "%s: res %.*s in use after deref\n", | ||
2430 | dlm->name, res->lockname.len, res->lockname.name); | ||
2431 | __dlm_print_one_lock_resource(res); | ||
2432 | BUG(); | ||
2433 | } | ||
2434 | |||
2435 | __dlm_unhash_lockres(dlm, res); | ||
2436 | |||
2437 | spin_lock(&dlm->track_lock); | ||
2438 | if (!list_empty(&res->tracking)) | ||
2439 | list_del_init(&res->tracking); | ||
2440 | else { | ||
2441 | mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n", | ||
2442 | dlm->name, res->lockname.len, res->lockname.name); | ||
2443 | __dlm_print_one_lock_resource(res); | ||
2444 | } | 2430 | } |
2445 | spin_unlock(&dlm->track_lock); | ||
2446 | 2431 | ||
2447 | /* lockres is not in the hash now. drop the flag and wake up | 2432 | __dlm_do_purge_lockres(dlm, res); |
2448 | * any processes waiting in dlm_get_lock_resource. | ||
2449 | */ | ||
2450 | res->state &= ~DLM_LOCK_RES_DROPPING_REF; | ||
2451 | spin_unlock(&res->spinlock); | 2433 | spin_unlock(&res->spinlock); |
2452 | wake_up(&res->wq); | 2434 | wake_up(&res->wq); |
2453 | 2435 | ||
2454 | dlm_lockres_put(res); | ||
2455 | |||
2456 | spin_unlock(&dlm->spinlock); | 2436 | spin_unlock(&dlm->spinlock); |
2457 | 2437 | ||
2458 | ret = 0; | 2438 | ret = 0; |
2459 | |||
2460 | done: | 2439 | done: |
2440 | if (res) | ||
2441 | dlm_lockres_put(res); | ||
2461 | dlm_put(dlm); | 2442 | dlm_put(dlm); |
2462 | return ret; | 2443 | return ret; |
2463 | } | 2444 | } |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index f6b313898763..dd5cb8bcefd1 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -2343,6 +2343,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2343 | struct dlm_lock_resource *res; | 2343 | struct dlm_lock_resource *res; |
2344 | int i; | 2344 | int i; |
2345 | struct hlist_head *bucket; | 2345 | struct hlist_head *bucket; |
2346 | struct hlist_node *tmp; | ||
2346 | struct dlm_lock *lock; | 2347 | struct dlm_lock *lock; |
2347 | 2348 | ||
2348 | 2349 | ||
@@ -2365,7 +2366,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2365 | */ | 2366 | */ |
2366 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 2367 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
2367 | bucket = dlm_lockres_hash(dlm, i); | 2368 | bucket = dlm_lockres_hash(dlm, i); |
2368 | hlist_for_each_entry(res, bucket, hash_node) { | 2369 | hlist_for_each_entry_safe(res, tmp, bucket, hash_node) { |
2369 | /* always prune any $RECOVERY entries for dead nodes, | 2370 | /* always prune any $RECOVERY entries for dead nodes, |
2370 | * otherwise hangs can occur during later recovery */ | 2371 | * otherwise hangs can occur during later recovery */ |
2371 | if (dlm_is_recovery_lock(res->lockname.name, | 2372 | if (dlm_is_recovery_lock(res->lockname.name, |
@@ -2386,8 +2387,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2386 | break; | 2387 | break; |
2387 | } | 2388 | } |
2388 | } | 2389 | } |
2389 | dlm_lockres_clear_refmap_bit(dlm, res, | 2390 | |
2390 | dead_node); | 2391 | if ((res->owner == dead_node) && |
2392 | (res->state & DLM_LOCK_RES_DROPPING_REF)) { | ||
2393 | dlm_lockres_get(res); | ||
2394 | __dlm_do_purge_lockres(dlm, res); | ||
2395 | spin_unlock(&res->spinlock); | ||
2396 | wake_up(&res->wq); | ||
2397 | dlm_lockres_put(res); | ||
2398 | continue; | ||
2399 | } else if (res->owner == dlm->node_num) | ||
2400 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); | ||
2391 | spin_unlock(&res->spinlock); | 2401 | spin_unlock(&res->spinlock); |
2392 | continue; | 2402 | continue; |
2393 | } | 2403 | } |
@@ -2398,14 +2408,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2398 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { | 2408 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { |
2399 | mlog(0, "%s:%.*s: owned by " | 2409 | mlog(0, "%s:%.*s: owned by " |
2400 | "dead node %u, this node was " | 2410 | "dead node %u, this node was " |
2401 | "dropping its ref when it died. " | 2411 | "dropping its ref when master died. " |
2402 | "continue, dropping the flag.\n", | 2412 | "continue, purging the lockres.\n", |
2403 | dlm->name, res->lockname.len, | 2413 | dlm->name, res->lockname.len, |
2404 | res->lockname.name, dead_node); | 2414 | res->lockname.name, dead_node); |
2415 | dlm_lockres_get(res); | ||
2416 | __dlm_do_purge_lockres(dlm, res); | ||
2417 | spin_unlock(&res->spinlock); | ||
2418 | wake_up(&res->wq); | ||
2419 | dlm_lockres_put(res); | ||
2420 | continue; | ||
2405 | } | 2421 | } |
2406 | res->state &= ~DLM_LOCK_RES_DROPPING_REF; | ||
2407 | dlm_move_lockres_to_recovery_list(dlm, | ||
2408 | res); | ||
2409 | } else if (res->owner == dlm->node_num) { | 2422 | } else if (res->owner == dlm->node_num) { |
2410 | dlm_free_dead_locks(dlm, res, dead_node); | 2423 | dlm_free_dead_locks(dlm, res, dead_node); |
2411 | __dlm_lockres_calc_usage(dlm, res); | 2424 | __dlm_lockres_calc_usage(dlm, res); |
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 68d239ba0c63..838a06d4066a 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -160,6 +160,52 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | |||
160 | spin_unlock(&dlm->spinlock); | 160 | spin_unlock(&dlm->spinlock); |
161 | } | 161 | } |
162 | 162 | ||
163 | /* | ||
164 | * Do the real purge work: | ||
165 | * unhash the lockres, and | ||
166 | * clear flag DLM_LOCK_RES_DROPPING_REF. | ||
167 | * It requires dlm and lockres spinlock to be taken. | ||
168 | */ | ||
169 | void __dlm_do_purge_lockres(struct dlm_ctxt *dlm, | ||
170 | struct dlm_lock_resource *res) | ||
171 | { | ||
172 | assert_spin_locked(&dlm->spinlock); | ||
173 | assert_spin_locked(&res->spinlock); | ||
174 | |||
175 | if (!list_empty(&res->purge)) { | ||
176 | mlog(0, "%s: Removing res %.*s from purgelist\n", | ||
177 | dlm->name, res->lockname.len, res->lockname.name); | ||
178 | list_del_init(&res->purge); | ||
179 | dlm_lockres_put(res); | ||
180 | dlm->purge_count--; | ||
181 | } | ||
182 | |||
183 | if (!__dlm_lockres_unused(res)) { | ||
184 | mlog(ML_ERROR, "%s: res %.*s in use after deref\n", | ||
185 | dlm->name, res->lockname.len, res->lockname.name); | ||
186 | __dlm_print_one_lock_resource(res); | ||
187 | BUG(); | ||
188 | } | ||
189 | |||
190 | __dlm_unhash_lockres(dlm, res); | ||
191 | |||
192 | spin_lock(&dlm->track_lock); | ||
193 | if (!list_empty(&res->tracking)) | ||
194 | list_del_init(&res->tracking); | ||
195 | else { | ||
196 | mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n", | ||
197 | dlm->name, res->lockname.len, res->lockname.name); | ||
198 | __dlm_print_one_lock_resource(res); | ||
199 | } | ||
200 | spin_unlock(&dlm->track_lock); | ||
201 | |||
202 | /* | ||
203 | * lockres is not in the hash now. drop the flag and wake up | ||
204 | * any processes waiting in dlm_get_lock_resource. | ||
205 | */ | ||
206 | res->state &= ~DLM_LOCK_RES_DROPPING_REF; | ||
207 | } | ||
208 | |||
163 | static void dlm_purge_lockres(struct dlm_ctxt *dlm, | 209 | static void dlm_purge_lockres(struct dlm_ctxt *dlm, |
164 | struct dlm_lock_resource *res) | 210 | struct dlm_lock_resource *res) |
165 | { | 211 | { |
@@ -175,6 +221,13 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
175 | res->lockname.len, res->lockname.name, master); | 221 | res->lockname.len, res->lockname.name, master); |
176 | 222 | ||
177 | if (!master) { | 223 | if (!master) { |
224 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { | ||
225 | mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n", | ||
226 | dlm->name, res->lockname.len, res->lockname.name); | ||
227 | spin_unlock(&res->spinlock); | ||
228 | return; | ||
229 | } | ||
230 | |||
178 | res->state |= DLM_LOCK_RES_DROPPING_REF; | 231 | res->state |= DLM_LOCK_RES_DROPPING_REF; |
179 | /* drop spinlock... retake below */ | 232 | /* drop spinlock... retake below */ |
180 | spin_unlock(&res->spinlock); | 233 | spin_unlock(&res->spinlock); |
@@ -203,8 +256,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
203 | dlm->purge_count--; | 256 | dlm->purge_count--; |
204 | } | 257 | } |
205 | 258 | ||
206 | if (!master && ret != 0) { | 259 | if (!master && ret == DLM_DEREF_RESPONSE_INPROG) { |
207 | mlog(0, "%s: deref %.*s in progress or master goes down\n", | 260 | mlog(0, "%s: deref %.*s in progress\n", |
208 | dlm->name, res->lockname.len, res->lockname.name); | 261 | dlm->name, res->lockname.len, res->lockname.name); |
209 | spin_unlock(&res->spinlock); | 262 | spin_unlock(&res->spinlock); |
210 | return; | 263 | return; |
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index ced70c8139f7..c9e828ec3c8e 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c | |||
@@ -1007,10 +1007,17 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn) | |||
1007 | lc->oc_type = NO_CONTROLD; | 1007 | lc->oc_type = NO_CONTROLD; |
1008 | 1008 | ||
1009 | rc = dlm_new_lockspace(conn->cc_name, conn->cc_cluster_name, | 1009 | rc = dlm_new_lockspace(conn->cc_name, conn->cc_cluster_name, |
1010 | DLM_LSFL_FS, DLM_LVB_LEN, | 1010 | DLM_LSFL_FS | DLM_LSFL_NEWEXCL, DLM_LVB_LEN, |
1011 | &ocfs2_ls_ops, conn, &ops_rv, &fsdlm); | 1011 | &ocfs2_ls_ops, conn, &ops_rv, &fsdlm); |
1012 | if (rc) | 1012 | if (rc) { |
1013 | if (rc == -EEXIST || rc == -EPROTO) | ||
1014 | printk(KERN_ERR "ocfs2: Unable to create the " | ||
1015 | "lockspace %s (%d), because a ocfs2-tools " | ||
1016 | "program is running on this file system " | ||
1017 | "with the same name lockspace\n", | ||
1018 | conn->cc_name, rc); | ||
1013 | goto out; | 1019 | goto out; |
1020 | } | ||
1014 | 1021 | ||
1015 | if (ops_rv == -EOPNOTSUPP) { | 1022 | if (ops_rv == -EOPNOTSUPP) { |
1016 | lc->oc_type = WITH_CONTROLD; | 1023 | lc->oc_type = WITH_CONTROLD; |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 2f19aeec5482..ea47120a85ff 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -1164,7 +1164,8 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb, | |||
1164 | int flags, | 1164 | int flags, |
1165 | struct ocfs2_alloc_context **ac) | 1165 | struct ocfs2_alloc_context **ac) |
1166 | { | 1166 | { |
1167 | int status; | 1167 | int status, ret = 0; |
1168 | int retried = 0; | ||
1168 | 1169 | ||
1169 | *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); | 1170 | *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); |
1170 | if (!(*ac)) { | 1171 | if (!(*ac)) { |
@@ -1189,7 +1190,24 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb, | |||
1189 | } | 1190 | } |
1190 | 1191 | ||
1191 | if (status == -ENOSPC) { | 1192 | if (status == -ENOSPC) { |
1193 | retry: | ||
1192 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); | 1194 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); |
1195 | /* Retry if there is sufficient space cached in truncate log */ | ||
1196 | if (status == -ENOSPC && !retried) { | ||
1197 | retried = 1; | ||
1198 | ocfs2_inode_unlock((*ac)->ac_inode, 1); | ||
1199 | inode_unlock((*ac)->ac_inode); | ||
1200 | |||
1201 | ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted); | ||
1202 | if (ret == 1) | ||
1203 | goto retry; | ||
1204 | |||
1205 | if (ret < 0) | ||
1206 | mlog_errno(ret); | ||
1207 | |||
1208 | inode_lock((*ac)->ac_inode); | ||
1209 | ocfs2_inode_lock((*ac)->ac_inode, NULL, 1); | ||
1210 | } | ||
1193 | if (status < 0) { | 1211 | if (status < 0) { |
1194 | if (status != -ENOSPC) | 1212 | if (status != -ENOSPC) |
1195 | mlog_errno(status); | 1213 | mlog_errno(status); |
diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 7151ea428041..a8c13605b434 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y += proc.o | 5 | obj-y += proc.o |
6 | 6 | ||
7 | CFLAGS_task_mmu.o += -Wno-override-init | ||
7 | proc-y := nommu.o task_nommu.o | 8 | proc-y := nommu.o task_nommu.o |
8 | proc-$(CONFIG_MMU) := task_mmu.o | 9 | proc-$(CONFIG_MMU) := task_mmu.o |
9 | 10 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index 31370da2ee7c..54e270262979 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -579,11 +579,8 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, | |||
579 | unsigned long totalpages = totalram_pages + total_swap_pages; | 579 | unsigned long totalpages = totalram_pages + total_swap_pages; |
580 | unsigned long points = 0; | 580 | unsigned long points = 0; |
581 | 581 | ||
582 | read_lock(&tasklist_lock); | 582 | points = oom_badness(task, NULL, NULL, totalpages) * |
583 | if (pid_alive(task)) | 583 | 1000 / totalpages; |
584 | points = oom_badness(task, NULL, NULL, totalpages) * | ||
585 | 1000 / totalpages; | ||
586 | read_unlock(&tasklist_lock); | ||
587 | seq_printf(m, "%lu\n", points); | 584 | seq_printf(m, "%lu\n", points); |
588 | 585 | ||
589 | return 0; | 586 | return 0; |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 510413eb25b8..7907e456ac4f 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -80,19 +80,17 @@ static u64 get_iowait_time(int cpu) | |||
80 | static int show_stat(struct seq_file *p, void *v) | 80 | static int show_stat(struct seq_file *p, void *v) |
81 | { | 81 | { |
82 | int i, j; | 82 | int i, j; |
83 | unsigned long jif; | ||
84 | u64 user, nice, system, idle, iowait, irq, softirq, steal; | 83 | u64 user, nice, system, idle, iowait, irq, softirq, steal; |
85 | u64 guest, guest_nice; | 84 | u64 guest, guest_nice; |
86 | u64 sum = 0; | 85 | u64 sum = 0; |
87 | u64 sum_softirq = 0; | 86 | u64 sum_softirq = 0; |
88 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; | 87 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; |
89 | struct timespec boottime; | 88 | struct timespec64 boottime; |
90 | 89 | ||
91 | user = nice = system = idle = iowait = | 90 | user = nice = system = idle = iowait = |
92 | irq = softirq = steal = 0; | 91 | irq = softirq = steal = 0; |
93 | guest = guest_nice = 0; | 92 | guest = guest_nice = 0; |
94 | getboottime(&boottime); | 93 | getboottime64(&boottime); |
95 | jif = boottime.tv_sec; | ||
96 | 94 | ||
97 | for_each_possible_cpu(i) { | 95 | for_each_possible_cpu(i) { |
98 | user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; | 96 | user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; |
@@ -163,12 +161,12 @@ static int show_stat(struct seq_file *p, void *v) | |||
163 | 161 | ||
164 | seq_printf(p, | 162 | seq_printf(p, |
165 | "\nctxt %llu\n" | 163 | "\nctxt %llu\n" |
166 | "btime %lu\n" | 164 | "btime %llu\n" |
167 | "processes %lu\n" | 165 | "processes %lu\n" |
168 | "procs_running %lu\n" | 166 | "procs_running %lu\n" |
169 | "procs_blocked %lu\n", | 167 | "procs_blocked %lu\n", |
170 | nr_context_switches(), | 168 | nr_context_switches(), |
171 | (unsigned long)jif, | 169 | (unsigned long long)boottime.tv_sec, |
172 | total_forks, | 170 | total_forks, |
173 | nr_running(), | 171 | nr_running(), |
174 | nr_iowait()); | 172 | nr_iowait()); |
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c index b751eea32e20..5db6f45b3fed 100644 --- a/fs/reiserfs/ibalance.c +++ b/fs/reiserfs/ibalance.c | |||
@@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb, | |||
1153 | insert_ptr); | 1153 | insert_ptr); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); | ||
1157 | insert_ptr[0] = new_insert_ptr; | 1156 | insert_ptr[0] = new_insert_ptr; |
1157 | if (new_insert_ptr) | ||
1158 | memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); | ||
1158 | 1159 | ||
1159 | return order; | 1160 | return order; |
1160 | } | 1161 | } |
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h index dd86c5fc102d..d7d0f495a34e 100644 --- a/include/acpi/acpi_io.h +++ b/include/acpi/acpi_io.h | |||
@@ -13,7 +13,7 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, | |||
13 | } | 13 | } |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | void __iomem *__init_refok | 16 | void __iomem *__ref |
17 | acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); | 17 | acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); |
18 | void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size); | 18 | void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size); |
19 | void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); | 19 | void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); |
diff --git a/include/linux/capability.h b/include/linux/capability.h index 5f3c63dde2d5..dbc21c719ce6 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -38,6 +38,7 @@ struct cpu_vfs_cap_data { | |||
38 | struct file; | 38 | struct file; |
39 | struct inode; | 39 | struct inode; |
40 | struct dentry; | 40 | struct dentry; |
41 | struct task_struct; | ||
41 | struct user_namespace; | 42 | struct user_namespace; |
42 | 43 | ||
43 | extern const kernel_cap_t __cap_empty_set; | 44 | extern const kernel_cap_t __cap_empty_set; |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index e828cf65d7df..da7fbf1cdd56 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -579,7 +579,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, | |||
579 | } | 579 | } |
580 | 580 | ||
581 | /** | 581 | /** |
582 | * cpumask_parse - extract a cpumask from from a string | 582 | * cpumask_parse - extract a cpumask from a string |
583 | * @buf: the buffer to extract from | 583 | * @buf: the buffer to extract from |
584 | * @dstp: the cpumask to set. | 584 | * @dstp: the cpumask to set. |
585 | * | 585 | * |
diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 5c41c5e75b5c..b1f9f0ccb8ac 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h | |||
@@ -47,6 +47,8 @@ int request_firmware_nowait( | |||
47 | void (*cont)(const struct firmware *fw, void *context)); | 47 | void (*cont)(const struct firmware *fw, void *context)); |
48 | int request_firmware_direct(const struct firmware **fw, const char *name, | 48 | int request_firmware_direct(const struct firmware **fw, const char *name, |
49 | struct device *device); | 49 | struct device *device); |
50 | int request_firmware_into_buf(const struct firmware **firmware_p, | ||
51 | const char *name, struct device *device, void *buf, size_t size); | ||
50 | 52 | ||
51 | void release_firmware(const struct firmware *fw); | 53 | void release_firmware(const struct firmware *fw); |
52 | #else | 54 | #else |
@@ -75,5 +77,11 @@ static inline int request_firmware_direct(const struct firmware **fw, | |||
75 | return -EINVAL; | 77 | return -EINVAL; |
76 | } | 78 | } |
77 | 79 | ||
80 | static inline int request_firmware_into_buf(const struct firmware **firmware_p, | ||
81 | const char *name, struct device *device, void *buf, size_t size) | ||
82 | { | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
78 | #endif | 86 | #endif |
79 | #endif | 87 | #endif |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 577365a77b47..f3f0b4c8e8ac 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2652,6 +2652,7 @@ extern int do_pipe_flags(int *, int); | |||
2652 | #define __kernel_read_file_id(id) \ | 2652 | #define __kernel_read_file_id(id) \ |
2653 | id(UNKNOWN, unknown) \ | 2653 | id(UNKNOWN, unknown) \ |
2654 | id(FIRMWARE, firmware) \ | 2654 | id(FIRMWARE, firmware) \ |
2655 | id(FIRMWARE_PREALLOC_BUFFER, firmware) \ | ||
2655 | id(MODULE, kernel-module) \ | 2656 | id(MODULE, kernel-module) \ |
2656 | id(KEXEC_IMAGE, kexec-image) \ | 2657 | id(KEXEC_IMAGE, kexec-image) \ |
2657 | id(KEXEC_INITRAMFS, kexec-initramfs) \ | 2658 | id(KEXEC_INITRAMFS, kexec-initramfs) \ |
diff --git a/include/linux/init.h b/include/linux/init.h index aedb254abc37..6935d02474aa 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -77,12 +77,6 @@ | |||
77 | #define __refdata __section(.ref.data) | 77 | #define __refdata __section(.ref.data) |
78 | #define __refconst __constsection(.ref.rodata) | 78 | #define __refconst __constsection(.ref.rodata) |
79 | 79 | ||
80 | /* compatibility defines */ | ||
81 | #define __init_refok __ref | ||
82 | #define __initdata_refok __refdata | ||
83 | #define __exit_refok __ref | ||
84 | |||
85 | |||
86 | #ifdef MODULE | 80 | #ifdef MODULE |
87 | #define __exitused | 81 | #define __exitused |
88 | #else | 82 | #else |
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 1eee6bcfcf76..d10e54f03c09 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
@@ -63,8 +63,6 @@ struct ipc_namespace { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | extern struct ipc_namespace init_ipc_ns; | 65 | extern struct ipc_namespace init_ipc_ns; |
66 | extern atomic_t nr_ipc_ns; | ||
67 | |||
68 | extern spinlock_t mq_lock; | 66 | extern spinlock_t mq_lock; |
69 | 67 | ||
70 | #ifdef CONFIG_SYSVIPC | 68 | #ifdef CONFIG_SYSVIPC |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index c9cf374445d8..d600303306eb 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -56,6 +56,7 @@ void kasan_cache_destroy(struct kmem_cache *cache); | |||
56 | void kasan_poison_slab(struct page *page); | 56 | void kasan_poison_slab(struct page *page); |
57 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); | 57 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); |
58 | void kasan_poison_object_data(struct kmem_cache *cache, void *object); | 58 | void kasan_poison_object_data(struct kmem_cache *cache, void *object); |
59 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); | ||
59 | 60 | ||
60 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); | 61 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); |
61 | void kasan_kfree_large(const void *ptr); | 62 | void kasan_kfree_large(const void *ptr); |
@@ -102,6 +103,8 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache, | |||
102 | void *object) {} | 103 | void *object) {} |
103 | static inline void kasan_poison_object_data(struct kmem_cache *cache, | 104 | static inline void kasan_poison_object_data(struct kmem_cache *cache, |
104 | void *object) {} | 105 | void *object) {} |
106 | static inline void kasan_init_slab_obj(struct kmem_cache *cache, | ||
107 | const void *object) {} | ||
105 | 108 | ||
106 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} | 109 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} |
107 | static inline void kasan_kfree_large(const void *ptr) {} | 110 | static inline void kasan_kfree_large(const void *ptr) {} |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index c42082112ec8..d96a6118d26a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/log2.h> | 11 | #include <linux/log2.h> |
12 | #include <linux/typecheck.h> | 12 | #include <linux/typecheck.h> |
13 | #include <linux/printk.h> | 13 | #include <linux/printk.h> |
14 | #include <linux/dynamic_debug.h> | ||
15 | #include <asm/byteorder.h> | 14 | #include <asm/byteorder.h> |
16 | #include <uapi/linux/kernel.h> | 15 | #include <uapi/linux/kernel.h> |
17 | 16 | ||
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index e8acb2b43dd9..d7437777baaa 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | #if !defined(__ASSEMBLY__) | 15 | #if !defined(__ASSEMBLY__) |
16 | 16 | ||
17 | #include <asm/io.h> | ||
18 | |||
17 | #include <uapi/linux/kexec.h> | 19 | #include <uapi/linux/kexec.h> |
18 | 20 | ||
19 | #ifdef CONFIG_KEXEC_CORE | 21 | #ifdef CONFIG_KEXEC_CORE |
@@ -41,7 +43,7 @@ | |||
41 | #endif | 43 | #endif |
42 | 44 | ||
43 | #ifndef KEXEC_CONTROL_MEMORY_GFP | 45 | #ifndef KEXEC_CONTROL_MEMORY_GFP |
44 | #define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL | 46 | #define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY) |
45 | #endif | 47 | #endif |
46 | 48 | ||
47 | #ifndef KEXEC_CONTROL_PAGE_SIZE | 49 | #ifndef KEXEC_CONTROL_PAGE_SIZE |
@@ -228,12 +230,13 @@ extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, | |||
228 | extern void __crash_kexec(struct pt_regs *); | 230 | extern void __crash_kexec(struct pt_regs *); |
229 | extern void crash_kexec(struct pt_regs *); | 231 | extern void crash_kexec(struct pt_regs *); |
230 | int kexec_should_crash(struct task_struct *); | 232 | int kexec_should_crash(struct task_struct *); |
233 | int kexec_crash_loaded(void); | ||
231 | void crash_save_cpu(struct pt_regs *regs, int cpu); | 234 | void crash_save_cpu(struct pt_regs *regs, int cpu); |
232 | void crash_save_vmcoreinfo(void); | 235 | void crash_save_vmcoreinfo(void); |
233 | void arch_crash_save_vmcoreinfo(void); | 236 | void arch_crash_save_vmcoreinfo(void); |
234 | __printf(1, 2) | 237 | __printf(1, 2) |
235 | void vmcoreinfo_append_str(const char *fmt, ...); | 238 | void vmcoreinfo_append_str(const char *fmt, ...); |
236 | unsigned long paddr_vmcoreinfo_note(void); | 239 | phys_addr_t paddr_vmcoreinfo_note(void); |
237 | 240 | ||
238 | #define VMCOREINFO_OSRELEASE(value) \ | 241 | #define VMCOREINFO_OSRELEASE(value) \ |
239 | vmcoreinfo_append_str("OSRELEASE=%s\n", value) | 242 | vmcoreinfo_append_str("OSRELEASE=%s\n", value) |
@@ -318,12 +321,51 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | |||
318 | void arch_kexec_protect_crashkres(void); | 321 | void arch_kexec_protect_crashkres(void); |
319 | void arch_kexec_unprotect_crashkres(void); | 322 | void arch_kexec_unprotect_crashkres(void); |
320 | 323 | ||
324 | #ifndef page_to_boot_pfn | ||
325 | static inline unsigned long page_to_boot_pfn(struct page *page) | ||
326 | { | ||
327 | return page_to_pfn(page); | ||
328 | } | ||
329 | #endif | ||
330 | |||
331 | #ifndef boot_pfn_to_page | ||
332 | static inline struct page *boot_pfn_to_page(unsigned long boot_pfn) | ||
333 | { | ||
334 | return pfn_to_page(boot_pfn); | ||
335 | } | ||
336 | #endif | ||
337 | |||
338 | #ifndef phys_to_boot_phys | ||
339 | static inline unsigned long phys_to_boot_phys(phys_addr_t phys) | ||
340 | { | ||
341 | return phys; | ||
342 | } | ||
343 | #endif | ||
344 | |||
345 | #ifndef boot_phys_to_phys | ||
346 | static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) | ||
347 | { | ||
348 | return boot_phys; | ||
349 | } | ||
350 | #endif | ||
351 | |||
352 | static inline unsigned long virt_to_boot_phys(void *addr) | ||
353 | { | ||
354 | return phys_to_boot_phys(__pa((unsigned long)addr)); | ||
355 | } | ||
356 | |||
357 | static inline void *boot_phys_to_virt(unsigned long entry) | ||
358 | { | ||
359 | return phys_to_virt(boot_phys_to_phys(entry)); | ||
360 | } | ||
361 | |||
321 | #else /* !CONFIG_KEXEC_CORE */ | 362 | #else /* !CONFIG_KEXEC_CORE */ |
322 | struct pt_regs; | 363 | struct pt_regs; |
323 | struct task_struct; | 364 | struct task_struct; |
324 | static inline void __crash_kexec(struct pt_regs *regs) { } | 365 | static inline void __crash_kexec(struct pt_regs *regs) { } |
325 | static inline void crash_kexec(struct pt_regs *regs) { } | 366 | static inline void crash_kexec(struct pt_regs *regs) { } |
326 | static inline int kexec_should_crash(struct task_struct *p) { return 0; } | 367 | static inline int kexec_should_crash(struct task_struct *p) { return 0; } |
368 | static inline int kexec_crash_loaded(void) { return 0; } | ||
327 | #define kexec_in_progress false | 369 | #define kexec_in_progress false |
328 | #endif /* CONFIG_KEXEC_CORE */ | 370 | #endif /* CONFIG_KEXEC_CORE */ |
329 | 371 | ||
diff --git a/include/linux/mman.h b/include/linux/mman.h index 33e17f6a327a..634c4c51fe3a 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h | |||
@@ -49,7 +49,7 @@ static inline void vm_unacct_memory(long pages) | |||
49 | * | 49 | * |
50 | * Returns true if the prot flags are valid | 50 | * Returns true if the prot flags are valid |
51 | */ | 51 | */ |
52 | static inline int arch_validate_prot(unsigned long prot) | 52 | static inline bool arch_validate_prot(unsigned long prot) |
53 | { | 53 | { |
54 | return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; | 54 | return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; |
55 | } | 55 | } |
diff --git a/include/linux/printk.h b/include/linux/printk.h index f136b22c7772..8dc155dab3ed 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -61,6 +61,11 @@ static inline void console_verbose(void) | |||
61 | console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; | 61 | console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; |
62 | } | 62 | } |
63 | 63 | ||
64 | /* strlen("ratelimit") + 1 */ | ||
65 | #define DEVKMSG_STR_MAX_SIZE 10 | ||
66 | extern char devkmsg_log_str[]; | ||
67 | struct ctl_table; | ||
68 | |||
64 | struct va_format { | 69 | struct va_format { |
65 | const char *fmt; | 70 | const char *fmt; |
66 | va_list *va; | 71 | va_list *va; |
@@ -175,6 +180,10 @@ extern int printk_delay_msec; | |||
175 | extern int dmesg_restrict; | 180 | extern int dmesg_restrict; |
176 | extern int kptr_restrict; | 181 | extern int kptr_restrict; |
177 | 182 | ||
183 | extern int | ||
184 | devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, | ||
185 | size_t *lenp, loff_t *ppos); | ||
186 | |||
178 | extern void wake_up_klogd(void); | 187 | extern void wake_up_klogd(void); |
179 | 188 | ||
180 | char *log_buf_addr_get(void); | 189 | char *log_buf_addr_get(void); |
@@ -257,21 +266,39 @@ extern asmlinkage void dump_stack(void) __cold; | |||
257 | * and other debug macros are compiled out unless either DEBUG is defined | 266 | * and other debug macros are compiled out unless either DEBUG is defined |
258 | * or CONFIG_DYNAMIC_DEBUG is set. | 267 | * or CONFIG_DYNAMIC_DEBUG is set. |
259 | */ | 268 | */ |
260 | #define pr_emerg(fmt, ...) \ | 269 | |
261 | printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | 270 | #ifdef CONFIG_PRINTK |
262 | #define pr_alert(fmt, ...) \ | 271 | |
263 | printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | 272 | asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...); |
264 | #define pr_crit(fmt, ...) \ | 273 | asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...); |
265 | printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | 274 | asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...); |
266 | #define pr_err(fmt, ...) \ | 275 | asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...); |
267 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | 276 | asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...); |
268 | #define pr_warning(fmt, ...) \ | 277 | asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...); |
269 | printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | 278 | asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...); |
270 | #define pr_warn pr_warning | 279 | |
271 | #define pr_notice(fmt, ...) \ | 280 | #define pr_emerg(fmt, ...) __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__) |
272 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | 281 | #define pr_alert(fmt, ...) __pr_alert(pr_fmt(fmt), ##__VA_ARGS__) |
273 | #define pr_info(fmt, ...) \ | 282 | #define pr_crit(fmt, ...) __pr_crit(pr_fmt(fmt), ##__VA_ARGS__) |
274 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | 283 | #define pr_err(fmt, ...) __pr_err(pr_fmt(fmt), ##__VA_ARGS__) |
284 | #define pr_warn(fmt, ...) __pr_warn(pr_fmt(fmt), ##__VA_ARGS__) | ||
285 | #define pr_notice(fmt, ...) __pr_notice(pr_fmt(fmt), ##__VA_ARGS__) | ||
286 | #define pr_info(fmt, ...) __pr_info(pr_fmt(fmt), ##__VA_ARGS__) | ||
287 | |||
288 | #else | ||
289 | |||
290 | #define pr_emerg(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
291 | #define pr_alert(fmt, ...) printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
292 | #define pr_crit(fmt, ...) printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
293 | #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
294 | #define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
295 | #define pr_notice(fmt, ...) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
296 | #define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
297 | |||
298 | #endif | ||
299 | |||
300 | #define pr_warning pr_warn | ||
301 | |||
275 | /* | 302 | /* |
276 | * Like KERN_CONT, pr_cont() should only be used when continuing | 303 | * Like KERN_CONT, pr_cont() should only be used when continuing |
277 | * a line with no newline ('\n') enclosed. Otherwise it defaults | 304 | * a line with no newline ('\n') enclosed. Otherwise it defaults |
@@ -289,10 +316,11 @@ extern asmlinkage void dump_stack(void) __cold; | |||
289 | no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | 316 | no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
290 | #endif | 317 | #endif |
291 | 318 | ||
292 | #include <linux/dynamic_debug.h> | ||
293 | 319 | ||
294 | /* If you are writing a driver, please use dev_dbg instead */ | 320 | /* If you are writing a driver, please use dev_dbg instead */ |
295 | #if defined(CONFIG_DYNAMIC_DEBUG) | 321 | #if defined(CONFIG_DYNAMIC_DEBUG) |
322 | #include <linux/dynamic_debug.h> | ||
323 | |||
296 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | 324 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ |
297 | #define pr_debug(fmt, ...) \ | 325 | #define pr_debug(fmt, ...) \ |
298 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | 326 | dynamic_pr_debug(fmt, ##__VA_ARGS__) |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index cbfee507c839..4c45105dece3 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -35,7 +35,7 @@ | |||
35 | * 00 - data pointer | 35 | * 00 - data pointer |
36 | * 01 - internal entry | 36 | * 01 - internal entry |
37 | * 10 - exceptional entry | 37 | * 10 - exceptional entry |
38 | * 11 - locked exceptional entry | 38 | * 11 - this bit combination is currently unused/reserved |
39 | * | 39 | * |
40 | * The internal entry may be a pointer to the next level in the tree, a | 40 | * The internal entry may be a pointer to the next level in the tree, a |
41 | * sibling entry, or an indicator that the entry in this slot has been moved | 41 | * sibling entry, or an indicator that the entry in this slot has been moved |
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 18102529254e..57c9e0622a38 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
@@ -2,11 +2,15 @@ | |||
2 | #define _LINUX_RATELIMIT_H | 2 | #define _LINUX_RATELIMIT_H |
3 | 3 | ||
4 | #include <linux/param.h> | 4 | #include <linux/param.h> |
5 | #include <linux/sched.h> | ||
5 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
6 | 7 | ||
7 | #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) | 8 | #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) |
8 | #define DEFAULT_RATELIMIT_BURST 10 | 9 | #define DEFAULT_RATELIMIT_BURST 10 |
9 | 10 | ||
11 | /* issue num suppressed message on exit */ | ||
12 | #define RATELIMIT_MSG_ON_RELEASE BIT(0) | ||
13 | |||
10 | struct ratelimit_state { | 14 | struct ratelimit_state { |
11 | raw_spinlock_t lock; /* protect the state */ | 15 | raw_spinlock_t lock; /* protect the state */ |
12 | 16 | ||
@@ -15,6 +19,7 @@ struct ratelimit_state { | |||
15 | int printed; | 19 | int printed; |
16 | int missed; | 20 | int missed; |
17 | unsigned long begin; | 21 | unsigned long begin; |
22 | unsigned long flags; | ||
18 | }; | 23 | }; |
19 | 24 | ||
20 | #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ | 25 | #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ |
@@ -34,12 +39,35 @@ struct ratelimit_state { | |||
34 | static inline void ratelimit_state_init(struct ratelimit_state *rs, | 39 | static inline void ratelimit_state_init(struct ratelimit_state *rs, |
35 | int interval, int burst) | 40 | int interval, int burst) |
36 | { | 41 | { |
42 | memset(rs, 0, sizeof(*rs)); | ||
43 | |||
37 | raw_spin_lock_init(&rs->lock); | 44 | raw_spin_lock_init(&rs->lock); |
38 | rs->interval = interval; | 45 | rs->interval = interval; |
39 | rs->burst = burst; | 46 | rs->burst = burst; |
40 | rs->printed = 0; | 47 | } |
41 | rs->missed = 0; | 48 | |
42 | rs->begin = 0; | 49 | static inline void ratelimit_default_init(struct ratelimit_state *rs) |
50 | { | ||
51 | return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL, | ||
52 | DEFAULT_RATELIMIT_BURST); | ||
53 | } | ||
54 | |||
55 | static inline void ratelimit_state_exit(struct ratelimit_state *rs) | ||
56 | { | ||
57 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) | ||
58 | return; | ||
59 | |||
60 | if (rs->missed) { | ||
61 | pr_warn("%s: %d output lines suppressed due to ratelimiting\n", | ||
62 | current->comm, rs->missed); | ||
63 | rs->missed = 0; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static inline void | ||
68 | ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) | ||
69 | { | ||
70 | rs->flags = flags; | ||
43 | } | 71 | } |
44 | 72 | ||
45 | extern struct ratelimit_state printk_ratelimit_state; | 73 | extern struct ratelimit_state printk_ratelimit_state; |
diff --git a/include/linux/rio.h b/include/linux/rio.h index aa2323893e8d..37b95c4af99d 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
@@ -163,6 +163,7 @@ enum rio_device_state { | |||
163 | * @dst_ops: Destination operation capabilities | 163 | * @dst_ops: Destination operation capabilities |
164 | * @comp_tag: RIO component tag | 164 | * @comp_tag: RIO component tag |
165 | * @phys_efptr: RIO device extended features pointer | 165 | * @phys_efptr: RIO device extended features pointer |
166 | * @phys_rmap: LP-Serial Register Map Type (1 or 2) | ||
166 | * @em_efptr: RIO Error Management features pointer | 167 | * @em_efptr: RIO Error Management features pointer |
167 | * @dma_mask: Mask of bits of RIO address this device implements | 168 | * @dma_mask: Mask of bits of RIO address this device implements |
168 | * @driver: Driver claiming this device | 169 | * @driver: Driver claiming this device |
@@ -193,6 +194,7 @@ struct rio_dev { | |||
193 | u32 dst_ops; | 194 | u32 dst_ops; |
194 | u32 comp_tag; | 195 | u32 comp_tag; |
195 | u32 phys_efptr; | 196 | u32 phys_efptr; |
197 | u32 phys_rmap; | ||
196 | u32 em_efptr; | 198 | u32 em_efptr; |
197 | u64 dma_mask; | 199 | u64 dma_mask; |
198 | struct rio_driver *driver; /* RIO driver claiming this device */ | 200 | struct rio_driver *driver; /* RIO driver claiming this device */ |
@@ -237,11 +239,6 @@ struct rio_dbell { | |||
237 | void *dev_id; | 239 | void *dev_id; |
238 | }; | 240 | }; |
239 | 241 | ||
240 | enum rio_phy_type { | ||
241 | RIO_PHY_PARALLEL, | ||
242 | RIO_PHY_SERIAL, | ||
243 | }; | ||
244 | |||
245 | /** | 242 | /** |
246 | * struct rio_mport - RIO master port info | 243 | * struct rio_mport - RIO master port info |
247 | * @dbells: List of doorbell events | 244 | * @dbells: List of doorbell events |
@@ -259,8 +256,8 @@ enum rio_phy_type { | |||
259 | * @id: Port ID, unique among all ports | 256 | * @id: Port ID, unique among all ports |
260 | * @index: Port index, unique among all port interfaces of the same type | 257 | * @index: Port index, unique among all port interfaces of the same type |
261 | * @sys_size: RapidIO common transport system size | 258 | * @sys_size: RapidIO common transport system size |
262 | * @phy_type: RapidIO phy type | ||
263 | * @phys_efptr: RIO port extended features pointer | 259 | * @phys_efptr: RIO port extended features pointer |
260 | * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2). | ||
264 | * @name: Port name string | 261 | * @name: Port name string |
265 | * @dev: device structure associated with an mport | 262 | * @dev: device structure associated with an mport |
266 | * @priv: Master port private data | 263 | * @priv: Master port private data |
@@ -289,8 +286,8 @@ struct rio_mport { | |||
289 | * 0 - Small size. 256 devices. | 286 | * 0 - Small size. 256 devices. |
290 | * 1 - Large size, 65536 devices. | 287 | * 1 - Large size, 65536 devices. |
291 | */ | 288 | */ |
292 | enum rio_phy_type phy_type; /* RapidIO phy type */ | ||
293 | u32 phys_efptr; | 289 | u32 phys_efptr; |
290 | u32 phys_rmap; | ||
294 | unsigned char name[RIO_MAX_MPORT_NAME]; | 291 | unsigned char name[RIO_MAX_MPORT_NAME]; |
295 | struct device dev; | 292 | struct device dev; |
296 | void *priv; /* Master port private data */ | 293 | void *priv; /* Master port private data */ |
@@ -425,7 +422,7 @@ struct rio_ops { | |||
425 | int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); | 422 | int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); |
426 | void *(*get_inb_message)(struct rio_mport *mport, int mbox); | 423 | void *(*get_inb_message)(struct rio_mport *mport, int mbox); |
427 | int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, | 424 | int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, |
428 | u64 rstart, u32 size, u32 flags); | 425 | u64 rstart, u64 size, u32 flags); |
429 | void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); | 426 | void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); |
430 | int (*query_mport)(struct rio_mport *mport, | 427 | int (*query_mport)(struct rio_mport *mport, |
431 | struct rio_mport_attr *attr); | 428 | struct rio_mport_attr *attr); |
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 2543bc163d54..334c576c151c 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h | |||
@@ -38,5 +38,7 @@ | |||
38 | #define RIO_DID_IDTVPS1616 0x0377 | 38 | #define RIO_DID_IDTVPS1616 0x0377 |
39 | #define RIO_DID_IDTSPS1616 0x0378 | 39 | #define RIO_DID_IDTSPS1616 0x0378 |
40 | #define RIO_DID_TSI721 0x80ab | 40 | #define RIO_DID_TSI721 0x80ab |
41 | #define RIO_DID_IDTRXS1632 0x80e5 | ||
42 | #define RIO_DID_IDTRXS2448 0x80e6 | ||
41 | 43 | ||
42 | #endif /* LINUX_RIO_IDS_H */ | 44 | #endif /* LINUX_RIO_IDS_H */ |
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index 1063ae382bc2..40c04efe7409 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h | |||
@@ -42,9 +42,11 @@ | |||
42 | #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ | 42 | #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ |
43 | #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ | 43 | #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ |
44 | #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ | 44 | #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ |
45 | #define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common TRansport Dev32 */ | ||
45 | #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ | 46 | #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ |
46 | #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ | 47 | #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ |
47 | #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ | 48 | #define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */ |
49 | #define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */ | ||
48 | #define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ | 50 | #define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ |
49 | #define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ | 51 | #define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ |
50 | #define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ | 52 | #define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ |
@@ -194,70 +196,101 @@ | |||
194 | #define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) | 196 | #define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) |
195 | 197 | ||
196 | /* Extended Feature Block IDs */ | 198 | /* Extended Feature Block IDs */ |
197 | #define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ | 199 | #define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */ |
198 | #define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ | 200 | #define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */ |
199 | #define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ | 201 | #define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free Devices, Map I */ |
200 | #define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */ | 202 | #define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */ |
201 | #define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */ | 203 | #define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */ |
202 | #define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */ | 204 | #define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */ |
203 | #define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ | ||
204 | #define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ | ||
205 | #define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ | ||
206 | #define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */ | ||
207 | #define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ | 205 | #define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ |
206 | #define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */ | ||
207 | #define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */ | ||
208 | #define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */ | ||
209 | #define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */ | ||
210 | #define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */ | ||
211 | #define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */ | ||
212 | #define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */ | ||
208 | 213 | ||
209 | /* | 214 | /* |
210 | * Physical 8/16 LP-LVDS | 215 | * Physical LP-Serial Registers Definitions |
211 | * ID=0x0001, Generic End Point Devices | 216 | * Parameters in register macros: |
212 | * ID=0x0002, Generic End Point Devices, software assisted recovery option | 217 | * n - port number, m - Register Map Type (1 or 2) |
213 | * ID=0x0003, Generic End Point Free Devices | ||
214 | * | ||
215 | * Physical LP-Serial | ||
216 | * ID=0x0004, Generic End Point Devices | ||
217 | * ID=0x0005, Generic End Point Devices, software assisted recovery option | ||
218 | * ID=0x0006, Generic End Point Free Devices | ||
219 | */ | 218 | */ |
220 | #define RIO_PORT_MNT_HEADER 0x0000 | 219 | #define RIO_PORT_MNT_HEADER 0x0000 |
221 | #define RIO_PORT_REQ_CTL_CSR 0x0020 | 220 | #define RIO_PORT_REQ_CTL_CSR 0x0020 |
222 | #define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ | 221 | #define RIO_PORT_RSP_CTL_CSR 0x0024 |
223 | #define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */ | 222 | #define RIO_PORT_LINKTO_CTL_CSR 0x0020 |
224 | #define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */ | 223 | #define RIO_PORT_RSPTO_CTL_CSR 0x0024 |
225 | #define RIO_PORT_GEN_CTL_CSR 0x003c | 224 | #define RIO_PORT_GEN_CTL_CSR 0x003c |
226 | #define RIO_PORT_GEN_HOST 0x80000000 | 225 | #define RIO_PORT_GEN_HOST 0x80000000 |
227 | #define RIO_PORT_GEN_MASTER 0x40000000 | 226 | #define RIO_PORT_GEN_MASTER 0x40000000 |
228 | #define RIO_PORT_GEN_DISCOVERED 0x20000000 | 227 | #define RIO_PORT_GEN_DISCOVERED 0x20000000 |
229 | #define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ | 228 | #define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m))) |
230 | #define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ | 229 | #define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ |
231 | #define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ | 230 | #define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ |
232 | #define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ | 231 | #define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m))) |
233 | #define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ | 232 | #define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ |
234 | #define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ | 233 | #define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ |
235 | #define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ | 234 | #define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ |
236 | #define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ | 235 | #define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */ |
237 | #define RIO_PORT_N_ACK_CLEAR 0x80000000 | 236 | #define RIO_PORT_N_ACK_CLEAR 0x80000000 |
238 | #define RIO_PORT_N_ACK_INBOUND 0x3f000000 | 237 | #define RIO_PORT_N_ACK_INBOUND 0x3f000000 |
239 | #define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 | 238 | #define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 |
240 | #define RIO_PORT_N_ACK_OUTBOUND 0x0000003f | 239 | #define RIO_PORT_N_ACK_OUTBOUND 0x0000003f |
241 | #define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20) | 240 | #define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m))) |
242 | #define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000 | 241 | #define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000 |
243 | #define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20) | 242 | #define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m))) |
244 | #define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */ | 243 | #define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */ |
245 | #define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */ | 244 | #define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */ |
246 | #define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ | 245 | #define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ |
246 | #define RIO_PORT_N_ERR_STS_PORT_UA 0x00000008 /* Port Unavailable */ | ||
247 | #define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 | 247 | #define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 |
248 | #define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 | 248 | #define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 |
249 | #define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 | 249 | #define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 |
250 | #define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20) | 250 | #define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m))) |
251 | #define RIO_PORT_N_CTL_PWIDTH 0xc0000000 | 251 | #define RIO_PORT_N_CTL_PWIDTH 0xc0000000 |
252 | #define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 | 252 | #define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 |
253 | #define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 | 253 | #define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 |
254 | #define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */ | 254 | #define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */ |
255 | #define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 | 255 | #define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 |
256 | #define RIO_PORT_N_CTL_LOCKOUT 0x00000002 | 256 | #define RIO_PORT_N_CTL_LOCKOUT 0x00000002 |
257 | #define RIO_PORT_N_CTL_EN_RX_SER 0x00200000 | 257 | #define RIO_PORT_N_CTL_EN_RX 0x00200000 |
258 | #define RIO_PORT_N_CTL_EN_TX_SER 0x00400000 | 258 | #define RIO_PORT_N_CTL_EN_TX 0x00400000 |
259 | #define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000 | 259 | #define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */ |
260 | #define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000 | 260 | #define RIO_PORT_N_OB_ACK_CLEAR 0x80000000 |
261 | #define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000 | ||
262 | #define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff | ||
263 | #define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */ | ||
264 | #define RIO_PORT_N_IB_ACK_INBND 0x00000fff | ||
265 | |||
266 | /* | ||
267 | * Device-based helper macros for serial port register access. | ||
268 | * d - pointer to rapidio device object, n - port number | ||
269 | */ | ||
270 | |||
271 | #define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \ | ||
272 | (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap)) | ||
273 | |||
274 | #define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \ | ||
275 | (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap)) | ||
276 | |||
277 | #define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \ | ||
278 | (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n)) | ||
279 | |||
280 | #define RIO_DEV_PORT_N_CTL2_CSR(d, n) \ | ||
281 | (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap)) | ||
282 | |||
283 | #define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \ | ||
284 | (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap)) | ||
285 | |||
286 | #define RIO_DEV_PORT_N_CTL_CSR(d, n) \ | ||
287 | (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap)) | ||
288 | |||
289 | #define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \ | ||
290 | (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n)) | ||
291 | |||
292 | #define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \ | ||
293 | (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n)) | ||
261 | 294 | ||
262 | /* | 295 | /* |
263 | * Error Management Extensions (RapidIO 1.3+, Part 8) | 296 | * Error Management Extensions (RapidIO 1.3+, Part 8) |
@@ -268,6 +301,7 @@ | |||
268 | /* General EM Registers (Common for all Ports) */ | 301 | /* General EM Registers (Common for all Ports) */ |
269 | 302 | ||
270 | #define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ | 303 | #define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ |
304 | #define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */ | ||
271 | #define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ | 305 | #define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ |
272 | #define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ | 306 | #define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ |
273 | #define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ | 307 | #define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ |
@@ -278,15 +312,33 @@ | |||
278 | #define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ | 312 | #define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ |
279 | #define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ | 313 | #define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ |
280 | #define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ | 314 | #define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ |
315 | #define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */ | ||
316 | #define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */ | ||
281 | #define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ | 317 | #define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ |
318 | #define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */ | ||
319 | #define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */ | ||
320 | #define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID16 LSB or DID8 */ | ||
321 | #define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID16 LSB or DID8 */ | ||
282 | #define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ | 322 | #define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ |
323 | #define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet Time-to-live value */ | ||
324 | #define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */ | ||
325 | #define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */ | ||
326 | #define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */ | ||
283 | 327 | ||
284 | /* Per-Port EM Registers */ | 328 | /* Per-Port EM Registers */ |
285 | 329 | ||
286 | #define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ | 330 | #define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ |
287 | #define REM_PED_IMPL_SPEC 0x80000000 | 331 | #define REM_PED_IMPL_SPEC 0x80000000 |
332 | #define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */ | ||
333 | #define REM_PED_LINK_UPDA 0x20000000 /* Link Uninit Packet Discard Active */ | ||
334 | #define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */ | ||
288 | #define REM_PED_LINK_TO 0x00000001 | 335 | #define REM_PED_LINK_TO 0x00000001 |
336 | |||
289 | #define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ | 337 | #define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ |
338 | #define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */ | ||
339 | #define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */ | ||
340 | #define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */ | ||
341 | |||
290 | #define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ | 342 | #define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ |
291 | #define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ | 343 | #define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ |
292 | #define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ | 344 | #define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ |
@@ -294,5 +346,50 @@ | |||
294 | #define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ | 346 | #define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ |
295 | #define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ | 347 | #define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ |
296 | #define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ | 348 | #define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ |
349 | #define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */ | ||
350 | #define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */ | ||
351 | |||
352 | /* | ||
353 | * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3) | ||
354 | * Register offsets are defined from beginning of the block. | ||
355 | */ | ||
356 | |||
357 | /* Broadcast Routing Table Control CSR */ | ||
358 | #define RIO_BC_RT_CTL_CSR 0x020 | ||
359 | #define RIO_RT_CTL_THREE_LVL 0x80000000 | ||
360 | #define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000 | ||
361 | #define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */ | ||
362 | |||
363 | /* Broadcast Level 0 Info CSR */ | ||
364 | #define RIO_BC_RT_LVL0_INFO_CSR 0x030 | ||
365 | #define RIO_RT_L0I_NUM_GR 0xff000000 | ||
366 | #define RIO_RT_L0I_GR_PTR 0x00fffc00 | ||
367 | |||
368 | /* Broadcast Level 1 Info CSR */ | ||
369 | #define RIO_BC_RT_LVL1_INFO_CSR 0x034 | ||
370 | #define RIO_RT_L1I_NUM_GR 0xff000000 | ||
371 | #define RIO_RT_L1I_GR_PTR 0x00fffc00 | ||
372 | |||
373 | /* Broadcast Level 2 Info CSR */ | ||
374 | #define RIO_BC_RT_LVL2_INFO_CSR 0x038 | ||
375 | #define RIO_RT_L2I_NUM_GR 0xff000000 | ||
376 | #define RIO_RT_L2I_GR_PTR 0x00fffc00 | ||
377 | |||
378 | /* Per-Port Routing Table registers. | ||
379 | * Register fields defined in the broadcast section above are | ||
380 | * applicable to the corresponding registers below. | ||
381 | */ | ||
382 | #define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x)) | ||
383 | #define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x)) | ||
384 | #define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x)) | ||
385 | #define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x)) | ||
386 | |||
387 | /* Register Formats for Routing Table Group entry. | ||
388 | * Register offsets are calculated using GR_PTR field in the corresponding | ||
389 | * table Level_N and group/entry numbers (see RapidIO 3.0+ Part 3). | ||
390 | */ | ||
391 | #define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000 | ||
392 | #define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff | ||
393 | #define RIO_RT_ENTRY_DROP_PKT 0x300 | ||
297 | 394 | ||
298 | #endif /* LINUX_RIO_REGS_H */ | 395 | #endif /* LINUX_RIO_REGS_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 553af2923824..62c68e513e39 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1547,6 +1547,9 @@ struct task_struct { | |||
1547 | /* unserialized, strictly 'current' */ | 1547 | /* unserialized, strictly 'current' */ |
1548 | unsigned in_execve:1; /* bit to tell LSMs we're in execve */ | 1548 | unsigned in_execve:1; /* bit to tell LSMs we're in execve */ |
1549 | unsigned in_iowait:1; | 1549 | unsigned in_iowait:1; |
1550 | #if !defined(TIF_RESTORE_SIGMASK) | ||
1551 | unsigned restore_sigmask:1; | ||
1552 | #endif | ||
1550 | #ifdef CONFIG_MEMCG | 1553 | #ifdef CONFIG_MEMCG |
1551 | unsigned memcg_may_oom:1; | 1554 | unsigned memcg_may_oom:1; |
1552 | #ifndef CONFIG_SLOB | 1555 | #ifndef CONFIG_SLOB |
@@ -2680,6 +2683,66 @@ extern void sigqueue_free(struct sigqueue *); | |||
2680 | extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); | 2683 | extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); |
2681 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); | 2684 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); |
2682 | 2685 | ||
2686 | #ifdef TIF_RESTORE_SIGMASK | ||
2687 | /* | ||
2688 | * Legacy restore_sigmask accessors. These are inefficient on | ||
2689 | * SMP architectures because they require atomic operations. | ||
2690 | */ | ||
2691 | |||
2692 | /** | ||
2693 | * set_restore_sigmask() - make sure saved_sigmask processing gets done | ||
2694 | * | ||
2695 | * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code | ||
2696 | * will run before returning to user mode, to process the flag. For | ||
2697 | * all callers, TIF_SIGPENDING is already set or it's no harm to set | ||
2698 | * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the | ||
2699 | * arch code will notice on return to user mode, in case those bits | ||
2700 | * are scarce. We set TIF_SIGPENDING here to ensure that the arch | ||
2701 | * signal code always gets run when TIF_RESTORE_SIGMASK is set. | ||
2702 | */ | ||
2703 | static inline void set_restore_sigmask(void) | ||
2704 | { | ||
2705 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
2706 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
2707 | } | ||
2708 | static inline void clear_restore_sigmask(void) | ||
2709 | { | ||
2710 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
2711 | } | ||
2712 | static inline bool test_restore_sigmask(void) | ||
2713 | { | ||
2714 | return test_thread_flag(TIF_RESTORE_SIGMASK); | ||
2715 | } | ||
2716 | static inline bool test_and_clear_restore_sigmask(void) | ||
2717 | { | ||
2718 | return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
2719 | } | ||
2720 | |||
2721 | #else /* TIF_RESTORE_SIGMASK */ | ||
2722 | |||
2723 | /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ | ||
2724 | static inline void set_restore_sigmask(void) | ||
2725 | { | ||
2726 | current->restore_sigmask = true; | ||
2727 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
2728 | } | ||
2729 | static inline void clear_restore_sigmask(void) | ||
2730 | { | ||
2731 | current->restore_sigmask = false; | ||
2732 | } | ||
2733 | static inline bool test_restore_sigmask(void) | ||
2734 | { | ||
2735 | return current->restore_sigmask; | ||
2736 | } | ||
2737 | static inline bool test_and_clear_restore_sigmask(void) | ||
2738 | { | ||
2739 | if (!current->restore_sigmask) | ||
2740 | return false; | ||
2741 | current->restore_sigmask = false; | ||
2742 | return true; | ||
2743 | } | ||
2744 | #endif | ||
2745 | |||
2683 | static inline void restore_saved_sigmask(void) | 2746 | static inline void restore_saved_sigmask(void) |
2684 | { | 2747 | { |
2685 | if (test_and_clear_restore_sigmask()) | 2748 | if (test_and_clear_restore_sigmask()) |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index fa7bc29925c9..697e160c78d0 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <uapi/linux/sysctl.h> | 28 | #include <uapi/linux/sysctl.h> |
29 | 29 | ||
30 | /* For the /proc/sys support */ | 30 | /* For the /proc/sys support */ |
31 | struct completion; | ||
31 | struct ctl_table; | 32 | struct ctl_table; |
32 | struct nsproxy; | 33 | struct nsproxy; |
33 | struct ctl_table_root; | 34 | struct ctl_table_root; |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index b4c2a485b28a..352b1542f5cc 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -105,47 +105,6 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) | |||
105 | 105 | ||
106 | #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) | 106 | #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) |
107 | 107 | ||
108 | #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK | ||
109 | /* | ||
110 | * An arch can define its own version of set_restore_sigmask() to get the | ||
111 | * job done however works, with or without TIF_RESTORE_SIGMASK. | ||
112 | */ | ||
113 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
114 | |||
115 | /** | ||
116 | * set_restore_sigmask() - make sure saved_sigmask processing gets done | ||
117 | * | ||
118 | * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code | ||
119 | * will run before returning to user mode, to process the flag. For | ||
120 | * all callers, TIF_SIGPENDING is already set or it's no harm to set | ||
121 | * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the | ||
122 | * arch code will notice on return to user mode, in case those bits | ||
123 | * are scarce. We set TIF_SIGPENDING here to ensure that the arch | ||
124 | * signal code always gets run when TIF_RESTORE_SIGMASK is set. | ||
125 | */ | ||
126 | static inline void set_restore_sigmask(void) | ||
127 | { | ||
128 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
129 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | ||
130 | } | ||
131 | static inline void clear_restore_sigmask(void) | ||
132 | { | ||
133 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
134 | } | ||
135 | static inline bool test_restore_sigmask(void) | ||
136 | { | ||
137 | return test_thread_flag(TIF_RESTORE_SIGMASK); | ||
138 | } | ||
139 | static inline bool test_and_clear_restore_sigmask(void) | ||
140 | { | ||
141 | return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
142 | } | ||
143 | #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */ | ||
144 | |||
145 | #ifndef HAVE_SET_RESTORE_SIGMASK | ||
146 | #error "no set_restore_sigmask() provided and default one won't work" | ||
147 | #endif | ||
148 | |||
149 | #endif /* __KERNEL__ */ | 108 | #endif /* __KERNEL__ */ |
150 | 109 | ||
151 | #endif /* _LINUX_THREAD_INFO_H */ | 110 | #endif /* _LINUX_THREAD_INFO_H */ |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 4089abc6e9c0..0933c7455a30 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -275,7 +275,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet) | |||
275 | #define __net_initconst | 275 | #define __net_initconst |
276 | #else | 276 | #else |
277 | #define __net_init __init | 277 | #define __net_init __init |
278 | #define __net_exit __exit_refok | 278 | #define __net_exit __ref |
279 | #define __net_initdata __initdata | 279 | #define __net_initdata __initdata |
280 | #define __net_initconst __initconst | 280 | #define __net_initconst __initconst |
281 | #endif | 281 | #endif |
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 6d4e92ccdc91..c44747c0796a 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -357,6 +357,7 @@ header-y += reiserfs_fs.h | |||
357 | header-y += reiserfs_xattr.h | 357 | header-y += reiserfs_xattr.h |
358 | header-y += resource.h | 358 | header-y += resource.h |
359 | header-y += rfkill.h | 359 | header-y += rfkill.h |
360 | header-y += rio_cm_cdev.h | ||
360 | header-y += rio_mport_cdev.h | 361 | header-y += rio_mport_cdev.h |
361 | header-y += romfs_fs.h | 362 | header-y += romfs_fs.h |
362 | header-y += rose.h | 363 | header-y += rose.h |
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 12c37a197d24..49bc06295398 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h | |||
@@ -15,8 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | struct task_struct; | ||
19 | |||
20 | /* User-level do most of the mapping between kernel and user | 18 | /* User-level do most of the mapping between kernel and user |
21 | capabilities based on the version tag given by the kernel. The | 19 | capabilities based on the version tag given by the kernel. The |
22 | kernel might be somewhat backwards compatible, but don't bet on | 20 | kernel might be somewhat backwards compatible, but don't bet on |
diff --git a/include/uapi/linux/nilfs2_api.h b/include/uapi/linux/nilfs2_api.h new file mode 100644 index 000000000000..ef4c1de89b11 --- /dev/null +++ b/include/uapi/linux/nilfs2_api.h | |||
@@ -0,0 +1,292 @@ | |||
1 | /* | ||
2 | * nilfs2_api.h - NILFS2 user space API | ||
3 | * | ||
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU Lesser General Public License as published | ||
8 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_NILFS2_API_H | ||
13 | #define _LINUX_NILFS2_API_H | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/ioctl.h> | ||
17 | |||
18 | /** | ||
19 | * struct nilfs_cpinfo - checkpoint information | ||
20 | * @ci_flags: flags | ||
21 | * @ci_pad: padding | ||
22 | * @ci_cno: checkpoint number | ||
23 | * @ci_create: creation timestamp | ||
24 | * @ci_nblk_inc: number of blocks incremented by this checkpoint | ||
25 | * @ci_inodes_count: inodes count | ||
26 | * @ci_blocks_count: blocks count | ||
27 | * @ci_next: next checkpoint number in snapshot list | ||
28 | */ | ||
29 | struct nilfs_cpinfo { | ||
30 | __u32 ci_flags; | ||
31 | __u32 ci_pad; | ||
32 | __u64 ci_cno; | ||
33 | __u64 ci_create; | ||
34 | __u64 ci_nblk_inc; | ||
35 | __u64 ci_inodes_count; | ||
36 | __u64 ci_blocks_count; | ||
37 | __u64 ci_next; | ||
38 | }; | ||
39 | |||
40 | /* checkpoint flags */ | ||
41 | enum { | ||
42 | NILFS_CPINFO_SNAPSHOT, | ||
43 | NILFS_CPINFO_INVALID, | ||
44 | NILFS_CPINFO_SKETCH, | ||
45 | NILFS_CPINFO_MINOR, | ||
46 | }; | ||
47 | |||
48 | #define NILFS_CPINFO_FNS(flag, name) \ | ||
49 | static inline int \ | ||
50 | nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ | ||
51 | { \ | ||
52 | return !!(cpinfo->ci_flags & (1UL << NILFS_CPINFO_##flag)); \ | ||
53 | } | ||
54 | |||
55 | NILFS_CPINFO_FNS(SNAPSHOT, snapshot) | ||
56 | NILFS_CPINFO_FNS(INVALID, invalid) | ||
57 | NILFS_CPINFO_FNS(MINOR, minor) | ||
58 | |||
59 | /** | ||
60 | * nilfs_suinfo - segment usage information | ||
61 | * @sui_lastmod: timestamp of last modification | ||
62 | * @sui_nblocks: number of written blocks in segment | ||
63 | * @sui_flags: segment usage flags | ||
64 | */ | ||
65 | struct nilfs_suinfo { | ||
66 | __u64 sui_lastmod; | ||
67 | __u32 sui_nblocks; | ||
68 | __u32 sui_flags; | ||
69 | }; | ||
70 | |||
71 | /* segment usage flags */ | ||
72 | enum { | ||
73 | NILFS_SUINFO_ACTIVE, | ||
74 | NILFS_SUINFO_DIRTY, | ||
75 | NILFS_SUINFO_ERROR, | ||
76 | }; | ||
77 | |||
78 | #define NILFS_SUINFO_FNS(flag, name) \ | ||
79 | static inline int \ | ||
80 | nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ | ||
81 | { \ | ||
82 | return si->sui_flags & (1UL << NILFS_SUINFO_##flag); \ | ||
83 | } | ||
84 | |||
85 | NILFS_SUINFO_FNS(ACTIVE, active) | ||
86 | NILFS_SUINFO_FNS(DIRTY, dirty) | ||
87 | NILFS_SUINFO_FNS(ERROR, error) | ||
88 | |||
89 | static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) | ||
90 | { | ||
91 | return !si->sui_flags; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * nilfs_suinfo_update - segment usage information update | ||
96 | * @sup_segnum: segment number | ||
97 | * @sup_flags: flags for which fields are active in sup_sui | ||
98 | * @sup_reserved: reserved necessary for alignment | ||
99 | * @sup_sui: segment usage information | ||
100 | */ | ||
101 | struct nilfs_suinfo_update { | ||
102 | __u64 sup_segnum; | ||
103 | __u32 sup_flags; | ||
104 | __u32 sup_reserved; | ||
105 | struct nilfs_suinfo sup_sui; | ||
106 | }; | ||
107 | |||
108 | enum { | ||
109 | NILFS_SUINFO_UPDATE_LASTMOD, | ||
110 | NILFS_SUINFO_UPDATE_NBLOCKS, | ||
111 | NILFS_SUINFO_UPDATE_FLAGS, | ||
112 | __NR_NILFS_SUINFO_UPDATE_FIELDS, | ||
113 | }; | ||
114 | |||
115 | #define NILFS_SUINFO_UPDATE_FNS(flag, name) \ | ||
116 | static inline void \ | ||
117 | nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \ | ||
118 | { \ | ||
119 | sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \ | ||
120 | } \ | ||
121 | static inline void \ | ||
122 | nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \ | ||
123 | { \ | ||
124 | sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \ | ||
125 | } \ | ||
126 | static inline int \ | ||
127 | nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \ | ||
128 | { \ | ||
129 | return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\ | ||
130 | } | ||
131 | |||
132 | NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod) | ||
133 | NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks) | ||
134 | NILFS_SUINFO_UPDATE_FNS(FLAGS, flags) | ||
135 | |||
136 | enum { | ||
137 | NILFS_CHECKPOINT, | ||
138 | NILFS_SNAPSHOT, | ||
139 | }; | ||
140 | |||
141 | /** | ||
142 | * struct nilfs_cpmode - change checkpoint mode structure | ||
143 | * @cm_cno: checkpoint number | ||
144 | * @cm_mode: mode of checkpoint | ||
145 | * @cm_pad: padding | ||
146 | */ | ||
147 | struct nilfs_cpmode { | ||
148 | __u64 cm_cno; | ||
149 | __u32 cm_mode; | ||
150 | __u32 cm_pad; | ||
151 | }; | ||
152 | |||
153 | /** | ||
154 | * struct nilfs_argv - argument vector | ||
155 | * @v_base: pointer on data array from userspace | ||
156 | * @v_nmembs: number of members in data array | ||
157 | * @v_size: size of data array in bytes | ||
158 | * @v_flags: flags | ||
159 | * @v_index: start number of target data items | ||
160 | */ | ||
161 | struct nilfs_argv { | ||
162 | __u64 v_base; | ||
163 | __u32 v_nmembs; /* number of members */ | ||
164 | __u16 v_size; /* size of members */ | ||
165 | __u16 v_flags; | ||
166 | __u64 v_index; | ||
167 | }; | ||
168 | |||
169 | /** | ||
170 | * struct nilfs_period - period of checkpoint numbers | ||
171 | * @p_start: start checkpoint number (inclusive) | ||
172 | * @p_end: end checkpoint number (exclusive) | ||
173 | */ | ||
174 | struct nilfs_period { | ||
175 | __u64 p_start; | ||
176 | __u64 p_end; | ||
177 | }; | ||
178 | |||
179 | /** | ||
180 | * struct nilfs_cpstat - checkpoint statistics | ||
181 | * @cs_cno: checkpoint number | ||
182 | * @cs_ncps: number of checkpoints | ||
183 | * @cs_nsss: number of snapshots | ||
184 | */ | ||
185 | struct nilfs_cpstat { | ||
186 | __u64 cs_cno; | ||
187 | __u64 cs_ncps; | ||
188 | __u64 cs_nsss; | ||
189 | }; | ||
190 | |||
191 | /** | ||
192 | * struct nilfs_sustat - segment usage statistics | ||
193 | * @ss_nsegs: number of segments | ||
194 | * @ss_ncleansegs: number of clean segments | ||
195 | * @ss_ndirtysegs: number of dirty segments | ||
196 | * @ss_ctime: creation time of the last segment | ||
197 | * @ss_nongc_ctime: creation time of the last segment not for GC | ||
198 | * @ss_prot_seq: least sequence number of segments which must not be reclaimed | ||
199 | */ | ||
200 | struct nilfs_sustat { | ||
201 | __u64 ss_nsegs; | ||
202 | __u64 ss_ncleansegs; | ||
203 | __u64 ss_ndirtysegs; | ||
204 | __u64 ss_ctime; | ||
205 | __u64 ss_nongc_ctime; | ||
206 | __u64 ss_prot_seq; | ||
207 | }; | ||
208 | |||
209 | /** | ||
210 | * struct nilfs_vinfo - virtual block number information | ||
211 | * @vi_vblocknr: virtual block number | ||
212 | * @vi_start: start checkpoint number (inclusive) | ||
213 | * @vi_end: end checkpoint number (exclusive) | ||
214 | * @vi_blocknr: disk block number | ||
215 | */ | ||
216 | struct nilfs_vinfo { | ||
217 | __u64 vi_vblocknr; | ||
218 | __u64 vi_start; | ||
219 | __u64 vi_end; | ||
220 | __u64 vi_blocknr; | ||
221 | }; | ||
222 | |||
223 | /** | ||
224 | * struct nilfs_vdesc - descriptor of virtual block number | ||
225 | * @vd_ino: inode number | ||
226 | * @vd_cno: checkpoint number | ||
227 | * @vd_vblocknr: virtual block number | ||
228 | * @vd_period: period of checkpoint numbers | ||
229 | * @vd_blocknr: disk block number | ||
230 | * @vd_offset: logical block offset inside a file | ||
231 | * @vd_flags: flags (data or node block) | ||
232 | * @vd_pad: padding | ||
233 | */ | ||
234 | struct nilfs_vdesc { | ||
235 | __u64 vd_ino; | ||
236 | __u64 vd_cno; | ||
237 | __u64 vd_vblocknr; | ||
238 | struct nilfs_period vd_period; | ||
239 | __u64 vd_blocknr; | ||
240 | __u64 vd_offset; | ||
241 | __u32 vd_flags; | ||
242 | __u32 vd_pad; | ||
243 | }; | ||
244 | |||
245 | /** | ||
246 | * struct nilfs_bdesc - descriptor of disk block number | ||
247 | * @bd_ino: inode number | ||
248 | * @bd_oblocknr: disk block address (for skipping dead blocks) | ||
249 | * @bd_blocknr: disk block address | ||
250 | * @bd_offset: logical block offset inside a file | ||
251 | * @bd_level: level in the b-tree organization | ||
252 | * @bd_pad: padding | ||
253 | */ | ||
254 | struct nilfs_bdesc { | ||
255 | __u64 bd_ino; | ||
256 | __u64 bd_oblocknr; | ||
257 | __u64 bd_blocknr; | ||
258 | __u64 bd_offset; | ||
259 | __u32 bd_level; | ||
260 | __u32 bd_pad; | ||
261 | }; | ||
262 | |||
263 | #define NILFS_IOCTL_IDENT 'n' | ||
264 | |||
265 | #define NILFS_IOCTL_CHANGE_CPMODE \ | ||
266 | _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) | ||
267 | #define NILFS_IOCTL_DELETE_CHECKPOINT \ | ||
268 | _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) | ||
269 | #define NILFS_IOCTL_GET_CPINFO \ | ||
270 | _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) | ||
271 | #define NILFS_IOCTL_GET_CPSTAT \ | ||
272 | _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) | ||
273 | #define NILFS_IOCTL_GET_SUINFO \ | ||
274 | _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) | ||
275 | #define NILFS_IOCTL_GET_SUSTAT \ | ||
276 | _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) | ||
277 | #define NILFS_IOCTL_GET_VINFO \ | ||
278 | _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv) | ||
279 | #define NILFS_IOCTL_GET_BDESCS \ | ||
280 | _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) | ||
281 | #define NILFS_IOCTL_CLEAN_SEGMENTS \ | ||
282 | _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) | ||
283 | #define NILFS_IOCTL_SYNC \ | ||
284 | _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) | ||
285 | #define NILFS_IOCTL_RESIZE \ | ||
286 | _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) | ||
287 | #define NILFS_IOCTL_SET_ALLOC_RANGE \ | ||
288 | _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) | ||
289 | #define NILFS_IOCTL_SET_SUINFO \ | ||
290 | _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv) | ||
291 | |||
292 | #endif /* _LINUX_NILFS2_API_H */ | ||
diff --git a/include/linux/nilfs2_fs.h b/include/uapi/linux/nilfs2_ondisk.h index 5988dd57ba66..2a8a3addb675 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/uapi/linux/nilfs2_ondisk.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. | 2 | * nilfs2_ondisk.h - NILFS2 on-disk structures |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
@@ -7,13 +7,6 @@ | |||
7 | * it under the terms of the GNU Lesser General Public License as published | 7 | * it under the terms of the GNU Lesser General Public License as published |
8 | * by the Free Software Foundation; either version 2.1 of the License, or | 8 | * by the Free Software Foundation; either version 2.1 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU Lesser General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato and Ryusuke Konishi. | ||
17 | */ | 10 | */ |
18 | /* | 11 | /* |
19 | * linux/include/linux/ext2_fs.h | 12 | * linux/include/linux/ext2_fs.h |
@@ -30,16 +23,15 @@ | |||
30 | * Copyright (C) 1991, 1992 Linus Torvalds | 23 | * Copyright (C) 1991, 1992 Linus Torvalds |
31 | */ | 24 | */ |
32 | 25 | ||
33 | #ifndef _LINUX_NILFS_FS_H | 26 | #ifndef _LINUX_NILFS2_ONDISK_H |
34 | #define _LINUX_NILFS_FS_H | 27 | #define _LINUX_NILFS2_ONDISK_H |
35 | 28 | ||
36 | #include <linux/types.h> | 29 | #include <linux/types.h> |
37 | #include <linux/ioctl.h> | ||
38 | #include <linux/magic.h> | 30 | #include <linux/magic.h> |
39 | #include <linux/bug.h> | ||
40 | 31 | ||
41 | 32 | ||
42 | #define NILFS_INODE_BMAP_SIZE 7 | 33 | #define NILFS_INODE_BMAP_SIZE 7 |
34 | |||
43 | /** | 35 | /** |
44 | * struct nilfs_inode - structure of an inode on disk | 36 | * struct nilfs_inode - structure of an inode on disk |
45 | * @i_blocks: blocks count | 37 | * @i_blocks: blocks count |
@@ -56,7 +48,7 @@ | |||
56 | * @i_bmap: block mapping | 48 | * @i_bmap: block mapping |
57 | * @i_xattr: extended attributes | 49 | * @i_xattr: extended attributes |
58 | * @i_generation: file generation (for NFS) | 50 | * @i_generation: file generation (for NFS) |
59 | * @i_pad: padding | 51 | * @i_pad: padding |
60 | */ | 52 | */ |
61 | struct nilfs_inode { | 53 | struct nilfs_inode { |
62 | __le64 i_blocks; | 54 | __le64 i_blocks; |
@@ -338,29 +330,7 @@ enum { | |||
338 | #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) | 330 | #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) |
339 | #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ | 331 | #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ |
340 | ~NILFS_DIR_ROUND) | 332 | ~NILFS_DIR_ROUND) |
341 | #define NILFS_MAX_REC_LEN ((1<<16)-1) | 333 | #define NILFS_MAX_REC_LEN ((1 << 16) - 1) |
342 | |||
343 | static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) | ||
344 | { | ||
345 | unsigned int len = le16_to_cpu(dlen); | ||
346 | |||
347 | #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) | ||
348 | if (len == NILFS_MAX_REC_LEN) | ||
349 | return 1 << 16; | ||
350 | #endif | ||
351 | return len; | ||
352 | } | ||
353 | |||
354 | static inline __le16 nilfs_rec_len_to_disk(unsigned int len) | ||
355 | { | ||
356 | #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) | ||
357 | if (len == (1 << 16)) | ||
358 | return cpu_to_le16(NILFS_MAX_REC_LEN); | ||
359 | else if (len > (1 << 16)) | ||
360 | BUG(); | ||
361 | #endif | ||
362 | return cpu_to_le16(len); | ||
363 | } | ||
364 | 334 | ||
365 | /** | 335 | /** |
366 | * struct nilfs_finfo - file information | 336 | * struct nilfs_finfo - file information |
@@ -374,11 +344,10 @@ struct nilfs_finfo { | |||
374 | __le64 fi_cno; | 344 | __le64 fi_cno; |
375 | __le32 fi_nblocks; | 345 | __le32 fi_nblocks; |
376 | __le32 fi_ndatablk; | 346 | __le32 fi_ndatablk; |
377 | /* array of virtual block numbers */ | ||
378 | }; | 347 | }; |
379 | 348 | ||
380 | /** | 349 | /** |
381 | * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned | 350 | * struct nilfs_binfo_v - information on a data block (except DAT) |
382 | * @bi_vblocknr: virtual block number | 351 | * @bi_vblocknr: virtual block number |
383 | * @bi_blkoff: block offset | 352 | * @bi_blkoff: block offset |
384 | */ | 353 | */ |
@@ -388,7 +357,7 @@ struct nilfs_binfo_v { | |||
388 | }; | 357 | }; |
389 | 358 | ||
390 | /** | 359 | /** |
391 | * struct nilfs_binfo_dat - information for the block which belongs to the DAT file | 360 | * struct nilfs_binfo_dat - information on a DAT node block |
392 | * @bi_blkoff: block offset | 361 | * @bi_blkoff: block offset |
393 | * @bi_level: level | 362 | * @bi_level: level |
394 | * @bi_pad: padding | 363 | * @bi_pad: padding |
@@ -454,7 +423,7 @@ struct nilfs_segment_summary { | |||
454 | #define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ | 423 | #define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ |
455 | 424 | ||
456 | /** | 425 | /** |
457 | * struct nilfs_btree_node - B-tree node | 426 | * struct nilfs_btree_node - header of B-tree node block |
458 | * @bn_flags: flags | 427 | * @bn_flags: flags |
459 | * @bn_level: level | 428 | * @bn_level: level |
460 | * @bn_nchildren: number of children | 429 | * @bn_nchildren: number of children |
@@ -476,6 +445,16 @@ struct nilfs_btree_node { | |||
476 | #define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ | 445 | #define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ |
477 | 446 | ||
478 | /** | 447 | /** |
448 | * struct nilfs_direct_node - header of built-in bmap array | ||
449 | * @dn_flags: flags | ||
450 | * @dn_pad: padding | ||
451 | */ | ||
452 | struct nilfs_direct_node { | ||
453 | __u8 dn_flags; | ||
454 | __u8 pad[7]; | ||
455 | }; | ||
456 | |||
457 | /** | ||
479 | * struct nilfs_palloc_group_desc - block group descriptor | 458 | * struct nilfs_palloc_group_desc - block group descriptor |
480 | * @pg_nfrees: number of free entries in block group | 459 | * @pg_nfrees: number of free entries in block group |
481 | */ | 460 | */ |
@@ -574,40 +553,6 @@ NILFS_CHECKPOINT_FNS(INVALID, invalid) | |||
574 | NILFS_CHECKPOINT_FNS(MINOR, minor) | 553 | NILFS_CHECKPOINT_FNS(MINOR, minor) |
575 | 554 | ||
576 | /** | 555 | /** |
577 | * struct nilfs_cpinfo - checkpoint information | ||
578 | * @ci_flags: flags | ||
579 | * @ci_pad: padding | ||
580 | * @ci_cno: checkpoint number | ||
581 | * @ci_create: creation timestamp | ||
582 | * @ci_nblk_inc: number of blocks incremented by this checkpoint | ||
583 | * @ci_inodes_count: inodes count | ||
584 | * @ci_blocks_count: blocks count | ||
585 | * @ci_next: next checkpoint number in snapshot list | ||
586 | */ | ||
587 | struct nilfs_cpinfo { | ||
588 | __u32 ci_flags; | ||
589 | __u32 ci_pad; | ||
590 | __u64 ci_cno; | ||
591 | __u64 ci_create; | ||
592 | __u64 ci_nblk_inc; | ||
593 | __u64 ci_inodes_count; | ||
594 | __u64 ci_blocks_count; | ||
595 | __u64 ci_next; | ||
596 | }; | ||
597 | |||
598 | #define NILFS_CPINFO_FNS(flag, name) \ | ||
599 | static inline int \ | ||
600 | nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ | ||
601 | { \ | ||
602 | return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
603 | } | ||
604 | |||
605 | NILFS_CPINFO_FNS(SNAPSHOT, snapshot) | ||
606 | NILFS_CPINFO_FNS(INVALID, invalid) | ||
607 | NILFS_CPINFO_FNS(MINOR, minor) | ||
608 | |||
609 | |||
610 | /** | ||
611 | * struct nilfs_cpfile_header - checkpoint file header | 556 | * struct nilfs_cpfile_header - checkpoint file header |
612 | * @ch_ncheckpoints: number of checkpoints | 557 | * @ch_ncheckpoints: number of checkpoints |
613 | * @ch_nsnapshots: number of snapshots | 558 | * @ch_nsnapshots: number of snapshots |
@@ -619,7 +564,7 @@ struct nilfs_cpfile_header { | |||
619 | struct nilfs_snapshot_list ch_snapshot_list; | 564 | struct nilfs_snapshot_list ch_snapshot_list; |
620 | }; | 565 | }; |
621 | 566 | ||
622 | #define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ | 567 | #define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ |
623 | ((sizeof(struct nilfs_cpfile_header) + \ | 568 | ((sizeof(struct nilfs_cpfile_header) + \ |
624 | sizeof(struct nilfs_checkpoint) - 1) / \ | 569 | sizeof(struct nilfs_checkpoint) - 1) / \ |
625 | sizeof(struct nilfs_checkpoint)) | 570 | sizeof(struct nilfs_checkpoint)) |
@@ -643,8 +588,6 @@ enum { | |||
643 | NILFS_SEGMENT_USAGE_ACTIVE, | 588 | NILFS_SEGMENT_USAGE_ACTIVE, |
644 | NILFS_SEGMENT_USAGE_DIRTY, | 589 | NILFS_SEGMENT_USAGE_DIRTY, |
645 | NILFS_SEGMENT_USAGE_ERROR, | 590 | NILFS_SEGMENT_USAGE_ERROR, |
646 | |||
647 | /* ... */ | ||
648 | }; | 591 | }; |
649 | 592 | ||
650 | #define NILFS_SEGMENT_USAGE_FNS(flag, name) \ | 593 | #define NILFS_SEGMENT_USAGE_FNS(flag, name) \ |
@@ -699,236 +642,9 @@ struct nilfs_sufile_header { | |||
699 | /* ... */ | 642 | /* ... */ |
700 | }; | 643 | }; |
701 | 644 | ||
702 | #define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ | 645 | #define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ |
703 | ((sizeof(struct nilfs_sufile_header) + \ | 646 | ((sizeof(struct nilfs_sufile_header) + \ |
704 | sizeof(struct nilfs_segment_usage) - 1) / \ | 647 | sizeof(struct nilfs_segment_usage) - 1) / \ |
705 | sizeof(struct nilfs_segment_usage)) | 648 | sizeof(struct nilfs_segment_usage)) |
706 | 649 | ||
707 | /** | 650 | #endif /* _LINUX_NILFS2_ONDISK_H */ |
708 | * nilfs_suinfo - segment usage information | ||
709 | * @sui_lastmod: timestamp of last modification | ||
710 | * @sui_nblocks: number of written blocks in segment | ||
711 | * @sui_flags: segment usage flags | ||
712 | */ | ||
713 | struct nilfs_suinfo { | ||
714 | __u64 sui_lastmod; | ||
715 | __u32 sui_nblocks; | ||
716 | __u32 sui_flags; | ||
717 | }; | ||
718 | |||
719 | #define NILFS_SUINFO_FNS(flag, name) \ | ||
720 | static inline int \ | ||
721 | nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ | ||
722 | { \ | ||
723 | return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \ | ||
724 | } | ||
725 | |||
726 | NILFS_SUINFO_FNS(ACTIVE, active) | ||
727 | NILFS_SUINFO_FNS(DIRTY, dirty) | ||
728 | NILFS_SUINFO_FNS(ERROR, error) | ||
729 | |||
730 | static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) | ||
731 | { | ||
732 | return !si->sui_flags; | ||
733 | } | ||
734 | |||
735 | /* ioctl */ | ||
736 | /** | ||
737 | * nilfs_suinfo_update - segment usage information update | ||
738 | * @sup_segnum: segment number | ||
739 | * @sup_flags: flags for which fields are active in sup_sui | ||
740 | * @sup_reserved: reserved necessary for alignment | ||
741 | * @sup_sui: segment usage information | ||
742 | */ | ||
743 | struct nilfs_suinfo_update { | ||
744 | __u64 sup_segnum; | ||
745 | __u32 sup_flags; | ||
746 | __u32 sup_reserved; | ||
747 | struct nilfs_suinfo sup_sui; | ||
748 | }; | ||
749 | |||
750 | enum { | ||
751 | NILFS_SUINFO_UPDATE_LASTMOD, | ||
752 | NILFS_SUINFO_UPDATE_NBLOCKS, | ||
753 | NILFS_SUINFO_UPDATE_FLAGS, | ||
754 | __NR_NILFS_SUINFO_UPDATE_FIELDS, | ||
755 | }; | ||
756 | |||
757 | #define NILFS_SUINFO_UPDATE_FNS(flag, name) \ | ||
758 | static inline void \ | ||
759 | nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \ | ||
760 | { \ | ||
761 | sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \ | ||
762 | } \ | ||
763 | static inline void \ | ||
764 | nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \ | ||
765 | { \ | ||
766 | sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \ | ||
767 | } \ | ||
768 | static inline int \ | ||
769 | nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \ | ||
770 | { \ | ||
771 | return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\ | ||
772 | } | ||
773 | |||
774 | NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod) | ||
775 | NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks) | ||
776 | NILFS_SUINFO_UPDATE_FNS(FLAGS, flags) | ||
777 | |||
778 | enum { | ||
779 | NILFS_CHECKPOINT, | ||
780 | NILFS_SNAPSHOT, | ||
781 | }; | ||
782 | |||
783 | /** | ||
784 | * struct nilfs_cpmode - change checkpoint mode structure | ||
785 | * @cm_cno: checkpoint number | ||
786 | * @cm_mode: mode of checkpoint | ||
787 | * @cm_pad: padding | ||
788 | */ | ||
789 | struct nilfs_cpmode { | ||
790 | __u64 cm_cno; | ||
791 | __u32 cm_mode; | ||
792 | __u32 cm_pad; | ||
793 | }; | ||
794 | |||
795 | /** | ||
796 | * struct nilfs_argv - argument vector | ||
797 | * @v_base: pointer on data array from userspace | ||
798 | * @v_nmembs: number of members in data array | ||
799 | * @v_size: size of data array in bytes | ||
800 | * @v_flags: flags | ||
801 | * @v_index: start number of target data items | ||
802 | */ | ||
803 | struct nilfs_argv { | ||
804 | __u64 v_base; | ||
805 | __u32 v_nmembs; /* number of members */ | ||
806 | __u16 v_size; /* size of members */ | ||
807 | __u16 v_flags; | ||
808 | __u64 v_index; | ||
809 | }; | ||
810 | |||
811 | /** | ||
812 | * struct nilfs_period - period of checkpoint numbers | ||
813 | * @p_start: start checkpoint number (inclusive) | ||
814 | * @p_end: end checkpoint number (exclusive) | ||
815 | */ | ||
816 | struct nilfs_period { | ||
817 | __u64 p_start; | ||
818 | __u64 p_end; | ||
819 | }; | ||
820 | |||
821 | /** | ||
822 | * struct nilfs_cpstat - checkpoint statistics | ||
823 | * @cs_cno: checkpoint number | ||
824 | * @cs_ncps: number of checkpoints | ||
825 | * @cs_nsss: number of snapshots | ||
826 | */ | ||
827 | struct nilfs_cpstat { | ||
828 | __u64 cs_cno; | ||
829 | __u64 cs_ncps; | ||
830 | __u64 cs_nsss; | ||
831 | }; | ||
832 | |||
833 | /** | ||
834 | * struct nilfs_sustat - segment usage statistics | ||
835 | * @ss_nsegs: number of segments | ||
836 | * @ss_ncleansegs: number of clean segments | ||
837 | * @ss_ndirtysegs: number of dirty segments | ||
838 | * @ss_ctime: creation time of the last segment | ||
839 | * @ss_nongc_ctime: creation time of the last segment not for GC | ||
840 | * @ss_prot_seq: least sequence number of segments which must not be reclaimed | ||
841 | */ | ||
842 | struct nilfs_sustat { | ||
843 | __u64 ss_nsegs; | ||
844 | __u64 ss_ncleansegs; | ||
845 | __u64 ss_ndirtysegs; | ||
846 | __u64 ss_ctime; | ||
847 | __u64 ss_nongc_ctime; | ||
848 | __u64 ss_prot_seq; | ||
849 | }; | ||
850 | |||
851 | /** | ||
852 | * struct nilfs_vinfo - virtual block number information | ||
853 | * @vi_vblocknr: virtual block number | ||
854 | * @vi_start: start checkpoint number (inclusive) | ||
855 | * @vi_end: end checkpoint number (exclusive) | ||
856 | * @vi_blocknr: disk block number | ||
857 | */ | ||
858 | struct nilfs_vinfo { | ||
859 | __u64 vi_vblocknr; | ||
860 | __u64 vi_start; | ||
861 | __u64 vi_end; | ||
862 | __u64 vi_blocknr; | ||
863 | }; | ||
864 | |||
865 | /** | ||
866 | * struct nilfs_vdesc - descriptor of virtual block number | ||
867 | * @vd_ino: inode number | ||
868 | * @vd_cno: checkpoint number | ||
869 | * @vd_vblocknr: virtual block number | ||
870 | * @vd_period: period of checkpoint numbers | ||
871 | * @vd_blocknr: disk block number | ||
872 | * @vd_offset: logical block offset inside a file | ||
873 | * @vd_flags: flags (data or node block) | ||
874 | * @vd_pad: padding | ||
875 | */ | ||
876 | struct nilfs_vdesc { | ||
877 | __u64 vd_ino; | ||
878 | __u64 vd_cno; | ||
879 | __u64 vd_vblocknr; | ||
880 | struct nilfs_period vd_period; | ||
881 | __u64 vd_blocknr; | ||
882 | __u64 vd_offset; | ||
883 | __u32 vd_flags; | ||
884 | __u32 vd_pad; | ||
885 | }; | ||
886 | |||
887 | /** | ||
888 | * struct nilfs_bdesc - descriptor of disk block number | ||
889 | * @bd_ino: inode number | ||
890 | * @bd_oblocknr: disk block address (for skipping dead blocks) | ||
891 | * @bd_blocknr: disk block address | ||
892 | * @bd_offset: logical block offset inside a file | ||
893 | * @bd_level: level in the b-tree organization | ||
894 | * @bd_pad: padding | ||
895 | */ | ||
896 | struct nilfs_bdesc { | ||
897 | __u64 bd_ino; | ||
898 | __u64 bd_oblocknr; | ||
899 | __u64 bd_blocknr; | ||
900 | __u64 bd_offset; | ||
901 | __u32 bd_level; | ||
902 | __u32 bd_pad; | ||
903 | }; | ||
904 | |||
905 | #define NILFS_IOCTL_IDENT 'n' | ||
906 | |||
907 | #define NILFS_IOCTL_CHANGE_CPMODE \ | ||
908 | _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) | ||
909 | #define NILFS_IOCTL_DELETE_CHECKPOINT \ | ||
910 | _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) | ||
911 | #define NILFS_IOCTL_GET_CPINFO \ | ||
912 | _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) | ||
913 | #define NILFS_IOCTL_GET_CPSTAT \ | ||
914 | _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) | ||
915 | #define NILFS_IOCTL_GET_SUINFO \ | ||
916 | _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) | ||
917 | #define NILFS_IOCTL_GET_SUSTAT \ | ||
918 | _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) | ||
919 | #define NILFS_IOCTL_GET_VINFO \ | ||
920 | _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv) | ||
921 | #define NILFS_IOCTL_GET_BDESCS \ | ||
922 | _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) | ||
923 | #define NILFS_IOCTL_CLEAN_SEGMENTS \ | ||
924 | _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) | ||
925 | #define NILFS_IOCTL_SYNC \ | ||
926 | _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) | ||
927 | #define NILFS_IOCTL_RESIZE \ | ||
928 | _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) | ||
929 | #define NILFS_IOCTL_SET_ALLOC_RANGE \ | ||
930 | _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) | ||
931 | #define NILFS_IOCTL_SET_SUINFO \ | ||
932 | _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv) | ||
933 | |||
934 | #endif /* _LINUX_NILFS_FS_H */ | ||
diff --git a/include/uapi/linux/rio_cm_cdev.h b/include/uapi/linux/rio_cm_cdev.h new file mode 100644 index 000000000000..6edb900d318d --- /dev/null +++ b/include/uapi/linux/rio_cm_cdev.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015, Integrated Device Technology Inc. | ||
3 | * Copyright (c) 2015, Prodrive Technologies | ||
4 | * Copyright (c) 2015, RapidIO Trade Association | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * This software is available to you under a choice of one of two licenses. | ||
8 | * You may choose to be licensed under the terms of the GNU General Public | ||
9 | * License(GPL) Version 2, or the BSD-3 Clause license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions are met: | ||
13 | * | ||
14 | * 1. Redistributions of source code must retain the above copyright notice, | ||
15 | * this list of conditions and the following disclaimer. | ||
16 | * | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright notice, | ||
18 | * this list of conditions and the following disclaimer in the documentation | ||
19 | * and/or other materials provided with the distribution. | ||
20 | * | ||
21 | * 3. Neither the name of the copyright holder nor the names of its contributors | ||
22 | * may be used to endorse or promote products derived from this software without | ||
23 | * specific prior written permission. | ||
24 | * | ||
25 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
26 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | ||
27 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR | ||
29 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | ||
30 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
31 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | ||
32 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
33 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | ||
34 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | ||
35 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
36 | */ | ||
37 | |||
38 | #ifndef _RIO_CM_CDEV_H_ | ||
39 | #define _RIO_CM_CDEV_H_ | ||
40 | |||
41 | #include <linux/types.h> | ||
42 | |||
43 | struct rio_cm_channel { | ||
44 | __u16 id; | ||
45 | __u16 remote_channel; | ||
46 | __u16 remote_destid; | ||
47 | __u8 mport_id; | ||
48 | }; | ||
49 | |||
50 | struct rio_cm_msg { | ||
51 | __u16 ch_num; | ||
52 | __u16 size; | ||
53 | __u32 rxto; /* receive timeout in mSec. 0 = blocking */ | ||
54 | __u64 msg; | ||
55 | }; | ||
56 | |||
57 | struct rio_cm_accept { | ||
58 | __u16 ch_num; | ||
59 | __u16 pad0; | ||
60 | __u32 wait_to; /* accept timeout in mSec. 0 = blocking */ | ||
61 | }; | ||
62 | |||
63 | /* RapidIO Channelized Messaging Driver IOCTLs */ | ||
64 | #define RIO_CM_IOC_MAGIC 'c' | ||
65 | |||
66 | #define RIO_CM_EP_GET_LIST_SIZE _IOWR(RIO_CM_IOC_MAGIC, 1, __u32) | ||
67 | #define RIO_CM_EP_GET_LIST _IOWR(RIO_CM_IOC_MAGIC, 2, __u32) | ||
68 | #define RIO_CM_CHAN_CREATE _IOWR(RIO_CM_IOC_MAGIC, 3, __u16) | ||
69 | #define RIO_CM_CHAN_CLOSE _IOW(RIO_CM_IOC_MAGIC, 4, __u16) | ||
70 | #define RIO_CM_CHAN_BIND _IOW(RIO_CM_IOC_MAGIC, 5, struct rio_cm_channel) | ||
71 | #define RIO_CM_CHAN_LISTEN _IOW(RIO_CM_IOC_MAGIC, 6, __u16) | ||
72 | #define RIO_CM_CHAN_ACCEPT _IOWR(RIO_CM_IOC_MAGIC, 7, struct rio_cm_accept) | ||
73 | #define RIO_CM_CHAN_CONNECT _IOW(RIO_CM_IOC_MAGIC, 8, struct rio_cm_channel) | ||
74 | #define RIO_CM_CHAN_SEND _IOW(RIO_CM_IOC_MAGIC, 9, struct rio_cm_msg) | ||
75 | #define RIO_CM_CHAN_RECEIVE _IOWR(RIO_CM_IOC_MAGIC, 10, struct rio_cm_msg) | ||
76 | #define RIO_CM_MPORT_GET_LIST _IOWR(RIO_CM_IOC_MAGIC, 11, __u32) | ||
77 | |||
78 | #endif /* _RIO_CM_CDEV_H_ */ | ||
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 0956373b56db..d2b12152e358 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h | |||
@@ -26,8 +26,6 @@ | |||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/compiler.h> | 27 | #include <linux/compiler.h> |
28 | 28 | ||
29 | struct completion; | ||
30 | |||
31 | #define CTL_MAXNAME 10 /* how many path components do we allow in a | 29 | #define CTL_MAXNAME 10 /* how many path components do we allow in a |
32 | call to sysctl? In other words, what is | 30 | call to sysctl? In other words, what is |
33 | the largest acceptable value for the nlen | 31 | the largest acceptable value for the nlen |
diff --git a/init/Kconfig b/init/Kconfig index 46f817abff0e..69886493ff1e 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -55,6 +55,7 @@ config CROSS_COMPILE | |||
55 | 55 | ||
56 | config COMPILE_TEST | 56 | config COMPILE_TEST |
57 | bool "Compile also drivers which will not load" | 57 | bool "Compile also drivers which will not load" |
58 | depends on !UML | ||
58 | default n | 59 | default n |
59 | help | 60 | help |
60 | Some drivers can be compiled on a different platform than they are | 61 | Some drivers can be compiled on a different platform than they are |
@@ -80,6 +81,7 @@ config LOCALVERSION | |||
80 | config LOCALVERSION_AUTO | 81 | config LOCALVERSION_AUTO |
81 | bool "Automatically append version information to the version string" | 82 | bool "Automatically append version information to the version string" |
82 | default y | 83 | default y |
84 | depends on !COMPILE_TEST | ||
83 | help | 85 | help |
84 | This will try to automatically determine if the current tree is a | 86 | This will try to automatically determine if the current tree is a |
85 | release tree by looking for git tags that belong to the current | 87 | release tree by looking for git tags that belong to the current |
@@ -952,7 +954,7 @@ menuconfig CGROUPS | |||
952 | controls or device isolation. | 954 | controls or device isolation. |
953 | See | 955 | See |
954 | - Documentation/scheduler/sched-design-CFS.txt (CFS) | 956 | - Documentation/scheduler/sched-design-CFS.txt (CFS) |
955 | - Documentation/cgroups/ (features for grouping, isolation | 957 | - Documentation/cgroup-v1/ (features for grouping, isolation |
956 | and resource control) | 958 | and resource control) |
957 | 959 | ||
958 | Say N if unsure. | 960 | Say N if unsure. |
@@ -1009,7 +1011,7 @@ config BLK_CGROUP | |||
1009 | CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set | 1011 | CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set |
1010 | CONFIG_BLK_DEV_THROTTLING=y. | 1012 | CONFIG_BLK_DEV_THROTTLING=y. |
1011 | 1013 | ||
1012 | See Documentation/cgroups/blkio-controller.txt for more information. | 1014 | See Documentation/cgroup-v1/blkio-controller.txt for more information. |
1013 | 1015 | ||
1014 | config DEBUG_BLK_CGROUP | 1016 | config DEBUG_BLK_CGROUP |
1015 | bool "IO controller debugging" | 1017 | bool "IO controller debugging" |
@@ -2078,7 +2080,7 @@ config TRIM_UNUSED_KSYMS | |||
2078 | (especially when using LTO) for optimizing the code and reducing | 2080 | (especially when using LTO) for optimizing the code and reducing |
2079 | binary size. This might have some security advantages as well. | 2081 | binary size. This might have some security advantages as well. |
2080 | 2082 | ||
2081 | If unsure say N. | 2083 | If unsure, or if you need to build out-of-tree modules, say N. |
2082 | 2084 | ||
2083 | endif # MODULES | 2085 | endif # MODULES |
2084 | 2086 | ||
diff --git a/init/main.c b/init/main.c index eae02aa03c9e..a8a58e2794a5 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -380,7 +380,7 @@ static void __init setup_command_line(char *command_line) | |||
380 | 380 | ||
381 | static __initdata DECLARE_COMPLETION(kthreadd_done); | 381 | static __initdata DECLARE_COMPLETION(kthreadd_done); |
382 | 382 | ||
383 | static noinline void __init_refok rest_init(void) | 383 | static noinline void __ref rest_init(void) |
384 | { | 384 | { |
385 | int pid; | 385 | int pid; |
386 | 386 | ||
@@ -716,6 +716,12 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn) | |||
716 | addr = (unsigned long) dereference_function_descriptor(fn); | 716 | addr = (unsigned long) dereference_function_descriptor(fn); |
717 | sprint_symbol_no_offset(fn_name, addr); | 717 | sprint_symbol_no_offset(fn_name, addr); |
718 | 718 | ||
719 | /* | ||
720 | * fn will be "function_name [module_name]" where [module_name] is not | ||
721 | * displayed for built-in init functions. Strip off the [module_name]. | ||
722 | */ | ||
723 | strreplace(fn_name, ' ', '\0'); | ||
724 | |||
719 | list_for_each_entry(entry, &blacklisted_initcalls, next) { | 725 | list_for_each_entry(entry, &blacklisted_initcalls, next) { |
720 | if (!strcmp(fn_name, entry->buf)) { | 726 | if (!strcmp(fn_name, entry->buf)) { |
721 | pr_debug("initcall %s blacklisted\n", fn_name); | 727 | pr_debug("initcall %s blacklisted\n", fn_name); |
@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
680 | rcu_read_lock(); | 680 | rcu_read_lock(); |
681 | ipc_lock_object(&msq->q_perm); | 681 | ipc_lock_object(&msq->q_perm); |
682 | 682 | ||
683 | ipc_rcu_putref(msq, ipc_rcu_free); | 683 | ipc_rcu_putref(msq, msg_rcu_free); |
684 | /* raced with RMID? */ | 684 | /* raced with RMID? */ |
685 | if (!ipc_valid_object(&msq->q_perm)) { | 685 | if (!ipc_valid_object(&msq->q_perm)) { |
686 | err = -EIDRM; | 686 | err = -EIDRM; |
diff --git a/ipc/msgutil.c b/ipc/msgutil.c index ed81aafd2392..a521999de4f1 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c | |||
@@ -37,8 +37,6 @@ struct ipc_namespace init_ipc_ns = { | |||
37 | #endif | 37 | #endif |
38 | }; | 38 | }; |
39 | 39 | ||
40 | atomic_t nr_ipc_ns = ATOMIC_INIT(1); | ||
41 | |||
42 | struct msg_msgseg { | 40 | struct msg_msgseg { |
43 | struct msg_msgseg *next; | 41 | struct msg_msgseg *next; |
44 | /* the next part of the message follows immediately */ | 42 | /* the next part of the message follows immediately */ |
diff --git a/ipc/namespace.c b/ipc/namespace.c index 04cb07eb81f1..d87e6baa1323 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c | |||
@@ -43,7 +43,6 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, | |||
43 | kfree(ns); | 43 | kfree(ns); |
44 | return ERR_PTR(err); | 44 | return ERR_PTR(err); |
45 | } | 45 | } |
46 | atomic_inc(&nr_ipc_ns); | ||
47 | 46 | ||
48 | sem_init_ns(ns); | 47 | sem_init_ns(ns); |
49 | msg_init_ns(ns); | 48 | msg_init_ns(ns); |
@@ -96,7 +95,6 @@ static void free_ipc_ns(struct ipc_namespace *ns) | |||
96 | sem_exit_ns(ns); | 95 | sem_exit_ns(ns); |
97 | msg_exit_ns(ns); | 96 | msg_exit_ns(ns); |
98 | shm_exit_ns(ns); | 97 | shm_exit_ns(ns); |
99 | atomic_dec(&nr_ipc_ns); | ||
100 | 98 | ||
101 | put_user_ns(ns->user_ns); | 99 | put_user_ns(ns->user_ns); |
102 | ns_free_inum(&ns->ns); | 100 | ns_free_inum(&ns->ns); |
@@ -438,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns | |||
438 | static inline void sem_lock_and_putref(struct sem_array *sma) | 438 | static inline void sem_lock_and_putref(struct sem_array *sma) |
439 | { | 439 | { |
440 | sem_lock(sma, NULL, -1); | 440 | sem_lock(sma, NULL, -1); |
441 | ipc_rcu_putref(sma, ipc_rcu_free); | 441 | ipc_rcu_putref(sma, sem_rcu_free); |
442 | } | 442 | } |
443 | 443 | ||
444 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) | 444 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) |
@@ -1381,7 +1381,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
1381 | rcu_read_unlock(); | 1381 | rcu_read_unlock(); |
1382 | sem_io = ipc_alloc(sizeof(ushort)*nsems); | 1382 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
1383 | if (sem_io == NULL) { | 1383 | if (sem_io == NULL) { |
1384 | ipc_rcu_putref(sma, ipc_rcu_free); | 1384 | ipc_rcu_putref(sma, sem_rcu_free); |
1385 | return -ENOMEM; | 1385 | return -ENOMEM; |
1386 | } | 1386 | } |
1387 | 1387 | ||
@@ -1415,20 +1415,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
1415 | if (nsems > SEMMSL_FAST) { | 1415 | if (nsems > SEMMSL_FAST) { |
1416 | sem_io = ipc_alloc(sizeof(ushort)*nsems); | 1416 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
1417 | if (sem_io == NULL) { | 1417 | if (sem_io == NULL) { |
1418 | ipc_rcu_putref(sma, ipc_rcu_free); | 1418 | ipc_rcu_putref(sma, sem_rcu_free); |
1419 | return -ENOMEM; | 1419 | return -ENOMEM; |
1420 | } | 1420 | } |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { | 1423 | if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { |
1424 | ipc_rcu_putref(sma, ipc_rcu_free); | 1424 | ipc_rcu_putref(sma, sem_rcu_free); |
1425 | err = -EFAULT; | 1425 | err = -EFAULT; |
1426 | goto out_free; | 1426 | goto out_free; |
1427 | } | 1427 | } |
1428 | 1428 | ||
1429 | for (i = 0; i < nsems; i++) { | 1429 | for (i = 0; i < nsems; i++) { |
1430 | if (sem_io[i] > SEMVMX) { | 1430 | if (sem_io[i] > SEMVMX) { |
1431 | ipc_rcu_putref(sma, ipc_rcu_free); | 1431 | ipc_rcu_putref(sma, sem_rcu_free); |
1432 | err = -ERANGE; | 1432 | err = -ERANGE; |
1433 | goto out_free; | 1433 | goto out_free; |
1434 | } | 1434 | } |
@@ -1720,7 +1720,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) | |||
1720 | /* step 2: allocate new undo structure */ | 1720 | /* step 2: allocate new undo structure */ |
1721 | new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); | 1721 | new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); |
1722 | if (!new) { | 1722 | if (!new) { |
1723 | ipc_rcu_putref(sma, ipc_rcu_free); | 1723 | ipc_rcu_putref(sma, sem_rcu_free); |
1724 | return ERR_PTR(-ENOMEM); | 1724 | return ERR_PTR(-ENOMEM); |
1725 | } | 1725 | } |
1726 | 1726 | ||
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config new file mode 100644 index 000000000000..9f748ed7bea8 --- /dev/null +++ b/kernel/configs/android-base.config | |||
@@ -0,0 +1,152 @@ | |||
1 | # KEEP ALPHABETICALLY SORTED | ||
2 | # CONFIG_DEVKMEM is not set | ||
3 | # CONFIG_DEVMEM is not set | ||
4 | # CONFIG_INET_LRO is not set | ||
5 | # CONFIG_MODULES is not set | ||
6 | # CONFIG_OABI_COMPAT is not set | ||
7 | # CONFIG_SYSVIPC is not set | ||
8 | CONFIG_ANDROID=y | ||
9 | CONFIG_ANDROID_BINDER_IPC=y | ||
10 | CONFIG_ANDROID_LOW_MEMORY_KILLER=y | ||
11 | CONFIG_ARMV8_DEPRECATED=y | ||
12 | CONFIG_ASHMEM=y | ||
13 | CONFIG_AUDIT=y | ||
14 | CONFIG_BLK_DEV_DM=y | ||
15 | CONFIG_BLK_DEV_INITRD=y | ||
16 | CONFIG_CGROUPS=y | ||
17 | CONFIG_CGROUP_CPUACCT=y | ||
18 | CONFIG_CGROUP_DEBUG=y | ||
19 | CONFIG_CGROUP_FREEZER=y | ||
20 | CONFIG_CGROUP_SCHED=y | ||
21 | CONFIG_CP15_BARRIER_EMULATION=y | ||
22 | CONFIG_DM_CRYPT=y | ||
23 | CONFIG_DM_VERITY=y | ||
24 | CONFIG_DM_VERITY_FEC=y | ||
25 | CONFIG_EMBEDDED=y | ||
26 | CONFIG_FB=y | ||
27 | CONFIG_HIGH_RES_TIMERS=y | ||
28 | CONFIG_INET6_AH=y | ||
29 | CONFIG_INET6_ESP=y | ||
30 | CONFIG_INET6_IPCOMP=y | ||
31 | CONFIG_INET=y | ||
32 | CONFIG_INET_DIAG_DESTROY=y | ||
33 | CONFIG_INET_ESP=y | ||
34 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
35 | CONFIG_IP6_NF_FILTER=y | ||
36 | CONFIG_IP6_NF_IPTABLES=y | ||
37 | CONFIG_IP6_NF_MANGLE=y | ||
38 | CONFIG_IP6_NF_RAW=y | ||
39 | CONFIG_IP6_NF_TARGET_REJECT=y | ||
40 | CONFIG_IPV6=y | ||
41 | CONFIG_IPV6_MIP6=y | ||
42 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
43 | CONFIG_IPV6_OPTIMISTIC_DAD=y | ||
44 | CONFIG_IPV6_PRIVACY=y | ||
45 | CONFIG_IPV6_ROUTER_PREF=y | ||
46 | CONFIG_IPV6_ROUTE_INFO=y | ||
47 | CONFIG_IP_ADVANCED_ROUTER=y | ||
48 | CONFIG_IP_MULTICAST=y | ||
49 | CONFIG_IP_MULTIPLE_TABLES=y | ||
50 | CONFIG_IP_NF_ARPFILTER=y | ||
51 | CONFIG_IP_NF_ARPTABLES=y | ||
52 | CONFIG_IP_NF_ARP_MANGLE=y | ||
53 | CONFIG_IP_NF_FILTER=y | ||
54 | CONFIG_IP_NF_IPTABLES=y | ||
55 | CONFIG_IP_NF_MANGLE=y | ||
56 | CONFIG_IP_NF_MATCH_AH=y | ||
57 | CONFIG_IP_NF_MATCH_ECN=y | ||
58 | CONFIG_IP_NF_MATCH_TTL=y | ||
59 | CONFIG_IP_NF_NAT=y | ||
60 | CONFIG_IP_NF_RAW=y | ||
61 | CONFIG_IP_NF_SECURITY=y | ||
62 | CONFIG_IP_NF_TARGET_MASQUERADE=y | ||
63 | CONFIG_IP_NF_TARGET_NETMAP=y | ||
64 | CONFIG_IP_NF_TARGET_REDIRECT=y | ||
65 | CONFIG_IP_NF_TARGET_REJECT=y | ||
66 | CONFIG_NET=y | ||
67 | CONFIG_NETDEVICES=y | ||
68 | CONFIG_NETFILTER=y | ||
69 | CONFIG_NETFILTER_TPROXY=y | ||
70 | CONFIG_NETFILTER_XT_MATCH_COMMENT=y | ||
71 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y | ||
72 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=y | ||
73 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y | ||
74 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y | ||
75 | CONFIG_NETFILTER_XT_MATCH_HELPER=y | ||
76 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=y | ||
77 | CONFIG_NETFILTER_XT_MATCH_LENGTH=y | ||
78 | CONFIG_NETFILTER_XT_MATCH_LIMIT=y | ||
79 | CONFIG_NETFILTER_XT_MATCH_MAC=y | ||
80 | CONFIG_NETFILTER_XT_MATCH_MARK=y | ||
81 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y | ||
82 | CONFIG_NETFILTER_XT_MATCH_POLICY=y | ||
83 | CONFIG_NETFILTER_XT_MATCH_QUOTA=y | ||
84 | CONFIG_NETFILTER_XT_MATCH_SOCKET=y | ||
85 | CONFIG_NETFILTER_XT_MATCH_STATE=y | ||
86 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=y | ||
87 | CONFIG_NETFILTER_XT_MATCH_STRING=y | ||
88 | CONFIG_NETFILTER_XT_MATCH_TIME=y | ||
89 | CONFIG_NETFILTER_XT_MATCH_U32=y | ||
90 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y | ||
91 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=y | ||
92 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y | ||
93 | CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y | ||
94 | CONFIG_NETFILTER_XT_TARGET_MARK=y | ||
95 | CONFIG_NETFILTER_XT_TARGET_NFLOG=y | ||
96 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y | ||
97 | CONFIG_NETFILTER_XT_TARGET_SECMARK=y | ||
98 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=y | ||
99 | CONFIG_NETFILTER_XT_TARGET_TPROXY=y | ||
100 | CONFIG_NETFILTER_XT_TARGET_TRACE=y | ||
101 | CONFIG_NET_CLS_ACT=y | ||
102 | CONFIG_NET_CLS_U32=y | ||
103 | CONFIG_NET_EMATCH=y | ||
104 | CONFIG_NET_EMATCH_U32=y | ||
105 | CONFIG_NET_KEY=y | ||
106 | CONFIG_NET_SCHED=y | ||
107 | CONFIG_NET_SCH_HTB=y | ||
108 | CONFIG_NF_CONNTRACK=y | ||
109 | CONFIG_NF_CONNTRACK_AMANDA=y | ||
110 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
111 | CONFIG_NF_CONNTRACK_FTP=y | ||
112 | CONFIG_NF_CONNTRACK_H323=y | ||
113 | CONFIG_NF_CONNTRACK_IPV4=y | ||
114 | CONFIG_NF_CONNTRACK_IPV6=y | ||
115 | CONFIG_NF_CONNTRACK_IRC=y | ||
116 | CONFIG_NF_CONNTRACK_NETBIOS_NS=y | ||
117 | CONFIG_NF_CONNTRACK_PPTP=y | ||
118 | CONFIG_NF_CONNTRACK_SANE=y | ||
119 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
120 | CONFIG_NF_CONNTRACK_TFTP=y | ||
121 | CONFIG_NF_CT_NETLINK=y | ||
122 | CONFIG_NF_CT_PROTO_DCCP=y | ||
123 | CONFIG_NF_CT_PROTO_SCTP=y | ||
124 | CONFIG_NF_CT_PROTO_UDPLITE=y | ||
125 | CONFIG_NF_NAT=y | ||
126 | CONFIG_NO_HZ=y | ||
127 | CONFIG_PACKET=y | ||
128 | CONFIG_PM_AUTOSLEEP=y | ||
129 | CONFIG_PM_WAKELOCKS=y | ||
130 | CONFIG_PPP=y | ||
131 | CONFIG_PPP_BSDCOMP=y | ||
132 | CONFIG_PPP_DEFLATE=y | ||
133 | CONFIG_PPP_MPPE=y | ||
134 | CONFIG_PREEMPT=y | ||
135 | CONFIG_QUOTA=y | ||
136 | CONFIG_RTC_CLASS=y | ||
137 | CONFIG_RT_GROUP_SCHED=y | ||
138 | CONFIG_SECURITY=y | ||
139 | CONFIG_SECURITY_NETWORK=y | ||
140 | CONFIG_SECURITY_SELINUX=y | ||
141 | CONFIG_SETEND_EMULATION=y | ||
142 | CONFIG_STAGING=y | ||
143 | CONFIG_SWP_EMULATION=y | ||
144 | CONFIG_SYNC=y | ||
145 | CONFIG_TUN=y | ||
146 | CONFIG_UNIX=y | ||
147 | CONFIG_USB_GADGET=y | ||
148 | CONFIG_USB_CONFIGFS=y | ||
149 | CONFIG_USB_CONFIGFS_F_FS=y | ||
150 | CONFIG_USB_CONFIGFS_F_MIDI=y | ||
151 | CONFIG_USB_OTG_WAKELOCK=y | ||
152 | CONFIG_XFRM_USER=y | ||
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config new file mode 100644 index 000000000000..e3b953e966d2 --- /dev/null +++ b/kernel/configs/android-recommended.config | |||
@@ -0,0 +1,121 @@ | |||
1 | # KEEP ALPHABETICALLY SORTED | ||
2 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
3 | # CONFIG_INPUT_MOUSE is not set | ||
4 | # CONFIG_LEGACY_PTYS is not set | ||
5 | # CONFIG_NF_CONNTRACK_SIP is not set | ||
6 | # CONFIG_PM_WAKELOCKS_GC is not set | ||
7 | # CONFIG_VT is not set | ||
8 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | ||
9 | CONFIG_BLK_DEV_LOOP=y | ||
10 | CONFIG_BLK_DEV_RAM=y | ||
11 | CONFIG_BLK_DEV_RAM_SIZE=8192 | ||
12 | CONFIG_COMPACTION=y | ||
13 | CONFIG_DEBUG_RODATA=y | ||
14 | CONFIG_DM_UEVENT=y | ||
15 | CONFIG_DRAGONRISE_FF=y | ||
16 | CONFIG_ENABLE_DEFAULT_TRACERS=y | ||
17 | CONFIG_EXT4_FS=y | ||
18 | CONFIG_EXT4_FS_SECURITY=y | ||
19 | CONFIG_FUSE_FS=y | ||
20 | CONFIG_GREENASIA_FF=y | ||
21 | CONFIG_HIDRAW=y | ||
22 | CONFIG_HID_A4TECH=y | ||
23 | CONFIG_HID_ACRUX=y | ||
24 | CONFIG_HID_ACRUX_FF=y | ||
25 | CONFIG_HID_APPLE=y | ||
26 | CONFIG_HID_BELKIN=y | ||
27 | CONFIG_HID_CHERRY=y | ||
28 | CONFIG_HID_CHICONY=y | ||
29 | CONFIG_HID_CYPRESS=y | ||
30 | CONFIG_HID_DRAGONRISE=y | ||
31 | CONFIG_HID_ELECOM=y | ||
32 | CONFIG_HID_EMS_FF=y | ||
33 | CONFIG_HID_EZKEY=y | ||
34 | CONFIG_HID_GREENASIA=y | ||
35 | CONFIG_HID_GYRATION=y | ||
36 | CONFIG_HID_HOLTEK=y | ||
37 | CONFIG_HID_KENSINGTON=y | ||
38 | CONFIG_HID_KEYTOUCH=y | ||
39 | CONFIG_HID_KYE=y | ||
40 | CONFIG_HID_LCPOWER=y | ||
41 | CONFIG_HID_LOGITECH=y | ||
42 | CONFIG_HID_LOGITECH_DJ=y | ||
43 | CONFIG_HID_MAGICMOUSE=y | ||
44 | CONFIG_HID_MICROSOFT=y | ||
45 | CONFIG_HID_MONTEREY=y | ||
46 | CONFIG_HID_MULTITOUCH=y | ||
47 | CONFIG_HID_NTRIG=y | ||
48 | CONFIG_HID_ORTEK=y | ||
49 | CONFIG_HID_PANTHERLORD=y | ||
50 | CONFIG_HID_PETALYNX=y | ||
51 | CONFIG_HID_PICOLCD=y | ||
52 | CONFIG_HID_PRIMAX=y | ||
53 | CONFIG_HID_PRODIKEYS=y | ||
54 | CONFIG_HID_ROCCAT=y | ||
55 | CONFIG_HID_SAITEK=y | ||
56 | CONFIG_HID_SAMSUNG=y | ||
57 | CONFIG_HID_SMARTJOYPLUS=y | ||
58 | CONFIG_HID_SONY=y | ||
59 | CONFIG_HID_SPEEDLINK=y | ||
60 | CONFIG_HID_SUNPLUS=y | ||
61 | CONFIG_HID_THRUSTMASTER=y | ||
62 | CONFIG_HID_TIVO=y | ||
63 | CONFIG_HID_TOPSEED=y | ||
64 | CONFIG_HID_TWINHAN=y | ||
65 | CONFIG_HID_UCLOGIC=y | ||
66 | CONFIG_HID_WACOM=y | ||
67 | CONFIG_HID_WALTOP=y | ||
68 | CONFIG_HID_WIIMOTE=y | ||
69 | CONFIG_HID_ZEROPLUS=y | ||
70 | CONFIG_HID_ZYDACRON=y | ||
71 | CONFIG_INPUT_EVDEV=y | ||
72 | CONFIG_INPUT_GPIO=y | ||
73 | CONFIG_INPUT_JOYSTICK=y | ||
74 | CONFIG_INPUT_MISC=y | ||
75 | CONFIG_INPUT_TABLET=y | ||
76 | CONFIG_INPUT_UINPUT=y | ||
77 | CONFIG_ION=y | ||
78 | CONFIG_JOYSTICK_XPAD=y | ||
79 | CONFIG_JOYSTICK_XPAD_FF=y | ||
80 | CONFIG_JOYSTICK_XPAD_LEDS=y | ||
81 | CONFIG_KALLSYMS_ALL=y | ||
82 | CONFIG_KSM=y | ||
83 | CONFIG_LOGIG940_FF=y | ||
84 | CONFIG_LOGIRUMBLEPAD2_FF=y | ||
85 | CONFIG_LOGITECH_FF=y | ||
86 | CONFIG_MD=y | ||
87 | CONFIG_MEDIA_SUPPORT=y | ||
88 | CONFIG_MSDOS_FS=y | ||
89 | CONFIG_PANIC_TIMEOUT=5 | ||
90 | CONFIG_PANTHERLORD_FF=y | ||
91 | CONFIG_PERF_EVENTS=y | ||
92 | CONFIG_PM_DEBUG=y | ||
93 | CONFIG_PM_RUNTIME=y | ||
94 | CONFIG_PM_WAKELOCKS_LIMIT=0 | ||
95 | CONFIG_POWER_SUPPLY=y | ||
96 | CONFIG_PSTORE=y | ||
97 | CONFIG_PSTORE_CONSOLE=y | ||
98 | CONFIG_PSTORE_RAM=y | ||
99 | CONFIG_SCHEDSTATS=y | ||
100 | CONFIG_SMARTJOYPLUS_FF=y | ||
101 | CONFIG_SND=y | ||
102 | CONFIG_SOUND=y | ||
103 | CONFIG_SUSPEND_TIME=y | ||
104 | CONFIG_TABLET_USB_ACECAD=y | ||
105 | CONFIG_TABLET_USB_AIPTEK=y | ||
106 | CONFIG_TABLET_USB_GTCO=y | ||
107 | CONFIG_TABLET_USB_HANWANG=y | ||
108 | CONFIG_TABLET_USB_KBTAB=y | ||
109 | CONFIG_TASKSTATS=y | ||
110 | CONFIG_TASK_DELAY_ACCT=y | ||
111 | CONFIG_TASK_IO_ACCOUNTING=y | ||
112 | CONFIG_TASK_XACCT=y | ||
113 | CONFIG_TIMER_STATS=y | ||
114 | CONFIG_TMPFS=y | ||
115 | CONFIG_TMPFS_POSIX_ACL=y | ||
116 | CONFIG_UHID=y | ||
117 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | ||
118 | CONFIG_USB_EHCI_HCD=y | ||
119 | CONFIG_USB_HIDDEV=y | ||
120 | CONFIG_USB_USBNET=y | ||
121 | CONFIG_VFAT_FS=y | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 84ae830234f8..2f974ae042a6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -715,7 +715,7 @@ static void check_stack_usage(void) | |||
715 | 715 | ||
716 | spin_lock(&low_water_lock); | 716 | spin_lock(&low_water_lock); |
717 | if (free < lowest_to_date) { | 717 | if (free < lowest_to_date) { |
718 | pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n", | 718 | pr_info("%s (%d) used greatest stack depth: %lu bytes left\n", |
719 | current->comm, task_pid_nr(current), free); | 719 | current->comm, task_pid_nr(current), free); |
720 | lowest_to_date = free; | 720 | lowest_to_date = free; |
721 | } | 721 | } |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 4384672d3245..980936a90ee6 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -48,7 +48,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, | |||
48 | 48 | ||
49 | if (kexec_on_panic) { | 49 | if (kexec_on_panic) { |
50 | /* Verify we have a valid entry point */ | 50 | /* Verify we have a valid entry point */ |
51 | if ((entry < crashk_res.start) || (entry > crashk_res.end)) | 51 | if ((entry < phys_to_boot_phys(crashk_res.start)) || |
52 | (entry > phys_to_boot_phys(crashk_res.end))) | ||
52 | return -EADDRNOTAVAIL; | 53 | return -EADDRNOTAVAIL; |
53 | } | 54 | } |
54 | 55 | ||
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 56b3ed0927b0..561675589511 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -95,6 +95,12 @@ int kexec_should_crash(struct task_struct *p) | |||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | int kexec_crash_loaded(void) | ||
99 | { | ||
100 | return !!kexec_crash_image; | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(kexec_crash_loaded); | ||
103 | |||
98 | /* | 104 | /* |
99 | * When kexec transitions to the new kernel there is a one-to-one | 105 | * When kexec transitions to the new kernel there is a one-to-one |
100 | * mapping between physical and virtual addresses. On processors | 106 | * mapping between physical and virtual addresses. On processors |
@@ -140,6 +146,7 @@ int kexec_should_crash(struct task_struct *p) | |||
140 | * allocating pages whose destination address we do not care about. | 146 | * allocating pages whose destination address we do not care about. |
141 | */ | 147 | */ |
142 | #define KIMAGE_NO_DEST (-1UL) | 148 | #define KIMAGE_NO_DEST (-1UL) |
149 | #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) | ||
143 | 150 | ||
144 | static struct page *kimage_alloc_page(struct kimage *image, | 151 | static struct page *kimage_alloc_page(struct kimage *image, |
145 | gfp_t gfp_mask, | 152 | gfp_t gfp_mask, |
@@ -147,8 +154,9 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
147 | 154 | ||
148 | int sanity_check_segment_list(struct kimage *image) | 155 | int sanity_check_segment_list(struct kimage *image) |
149 | { | 156 | { |
150 | int result, i; | 157 | int i; |
151 | unsigned long nr_segments = image->nr_segments; | 158 | unsigned long nr_segments = image->nr_segments; |
159 | unsigned long total_pages = 0; | ||
152 | 160 | ||
153 | /* | 161 | /* |
154 | * Verify we have good destination addresses. The caller is | 162 | * Verify we have good destination addresses. The caller is |
@@ -163,16 +171,17 @@ int sanity_check_segment_list(struct kimage *image) | |||
163 | * simply because addresses are changed to page size | 171 | * simply because addresses are changed to page size |
164 | * granularity. | 172 | * granularity. |
165 | */ | 173 | */ |
166 | result = -EADDRNOTAVAIL; | ||
167 | for (i = 0; i < nr_segments; i++) { | 174 | for (i = 0; i < nr_segments; i++) { |
168 | unsigned long mstart, mend; | 175 | unsigned long mstart, mend; |
169 | 176 | ||
170 | mstart = image->segment[i].mem; | 177 | mstart = image->segment[i].mem; |
171 | mend = mstart + image->segment[i].memsz; | 178 | mend = mstart + image->segment[i].memsz; |
179 | if (mstart > mend) | ||
180 | return -EADDRNOTAVAIL; | ||
172 | if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) | 181 | if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) |
173 | return result; | 182 | return -EADDRNOTAVAIL; |
174 | if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) | 183 | if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) |
175 | return result; | 184 | return -EADDRNOTAVAIL; |
176 | } | 185 | } |
177 | 186 | ||
178 | /* Verify our destination addresses do not overlap. | 187 | /* Verify our destination addresses do not overlap. |
@@ -180,7 +189,6 @@ int sanity_check_segment_list(struct kimage *image) | |||
180 | * through very weird things can happen with no | 189 | * through very weird things can happen with no |
181 | * easy explanation as one segment stops on another. | 190 | * easy explanation as one segment stops on another. |
182 | */ | 191 | */ |
183 | result = -EINVAL; | ||
184 | for (i = 0; i < nr_segments; i++) { | 192 | for (i = 0; i < nr_segments; i++) { |
185 | unsigned long mstart, mend; | 193 | unsigned long mstart, mend; |
186 | unsigned long j; | 194 | unsigned long j; |
@@ -194,7 +202,7 @@ int sanity_check_segment_list(struct kimage *image) | |||
194 | pend = pstart + image->segment[j].memsz; | 202 | pend = pstart + image->segment[j].memsz; |
195 | /* Do the segments overlap ? */ | 203 | /* Do the segments overlap ? */ |
196 | if ((mend > pstart) && (mstart < pend)) | 204 | if ((mend > pstart) && (mstart < pend)) |
197 | return result; | 205 | return -EINVAL; |
198 | } | 206 | } |
199 | } | 207 | } |
200 | 208 | ||
@@ -203,12 +211,26 @@ int sanity_check_segment_list(struct kimage *image) | |||
203 | * and it is easier to check up front than to be surprised | 211 | * and it is easier to check up front than to be surprised |
204 | * later on. | 212 | * later on. |
205 | */ | 213 | */ |
206 | result = -EINVAL; | ||
207 | for (i = 0; i < nr_segments; i++) { | 214 | for (i = 0; i < nr_segments; i++) { |
208 | if (image->segment[i].bufsz > image->segment[i].memsz) | 215 | if (image->segment[i].bufsz > image->segment[i].memsz) |
209 | return result; | 216 | return -EINVAL; |
217 | } | ||
218 | |||
219 | /* | ||
220 | * Verify that no more than half of memory will be consumed. If the | ||
221 | * request from userspace is too large, a large amount of time will be | ||
222 | * wasted allocating pages, which can cause a soft lockup. | ||
223 | */ | ||
224 | for (i = 0; i < nr_segments; i++) { | ||
225 | if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2) | ||
226 | return -EINVAL; | ||
227 | |||
228 | total_pages += PAGE_COUNT(image->segment[i].memsz); | ||
210 | } | 229 | } |
211 | 230 | ||
231 | if (total_pages > totalram_pages / 2) | ||
232 | return -EINVAL; | ||
233 | |||
212 | /* | 234 | /* |
213 | * Verify we have good destination addresses. Normally | 235 | * Verify we have good destination addresses. Normally |
214 | * the caller is responsible for making certain we don't | 236 | * the caller is responsible for making certain we don't |
@@ -220,16 +242,15 @@ int sanity_check_segment_list(struct kimage *image) | |||
220 | */ | 242 | */ |
221 | 243 | ||
222 | if (image->type == KEXEC_TYPE_CRASH) { | 244 | if (image->type == KEXEC_TYPE_CRASH) { |
223 | result = -EADDRNOTAVAIL; | ||
224 | for (i = 0; i < nr_segments; i++) { | 245 | for (i = 0; i < nr_segments; i++) { |
225 | unsigned long mstart, mend; | 246 | unsigned long mstart, mend; |
226 | 247 | ||
227 | mstart = image->segment[i].mem; | 248 | mstart = image->segment[i].mem; |
228 | mend = mstart + image->segment[i].memsz - 1; | 249 | mend = mstart + image->segment[i].memsz - 1; |
229 | /* Ensure we are within the crash kernel limits */ | 250 | /* Ensure we are within the crash kernel limits */ |
230 | if ((mstart < crashk_res.start) || | 251 | if ((mstart < phys_to_boot_phys(crashk_res.start)) || |
231 | (mend > crashk_res.end)) | 252 | (mend > phys_to_boot_phys(crashk_res.end))) |
232 | return result; | 253 | return -EADDRNOTAVAIL; |
233 | } | 254 | } |
234 | } | 255 | } |
235 | 256 | ||
@@ -352,7 +373,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, | |||
352 | pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); | 373 | pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); |
353 | if (!pages) | 374 | if (!pages) |
354 | break; | 375 | break; |
355 | pfn = page_to_pfn(pages); | 376 | pfn = page_to_boot_pfn(pages); |
356 | epfn = pfn + count; | 377 | epfn = pfn + count; |
357 | addr = pfn << PAGE_SHIFT; | 378 | addr = pfn << PAGE_SHIFT; |
358 | eaddr = epfn << PAGE_SHIFT; | 379 | eaddr = epfn << PAGE_SHIFT; |
@@ -478,7 +499,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) | |||
478 | return -ENOMEM; | 499 | return -ENOMEM; |
479 | 500 | ||
480 | ind_page = page_address(page); | 501 | ind_page = page_address(page); |
481 | *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; | 502 | *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION; |
482 | image->entry = ind_page; | 503 | image->entry = ind_page; |
483 | image->last_entry = ind_page + | 504 | image->last_entry = ind_page + |
484 | ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); | 505 | ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); |
@@ -533,13 +554,13 @@ void kimage_terminate(struct kimage *image) | |||
533 | #define for_each_kimage_entry(image, ptr, entry) \ | 554 | #define for_each_kimage_entry(image, ptr, entry) \ |
534 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ | 555 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ |
535 | ptr = (entry & IND_INDIRECTION) ? \ | 556 | ptr = (entry & IND_INDIRECTION) ? \ |
536 | phys_to_virt((entry & PAGE_MASK)) : ptr + 1) | 557 | boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1) |
537 | 558 | ||
538 | static void kimage_free_entry(kimage_entry_t entry) | 559 | static void kimage_free_entry(kimage_entry_t entry) |
539 | { | 560 | { |
540 | struct page *page; | 561 | struct page *page; |
541 | 562 | ||
542 | page = pfn_to_page(entry >> PAGE_SHIFT); | 563 | page = boot_pfn_to_page(entry >> PAGE_SHIFT); |
543 | kimage_free_pages(page); | 564 | kimage_free_pages(page); |
544 | } | 565 | } |
545 | 566 | ||
@@ -633,7 +654,7 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
633 | * have a match. | 654 | * have a match. |
634 | */ | 655 | */ |
635 | list_for_each_entry(page, &image->dest_pages, lru) { | 656 | list_for_each_entry(page, &image->dest_pages, lru) { |
636 | addr = page_to_pfn(page) << PAGE_SHIFT; | 657 | addr = page_to_boot_pfn(page) << PAGE_SHIFT; |
637 | if (addr == destination) { | 658 | if (addr == destination) { |
638 | list_del(&page->lru); | 659 | list_del(&page->lru); |
639 | return page; | 660 | return page; |
@@ -648,12 +669,12 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
648 | if (!page) | 669 | if (!page) |
649 | return NULL; | 670 | return NULL; |
650 | /* If the page cannot be used file it away */ | 671 | /* If the page cannot be used file it away */ |
651 | if (page_to_pfn(page) > | 672 | if (page_to_boot_pfn(page) > |
652 | (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { | 673 | (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { |
653 | list_add(&page->lru, &image->unusable_pages); | 674 | list_add(&page->lru, &image->unusable_pages); |
654 | continue; | 675 | continue; |
655 | } | 676 | } |
656 | addr = page_to_pfn(page) << PAGE_SHIFT; | 677 | addr = page_to_boot_pfn(page) << PAGE_SHIFT; |
657 | 678 | ||
658 | /* If it is the destination page we want use it */ | 679 | /* If it is the destination page we want use it */ |
659 | if (addr == destination) | 680 | if (addr == destination) |
@@ -676,7 +697,7 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
676 | struct page *old_page; | 697 | struct page *old_page; |
677 | 698 | ||
678 | old_addr = *old & PAGE_MASK; | 699 | old_addr = *old & PAGE_MASK; |
679 | old_page = pfn_to_page(old_addr >> PAGE_SHIFT); | 700 | old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT); |
680 | copy_highpage(page, old_page); | 701 | copy_highpage(page, old_page); |
681 | *old = addr | (*old & ~PAGE_MASK); | 702 | *old = addr | (*old & ~PAGE_MASK); |
682 | 703 | ||
@@ -732,7 +753,7 @@ static int kimage_load_normal_segment(struct kimage *image, | |||
732 | result = -ENOMEM; | 753 | result = -ENOMEM; |
733 | goto out; | 754 | goto out; |
734 | } | 755 | } |
735 | result = kimage_add_page(image, page_to_pfn(page) | 756 | result = kimage_add_page(image, page_to_boot_pfn(page) |
736 | << PAGE_SHIFT); | 757 | << PAGE_SHIFT); |
737 | if (result < 0) | 758 | if (result < 0) |
738 | goto out; | 759 | goto out; |
@@ -793,7 +814,7 @@ static int kimage_load_crash_segment(struct kimage *image, | |||
793 | char *ptr; | 814 | char *ptr; |
794 | size_t uchunk, mchunk; | 815 | size_t uchunk, mchunk; |
795 | 816 | ||
796 | page = pfn_to_page(maddr >> PAGE_SHIFT); | 817 | page = boot_pfn_to_page(maddr >> PAGE_SHIFT); |
797 | if (!page) { | 818 | if (!page) { |
798 | result = -ENOMEM; | 819 | result = -ENOMEM; |
799 | goto out; | 820 | goto out; |
@@ -921,7 +942,7 @@ void __weak crash_free_reserved_phys_range(unsigned long begin, | |||
921 | unsigned long addr; | 942 | unsigned long addr; |
922 | 943 | ||
923 | for (addr = begin; addr < end; addr += PAGE_SIZE) | 944 | for (addr = begin; addr < end; addr += PAGE_SIZE) |
924 | free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); | 945 | free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); |
925 | } | 946 | } |
926 | 947 | ||
927 | int crash_shrink_memory(unsigned long new_size) | 948 | int crash_shrink_memory(unsigned long new_size) |
@@ -1374,7 +1395,7 @@ void vmcoreinfo_append_str(const char *fmt, ...) | |||
1374 | void __weak arch_crash_save_vmcoreinfo(void) | 1395 | void __weak arch_crash_save_vmcoreinfo(void) |
1375 | {} | 1396 | {} |
1376 | 1397 | ||
1377 | unsigned long __weak paddr_vmcoreinfo_note(void) | 1398 | phys_addr_t __weak paddr_vmcoreinfo_note(void) |
1378 | { | 1399 | { |
1379 | return __pa((unsigned long)(char *)&vmcoreinfo_note); | 1400 | return __pa((unsigned long)(char *)&vmcoreinfo_note); |
1380 | } | 1401 | } |
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 152da4a48867..ee1bc1bb8feb 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
@@ -101,7 +101,7 @@ KERNEL_ATTR_RO(kexec_loaded); | |||
101 | static ssize_t kexec_crash_loaded_show(struct kobject *kobj, | 101 | static ssize_t kexec_crash_loaded_show(struct kobject *kobj, |
102 | struct kobj_attribute *attr, char *buf) | 102 | struct kobj_attribute *attr, char *buf) |
103 | { | 103 | { |
104 | return sprintf(buf, "%d\n", !!kexec_crash_image); | 104 | return sprintf(buf, "%d\n", kexec_crash_loaded()); |
105 | } | 105 | } |
106 | KERNEL_ATTR_RO(kexec_crash_loaded); | 106 | KERNEL_ATTR_RO(kexec_crash_loaded); |
107 | 107 | ||
@@ -128,8 +128,8 @@ KERNEL_ATTR_RW(kexec_crash_size); | |||
128 | static ssize_t vmcoreinfo_show(struct kobject *kobj, | 128 | static ssize_t vmcoreinfo_show(struct kobject *kobj, |
129 | struct kobj_attribute *attr, char *buf) | 129 | struct kobj_attribute *attr, char *buf) |
130 | { | 130 | { |
131 | return sprintf(buf, "%lx %x\n", | 131 | phys_addr_t vmcore_base = paddr_vmcoreinfo_note(); |
132 | paddr_vmcoreinfo_note(), | 132 | return sprintf(buf, "%pa %x\n", &vmcore_base, |
133 | (unsigned int)sizeof(vmcoreinfo_note)); | 133 | (unsigned int)sizeof(vmcoreinfo_note)); |
134 | } | 134 | } |
135 | KERNEL_ATTR_RO(vmcoreinfo); | 135 | KERNEL_ATTR_RO(vmcoreinfo); |
diff --git a/kernel/module.c b/kernel/module.c index 5f71aa63ed2a..a0f48b8b00da 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/jump_label.h> | 60 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 61 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 62 | #include <linux/bsearch.h> |
63 | #include <linux/dynamic_debug.h> | ||
63 | #include <uapi/linux/module.h> | 64 | #include <uapi/linux/module.h> |
64 | #include "module-internal.h" | 65 | #include "module-internal.h" |
65 | 66 | ||
diff --git a/kernel/panic.c b/kernel/panic.c index 8aa74497cc5a..ca8cea1ef673 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -108,6 +108,7 @@ void panic(const char *fmt, ...) | |||
108 | long i, i_next = 0; | 108 | long i, i_next = 0; |
109 | int state = 0; | 109 | int state = 0; |
110 | int old_cpu, this_cpu; | 110 | int old_cpu, this_cpu; |
111 | bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers; | ||
111 | 112 | ||
112 | /* | 113 | /* |
113 | * Disable local interrupts. This will prevent panic_smp_self_stop | 114 | * Disable local interrupts. This will prevent panic_smp_self_stop |
@@ -160,7 +161,7 @@ void panic(const char *fmt, ...) | |||
160 | * | 161 | * |
161 | * Bypass the panic_cpu check and call __crash_kexec directly. | 162 | * Bypass the panic_cpu check and call __crash_kexec directly. |
162 | */ | 163 | */ |
163 | if (!crash_kexec_post_notifiers) { | 164 | if (!_crash_kexec_post_notifiers) { |
164 | printk_nmi_flush_on_panic(); | 165 | printk_nmi_flush_on_panic(); |
165 | __crash_kexec(NULL); | 166 | __crash_kexec(NULL); |
166 | } | 167 | } |
@@ -191,7 +192,7 @@ void panic(const char *fmt, ...) | |||
191 | * | 192 | * |
192 | * Bypass the panic_cpu check and call __crash_kexec directly. | 193 | * Bypass the panic_cpu check and call __crash_kexec directly. |
193 | */ | 194 | */ |
194 | if (crash_kexec_post_notifiers) | 195 | if (_crash_kexec_post_notifiers) |
195 | __crash_kexec(NULL); | 196 | __crash_kexec(NULL); |
196 | 197 | ||
197 | bust_spinlocks(0); | 198 | bust_spinlocks(0); |
@@ -571,13 +572,7 @@ EXPORT_SYMBOL(__stack_chk_fail); | |||
571 | core_param(panic, panic_timeout, int, 0644); | 572 | core_param(panic, panic_timeout, int, 0644); |
572 | core_param(pause_on_oops, pause_on_oops, int, 0644); | 573 | core_param(pause_on_oops, pause_on_oops, int, 0644); |
573 | core_param(panic_on_warn, panic_on_warn, int, 0644); | 574 | core_param(panic_on_warn, panic_on_warn, int, 0644); |
574 | 575 | core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644); | |
575 | static int __init setup_crash_kexec_post_notifiers(char *s) | ||
576 | { | ||
577 | crash_kexec_post_notifiers = true; | ||
578 | return 0; | ||
579 | } | ||
580 | early_param("crash_kexec_post_notifiers", setup_crash_kexec_post_notifiers); | ||
581 | 576 | ||
582 | static int __init oops_setup(char *s) | 577 | static int __init oops_setup(char *s) |
583 | { | 578 | { |
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index 7fd2838fa417..5d4505f30083 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h | |||
@@ -16,9 +16,11 @@ | |||
16 | */ | 16 | */ |
17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
18 | 18 | ||
19 | typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); | 19 | typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt, |
20 | va_list args); | ||
20 | 21 | ||
21 | int __printf(1, 0) vprintk_default(const char *fmt, va_list args); | 22 | __printf(2, 0) |
23 | int vprintk_default(int level, const char *fmt, va_list args); | ||
22 | 24 | ||
23 | #ifdef CONFIG_PRINTK_NMI | 25 | #ifdef CONFIG_PRINTK_NMI |
24 | 26 | ||
@@ -31,9 +33,10 @@ extern raw_spinlock_t logbuf_lock; | |||
31 | * via per-CPU variable. | 33 | * via per-CPU variable. |
32 | */ | 34 | */ |
33 | DECLARE_PER_CPU(printk_func_t, printk_func); | 35 | DECLARE_PER_CPU(printk_func_t, printk_func); |
34 | static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) | 36 | __printf(2, 0) |
37 | static inline int vprintk_func(int level, const char *fmt, va_list args) | ||
35 | { | 38 | { |
36 | return this_cpu_read(printk_func)(fmt, args); | 39 | return this_cpu_read(printk_func)(level, fmt, args); |
37 | } | 40 | } |
38 | 41 | ||
39 | extern atomic_t nmi_message_lost; | 42 | extern atomic_t nmi_message_lost; |
@@ -44,9 +47,10 @@ static inline int get_nmi_message_lost(void) | |||
44 | 47 | ||
45 | #else /* CONFIG_PRINTK_NMI */ | 48 | #else /* CONFIG_PRINTK_NMI */ |
46 | 49 | ||
47 | static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) | 50 | __printf(2, 0) |
51 | static inline int vprintk_func(int level, const char *fmt, va_list args) | ||
48 | { | 52 | { |
49 | return vprintk_default(fmt, args); | 53 | return vprintk_default(level, fmt, args); |
50 | } | 54 | } |
51 | 55 | ||
52 | static inline int get_nmi_message_lost(void) | 56 | static inline int get_nmi_message_lost(void) |
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c index b69eb8a2876f..bc3eeb1ae6da 100644 --- a/kernel/printk/nmi.c +++ b/kernel/printk/nmi.c | |||
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); | |||
58 | * one writer running. But the buffer might get flushed from another | 58 | * one writer running. But the buffer might get flushed from another |
59 | * CPU, so we need to be careful. | 59 | * CPU, so we need to be careful. |
60 | */ | 60 | */ |
61 | static int vprintk_nmi(const char *fmt, va_list args) | 61 | static int vprintk_nmi(int level, const char *fmt, va_list args) |
62 | { | 62 | { |
63 | struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); | 63 | struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); |
64 | int add = 0; | 64 | int add = 0; |
@@ -79,7 +79,16 @@ again: | |||
79 | if (!len) | 79 | if (!len) |
80 | smp_rmb(); | 80 | smp_rmb(); |
81 | 81 | ||
82 | add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args); | 82 | if (level != LOGLEVEL_DEFAULT) { |
83 | add = snprintf(s->buffer + len, sizeof(s->buffer) - len, | ||
84 | KERN_SOH "%c", '0' + level); | ||
85 | add += vsnprintf(s->buffer + len + add, | ||
86 | sizeof(s->buffer) - len - add, | ||
87 | fmt, args); | ||
88 | } else { | ||
89 | add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, | ||
90 | fmt, args); | ||
91 | } | ||
83 | 92 | ||
84 | /* | 93 | /* |
85 | * Do it once again if the buffer has been flushed in the meantime. | 94 | * Do it once again if the buffer has been flushed in the meantime. |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index d4de33934dac..a5ef95ca18c9 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/nmi.h> | 26 | #include <linux/nmi.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/moduleparam.h> | 28 | #include <linux/moduleparam.h> |
29 | #include <linux/interrupt.h> /* For in_interrupt() */ | ||
30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
31 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
32 | #include <linux/security.h> | 31 | #include <linux/security.h> |
@@ -48,7 +47,7 @@ | |||
48 | #include <linux/uio.h> | 47 | #include <linux/uio.h> |
49 | 48 | ||
50 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
51 | #include <asm-generic/sections.h> | 50 | #include <asm/sections.h> |
52 | 51 | ||
53 | #define CREATE_TRACE_POINTS | 52 | #define CREATE_TRACE_POINTS |
54 | #include <trace/events/printk.h> | 53 | #include <trace/events/printk.h> |
@@ -86,6 +85,111 @@ static struct lockdep_map console_lock_dep_map = { | |||
86 | }; | 85 | }; |
87 | #endif | 86 | #endif |
88 | 87 | ||
88 | enum devkmsg_log_bits { | ||
89 | __DEVKMSG_LOG_BIT_ON = 0, | ||
90 | __DEVKMSG_LOG_BIT_OFF, | ||
91 | __DEVKMSG_LOG_BIT_LOCK, | ||
92 | }; | ||
93 | |||
94 | enum devkmsg_log_masks { | ||
95 | DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON), | ||
96 | DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF), | ||
97 | DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK), | ||
98 | }; | ||
99 | |||
100 | /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */ | ||
101 | #define DEVKMSG_LOG_MASK_DEFAULT 0 | ||
102 | |||
103 | static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; | ||
104 | |||
105 | static int __control_devkmsg(char *str) | ||
106 | { | ||
107 | if (!str) | ||
108 | return -EINVAL; | ||
109 | |||
110 | if (!strncmp(str, "on", 2)) { | ||
111 | devkmsg_log = DEVKMSG_LOG_MASK_ON; | ||
112 | return 2; | ||
113 | } else if (!strncmp(str, "off", 3)) { | ||
114 | devkmsg_log = DEVKMSG_LOG_MASK_OFF; | ||
115 | return 3; | ||
116 | } else if (!strncmp(str, "ratelimit", 9)) { | ||
117 | devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; | ||
118 | return 9; | ||
119 | } | ||
120 | return -EINVAL; | ||
121 | } | ||
122 | |||
123 | static int __init control_devkmsg(char *str) | ||
124 | { | ||
125 | if (__control_devkmsg(str) < 0) | ||
126 | return 1; | ||
127 | |||
128 | /* | ||
129 | * Set sysctl string accordingly: | ||
130 | */ | ||
131 | if (devkmsg_log == DEVKMSG_LOG_MASK_ON) { | ||
132 | memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE); | ||
133 | strncpy(devkmsg_log_str, "on", 2); | ||
134 | } else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) { | ||
135 | memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE); | ||
136 | strncpy(devkmsg_log_str, "off", 3); | ||
137 | } | ||
138 | /* else "ratelimit" which is set by default. */ | ||
139 | |||
140 | /* | ||
141 | * Sysctl cannot change it anymore. The kernel command line setting of | ||
142 | * this parameter is to force the setting to be permanent throughout the | ||
143 | * runtime of the system. This is a precaution measure against userspace | ||
144 | * trying to be a smarta** and attempting to change it up on us. | ||
145 | */ | ||
146 | devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | __setup("printk.devkmsg=", control_devkmsg); | ||
151 | |||
152 | char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; | ||
153 | |||
154 | int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, | ||
155 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
156 | { | ||
157 | char old_str[DEVKMSG_STR_MAX_SIZE]; | ||
158 | unsigned int old; | ||
159 | int err; | ||
160 | |||
161 | if (write) { | ||
162 | if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK) | ||
163 | return -EINVAL; | ||
164 | |||
165 | old = devkmsg_log; | ||
166 | strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE); | ||
167 | } | ||
168 | |||
169 | err = proc_dostring(table, write, buffer, lenp, ppos); | ||
170 | if (err) | ||
171 | return err; | ||
172 | |||
173 | if (write) { | ||
174 | err = __control_devkmsg(devkmsg_log_str); | ||
175 | |||
176 | /* | ||
177 | * Do not accept an unknown string OR a known string with | ||
178 | * trailing crap... | ||
179 | */ | ||
180 | if (err < 0 || (err + 1 != *lenp)) { | ||
181 | |||
182 | /* ... and restore old setting. */ | ||
183 | devkmsg_log = old; | ||
184 | strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE); | ||
185 | |||
186 | return -EINVAL; | ||
187 | } | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
89 | /* | 193 | /* |
90 | * Number of registered extended console drivers. | 194 | * Number of registered extended console drivers. |
91 | * | 195 | * |
@@ -614,6 +718,7 @@ struct devkmsg_user { | |||
614 | u64 seq; | 718 | u64 seq; |
615 | u32 idx; | 719 | u32 idx; |
616 | enum log_flags prev; | 720 | enum log_flags prev; |
721 | struct ratelimit_state rs; | ||
617 | struct mutex lock; | 722 | struct mutex lock; |
618 | char buf[CONSOLE_EXT_LOG_MAX]; | 723 | char buf[CONSOLE_EXT_LOG_MAX]; |
619 | }; | 724 | }; |
@@ -623,11 +728,24 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) | |||
623 | char *buf, *line; | 728 | char *buf, *line; |
624 | int level = default_message_loglevel; | 729 | int level = default_message_loglevel; |
625 | int facility = 1; /* LOG_USER */ | 730 | int facility = 1; /* LOG_USER */ |
731 | struct file *file = iocb->ki_filp; | ||
732 | struct devkmsg_user *user = file->private_data; | ||
626 | size_t len = iov_iter_count(from); | 733 | size_t len = iov_iter_count(from); |
627 | ssize_t ret = len; | 734 | ssize_t ret = len; |
628 | 735 | ||
629 | if (len > LOG_LINE_MAX) | 736 | if (!user || len > LOG_LINE_MAX) |
630 | return -EINVAL; | 737 | return -EINVAL; |
738 | |||
739 | /* Ignore when user logging is disabled. */ | ||
740 | if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) | ||
741 | return len; | ||
742 | |||
743 | /* Ratelimit when not explicitly enabled. */ | ||
744 | if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) { | ||
745 | if (!___ratelimit(&user->rs, current->comm)) | ||
746 | return ret; | ||
747 | } | ||
748 | |||
631 | buf = kmalloc(len+1, GFP_KERNEL); | 749 | buf = kmalloc(len+1, GFP_KERNEL); |
632 | if (buf == NULL) | 750 | if (buf == NULL) |
633 | return -ENOMEM; | 751 | return -ENOMEM; |
@@ -800,19 +918,24 @@ static int devkmsg_open(struct inode *inode, struct file *file) | |||
800 | struct devkmsg_user *user; | 918 | struct devkmsg_user *user; |
801 | int err; | 919 | int err; |
802 | 920 | ||
803 | /* write-only does not need any file context */ | 921 | if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) |
804 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 922 | return -EPERM; |
805 | return 0; | ||
806 | 923 | ||
807 | err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, | 924 | /* write-only does not need any file context */ |
808 | SYSLOG_FROM_READER); | 925 | if ((file->f_flags & O_ACCMODE) != O_WRONLY) { |
809 | if (err) | 926 | err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, |
810 | return err; | 927 | SYSLOG_FROM_READER); |
928 | if (err) | ||
929 | return err; | ||
930 | } | ||
811 | 931 | ||
812 | user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); | 932 | user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); |
813 | if (!user) | 933 | if (!user) |
814 | return -ENOMEM; | 934 | return -ENOMEM; |
815 | 935 | ||
936 | ratelimit_default_init(&user->rs); | ||
937 | ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE); | ||
938 | |||
816 | mutex_init(&user->lock); | 939 | mutex_init(&user->lock); |
817 | 940 | ||
818 | raw_spin_lock_irq(&logbuf_lock); | 941 | raw_spin_lock_irq(&logbuf_lock); |
@@ -831,6 +954,8 @@ static int devkmsg_release(struct inode *inode, struct file *file) | |||
831 | if (!user) | 954 | if (!user) |
832 | return 0; | 955 | return 0; |
833 | 956 | ||
957 | ratelimit_state_exit(&user->rs); | ||
958 | |||
834 | mutex_destroy(&user->lock); | 959 | mutex_destroy(&user->lock); |
835 | kfree(user); | 960 | kfree(user); |
836 | return 0; | 961 | return 0; |
@@ -986,6 +1111,11 @@ module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); | |||
986 | MODULE_PARM_DESC(ignore_loglevel, | 1111 | MODULE_PARM_DESC(ignore_loglevel, |
987 | "ignore loglevel setting (prints all kernel messages to the console)"); | 1112 | "ignore loglevel setting (prints all kernel messages to the console)"); |
988 | 1113 | ||
1114 | static bool suppress_message_printing(int level) | ||
1115 | { | ||
1116 | return (level >= console_loglevel && !ignore_loglevel); | ||
1117 | } | ||
1118 | |||
989 | #ifdef CONFIG_BOOT_PRINTK_DELAY | 1119 | #ifdef CONFIG_BOOT_PRINTK_DELAY |
990 | 1120 | ||
991 | static int boot_delay; /* msecs delay after each printk during bootup */ | 1121 | static int boot_delay; /* msecs delay after each printk during bootup */ |
@@ -1015,7 +1145,7 @@ static void boot_delay_msec(int level) | |||
1015 | unsigned long timeout; | 1145 | unsigned long timeout; |
1016 | 1146 | ||
1017 | if ((boot_delay == 0 || system_state != SYSTEM_BOOTING) | 1147 | if ((boot_delay == 0 || system_state != SYSTEM_BOOTING) |
1018 | || (level >= console_loglevel && !ignore_loglevel)) { | 1148 | || suppress_message_printing(level)) { |
1019 | return; | 1149 | return; |
1020 | } | 1150 | } |
1021 | 1151 | ||
@@ -1439,8 +1569,6 @@ static void call_console_drivers(int level, | |||
1439 | 1569 | ||
1440 | trace_console(text, len); | 1570 | trace_console(text, len); |
1441 | 1571 | ||
1442 | if (level >= console_loglevel && !ignore_loglevel) | ||
1443 | return; | ||
1444 | if (!console_drivers) | 1572 | if (!console_drivers) |
1445 | return; | 1573 | return; |
1446 | 1574 | ||
@@ -1802,7 +1930,28 @@ asmlinkage int printk_emit(int facility, int level, | |||
1802 | } | 1930 | } |
1803 | EXPORT_SYMBOL(printk_emit); | 1931 | EXPORT_SYMBOL(printk_emit); |
1804 | 1932 | ||
1805 | int vprintk_default(const char *fmt, va_list args) | 1933 | #ifdef CONFIG_PRINTK |
1934 | #define define_pr_level(func, loglevel) \ | ||
1935 | asmlinkage __visible void func(const char *fmt, ...) \ | ||
1936 | { \ | ||
1937 | va_list args; \ | ||
1938 | \ | ||
1939 | va_start(args, fmt); \ | ||
1940 | vprintk_default(loglevel, fmt, args); \ | ||
1941 | va_end(args); \ | ||
1942 | } \ | ||
1943 | EXPORT_SYMBOL(func) | ||
1944 | |||
1945 | define_pr_level(__pr_emerg, LOGLEVEL_EMERG); | ||
1946 | define_pr_level(__pr_alert, LOGLEVEL_ALERT); | ||
1947 | define_pr_level(__pr_crit, LOGLEVEL_CRIT); | ||
1948 | define_pr_level(__pr_err, LOGLEVEL_ERR); | ||
1949 | define_pr_level(__pr_warn, LOGLEVEL_WARNING); | ||
1950 | define_pr_level(__pr_notice, LOGLEVEL_NOTICE); | ||
1951 | define_pr_level(__pr_info, LOGLEVEL_INFO); | ||
1952 | #endif | ||
1953 | |||
1954 | int vprintk_default(int level, const char *fmt, va_list args) | ||
1806 | { | 1955 | { |
1807 | int r; | 1956 | int r; |
1808 | 1957 | ||
@@ -1812,7 +1961,7 @@ int vprintk_default(const char *fmt, va_list args) | |||
1812 | return r; | 1961 | return r; |
1813 | } | 1962 | } |
1814 | #endif | 1963 | #endif |
1815 | r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); | 1964 | r = vprintk_emit(0, level, NULL, 0, fmt, args); |
1816 | 1965 | ||
1817 | return r; | 1966 | return r; |
1818 | } | 1967 | } |
@@ -1845,7 +1994,7 @@ asmlinkage __visible int printk(const char *fmt, ...) | |||
1845 | int r; | 1994 | int r; |
1846 | 1995 | ||
1847 | va_start(args, fmt); | 1996 | va_start(args, fmt); |
1848 | r = vprintk_func(fmt, args); | 1997 | r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args); |
1849 | va_end(args); | 1998 | va_end(args); |
1850 | 1999 | ||
1851 | return r; | 2000 | return r; |
@@ -1888,6 +2037,7 @@ static void call_console_drivers(int level, | |||
1888 | static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev, | 2037 | static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev, |
1889 | bool syslog, char *buf, size_t size) { return 0; } | 2038 | bool syslog, char *buf, size_t size) { return 0; } |
1890 | static size_t cont_print_text(char *text, size_t size) { return 0; } | 2039 | static size_t cont_print_text(char *text, size_t size) { return 0; } |
2040 | static bool suppress_message_printing(int level) { return false; } | ||
1891 | 2041 | ||
1892 | /* Still needs to be defined for users */ | 2042 | /* Still needs to be defined for users */ |
1893 | DEFINE_PER_CPU(printk_func_t, printk_func); | 2043 | DEFINE_PER_CPU(printk_func_t, printk_func); |
@@ -2167,6 +2317,13 @@ static void console_cont_flush(char *text, size_t size) | |||
2167 | if (!cont.len) | 2317 | if (!cont.len) |
2168 | goto out; | 2318 | goto out; |
2169 | 2319 | ||
2320 | if (suppress_message_printing(cont.level)) { | ||
2321 | cont.cons = cont.len; | ||
2322 | if (cont.flushed) | ||
2323 | cont.len = 0; | ||
2324 | goto out; | ||
2325 | } | ||
2326 | |||
2170 | /* | 2327 | /* |
2171 | * We still queue earlier records, likely because the console was | 2328 | * We still queue earlier records, likely because the console was |
2172 | * busy. The earlier ones need to be printed before this one, we | 2329 | * busy. The earlier ones need to be printed before this one, we |
@@ -2270,10 +2427,13 @@ skip: | |||
2270 | break; | 2427 | break; |
2271 | 2428 | ||
2272 | msg = log_from_idx(console_idx); | 2429 | msg = log_from_idx(console_idx); |
2273 | if (msg->flags & LOG_NOCONS) { | 2430 | level = msg->level; |
2431 | if ((msg->flags & LOG_NOCONS) || | ||
2432 | suppress_message_printing(level)) { | ||
2274 | /* | 2433 | /* |
2275 | * Skip record we have buffered and already printed | 2434 | * Skip record we have buffered and already printed |
2276 | * directly to the console when we received it. | 2435 | * directly to the console when we received it, and |
2436 | * record that has level above the console loglevel. | ||
2277 | */ | 2437 | */ |
2278 | console_idx = log_next(console_idx); | 2438 | console_idx = log_next(console_idx); |
2279 | console_seq++; | 2439 | console_seq++; |
@@ -2287,7 +2447,6 @@ skip: | |||
2287 | goto skip; | 2447 | goto skip; |
2288 | } | 2448 | } |
2289 | 2449 | ||
2290 | level = msg->level; | ||
2291 | len += msg_print_text(msg, console_prev, false, | 2450 | len += msg_print_text(msg, console_prev, false, |
2292 | text + len, sizeof(text) - len); | 2451 | text + len, sizeof(text) - len); |
2293 | if (nr_ext_console_drivers) { | 2452 | if (nr_ext_console_drivers) { |
diff --git a/kernel/relay.c b/kernel/relay.c index 04d7cf3ef8cf..d797502140b9 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -451,6 +451,13 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) | |||
451 | if (!dentry) | 451 | if (!dentry) |
452 | goto free_buf; | 452 | goto free_buf; |
453 | relay_set_buf_dentry(buf, dentry); | 453 | relay_set_buf_dentry(buf, dentry); |
454 | } else { | ||
455 | /* Only retrieve global info, nothing more, nothing less */ | ||
456 | dentry = chan->cb->create_buf_file(NULL, NULL, | ||
457 | S_IRUSR, buf, | ||
458 | &chan->is_global); | ||
459 | if (WARN_ON(dentry)) | ||
460 | goto free_buf; | ||
454 | } | 461 | } |
455 | 462 | ||
456 | buf->cpu = cpu; | 463 | buf->cpu = cpu; |
@@ -562,6 +569,10 @@ static int relay_hotcpu_callback(struct notifier_block *nb, | |||
562 | * attributes specified. The created channel buffer files | 569 | * attributes specified. The created channel buffer files |
563 | * will be named base_filename0...base_filenameN-1. File | 570 | * will be named base_filename0...base_filenameN-1. File |
564 | * permissions will be %S_IRUSR. | 571 | * permissions will be %S_IRUSR. |
572 | * | ||
573 | * If opening a buffer (@parent = NULL) that you later wish to register | ||
574 | * in a filesystem, call relay_late_setup_files() once the @parent dentry | ||
575 | * is available. | ||
565 | */ | 576 | */ |
566 | struct rchan *relay_open(const char *base_filename, | 577 | struct rchan *relay_open(const char *base_filename, |
567 | struct dentry *parent, | 578 | struct dentry *parent, |
@@ -640,8 +651,12 @@ static void __relay_set_buf_dentry(void *info) | |||
640 | * | 651 | * |
641 | * Returns 0 if successful, non-zero otherwise. | 652 | * Returns 0 if successful, non-zero otherwise. |
642 | * | 653 | * |
643 | * Use to setup files for a previously buffer-only channel. | 654 | * Use to setup files for a previously buffer-only channel created |
644 | * Useful to do early tracing in kernel, before VFS is up, for example. | 655 | * by relay_open() with a NULL parent dentry. |
656 | * | ||
657 | * For example, this is useful for performing early tracing in kernel, | ||
658 | * before VFS is up and then exposing the early results once the dentry | ||
659 | * is available. | ||
645 | */ | 660 | */ |
646 | int relay_late_setup_files(struct rchan *chan, | 661 | int relay_late_setup_files(struct rchan *chan, |
647 | const char *base_filename, | 662 | const char *base_filename, |
@@ -666,6 +681,20 @@ int relay_late_setup_files(struct rchan *chan, | |||
666 | } | 681 | } |
667 | chan->has_base_filename = 1; | 682 | chan->has_base_filename = 1; |
668 | chan->parent = parent; | 683 | chan->parent = parent; |
684 | |||
685 | if (chan->is_global) { | ||
686 | err = -EINVAL; | ||
687 | if (!WARN_ON_ONCE(!chan->buf[0])) { | ||
688 | dentry = relay_create_buf_file(chan, chan->buf[0], 0); | ||
689 | if (dentry && !WARN_ON_ONCE(!chan->is_global)) { | ||
690 | relay_set_buf_dentry(chan->buf[0], dentry); | ||
691 | err = 0; | ||
692 | } | ||
693 | } | ||
694 | mutex_unlock(&relay_channels_mutex); | ||
695 | return err; | ||
696 | } | ||
697 | |||
669 | curr_cpu = get_cpu(); | 698 | curr_cpu = get_cpu(); |
670 | /* | 699 | /* |
671 | * The CPU hotplug notifier ran before us and created buffers with | 700 | * The CPU hotplug notifier ran before us and created buffers with |
@@ -706,6 +735,7 @@ int relay_late_setup_files(struct rchan *chan, | |||
706 | 735 | ||
707 | return err; | 736 | return err; |
708 | } | 737 | } |
738 | EXPORT_SYMBOL_GPL(relay_late_setup_files); | ||
709 | 739 | ||
710 | /** | 740 | /** |
711 | * relay_switch_subbuf - switch to a new sub-buffer | 741 | * relay_switch_subbuf - switch to a new sub-buffer |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 53954631a4e1..b43d0b27c1fe 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -814,6 +814,13 @@ static struct ctl_table kern_table[] = { | |||
814 | .extra2 = &ten_thousand, | 814 | .extra2 = &ten_thousand, |
815 | }, | 815 | }, |
816 | { | 816 | { |
817 | .procname = "printk_devkmsg", | ||
818 | .data = devkmsg_log_str, | ||
819 | .maxlen = DEVKMSG_STR_MAX_SIZE, | ||
820 | .mode = 0644, | ||
821 | .proc_handler = devkmsg_sysctl_set_loglvl, | ||
822 | }, | ||
823 | { | ||
817 | .procname = "dmesg_restrict", | 824 | .procname = "dmesg_restrict", |
818 | .data = &dmesg_restrict, | 825 | .data = &dmesg_restrict, |
819 | .maxlen = sizeof(int), | 826 | .maxlen = sizeof(int), |
diff --git a/kernel/task_work.c b/kernel/task_work.c index 6ab4842b00e8..d513051fcca2 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c | |||
@@ -29,7 +29,7 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify) | |||
29 | struct callback_head *head; | 29 | struct callback_head *head; |
30 | 30 | ||
31 | do { | 31 | do { |
32 | head = ACCESS_ONCE(task->task_works); | 32 | head = READ_ONCE(task->task_works); |
33 | if (unlikely(head == &work_exited)) | 33 | if (unlikely(head == &work_exited)) |
34 | return -ESRCH; | 34 | return -ESRCH; |
35 | work->next = head; | 35 | work->next = head; |
@@ -57,6 +57,9 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) | |||
57 | struct callback_head **pprev = &task->task_works; | 57 | struct callback_head **pprev = &task->task_works; |
58 | struct callback_head *work; | 58 | struct callback_head *work; |
59 | unsigned long flags; | 59 | unsigned long flags; |
60 | |||
61 | if (likely(!task->task_works)) | ||
62 | return NULL; | ||
60 | /* | 63 | /* |
61 | * If cmpxchg() fails we continue without updating pprev. | 64 | * If cmpxchg() fails we continue without updating pprev. |
62 | * Either we raced with task_work_add() which added the | 65 | * Either we raced with task_work_add() which added the |
@@ -64,8 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) | |||
64 | * we raced with task_work_run(), *pprev == NULL/exited. | 67 | * we raced with task_work_run(), *pprev == NULL/exited. |
65 | */ | 68 | */ |
66 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 69 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
67 | while ((work = ACCESS_ONCE(*pprev))) { | 70 | while ((work = lockless_dereference(*pprev))) { |
68 | smp_read_barrier_depends(); | ||
69 | if (work->func != func) | 71 | if (work->func != func) |
70 | pprev = &work->next; | 72 | pprev = &work->next; |
71 | else if (cmpxchg(pprev, work, work->next) == work) | 73 | else if (cmpxchg(pprev, work, work->next) == work) |
@@ -95,7 +97,7 @@ void task_work_run(void) | |||
95 | * work_exited unless the list is empty. | 97 | * work_exited unless the list is empty. |
96 | */ | 98 | */ |
97 | do { | 99 | do { |
98 | work = ACCESS_ONCE(task->task_works); | 100 | work = READ_ONCE(task->task_works); |
99 | head = !work && (task->flags & PF_EXITING) ? | 101 | head = !work && (task->flags & PF_EXITING) ? |
100 | &work_exited : NULL; | 102 | &work_exited : NULL; |
101 | } while (cmpxchg(&task->task_works, work, head) != work); | 103 | } while (cmpxchg(&task->task_works, work, head) != work); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index eb8917a71489..2307d7c89dac 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -721,6 +721,17 @@ config KCOV | |||
721 | 721 | ||
722 | For more details, see Documentation/kcov.txt. | 722 | For more details, see Documentation/kcov.txt. |
723 | 723 | ||
724 | config KCOV_INSTRUMENT_ALL | ||
725 | bool "Instrument all code by default" | ||
726 | depends on KCOV | ||
727 | default y if KCOV | ||
728 | help | ||
729 | If you are doing generic system call fuzzing (like e.g. syzkaller), | ||
730 | then you will want to instrument the whole kernel and you should | ||
731 | say y here. If you are doing more targeted fuzzing (like e.g. | ||
732 | filesystem fuzzing with AFL) then you will want to enable coverage | ||
733 | for more specific subsets of files, and should say n here. | ||
734 | |||
724 | config DEBUG_SHIRQ | 735 | config DEBUG_SHIRQ |
725 | bool "Debug shared IRQ handlers" | 736 | bool "Debug shared IRQ handlers" |
726 | depends on DEBUG_KERNEL | 737 | depends on DEBUG_KERNEL |
diff --git a/lib/crc32.c b/lib/crc32.c index 9a907d489d95..7fbd1a112b9d 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -979,7 +979,6 @@ static int __init crc32c_test(void) | |||
979 | int i; | 979 | int i; |
980 | int errors = 0; | 980 | int errors = 0; |
981 | int bytes = 0; | 981 | int bytes = 0; |
982 | struct timespec start, stop; | ||
983 | u64 nsec; | 982 | u64 nsec; |
984 | unsigned long flags; | 983 | unsigned long flags; |
985 | 984 | ||
@@ -999,20 +998,17 @@ static int __init crc32c_test(void) | |||
999 | local_irq_save(flags); | 998 | local_irq_save(flags); |
1000 | local_irq_disable(); | 999 | local_irq_disable(); |
1001 | 1000 | ||
1002 | getnstimeofday(&start); | 1001 | nsec = ktime_get_ns(); |
1003 | for (i = 0; i < 100; i++) { | 1002 | for (i = 0; i < 100; i++) { |
1004 | if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + | 1003 | if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + |
1005 | test[i].start, test[i].length)) | 1004 | test[i].start, test[i].length)) |
1006 | errors++; | 1005 | errors++; |
1007 | } | 1006 | } |
1008 | getnstimeofday(&stop); | 1007 | nsec = ktime_get_ns() - nsec; |
1009 | 1008 | ||
1010 | local_irq_restore(flags); | 1009 | local_irq_restore(flags); |
1011 | local_irq_enable(); | 1010 | local_irq_enable(); |
1012 | 1011 | ||
1013 | nsec = stop.tv_nsec - start.tv_nsec + | ||
1014 | 1000000000 * (stop.tv_sec - start.tv_sec); | ||
1015 | |||
1016 | pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); | 1012 | pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); |
1017 | 1013 | ||
1018 | if (errors) | 1014 | if (errors) |
@@ -1065,7 +1061,6 @@ static int __init crc32_test(void) | |||
1065 | int i; | 1061 | int i; |
1066 | int errors = 0; | 1062 | int errors = 0; |
1067 | int bytes = 0; | 1063 | int bytes = 0; |
1068 | struct timespec start, stop; | ||
1069 | u64 nsec; | 1064 | u64 nsec; |
1070 | unsigned long flags; | 1065 | unsigned long flags; |
1071 | 1066 | ||
@@ -1088,7 +1083,7 @@ static int __init crc32_test(void) | |||
1088 | local_irq_save(flags); | 1083 | local_irq_save(flags); |
1089 | local_irq_disable(); | 1084 | local_irq_disable(); |
1090 | 1085 | ||
1091 | getnstimeofday(&start); | 1086 | nsec = ktime_get_ns(); |
1092 | for (i = 0; i < 100; i++) { | 1087 | for (i = 0; i < 100; i++) { |
1093 | if (test[i].crc_le != crc32_le(test[i].crc, test_buf + | 1088 | if (test[i].crc_le != crc32_le(test[i].crc, test_buf + |
1094 | test[i].start, test[i].length)) | 1089 | test[i].start, test[i].length)) |
@@ -1098,14 +1093,11 @@ static int __init crc32_test(void) | |||
1098 | test[i].start, test[i].length)) | 1093 | test[i].start, test[i].length)) |
1099 | errors++; | 1094 | errors++; |
1100 | } | 1095 | } |
1101 | getnstimeofday(&stop); | 1096 | nsec = ktime_get_ns() - nsec; |
1102 | 1097 | ||
1103 | local_irq_restore(flags); | 1098 | local_irq_restore(flags); |
1104 | local_irq_enable(); | 1099 | local_irq_enable(); |
1105 | 1100 | ||
1106 | nsec = stop.tv_nsec - start.tv_nsec + | ||
1107 | 1000000000 * (stop.tv_sec - start.tv_sec); | ||
1108 | |||
1109 | pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", | 1101 | pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", |
1110 | CRC_LE_BITS, CRC_BE_BITS); | 1102 | CRC_LE_BITS, CRC_BE_BITS); |
1111 | 1103 | ||
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index c27e269210c4..a816f3a80625 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -29,8 +29,7 @@ again: | |||
29 | index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); | 29 | index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); |
30 | if (index < size) { | 30 | if (index < size) { |
31 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { | 31 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { |
32 | /* we could do more effectively */ | 32 | start = ALIGN(shift + index, boundary_size) - shift; |
33 | start = index + 1; | ||
34 | goto again; | 33 | goto again; |
35 | } | 34 | } |
36 | bitmap_set(map, index, nr); | 35 | bitmap_set(map, index, nr); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 61b8fb529cef..1b7bf7314141 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root) | |||
277 | 277 | ||
278 | /* | 278 | /* |
279 | * Even if the caller has preloaded, try to allocate from the | 279 | * Even if the caller has preloaded, try to allocate from the |
280 | * cache first for the new node to get accounted. | 280 | * cache first for the new node to get accounted to the memory |
281 | * cgroup. | ||
281 | */ | 282 | */ |
282 | ret = kmem_cache_alloc(radix_tree_node_cachep, | 283 | ret = kmem_cache_alloc(radix_tree_node_cachep, |
283 | gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN); | 284 | gfp_mask | __GFP_NOWARN); |
284 | if (ret) | 285 | if (ret) |
285 | goto out; | 286 | goto out; |
286 | 287 | ||
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root) | |||
303 | kmemleak_update_trace(ret); | 304 | kmemleak_update_trace(ret); |
304 | goto out; | 305 | goto out; |
305 | } | 306 | } |
306 | ret = kmem_cache_alloc(radix_tree_node_cachep, | 307 | ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
307 | gfp_mask | __GFP_ACCOUNT); | ||
308 | out: | 308 | out: |
309 | BUG_ON(radix_tree_is_internal_node(ret)); | 309 | BUG_ON(radix_tree_is_internal_node(ret)); |
310 | return ret; | 310 | return ret; |
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr) | |||
351 | struct radix_tree_node *node; | 351 | struct radix_tree_node *node; |
352 | int ret = -ENOMEM; | 352 | int ret = -ENOMEM; |
353 | 353 | ||
354 | /* | ||
355 | * Nodes preloaded by one cgroup can be be used by another cgroup, so | ||
356 | * they should never be accounted to any particular memory cgroup. | ||
357 | */ | ||
358 | gfp_mask &= ~__GFP_ACCOUNT; | ||
359 | |||
354 | preempt_disable(); | 360 | preempt_disable(); |
355 | rtp = this_cpu_ptr(&radix_tree_preloads); | 361 | rtp = this_cpu_ptr(&radix_tree_preloads); |
356 | while (rtp->nr < nr) { | 362 | while (rtp->nr < nr) { |
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 2c5de86460c5..08f8043cac61 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -46,12 +46,14 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
46 | rs->begin = jiffies; | 46 | rs->begin = jiffies; |
47 | 47 | ||
48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
49 | if (rs->missed) | 49 | if (rs->missed) { |
50 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | 50 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { |
51 | func, rs->missed); | 51 | pr_warn("%s: %d callbacks suppressed\n", func, rs->missed); |
52 | rs->missed = 0; | ||
53 | } | ||
54 | } | ||
52 | rs->begin = jiffies; | 55 | rs->begin = jiffies; |
53 | rs->printed = 0; | 56 | rs->printed = 0; |
54 | rs->missed = 0; | ||
55 | } | 57 | } |
56 | if (rs->burst && rs->burst > rs->printed) { | 58 | if (rs->burst && rs->burst > rs->printed) { |
57 | rs->printed++; | 59 | rs->printed++; |
diff --git a/lib/ubsan.c b/lib/ubsan.c index 8799ae5e2e42..fb0409df1bcf 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c | |||
@@ -308,7 +308,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data, | |||
308 | return; | 308 | return; |
309 | 309 | ||
310 | ubsan_prologue(&data->location, &flags); | 310 | ubsan_prologue(&data->location, &flags); |
311 | pr_err("%s address %pk with insufficient space\n", | 311 | pr_err("%s address %p with insufficient space\n", |
312 | type_check_kinds[data->type_check_kind], | 312 | type_check_kinds[data->type_check_kind], |
313 | (void *) ptr); | 313 | (void *) ptr); |
314 | pr_err("for an object of type %s\n", data->type->type_name); | 314 | pr_err("for an object of type %s\n", data->type->type_name); |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f904246a8fd5..ef968306fd5b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2216,6 +2216,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, | |||
2216 | * and reducing the surplus. | 2216 | * and reducing the surplus. |
2217 | */ | 2217 | */ |
2218 | spin_unlock(&hugetlb_lock); | 2218 | spin_unlock(&hugetlb_lock); |
2219 | |||
2220 | /* yield cpu to avoid soft lockup */ | ||
2221 | cond_resched(); | ||
2222 | |||
2219 | if (hstate_is_gigantic(h)) | 2223 | if (hstate_is_gigantic(h)) |
2220 | ret = alloc_fresh_gigantic_page(h, nodes_allowed); | 2224 | ret = alloc_fresh_gigantic_page(h, nodes_allowed); |
2221 | else | 2225 | else |
@@ -4306,7 +4310,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
4306 | pte = (pte_t *)pmd_alloc(mm, pud, addr); | 4310 | pte = (pte_t *)pmd_alloc(mm, pud, addr); |
4307 | } | 4311 | } |
4308 | } | 4312 | } |
4309 | BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); | 4313 | BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); |
4310 | 4314 | ||
4311 | return pte; | 4315 | return pte; |
4312 | } | 4316 | } |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index b6f99e81bfeb..88af13c00d3c 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -442,11 +442,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) | |||
442 | kasan_poison_shadow(object, | 442 | kasan_poison_shadow(object, |
443 | round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), | 443 | round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), |
444 | KASAN_KMALLOC_REDZONE); | 444 | KASAN_KMALLOC_REDZONE); |
445 | if (cache->flags & SLAB_KASAN) { | ||
446 | struct kasan_alloc_meta *alloc_info = | ||
447 | get_alloc_info(cache, object); | ||
448 | alloc_info->state = KASAN_STATE_INIT; | ||
449 | } | ||
450 | } | 445 | } |
451 | 446 | ||
452 | static inline int in_irqentry_text(unsigned long ptr) | 447 | static inline int in_irqentry_text(unsigned long ptr) |
@@ -510,6 +505,17 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache, | |||
510 | return (void *)object + cache->kasan_info.free_meta_offset; | 505 | return (void *)object + cache->kasan_info.free_meta_offset; |
511 | } | 506 | } |
512 | 507 | ||
508 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object) | ||
509 | { | ||
510 | struct kasan_alloc_meta *alloc_info; | ||
511 | |||
512 | if (!(cache->flags & SLAB_KASAN)) | ||
513 | return; | ||
514 | |||
515 | alloc_info = get_alloc_info(cache, object); | ||
516 | __memset(alloc_info, 0, sizeof(*alloc_info)); | ||
517 | } | ||
518 | |||
513 | void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) | 519 | void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) |
514 | { | 520 | { |
515 | kasan_kmalloc(cache, object, cache->object_size, flags); | 521 | kasan_kmalloc(cache, object, cache->object_size, flags); |
@@ -529,34 +535,26 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) | |||
529 | 535 | ||
530 | bool kasan_slab_free(struct kmem_cache *cache, void *object) | 536 | bool kasan_slab_free(struct kmem_cache *cache, void *object) |
531 | { | 537 | { |
538 | s8 shadow_byte; | ||
539 | |||
532 | /* RCU slabs could be legally used after free within the RCU period */ | 540 | /* RCU slabs could be legally used after free within the RCU period */ |
533 | if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) | 541 | if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) |
534 | return false; | 542 | return false; |
535 | 543 | ||
536 | if (likely(cache->flags & SLAB_KASAN)) { | 544 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); |
537 | struct kasan_alloc_meta *alloc_info; | 545 | if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { |
538 | struct kasan_free_meta *free_info; | 546 | kasan_report_double_free(cache, object, shadow_byte); |
547 | return true; | ||
548 | } | ||
539 | 549 | ||
540 | alloc_info = get_alloc_info(cache, object); | 550 | kasan_poison_slab_free(cache, object); |
541 | free_info = get_free_info(cache, object); | ||
542 | 551 | ||
543 | switch (alloc_info->state) { | 552 | if (unlikely(!(cache->flags & SLAB_KASAN))) |
544 | case KASAN_STATE_ALLOC: | 553 | return false; |
545 | alloc_info->state = KASAN_STATE_QUARANTINE; | 554 | |
546 | quarantine_put(free_info, cache); | 555 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); |
547 | set_track(&free_info->track, GFP_NOWAIT); | 556 | quarantine_put(get_free_info(cache, object), cache); |
548 | kasan_poison_slab_free(cache, object); | 557 | return true; |
549 | return true; | ||
550 | case KASAN_STATE_QUARANTINE: | ||
551 | case KASAN_STATE_FREE: | ||
552 | pr_err("Double free"); | ||
553 | dump_stack(); | ||
554 | break; | ||
555 | default: | ||
556 | break; | ||
557 | } | ||
558 | } | ||
559 | return false; | ||
560 | } | 558 | } |
561 | 559 | ||
562 | void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, | 560 | void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, |
@@ -565,7 +563,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, | |||
565 | unsigned long redzone_start; | 563 | unsigned long redzone_start; |
566 | unsigned long redzone_end; | 564 | unsigned long redzone_end; |
567 | 565 | ||
568 | if (flags & __GFP_RECLAIM) | 566 | if (gfpflags_allow_blocking(flags)) |
569 | quarantine_reduce(); | 567 | quarantine_reduce(); |
570 | 568 | ||
571 | if (unlikely(object == NULL)) | 569 | if (unlikely(object == NULL)) |
@@ -579,14 +577,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, | |||
579 | kasan_unpoison_shadow(object, size); | 577 | kasan_unpoison_shadow(object, size); |
580 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | 578 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
581 | KASAN_KMALLOC_REDZONE); | 579 | KASAN_KMALLOC_REDZONE); |
582 | if (cache->flags & SLAB_KASAN) { | ||
583 | struct kasan_alloc_meta *alloc_info = | ||
584 | get_alloc_info(cache, object); | ||
585 | 580 | ||
586 | alloc_info->state = KASAN_STATE_ALLOC; | 581 | if (cache->flags & SLAB_KASAN) |
587 | alloc_info->alloc_size = size; | 582 | set_track(&get_alloc_info(cache, object)->alloc_track, flags); |
588 | set_track(&alloc_info->track, flags); | ||
589 | } | ||
590 | } | 583 | } |
591 | EXPORT_SYMBOL(kasan_kmalloc); | 584 | EXPORT_SYMBOL(kasan_kmalloc); |
592 | 585 | ||
@@ -596,7 +589,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) | |||
596 | unsigned long redzone_start; | 589 | unsigned long redzone_start; |
597 | unsigned long redzone_end; | 590 | unsigned long redzone_end; |
598 | 591 | ||
599 | if (flags & __GFP_RECLAIM) | 592 | if (gfpflags_allow_blocking(flags)) |
600 | quarantine_reduce(); | 593 | quarantine_reduce(); |
601 | 594 | ||
602 | if (unlikely(ptr == NULL)) | 595 | if (unlikely(ptr == NULL)) |
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 31972cdba433..e5c2181fee6f 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h | |||
@@ -59,13 +59,6 @@ struct kasan_global { | |||
59 | * Structures to keep alloc and free tracks * | 59 | * Structures to keep alloc and free tracks * |
60 | */ | 60 | */ |
61 | 61 | ||
62 | enum kasan_state { | ||
63 | KASAN_STATE_INIT, | ||
64 | KASAN_STATE_ALLOC, | ||
65 | KASAN_STATE_QUARANTINE, | ||
66 | KASAN_STATE_FREE | ||
67 | }; | ||
68 | |||
69 | #define KASAN_STACK_DEPTH 64 | 62 | #define KASAN_STACK_DEPTH 64 |
70 | 63 | ||
71 | struct kasan_track { | 64 | struct kasan_track { |
@@ -74,9 +67,8 @@ struct kasan_track { | |||
74 | }; | 67 | }; |
75 | 68 | ||
76 | struct kasan_alloc_meta { | 69 | struct kasan_alloc_meta { |
77 | struct kasan_track track; | 70 | struct kasan_track alloc_track; |
78 | u32 state : 2; /* enum kasan_state */ | 71 | struct kasan_track free_track; |
79 | u32 alloc_size : 30; | ||
80 | }; | 72 | }; |
81 | 73 | ||
82 | struct qlist_node { | 74 | struct qlist_node { |
@@ -87,7 +79,6 @@ struct kasan_free_meta { | |||
87 | * Otherwise it might be used for the allocator freelist. | 79 | * Otherwise it might be used for the allocator freelist. |
88 | */ | 80 | */ |
89 | struct qlist_node quarantine_link; | 81 | struct qlist_node quarantine_link; |
90 | struct kasan_track track; | ||
91 | }; | 82 | }; |
92 | 83 | ||
93 | struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, | 84 | struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, |
@@ -108,6 +99,8 @@ static inline bool kasan_report_enabled(void) | |||
108 | 99 | ||
109 | void kasan_report(unsigned long addr, size_t size, | 100 | void kasan_report(unsigned long addr, size_t size, |
110 | bool is_write, unsigned long ip); | 101 | bool is_write, unsigned long ip); |
102 | void kasan_report_double_free(struct kmem_cache *cache, void *object, | ||
103 | s8 shadow); | ||
111 | 104 | ||
112 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) | 105 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) |
113 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); | 106 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 65793f150d1f..b6728a33a4ac 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
@@ -144,13 +144,15 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) | |||
144 | static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) | 144 | static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) |
145 | { | 145 | { |
146 | void *object = qlink_to_object(qlink, cache); | 146 | void *object = qlink_to_object(qlink, cache); |
147 | struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object); | ||
148 | unsigned long flags; | 147 | unsigned long flags; |
149 | 148 | ||
150 | local_irq_save(flags); | 149 | if (IS_ENABLED(CONFIG_SLAB)) |
151 | alloc_info->state = KASAN_STATE_FREE; | 150 | local_irq_save(flags); |
151 | |||
152 | ___cache_free(cache, object, _THIS_IP_); | 152 | ___cache_free(cache, object, _THIS_IP_); |
153 | local_irq_restore(flags); | 153 | |
154 | if (IS_ENABLED(CONFIG_SLAB)) | ||
155 | local_irq_restore(flags); | ||
154 | } | 156 | } |
155 | 157 | ||
156 | static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache) | 158 | static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache) |
@@ -196,7 +198,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) | |||
196 | 198 | ||
197 | void quarantine_reduce(void) | 199 | void quarantine_reduce(void) |
198 | { | 200 | { |
199 | size_t new_quarantine_size; | 201 | size_t new_quarantine_size, percpu_quarantines; |
200 | unsigned long flags; | 202 | unsigned long flags; |
201 | struct qlist_head to_free = QLIST_INIT; | 203 | struct qlist_head to_free = QLIST_INIT; |
202 | size_t size_to_free = 0; | 204 | size_t size_to_free = 0; |
@@ -214,7 +216,12 @@ void quarantine_reduce(void) | |||
214 | */ | 216 | */ |
215 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / | 217 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / |
216 | QUARANTINE_FRACTION; | 218 | QUARANTINE_FRACTION; |
217 | new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus(); | 219 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); |
220 | if (WARN_ONCE(new_quarantine_size < percpu_quarantines, | ||
221 | "Too little memory, disabling global KASAN quarantine.\n")) | ||
222 | new_quarantine_size = 0; | ||
223 | else | ||
224 | new_quarantine_size -= percpu_quarantines; | ||
218 | WRITE_ONCE(quarantine_size, new_quarantine_size); | 225 | WRITE_ONCE(quarantine_size, new_quarantine_size); |
219 | 226 | ||
220 | last = global_quarantine.head; | 227 | last = global_quarantine.head; |
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 861b9776841a..24c1211fe9d5 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c | |||
@@ -116,6 +116,26 @@ static inline bool init_task_stack_addr(const void *addr) | |||
116 | sizeof(init_thread_union.stack)); | 116 | sizeof(init_thread_union.stack)); |
117 | } | 117 | } |
118 | 118 | ||
119 | static DEFINE_SPINLOCK(report_lock); | ||
120 | |||
121 | static void kasan_start_report(unsigned long *flags) | ||
122 | { | ||
123 | /* | ||
124 | * Make sure we don't end up in loop. | ||
125 | */ | ||
126 | kasan_disable_current(); | ||
127 | spin_lock_irqsave(&report_lock, *flags); | ||
128 | pr_err("==================================================================\n"); | ||
129 | } | ||
130 | |||
131 | static void kasan_end_report(unsigned long *flags) | ||
132 | { | ||
133 | pr_err("==================================================================\n"); | ||
134 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); | ||
135 | spin_unlock_irqrestore(&report_lock, *flags); | ||
136 | kasan_enable_current(); | ||
137 | } | ||
138 | |||
119 | static void print_track(struct kasan_track *track) | 139 | static void print_track(struct kasan_track *track) |
120 | { | 140 | { |
121 | pr_err("PID = %u\n", track->pid); | 141 | pr_err("PID = %u\n", track->pid); |
@@ -129,37 +149,33 @@ static void print_track(struct kasan_track *track) | |||
129 | } | 149 | } |
130 | } | 150 | } |
131 | 151 | ||
132 | static void kasan_object_err(struct kmem_cache *cache, struct page *page, | 152 | static void kasan_object_err(struct kmem_cache *cache, void *object) |
133 | void *object, char *unused_reason) | ||
134 | { | 153 | { |
135 | struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object); | 154 | struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object); |
136 | struct kasan_free_meta *free_info; | ||
137 | 155 | ||
138 | dump_stack(); | 156 | dump_stack(); |
139 | pr_err("Object at %p, in cache %s\n", object, cache->name); | 157 | pr_err("Object at %p, in cache %s size: %d\n", object, cache->name, |
158 | cache->object_size); | ||
159 | |||
140 | if (!(cache->flags & SLAB_KASAN)) | 160 | if (!(cache->flags & SLAB_KASAN)) |
141 | return; | 161 | return; |
142 | switch (alloc_info->state) { | 162 | |
143 | case KASAN_STATE_INIT: | 163 | pr_err("Allocated:\n"); |
144 | pr_err("Object not allocated yet\n"); | 164 | print_track(&alloc_info->alloc_track); |
145 | break; | 165 | pr_err("Freed:\n"); |
146 | case KASAN_STATE_ALLOC: | 166 | print_track(&alloc_info->free_track); |
147 | pr_err("Object allocated with size %u bytes.\n", | 167 | } |
148 | alloc_info->alloc_size); | 168 | |
149 | pr_err("Allocation:\n"); | 169 | void kasan_report_double_free(struct kmem_cache *cache, void *object, |
150 | print_track(&alloc_info->track); | 170 | s8 shadow) |
151 | break; | 171 | { |
152 | case KASAN_STATE_FREE: | 172 | unsigned long flags; |
153 | case KASAN_STATE_QUARANTINE: | 173 | |
154 | pr_err("Object freed, allocated with size %u bytes\n", | 174 | kasan_start_report(&flags); |
155 | alloc_info->alloc_size); | 175 | pr_err("BUG: Double free or freeing an invalid pointer\n"); |
156 | free_info = get_free_info(cache, object); | 176 | pr_err("Unexpected shadow byte: 0x%hhX\n", shadow); |
157 | pr_err("Allocation:\n"); | 177 | kasan_object_err(cache, object); |
158 | print_track(&alloc_info->track); | 178 | kasan_end_report(&flags); |
159 | pr_err("Deallocation:\n"); | ||
160 | print_track(&free_info->track); | ||
161 | break; | ||
162 | } | ||
163 | } | 179 | } |
164 | 180 | ||
165 | static void print_address_description(struct kasan_access_info *info) | 181 | static void print_address_description(struct kasan_access_info *info) |
@@ -175,8 +191,7 @@ static void print_address_description(struct kasan_access_info *info) | |||
175 | struct kmem_cache *cache = page->slab_cache; | 191 | struct kmem_cache *cache = page->slab_cache; |
176 | object = nearest_obj(cache, page, | 192 | object = nearest_obj(cache, page, |
177 | (void *)info->access_addr); | 193 | (void *)info->access_addr); |
178 | kasan_object_err(cache, page, object, | 194 | kasan_object_err(cache, object); |
179 | "kasan: bad access detected"); | ||
180 | return; | 195 | return; |
181 | } | 196 | } |
182 | dump_page(page, "kasan: bad access detected"); | 197 | dump_page(page, "kasan: bad access detected"); |
@@ -241,19 +256,13 @@ static void print_shadow_for_address(const void *addr) | |||
241 | } | 256 | } |
242 | } | 257 | } |
243 | 258 | ||
244 | static DEFINE_SPINLOCK(report_lock); | ||
245 | |||
246 | static void kasan_report_error(struct kasan_access_info *info) | 259 | static void kasan_report_error(struct kasan_access_info *info) |
247 | { | 260 | { |
248 | unsigned long flags; | 261 | unsigned long flags; |
249 | const char *bug_type; | 262 | const char *bug_type; |
250 | 263 | ||
251 | /* | 264 | kasan_start_report(&flags); |
252 | * Make sure we don't end up in loop. | 265 | |
253 | */ | ||
254 | kasan_disable_current(); | ||
255 | spin_lock_irqsave(&report_lock, flags); | ||
256 | pr_err("==================================================================\n"); | ||
257 | if (info->access_addr < | 266 | if (info->access_addr < |
258 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) { | 267 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) { |
259 | if ((unsigned long)info->access_addr < PAGE_SIZE) | 268 | if ((unsigned long)info->access_addr < PAGE_SIZE) |
@@ -274,10 +283,8 @@ static void kasan_report_error(struct kasan_access_info *info) | |||
274 | print_address_description(info); | 283 | print_address_description(info); |
275 | print_shadow_for_address(info->first_bad_addr); | 284 | print_shadow_for_address(info->first_bad_addr); |
276 | } | 285 | } |
277 | pr_err("==================================================================\n"); | 286 | |
278 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); | 287 | kasan_end_report(&flags); |
279 | spin_unlock_irqrestore(&report_lock, flags); | ||
280 | kasan_enable_current(); | ||
281 | } | 288 | } |
282 | 289 | ||
283 | void kasan_report(unsigned long addr, size_t size, | 290 | void kasan_report(unsigned long addr, size_t size, |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c265212bec8c..66beca1ad92f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2559,6 +2559,15 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | |||
2559 | return 0; | 2559 | return 0; |
2560 | 2560 | ||
2561 | mctz = soft_limit_tree_node(pgdat->node_id); | 2561 | mctz = soft_limit_tree_node(pgdat->node_id); |
2562 | |||
2563 | /* | ||
2564 | * Do not even bother to check the largest node if the root | ||
2565 | * is empty. Do it lockless to prevent lock bouncing. Races | ||
2566 | * are acceptable as soft limit is best effort anyway. | ||
2567 | */ | ||
2568 | if (RB_EMPTY_ROOT(&mctz->rb_root)) | ||
2569 | return 0; | ||
2570 | |||
2562 | /* | 2571 | /* |
2563 | * This loop can run a while, specially if mem_cgroup's continuously | 2572 | * This loop can run a while, specially if mem_cgroup's continuously |
2564 | * keep exceeding their soft limit and putting the system under | 2573 | * keep exceeding their soft limit and putting the system under |
diff --git a/mm/memory.c b/mm/memory.c index 4425b6059339..83be99d9d8a1 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2642,6 +2642,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) | |||
2642 | if (page == swapcache) { | 2642 | if (page == swapcache) { |
2643 | do_page_add_anon_rmap(page, vma, fe->address, exclusive); | 2643 | do_page_add_anon_rmap(page, vma, fe->address, exclusive); |
2644 | mem_cgroup_commit_charge(page, memcg, true, false); | 2644 | mem_cgroup_commit_charge(page, memcg, true, false); |
2645 | activate_page(page); | ||
2645 | } else { /* ksm created a completely new copy */ | 2646 | } else { /* ksm created a completely new copy */ |
2646 | page_add_new_anon_rmap(page, vma, fe->address, false); | 2647 | page_add_new_anon_rmap(page, vma, fe->address, false); |
2647 | mem_cgroup_commit_charge(page, memcg, false, false); | 2648 | mem_cgroup_commit_charge(page, memcg, false, false); |
@@ -3133,6 +3134,8 @@ static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) | |||
3133 | 3134 | ||
3134 | if (pmd_none(*fe->pmd)) { | 3135 | if (pmd_none(*fe->pmd)) { |
3135 | fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address); | 3136 | fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address); |
3137 | if (!fe->prealloc_pte) | ||
3138 | goto out; | ||
3136 | smp_wmb(); /* See comment in __pte_alloc() */ | 3139 | smp_wmb(); /* See comment in __pte_alloc() */ |
3137 | } | 3140 | } |
3138 | 3141 | ||
@@ -2653,16 +2653,18 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) | |||
2653 | * anonymous maps. eventually we may be able to do some | 2653 | * anonymous maps. eventually we may be able to do some |
2654 | * brk-specific accounting here. | 2654 | * brk-specific accounting here. |
2655 | */ | 2655 | */ |
2656 | static int do_brk(unsigned long addr, unsigned long len) | 2656 | static int do_brk(unsigned long addr, unsigned long request) |
2657 | { | 2657 | { |
2658 | struct mm_struct *mm = current->mm; | 2658 | struct mm_struct *mm = current->mm; |
2659 | struct vm_area_struct *vma, *prev; | 2659 | struct vm_area_struct *vma, *prev; |
2660 | unsigned long flags; | 2660 | unsigned long flags, len; |
2661 | struct rb_node **rb_link, *rb_parent; | 2661 | struct rb_node **rb_link, *rb_parent; |
2662 | pgoff_t pgoff = addr >> PAGE_SHIFT; | 2662 | pgoff_t pgoff = addr >> PAGE_SHIFT; |
2663 | int error; | 2663 | int error; |
2664 | 2664 | ||
2665 | len = PAGE_ALIGN(len); | 2665 | len = PAGE_ALIGN(request); |
2666 | if (len < request) | ||
2667 | return -ENOMEM; | ||
2666 | if (!len) | 2668 | if (!len) |
2667 | return 0; | 2669 | return 0; |
2668 | 2670 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ea759b935360..39a372a2a1d6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -5276,7 +5276,7 @@ void __init setup_per_cpu_pageset(void) | |||
5276 | setup_zone_pageset(zone); | 5276 | setup_zone_pageset(zone); |
5277 | } | 5277 | } |
5278 | 5278 | ||
5279 | static noinline __init_refok | 5279 | static noinline __ref |
5280 | int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) | 5280 | int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) |
5281 | { | 5281 | { |
5282 | int i; | 5282 | int i; |
@@ -5903,7 +5903,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
5903 | } | 5903 | } |
5904 | } | 5904 | } |
5905 | 5905 | ||
5906 | static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | 5906 | static void __ref alloc_node_mem_map(struct pglist_data *pgdat) |
5907 | { | 5907 | { |
5908 | unsigned long __maybe_unused start = 0; | 5908 | unsigned long __maybe_unused start = 0; |
5909 | unsigned long __maybe_unused offset = 0; | 5909 | unsigned long __maybe_unused offset = 0; |
@@ -1877,7 +1877,7 @@ static struct array_cache __percpu *alloc_kmem_cache_cpus( | |||
1877 | return cpu_cache; | 1877 | return cpu_cache; |
1878 | } | 1878 | } |
1879 | 1879 | ||
1880 | static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | 1880 | static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) |
1881 | { | 1881 | { |
1882 | if (slab_state >= FULL) | 1882 | if (slab_state >= FULL) |
1883 | return enable_cpucache(cachep, gfp); | 1883 | return enable_cpucache(cachep, gfp); |
@@ -2604,9 +2604,11 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
2604 | } | 2604 | } |
2605 | 2605 | ||
2606 | for (i = 0; i < cachep->num; i++) { | 2606 | for (i = 0; i < cachep->num; i++) { |
2607 | objp = index_to_obj(cachep, page, i); | ||
2608 | kasan_init_slab_obj(cachep, objp); | ||
2609 | |||
2607 | /* constructor could break poison info */ | 2610 | /* constructor could break poison info */ |
2608 | if (DEBUG == 0 && cachep->ctor) { | 2611 | if (DEBUG == 0 && cachep->ctor) { |
2609 | objp = index_to_obj(cachep, page, i); | ||
2610 | kasan_unpoison_object_data(cachep, objp); | 2612 | kasan_unpoison_object_data(cachep, objp); |
2611 | cachep->ctor(objp); | 2613 | cachep->ctor(objp); |
2612 | kasan_poison_object_data(cachep, objp); | 2614 | kasan_poison_object_data(cachep, objp); |
@@ -1384,6 +1384,7 @@ static void setup_object(struct kmem_cache *s, struct page *page, | |||
1384 | void *object) | 1384 | void *object) |
1385 | { | 1385 | { |
1386 | setup_object_debug(s, page, object); | 1386 | setup_object_debug(s, page, object); |
1387 | kasan_init_slab_obj(s, object); | ||
1387 | if (unlikely(s->ctor)) { | 1388 | if (unlikely(s->ctor)) { |
1388 | kasan_unpoison_object_data(s, object); | 1389 | kasan_unpoison_object_data(s, object); |
1389 | s->ctor(object); | 1390 | s->ctor(object); |
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 68885dcbaf40..574c67b663fe 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
@@ -36,7 +36,7 @@ | |||
36 | * Uses the main allocators if they are available, else bootmem. | 36 | * Uses the main allocators if they are available, else bootmem. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | static void * __init_refok __earlyonly_bootmem_alloc(int node, | 39 | static void * __ref __earlyonly_bootmem_alloc(int node, |
40 | unsigned long size, | 40 | unsigned long size, |
41 | unsigned long align, | 41 | unsigned long align, |
42 | unsigned long goal) | 42 | unsigned long goal) |
diff --git a/mm/sparse.c b/mm/sparse.c index 36d7bbb80e49..1e168bf2779a 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -59,7 +59,7 @@ static inline void set_section_nid(unsigned long section_nr, int nid) | |||
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #ifdef CONFIG_SPARSEMEM_EXTREME | 61 | #ifdef CONFIG_SPARSEMEM_EXTREME |
62 | static struct mem_section noinline __init_refok *sparse_index_alloc(int nid) | 62 | static noinline struct mem_section __ref *sparse_index_alloc(int nid) |
63 | { | 63 | { |
64 | struct mem_section *section = NULL; | 64 | struct mem_section *section = NULL; |
65 | unsigned long array_size = SECTIONS_PER_ROOT * | 65 | unsigned long array_size = SECTIONS_PER_ROOT * |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 650d26832569..374d95d04178 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2561,7 +2561,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) | |||
2561 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); | 2561 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); |
2562 | node_lru_pages += lru_pages; | 2562 | node_lru_pages += lru_pages; |
2563 | 2563 | ||
2564 | if (!global_reclaim(sc)) | 2564 | if (memcg) |
2565 | shrink_slab(sc->gfp_mask, pgdat->node_id, | 2565 | shrink_slab(sc->gfp_mask, pgdat->node_id, |
2566 | memcg, sc->nr_scanned - scanned, | 2566 | memcg, sc->nr_scanned - scanned, |
2567 | lru_pages); | 2567 | lru_pages); |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index e89c214745eb..0a07f9014944 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -138,7 +138,7 @@ endif | |||
138 | 138 | ||
139 | ifeq ($(CONFIG_KCOV),y) | 139 | ifeq ($(CONFIG_KCOV),y) |
140 | _c_flags += $(if $(patsubst n%,, \ | 140 | _c_flags += $(if $(patsubst n%,, \ |
141 | $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)y), \ | 141 | $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)$(CONFIG_KCOV_INSTRUMENT_ALL)), \ |
142 | $(CFLAGS_KCOV)) | 142 | $(CFLAGS_KCOV)) |
143 | endif | 143 | endif |
144 | 144 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 24a08363995a..4de3cc42fc50 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -55,6 +55,7 @@ my $spelling_file = "$D/spelling.txt"; | |||
55 | my $codespell = 0; | 55 | my $codespell = 0; |
56 | my $codespellfile = "/usr/share/codespell/dictionary.txt"; | 56 | my $codespellfile = "/usr/share/codespell/dictionary.txt"; |
57 | my $color = 1; | 57 | my $color = 1; |
58 | my $allow_c99_comments = 1; | ||
58 | 59 | ||
59 | sub help { | 60 | sub help { |
60 | my ($exitcode) = @_; | 61 | my ($exitcode) = @_; |
@@ -227,9 +228,9 @@ if ($^V && $^V lt $minimum_perl_version) { | |||
227 | } | 228 | } |
228 | } | 229 | } |
229 | 230 | ||
231 | #if no filenames are given, push '-' to read patch from stdin | ||
230 | if ($#ARGV < 0) { | 232 | if ($#ARGV < 0) { |
231 | print "$P: no input files\n"; | 233 | push(@ARGV, '-'); |
232 | exit(1); | ||
233 | } | 234 | } |
234 | 235 | ||
235 | sub hash_save_array_words { | 236 | sub hash_save_array_words { |
@@ -1144,6 +1145,11 @@ sub sanitise_line { | |||
1144 | $res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@; | 1145 | $res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@; |
1145 | } | 1146 | } |
1146 | 1147 | ||
1148 | if ($allow_c99_comments && $res =~ m@(//.*$)@) { | ||
1149 | my $match = $1; | ||
1150 | $res =~ s/\Q$match\E/"$;" x length($match)/e; | ||
1151 | } | ||
1152 | |||
1147 | return $res; | 1153 | return $res; |
1148 | } | 1154 | } |
1149 | 1155 | ||
@@ -2063,6 +2069,7 @@ sub process { | |||
2063 | my $is_patch = 0; | 2069 | my $is_patch = 0; |
2064 | my $in_header_lines = $file ? 0 : 1; | 2070 | my $in_header_lines = $file ? 0 : 1; |
2065 | my $in_commit_log = 0; #Scanning lines before patch | 2071 | my $in_commit_log = 0; #Scanning lines before patch |
2072 | my $has_commit_log = 0; #Encountered lines before patch | ||
2066 | my $commit_log_possible_stack_dump = 0; | 2073 | my $commit_log_possible_stack_dump = 0; |
2067 | my $commit_log_long_line = 0; | 2074 | my $commit_log_long_line = 0; |
2068 | my $commit_log_has_diff = 0; | 2075 | my $commit_log_has_diff = 0; |
@@ -2453,9 +2460,9 @@ sub process { | |||
2453 | 2460 | ||
2454 | # Check for git id commit length and improperly formed commit descriptions | 2461 | # Check for git id commit length and improperly formed commit descriptions |
2455 | if ($in_commit_log && !$commit_log_possible_stack_dump && | 2462 | if ($in_commit_log && !$commit_log_possible_stack_dump && |
2456 | $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i && | 2463 | $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i && |
2457 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || | 2464 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || |
2458 | ($line =~ /\b[0-9a-f]{12,40}\b/i && | 2465 | ($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i && |
2459 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && | 2466 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && |
2460 | $line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) { | 2467 | $line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) { |
2461 | my $init_char = "c"; | 2468 | my $init_char = "c"; |
@@ -2560,6 +2567,7 @@ sub process { | |||
2560 | $rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) { | 2567 | $rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) { |
2561 | $in_header_lines = 0; | 2568 | $in_header_lines = 0; |
2562 | $in_commit_log = 1; | 2569 | $in_commit_log = 1; |
2570 | $has_commit_log = 1; | ||
2563 | } | 2571 | } |
2564 | 2572 | ||
2565 | # Check if there is UTF-8 in a commit log when a mail header has explicitly | 2573 | # Check if there is UTF-8 in a commit log when a mail header has explicitly |
@@ -2763,6 +2771,10 @@ sub process { | |||
2763 | $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) { | 2771 | $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) { |
2764 | $msg_type = ""; | 2772 | $msg_type = ""; |
2765 | 2773 | ||
2774 | # EFI_GUID is another special case | ||
2775 | } elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/) { | ||
2776 | $msg_type = ""; | ||
2777 | |||
2766 | # Otherwise set the alternate message types | 2778 | # Otherwise set the alternate message types |
2767 | 2779 | ||
2768 | # a comment starts before $max_line_length | 2780 | # a comment starts before $max_line_length |
@@ -3337,7 +3349,7 @@ sub process { | |||
3337 | next if ($line =~ /^[^\+]/); | 3349 | next if ($line =~ /^[^\+]/); |
3338 | 3350 | ||
3339 | # check for declarations of signed or unsigned without int | 3351 | # check for declarations of signed or unsigned without int |
3340 | while ($line =~ m{($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) { | 3352 | while ($line =~ m{\b($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) { |
3341 | my $type = $1; | 3353 | my $type = $1; |
3342 | my $var = $2; | 3354 | my $var = $2; |
3343 | $var = "" if (!defined $var); | 3355 | $var = "" if (!defined $var); |
@@ -5722,8 +5734,9 @@ sub process { | |||
5722 | } | 5734 | } |
5723 | } | 5735 | } |
5724 | 5736 | ||
5725 | # check for #defines like: 1 << <digit> that could be BIT(digit) | 5737 | # check for #defines like: 1 << <digit> that could be BIT(digit), it is not exported to uapi |
5726 | if ($line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) { | 5738 | if ($realfile !~ m@^include/uapi/@ && |
5739 | $line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) { | ||
5727 | my $ull = ""; | 5740 | my $ull = ""; |
5728 | $ull = "_ULL" if (defined($1) && $1 =~ /ll/i); | 5741 | $ull = "_ULL" if (defined($1) && $1 =~ /ll/i); |
5729 | if (CHK("BIT_MACRO", | 5742 | if (CHK("BIT_MACRO", |
@@ -6044,7 +6057,7 @@ sub process { | |||
6044 | ERROR("NOT_UNIFIED_DIFF", | 6057 | ERROR("NOT_UNIFIED_DIFF", |
6045 | "Does not appear to be a unified-diff format patch\n"); | 6058 | "Does not appear to be a unified-diff format patch\n"); |
6046 | } | 6059 | } |
6047 | if ($is_patch && $filename ne '-' && $chk_signoff && $signoff == 0) { | 6060 | if ($is_patch && $has_commit_log && $chk_signoff && $signoff == 0) { |
6048 | ERROR("MISSING_SIGN_OFF", | 6061 | ERROR("MISSING_SIGN_OFF", |
6049 | "Missing Signed-off-by: line(s)\n"); | 6062 | "Missing Signed-off-by: line(s)\n"); |
6050 | } | 6063 | } |
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 1873421f2305..122fcdaf42c8 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -133,6 +133,7 @@ my %VCS_cmds_git = ( | |||
133 | "author_pattern" => "^GitAuthor: (.*)", | 133 | "author_pattern" => "^GitAuthor: (.*)", |
134 | "subject_pattern" => "^GitSubject: (.*)", | 134 | "subject_pattern" => "^GitSubject: (.*)", |
135 | "stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$", | 135 | "stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$", |
136 | "file_exists_cmd" => "git ls-files \$file", | ||
136 | ); | 137 | ); |
137 | 138 | ||
138 | my %VCS_cmds_hg = ( | 139 | my %VCS_cmds_hg = ( |
@@ -161,6 +162,7 @@ my %VCS_cmds_hg = ( | |||
161 | "author_pattern" => "^HgAuthor: (.*)", | 162 | "author_pattern" => "^HgAuthor: (.*)", |
162 | "subject_pattern" => "^HgSubject: (.*)", | 163 | "subject_pattern" => "^HgSubject: (.*)", |
163 | "stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$", | 164 | "stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$", |
165 | "file_exists_cmd" => "hg files \$file", | ||
164 | ); | 166 | ); |
165 | 167 | ||
166 | my $conf = which_conf(".get_maintainer.conf"); | 168 | my $conf = which_conf(".get_maintainer.conf"); |
@@ -430,7 +432,7 @@ foreach my $file (@ARGV) { | |||
430 | die "$P: file '${file}' not found\n"; | 432 | die "$P: file '${file}' not found\n"; |
431 | } | 433 | } |
432 | } | 434 | } |
433 | if ($from_filename) { | 435 | if ($from_filename || vcs_file_exists($file)) { |
434 | $file =~ s/^\Q${cur_path}\E//; #strip any absolute path | 436 | $file =~ s/^\Q${cur_path}\E//; #strip any absolute path |
435 | $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree | 437 | $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree |
436 | push(@files, $file); | 438 | push(@files, $file); |
@@ -2124,6 +2126,22 @@ sub vcs_file_blame { | |||
2124 | } | 2126 | } |
2125 | } | 2127 | } |
2126 | 2128 | ||
2129 | sub vcs_file_exists { | ||
2130 | my ($file) = @_; | ||
2131 | |||
2132 | my $exists; | ||
2133 | |||
2134 | my $vcs_used = vcs_exists(); | ||
2135 | return 0 if (!$vcs_used); | ||
2136 | |||
2137 | my $cmd = $VCS_cmds{"file_exists_cmd"}; | ||
2138 | $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd | ||
2139 | |||
2140 | $exists = &{$VCS_cmds{"execute_cmd"}}($cmd); | ||
2141 | |||
2142 | return $exists; | ||
2143 | } | ||
2144 | |||
2127 | sub uniq { | 2145 | sub uniq { |
2128 | my (@parms) = @_; | 2146 | my (@parms) = @_; |
2129 | 2147 | ||
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index 0e37f7a760eb..5201b915f631 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _GFP_H | 1 | #ifndef _GFP_H |
2 | #define _GFP_H | 2 | #define _GFP_H |
3 | 3 | ||
4 | #define __GFP_BITS_SHIFT 22 | 4 | #define __GFP_BITS_SHIFT 26 |
5 | #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) | 5 | #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
6 | #define __GFP_WAIT 1 | 6 | #define __GFP_WAIT 1 |
7 | #define __GFP_ACCOUNT 0 | 7 | #define __GFP_ACCOUNT 0 |