105 files changed, 3632 insertions, 884 deletions
diff --git a/Documentation/ABI/stable/sysfs-module b/Documentation/ABI/stable/sysfs-module
index a0dd21c6db59..6272ae5fb366 100644
--- a/Documentation/ABI/stable/sysfs-module
+++ b/Documentation/ABI/stable/sysfs-module
@@ -4,9 +4,13 @@ Description: | |||
4 | 4 | ||
5 | /sys/module/MODULENAME | 5 | /sys/module/MODULENAME |
6 | The name of the module that is in the kernel. This | 6 | The name of the module that is in the kernel. This |
7 | module name will show up either if the module is built | 7 | module name will always show up if the module is loaded as a |
8 | directly into the kernel, or if it is loaded as a | 8 | dynamic module. If it is built directly into the kernel, it |
9 | dynamic module. | 9 | will only show up if it has a version or at least one |
10 | parameter. | ||
11 | |||
12 | Note: The conditions of creation in the built-in case are not | ||
13 | by design and may be removed in the future. | ||
10 | 14 | ||
11 | /sys/module/MODULENAME/parameters | 15 | /sys/module/MODULENAME/parameters |
12 | This directory contains individual files that are each | 16 | This directory contains individual files that are each |
diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt
index 18de78599dd4..7f773d51fdd9 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/coccinelle.txt
@@ -6,15 +6,17 @@ Copyright 2010 Gilles Muller <Gilles.Muller@lip6.fr> | |||
6 | Getting Coccinelle | 6 | Getting Coccinelle |
7 | ~~~~~~~~~~~~~~~~~~~~ | 7 | ~~~~~~~~~~~~~~~~~~~~ |
8 | 8 | ||
9 | The semantic patches included in the kernel use the 'virtual rule' | 9 | The semantic patches included in the kernel use features and options |
10 | feature which was introduced in Coccinelle version 0.1.11. | 10 | which are provided by Coccinelle version 1.0.0-rc11 and above. |
11 | Using earlier versions will fail as the option names used by | ||
12 | the Coccinelle files and coccicheck have been updated. | ||
11 | 13 | ||
12 | Coccinelle (>=0.2.0) is available through the package manager | 14 | Coccinelle is available through the package manager |
13 | of many distributions, e.g. : | 15 | of many distributions, e.g. : |
14 | 16 | ||
15 | - Debian (>=squeeze) | 17 | - Debian |
16 | - Fedora (>=13) | 18 | - Fedora |
17 | - Ubuntu (>=10.04 Lucid Lynx) | 19 | - Ubuntu |
18 | - OpenSUSE | 20 | - OpenSUSE |
19 | - Arch Linux | 21 | - Arch Linux |
20 | - NetBSD | 22 | - NetBSD |
@@ -36,11 +38,6 @@ as a regular user, and install it with | |||
36 | 38 | ||
37 | sudo make install | 39 | sudo make install |
38 | 40 | ||
39 | The semantic patches in the kernel will work best with Coccinelle version | ||
40 | 0.2.4 or later. Using earlier versions may incur some parse errors in the | ||
41 | semantic patch code, but any results that are obtained should still be | ||
42 | correct. | ||
43 | |||
44 | Using Coccinelle on the Linux kernel | 41 | Using Coccinelle on the Linux kernel |
45 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 42 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
46 | 43 | ||
@@ -48,7 +45,7 @@ A Coccinelle-specific target is defined in the top level | |||
48 | Makefile. This target is named 'coccicheck' and calls the 'coccicheck' | 45 | Makefile. This target is named 'coccicheck' and calls the 'coccicheck' |
49 | front-end in the 'scripts' directory. | 46 | front-end in the 'scripts' directory. |
50 | 47 | ||
51 | Four modes are defined: patch, report, context, and org. The mode to | 48 | Four basic modes are defined: patch, report, context, and org. The mode to |
52 | use is specified by setting the MODE variable with 'MODE=<mode>'. | 49 | use is specified by setting the MODE variable with 'MODE=<mode>'. |
53 | 50 | ||
54 | 'patch' proposes a fix, when possible. | 51 | 'patch' proposes a fix, when possible. |
@@ -62,18 +59,24 @@ diff-like style.Lines of interest are indicated with '-'. | |||
62 | 'org' generates a report in the Org mode format of Emacs. | 59 | 'org' generates a report in the Org mode format of Emacs. |
63 | 60 | ||
64 | Note that not all semantic patches implement all modes. For easy use | 61 | Note that not all semantic patches implement all modes. For easy use |
65 | of Coccinelle, the default mode is "chain" which tries the previous | 62 | of Coccinelle, the default mode is "report". |
66 | modes in the order above until one succeeds. | 63 | |
64 | Two other modes provide some common combinations of these modes. | ||
67 | 65 | ||
68 | To make a report for every semantic patch, run the following command: | 66 | 'chain' tries the previous modes in the order above until one succeeds. |
69 | 67 | ||
70 | make coccicheck MODE=report | 68 | 'rep+ctxt' runs the report mode and then the context mode.
69 | It should be used with the C option (described later) | ||
70 | which checks the code on a file basis. | ||
71 | 71 | ||
72 | NB: The 'report' mode is the default one. | 72 | Examples: |
73 | To make a report for every semantic patch, run the following command: | ||
73 | 74 | ||
74 | To produce patches, run: | 75 | make coccicheck MODE=report |
75 | 76 | ||
76 | make coccicheck MODE=patch | 77 | To produce patches, run: |
78 | |||
79 | make coccicheck MODE=patch | ||
77 | 80 | ||
78 | 81 | ||
79 | The coccicheck target applies every semantic patch available in the | 82 | The coccicheck target applies every semantic patch available in the |
@@ -91,6 +94,11 @@ To enable verbose messages set the V= variable, for example: | |||
91 | 94 | ||
92 | make coccicheck MODE=report V=1 | 95 | make coccicheck MODE=report V=1 |
93 | 96 | ||
97 | By default, coccicheck tries to run with as much parallelism as possible. To change | ||
98 | the parallelism, set the J= variable. For example, to run across 4 CPUs: | ||
99 | |||
100 | make coccicheck MODE=report J=4 | ||
101 | |||
94 | 102 | ||
95 | Using Coccinelle with a single semantic patch | 103 | Using Coccinelle with a single semantic patch |
96 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 104 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
@@ -124,26 +132,33 @@ To check only newly edited code, use the value 2 for the C flag, i.e. | |||
124 | 132 | ||
125 | make C=2 CHECK="scripts/coccicheck" | 133 | make C=2 CHECK="scripts/coccicheck" |
126 | 134 | ||
135 | In these modes, which work on a file basis, no information about | ||
136 | the semantic patches is displayed, and no commit message is proposed. | ||
137 | |||
127 | This runs every semantic patch in scripts/coccinelle by default. The | 138 | This runs every semantic patch in scripts/coccinelle by default. The |
128 | COCCI variable may additionally be used to only apply a single | 139 | COCCI variable may additionally be used to only apply a single |
129 | semantic patch as shown in the previous section. | 140 | semantic patch as shown in the previous section. |
130 | 141 | ||
131 | The "chain" mode is the default. You can select another one with the | 142 | The "report" mode is the default. You can select another one with the |
132 | MODE variable explained above. | 143 | MODE variable explained above. |
133 | 144 | ||
134 | In this mode, there is no information about semantic patches | ||
135 | displayed, and no commit message proposed. | ||
136 | |||
137 | Additional flags | 145 | Additional flags |
138 | ~~~~~~~~~~~~~~~~~~ | 146 | ~~~~~~~~~~~~~~~~~~ |
139 | 147 | ||
140 | Additional flags can be passed to spatch through the SPFLAGS | 148 | Additional flags can be passed to spatch through the SPFLAGS |
141 | variable. | 149 | variable. |
142 | 150 | ||
143 | make SPFLAGS=--use_glimpse coccicheck | 151 | make SPFLAGS=--use-glimpse coccicheck |
152 | make SPFLAGS=--use-idutils coccicheck | ||
144 | 153 | ||
145 | See spatch --help to learn more about spatch options. | 154 | See spatch --help to learn more about spatch options. |
146 | 155 | ||
156 | Note that the '--use-glimpse' and '--use-idutils' options | ||
157 | require external tools for indexing the code. Neither of them is | ||
158 | therefore active by default. However, by indexing the code with | ||
159 | one of these tools, and depending on the cocci file used, | ||
160 | spatch can process the entire code base more quickly. | ||
161 | |||
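The variables above can be combined on one command line. A few illustrative
invocations (the .cocci path is only an example; any semantic patch under
scripts/coccinelle/ can be named, and the last line assumes the tree has
already been indexed with id-utils):

	make coccicheck MODE=report J=4
	make coccicheck MODE=report COCCI=scripts/coccinelle/free/kfree.cocci
	make coccicheck MODE=report SPFLAGS=--use-idutils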
147 | Proposing new semantic patches | 162 | Proposing new semantic patches |
148 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 163 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
149 | 164 | ||
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
new file mode 100644
index 000000000000..e34c6cdd8ba8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -0,0 +1,70 @@ | |||
1 | * ARM System MMU Architecture Implementation | ||
2 | |||
3 | ARM SoCs may contain an implementation of the ARM System Memory | ||
4 | Management Unit Architecture, which can be used to provide 1 or 2 stages | ||
5 | of address translation to bus masters external to the CPU. | ||
6 | |||
7 | The SMMU may also raise interrupts in response to various fault | ||
8 | conditions. | ||
9 | |||
10 | ** System MMU required properties: | ||
11 | |||
12 | - compatible : Should be one of: | ||
13 | |||
14 | "arm,smmu-v1" | ||
15 | "arm,smmu-v2" | ||
16 | "arm,mmu-400" | ||
17 | "arm,mmu-500" | ||
18 | |||
19 | depending on the particular implementation and/or the | ||
20 | version of the architecture implemented. | ||
21 | |||
22 | - reg : Base address and size of the SMMU. | ||
23 | |||
24 | - #global-interrupts : The number of global interrupts exposed by the | ||
25 | device. | ||
26 | |||
27 | - interrupts : Interrupt list, with the first #global-interrupts entries | ||
28 | corresponding to the global interrupts and any | ||
29 | following entries corresponding to context interrupts, | ||
30 | specified in order of their indexing by the SMMU. | ||
31 | |||
32 | For SMMUv2 implementations, there must be exactly one | ||
33 | interrupt per context bank. In the case of a single, | ||
34 | combined interrupt, it must be listed multiple times. | ||
35 | |||
36 | - mmu-masters : A list of phandles to device nodes representing bus | ||
37 | masters for which the SMMU can provide a translation | ||
38 | and their corresponding StreamIDs (see example below). | ||
39 | Each device node linked from this list must have a | ||
40 | "#stream-id-cells" property, indicating the number of | ||
41 | StreamIDs associated with it. | ||
42 | |||
43 | ** System MMU optional properties: | ||
44 | |||
45 | - smmu-parent : When multiple SMMUs are chained together, this | ||
46 | property can be used to provide a phandle to the | ||
47 | parent SMMU (that is the next SMMU on the path going | ||
48 | from the mmu-masters towards memory) node for this | ||
49 | SMMU. | ||
50 | |||
51 | Example: | ||
52 | |||
53 | smmu { | ||
54 | compatible = "arm,smmu-v1"; | ||
55 | reg = <0xba5e0000 0x10000>; | ||
56 | #global-interrupts = <2>; | ||
57 | interrupts = <0 32 4>, | ||
58 | <0 33 4>, | ||
59 | <0 34 4>, /* This is the first context interrupt */ | ||
60 | <0 35 4>, | ||
61 | <0 36 4>, | ||
62 | <0 37 4>; | ||
63 | |||
64 | /* | ||
65 | * Two DMA controllers, the first with two StreamIDs (0xd01d | ||
66 | * and 0xd01e) and the second with only one (0xd11c). | ||
67 | */ | ||
68 | mmu-masters = <&dma0 0xd01d 0xd01e>, | ||
69 | <&dma1 0xd11c>; | ||
70 | }; | ||
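The example shows only the SMMU node. For completeness, a sketch of how the
referenced masters could declare the required "#stream-id-cells" property;
node names, unit addresses and compatible strings here are purely
illustrative:

	dma0: dma-controller@10000000 {
		compatible = "vendor,example-dma";	/* illustrative */
		reg = <0x10000000 0x1000>;
		#stream-id-cells = <2>;			/* 0xd01d, 0xd01e */
	};

	dma1: dma-controller@10010000 {
		compatible = "vendor,example-dma";	/* illustrative */
		reg = <0x10010000 0x1000>;
		#stream-id-cells = <1>;			/* 0xd11c */
	};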
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 213859e69e88..e349f293cc98 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -174,6 +174,19 @@ Searching in menuconfig: | |||
174 | 174 | ||
175 | /^hotplug | 175 | /^hotplug |
176 | 176 | ||
177 | When searching, symbols are sorted thus: | ||
178 | - exact match first: an exact match is when the search matches | ||
179 | the complete symbol name; | ||
180 | - alphabetical order: when two symbols do not match exactly, | ||
181 | they are sorted in alphabetical order (in the user's current | ||
182 | locale). | ||
183 | For example: ^ATH.K matches: | ||
184 | ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG | ||
185 | [...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...] | ||
186 | of which only ATH5K and ATH9K match exactly and so are sorted | ||
187 | first (and in alphabetical order), then come all other symbols, | ||
188 | sorted in alphabetical order. | ||
189 | |||
177 | ______________________________________________________________________ | 190 | ______________________________________________________________________ |
178 | User interface options for 'menuconfig' | 191 | User interface options for 'menuconfig' |
179 | 192 | ||
diff --git a/Documentation/vfio.txt b/Documentation/vfio.txt
index c55533c0adb3..d7993dcf8537 100644
--- a/Documentation/vfio.txt
+++ b/Documentation/vfio.txt
@@ -172,12 +172,12 @@ group and can access them as follows: | |||
172 | struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; | 172 | struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; |
173 | 173 | ||
174 | /* Create a new container */ | 174 | /* Create a new container */ |
175 | container = open("/dev/vfio/vfio, O_RDWR); | 175 | container = open("/dev/vfio/vfio", O_RDWR); |
176 | 176 | ||
177 | if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION) | 177 | if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION) |
178 | /* Unknown API version */ | 178 | /* Unknown API version */ |
179 | 179 | ||
180 | if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_X86_IOMMU)) | 180 | if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) |
181 | /* Doesn't support the IOMMU driver we want. */ | 181 | /* Doesn't support the IOMMU driver we want. */ |
182 | 182 | ||
183 | /* Open the group */ | 183 | /* Open the group */ |
@@ -193,7 +193,7 @@ group and can access them as follows: | |||
193 | ioctl(group, VFIO_GROUP_SET_CONTAINER, &container); | 193 | ioctl(group, VFIO_GROUP_SET_CONTAINER, &container); |
194 | 194 | ||
195 | /* Enable the IOMMU model we want */ | 195 | /* Enable the IOMMU model we want */ |
196 | ioctl(container, VFIO_SET_IOMMU, VFIO_X86_IOMMU) | 196 | ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU) |
197 | 197 | ||
198 | /* Get additional IOMMU info */ | 198 | /* Get additional IOMMU info */ | ||
199 | ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info); | 199 | ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info); |
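For a quick sanity check of the corrected constants, a minimal user-space
sketch (assuming a kernel that exposes /dev/vfio/vfio with the type1 IOMMU
backend) which only probes the container, without attaching any group:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	int main(void)
	{
		/* Create a new container */
		int container = open("/dev/vfio/vfio", O_RDWR);

		if (container < 0) {
			perror("open(/dev/vfio/vfio)");
			return 1;
		}

		if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION) {
			fprintf(stderr, "Unknown VFIO API version\n");
			return 1;
		}

		if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
			fprintf(stderr, "Type1 IOMMU backend not supported\n");
			return 1;
		}

		printf("VFIO type1 IOMMU is available\n");
		return 0;
	}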
diff --git a/MAINTAINERS b/MAINTAINERS
index 37f9a71c744f..cbd4f66bc677 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1333,6 +1333,12 @@ S: Supported | |||
1333 | F: arch/arm/mach-zynq/ | 1333 | F: arch/arm/mach-zynq/ |
1334 | F: drivers/cpuidle/cpuidle-zynq.c | 1334 | F: drivers/cpuidle/cpuidle-zynq.c |
1335 | 1335 | ||
1336 | ARM SMMU DRIVER | ||
1337 | M: Will Deacon <will.deacon@arm.com> | ||
1338 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
1339 | S: Maintained | ||
1340 | F: drivers/iommu/arm-smmu.c | ||
1341 | |||
1336 | ARM64 PORT (AARCH64 ARCHITECTURE) | 1342 | ARM64 PORT (AARCH64 ARCHITECTURE) |
1337 | M: Catalin Marinas <catalin.marinas@arm.com> | 1343 | M: Catalin Marinas <catalin.marinas@arm.com> |
1338 | M: Will Deacon <will.deacon@arm.com> | 1344 | M: Will Deacon <will.deacon@arm.com> |
@@ -2129,9 +2135,12 @@ COCCINELLE/Semantic Patches (SmPL) | |||
2129 | M: Julia Lawall <Julia.Lawall@lip6.fr> | 2135 | M: Julia Lawall <Julia.Lawall@lip6.fr> |
2130 | M: Gilles Muller <Gilles.Muller@lip6.fr> | 2136 | M: Gilles Muller <Gilles.Muller@lip6.fr> |
2131 | M: Nicolas Palix <nicolas.palix@imag.fr> | 2137 | M: Nicolas Palix <nicolas.palix@imag.fr> |
2138 | M: Michal Marek <mmarek@suse.cz> | ||
2132 | L: cocci@systeme.lip6.fr (moderated for non-subscribers) | 2139 | L: cocci@systeme.lip6.fr (moderated for non-subscribers) |
2140 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc | ||
2133 | W: http://coccinelle.lip6.fr/ | 2141 | W: http://coccinelle.lip6.fr/ |
2134 | S: Supported | 2142 | S: Supported |
2143 | F: Documentation/coccinelle.txt | ||
2135 | F: scripts/coccinelle/ | 2144 | F: scripts/coccinelle/ |
2136 | F: scripts/coccicheck | 2145 | F: scripts/coccicheck |
2137 | 2146 | ||
@@ -8884,6 +8893,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com> | |||
8884 | L: virtualization@lists.linux-foundation.org | 8893 | L: virtualization@lists.linux-foundation.org |
8885 | S: Maintained | 8894 | S: Maintained |
8886 | F: drivers/virtio/ | 8895 | F: drivers/virtio/ |
8896 | F: tools/virtio/ | ||
8887 | F: drivers/net/virtio_net.c | 8897 | F: drivers/net/virtio_net.c |
8888 | F: drivers/block/virtio_blk.c | 8898 | F: drivers/block/virtio_blk.c |
8889 | F: include/linux/virtio_*.h | 8899 | F: include/linux/virtio_*.h |
diff --git a/Makefile b/Makefile
@@ -981,7 +981,7 @@ _modinst_: | |||
981 | # boot a modules.dep even before / is mounted read-write. However the | 981 | # boot a modules.dep even before / is mounted read-write. However the |
982 | # boot script depmod is the master version. | 982 | # boot script depmod is the master version. |
983 | PHONY += _modinst_post | 983 | PHONY += _modinst_post |
984 | _modinst_post: _modinst_ | 984 | _modinst_post: include/config/kernel.release _modinst_ |
985 | $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst | 985 | $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst |
986 | $(call cmd,depmod) | 986 | $(call cmd,depmod) |
987 | 987 | ||
@@ -1116,6 +1116,7 @@ help: | |||
1116 | @echo ' gtags - Generate GNU GLOBAL index' | 1116 | @echo ' gtags - Generate GNU GLOBAL index' |
1117 | @echo ' kernelrelease - Output the release version string' | 1117 | @echo ' kernelrelease - Output the release version string' |
1118 | @echo ' kernelversion - Output the version stored in Makefile' | 1118 | @echo ' kernelversion - Output the version stored in Makefile' |
1119 | @echo ' image_name - Output the image name' | ||
1119 | @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ | 1120 | @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ |
1120 | echo ' (default: $(INSTALL_HDR_PATH))'; \ | 1121 | echo ' (default: $(INSTALL_HDR_PATH))'; \ |
1121 | echo '' | 1122 | echo '' |
@@ -1310,7 +1311,7 @@ export_report: | |||
1310 | endif #ifeq ($(config-targets),1) | 1311 | endif #ifeq ($(config-targets),1) |
1311 | endif #ifeq ($(mixed-targets),1) | 1312 | endif #ifeq ($(mixed-targets),1) |
1312 | 1313 | ||
1313 | PHONY += checkstack kernelrelease kernelversion | 1314 | PHONY += checkstack kernelrelease kernelversion image_name |
1314 | 1315 | ||
1315 | # UML needs a little special treatment here. It wants to use the host | 1316 | # UML needs a little special treatment here. It wants to use the host |
1316 | # toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone | 1317 | # toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone |
@@ -1331,6 +1332,9 @@ kernelrelease: | |||
1331 | kernelversion: | 1332 | kernelversion: |
1332 | @echo $(KERNELVERSION) | 1333 | @echo $(KERNELVERSION) |
1333 | 1334 | ||
1335 | image_name: | ||
1336 | @echo $(KBUILD_IMAGE) | ||
1337 | |||
1334 | # Clear a bunch of variables before executing the submake | 1338 | # Clear a bunch of variables before executing the submake |
1335 | tools/: FORCE | 1339 | tools/: FORCE |
1336 | $(Q)mkdir -p $(objtree)/tools | 1340 | $(Q)mkdir -p $(objtree)/tools |
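The new image_name target simply echoes $(KBUILD_IMAGE), so build scripts can
locate the kernel image without hard-coding per-architecture paths. A typical
run (the output depends on the architecture and configuration; the path shown
is the usual x86 value):

	$ make -s image_name
	arch/x86/boot/bzImage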
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
index 0db655ef3918..639d1289b1ba 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
@@ -491,10 +491,8 @@ static struct perf_amd_iommu __perf_iommu = { | |||
491 | static __init int amd_iommu_pc_init(void) | 491 | static __init int amd_iommu_pc_init(void) |
492 | { | 492 | { |
493 | /* Make sure the IOMMU PC resource is available */ | 493 | /* Make sure the IOMMU PC resource is available */ |
494 | if (!amd_iommu_pc_supported()) { | 494 | if (!amd_iommu_pc_supported()) |
495 | pr_err("perf: amd_iommu PMU not installed. No support!\n"); | ||
496 | return -ENODEV; | 495 | return -ENODEV; |
497 | } | ||
498 | 496 | ||
499 | _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); | 497 | _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); |
500 | 498 | ||
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index a4f5ce14dc1c..271b42bbfb72 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -133,8 +133,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = { | |||
133 | }, | 133 | }, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static u8 *edid_load(struct drm_connector *connector, char *name, | 136 | static u8 *edid_load(struct drm_connector *connector, const char *name, |
137 | char *connector_name) | 137 | const char *connector_name) |
138 | { | 138 | { |
139 | const struct firmware *fw; | 139 | const struct firmware *fw; |
140 | struct platform_device *pdev; | 140 | struct platform_device *pdev; |
@@ -242,7 +242,7 @@ out: | |||
242 | 242 | ||
243 | int drm_load_edid_firmware(struct drm_connector *connector) | 243 | int drm_load_edid_firmware(struct drm_connector *connector) |
244 | { | 244 | { |
245 | char *connector_name = drm_get_connector_name(connector); | 245 | const char *connector_name = drm_get_connector_name(connector); |
246 | char *edidname = edid_firmware, *last, *colon; | 246 | char *edidname = edid_firmware, *last, *colon; |
247 | int ret; | 247 | int ret; |
248 | struct edid *edid; | 248 | struct edid *edid; |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 01730b2b9954..820d85c4a4a0 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -269,4 +269,17 @@ config SPAPR_TCE_IOMMU | |||
269 | Enables bits of IOMMU API required by VFIO. The iommu_ops | 269 | Enables bits of IOMMU API required by VFIO. The iommu_ops |
270 | is not implemented as it is not necessary for VFIO. | 270 | is not implemented as it is not necessary for VFIO. |
271 | 271 | ||
272 | config ARM_SMMU | ||
273 | bool "ARM Ltd. System MMU (SMMU) Support" | ||
274 | depends on ARM64 || (ARM_LPAE && OF) | ||
275 | select IOMMU_API | ||
276 | select ARM_DMA_USE_IOMMU if ARM | ||
277 | help | ||
278 | Support for implementations of the ARM System MMU architecture | ||
279 | versions 1 and 2. The driver supports both v7l and v8l table | ||
280 | formats with 4k and 64k page sizes. | ||
281 | |||
282 | Say Y here if your SoC includes an IOMMU device implementing | ||
283 | the ARM SMMU architecture. | ||
284 | |||
272 | endif # IOMMU_SUPPORT | 285 | endif # IOMMU_SUPPORT |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ef0e5207ad69..bbe7041212dd 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU) += of_iommu.o | |||
3 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o | 3 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o |
4 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o | 4 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o |
5 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o | 5 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o |
6 | obj-$(CONFIG_ARM_SMMU) += arm-smmu.o | ||
6 | obj-$(CONFIG_DMAR_TABLE) += dmar.o | 7 | obj-$(CONFIG_DMAR_TABLE) += dmar.o |
7 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o | 8 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o |
8 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | 9 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21d02b0d907c..6dc659426a51 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev) | |||
287 | 287 | ||
288 | /* | 288 | /* |
289 | * If it's a multifunction device that does not support our | 289 | * If it's a multifunction device that does not support our |
290 | * required ACS flags, add to the same group as function 0. | 290 | * required ACS flags, add to the same group as the lowest numbered
291 | * function that also does not support the required ACS flags. | ||
291 | */ | 292 | */ |
292 | if (dma_pdev->multifunction && | 293 | if (dma_pdev->multifunction && |
293 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) | 294 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { |
294 | swap_pci_ref(&dma_pdev, | 295 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); |
295 | pci_get_slot(dma_pdev->bus, | 296 | |
296 | PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), | 297 | for (i = 0; i < 8; i++) { |
297 | 0))); | 298 | struct pci_dev *tmp; |
299 | |||
300 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
301 | if (!tmp) | ||
302 | continue; | ||
303 | |||
304 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
305 | swap_pci_ref(&dma_pdev, tmp); | ||
306 | break; | ||
307 | } | ||
308 | pci_dev_put(tmp); | ||
309 | } | ||
310 | } | ||
298 | 311 | ||
299 | /* | 312 | /* |
300 | * Devices on the root bus go through the iommu. If that's not us, | 313 | * Devices on the root bus go through the iommu. If that's not us, |
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
1484 | 1497 | ||
1485 | /* Large PTE found which maps this address */ | 1498 | /* Large PTE found which maps this address */ |
1486 | unmap_size = PTE_PAGE_SIZE(*pte); | 1499 | unmap_size = PTE_PAGE_SIZE(*pte); |
1500 | |||
1501 | /* Only unmap from the first pte in the page */ | ||
1502 | if ((unmap_size - 1) & bus_addr) | ||
1503 | break; | ||
1487 | count = PAGE_SIZE_PTE_COUNT(unmap_size); | 1504 | count = PAGE_SIZE_PTE_COUNT(unmap_size); |
1488 | for (i = 0; i < count; i++) | 1505 | for (i = 0; i < count; i++) |
1489 | pte[i] = 0ULL; | 1506 | pte[i] = 0ULL; |
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
1493 | unmapped += unmap_size; | 1510 | unmapped += unmap_size; |
1494 | } | 1511 | } |
1495 | 1512 | ||
1496 | BUG_ON(!is_power_of_2(unmapped)); | 1513 | BUG_ON(unmapped && !is_power_of_2(unmapped)); |
1497 | 1514 | ||
1498 | return unmapped; | 1515 | return unmapped; |
1499 | } | 1516 | } |
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id) | |||
1893 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1910 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1894 | } | 1911 | } |
1895 | 1912 | ||
1913 | #define DEFINE_FREE_PT_FN(LVL, FN) \ | ||
1914 | static void free_pt_##LVL (unsigned long __pt) \ | ||
1915 | { \ | ||
1916 | unsigned long p; \ | ||
1917 | u64 *pt; \ | ||
1918 | int i; \ | ||
1919 | \ | ||
1920 | pt = (u64 *)__pt; \ | ||
1921 | \ | ||
1922 | for (i = 0; i < 512; ++i) { \ | ||
1923 | if (!IOMMU_PTE_PRESENT(pt[i])) \ | ||
1924 | continue; \ | ||
1925 | \ | ||
1926 | p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \ | ||
1927 | FN(p); \ | ||
1928 | } \ | ||
1929 | free_page((unsigned long)pt); \ | ||
1930 | } | ||
1931 | |||
1932 | DEFINE_FREE_PT_FN(l2, free_page) | ||
1933 | DEFINE_FREE_PT_FN(l3, free_pt_l2) | ||
1934 | DEFINE_FREE_PT_FN(l4, free_pt_l3) | ||
1935 | DEFINE_FREE_PT_FN(l5, free_pt_l4) | ||
1936 | DEFINE_FREE_PT_FN(l6, free_pt_l5) | ||
1937 | |||
1896 | static void free_pagetable(struct protection_domain *domain) | 1938 | static void free_pagetable(struct protection_domain *domain) |
1897 | { | 1939 | { |
1898 | int i, j; | 1940 | unsigned long root = (unsigned long)domain->pt_root; |
1899 | u64 *p1, *p2, *p3; | ||
1900 | |||
1901 | p1 = domain->pt_root; | ||
1902 | |||
1903 | if (!p1) | ||
1904 | return; | ||
1905 | |||
1906 | for (i = 0; i < 512; ++i) { | ||
1907 | if (!IOMMU_PTE_PRESENT(p1[i])) | ||
1908 | continue; | ||
1909 | |||
1910 | p2 = IOMMU_PTE_PAGE(p1[i]); | ||
1911 | for (j = 0; j < 512; ++j) { | ||
1912 | if (!IOMMU_PTE_PRESENT(p2[j])) | ||
1913 | continue; | ||
1914 | p3 = IOMMU_PTE_PAGE(p2[j]); | ||
1915 | free_page((unsigned long)p3); | ||
1916 | } | ||
1917 | 1941 | ||
1918 | free_page((unsigned long)p2); | 1942 | switch (domain->mode) { |
1943 | case PAGE_MODE_NONE: | ||
1944 | break; | ||
1945 | case PAGE_MODE_1_LEVEL: | ||
1946 | free_page(root); | ||
1947 | break; | ||
1948 | case PAGE_MODE_2_LEVEL: | ||
1949 | free_pt_l2(root); | ||
1950 | break; | ||
1951 | case PAGE_MODE_3_LEVEL: | ||
1952 | free_pt_l3(root); | ||
1953 | break; | ||
1954 | case PAGE_MODE_4_LEVEL: | ||
1955 | free_pt_l4(root); | ||
1956 | break; | ||
1957 | case PAGE_MODE_5_LEVEL: | ||
1958 | free_pt_l5(root); | ||
1959 | break; | ||
1960 | case PAGE_MODE_6_LEVEL: | ||
1961 | free_pt_l6(root); | ||
1962 | break; | ||
1963 | default: | ||
1964 | BUG(); | ||
1919 | } | 1965 | } |
1920 | |||
1921 | free_page((unsigned long)p1); | ||
1922 | |||
1923 | domain->pt_root = NULL; | ||
1924 | } | 1966 | } |
1925 | 1967 | ||
1926 | static void free_gcr3_tbl_level1(u64 *tbl) | 1968 | static void free_gcr3_tbl_level1(u64 *tbl) |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644
index 000000000000..ebd0a4cff049
--- /dev/null
+++ b/drivers/iommu/arm-smmu.c
@@ -0,0 +1,1969 @@ | |||
1 | /* | ||
2 | * IOMMU API for ARM architected SMMU implementations. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Copyright (C) 2013 ARM Limited | ||
18 | * | ||
19 | * Author: Will Deacon <will.deacon@arm.com> | ||
20 | * | ||
21 | * This driver currently supports: | ||
22 | * - SMMUv1 and v2 implementations | ||
23 | * - Stream-matching and stream-indexing | ||
24 | * - v7/v8 long-descriptor format | ||
25 | * - Non-secure access to the SMMU | ||
26 | * - 4k and 64k pages, with contiguous pte hints. | ||
27 | * - Up to 39-bit addressing | ||
28 | * - Context fault reporting | ||
29 | */ | ||
30 | |||
31 | #define pr_fmt(fmt) "arm-smmu: " fmt | ||
32 | |||
33 | #include <linux/delay.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/iommu.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/of.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <linux/slab.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include <linux/amba/bus.h> | ||
47 | |||
48 | #include <asm/pgalloc.h> | ||
49 | |||
50 | /* Maximum number of stream IDs assigned to a single device */ | ||
51 | #define MAX_MASTER_STREAMIDS 8 | ||
52 | |||
53 | /* Maximum number of context banks per SMMU */ | ||
54 | #define ARM_SMMU_MAX_CBS 128 | ||
55 | |||
56 | /* Maximum number of mapping groups per SMMU */ | ||
57 | #define ARM_SMMU_MAX_SMRS 128 | ||
58 | |||
59 | /* Number of VMIDs per SMMU */ | ||
60 | #define ARM_SMMU_NUM_VMIDS 256 | ||
61 | |||
62 | /* SMMU global address space */ | ||
63 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) | ||
64 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) | ||
65 | |||
66 | /* Page table bits */ | ||
67 | #define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) | ||
68 | #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) | ||
69 | #define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) | ||
70 | #define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) | ||
71 | #define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) | ||
72 | #define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) | ||
73 | |||
74 | #if PAGE_SIZE == SZ_4K | ||
75 | #define ARM_SMMU_PTE_CONT_ENTRIES 16 | ||
76 | #elif PAGE_SIZE == SZ_64K | ||
77 | #define ARM_SMMU_PTE_CONT_ENTRIES 32 | ||
78 | #else | ||
79 | #define ARM_SMMU_PTE_CONT_ENTRIES 1 | ||
80 | #endif | ||
81 | |||
82 | #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) | ||
83 | #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) | ||
84 | #define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) | ||
85 | |||
86 | /* Stage-1 PTE */ | ||
87 | #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) | ||
88 | #define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) | ||
89 | #define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 | ||
90 | |||
91 | /* Stage-2 PTE */ | ||
92 | #define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) | ||
93 | #define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) | ||
94 | #define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) | ||
95 | #define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) | ||
96 | #define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) | ||
97 | #define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) | ||
98 | |||
99 | /* Configuration registers */ | ||
100 | #define ARM_SMMU_GR0_sCR0 0x0 | ||
101 | #define sCR0_CLIENTPD (1 << 0) | ||
102 | #define sCR0_GFRE (1 << 1) | ||
103 | #define sCR0_GFIE (1 << 2) | ||
104 | #define sCR0_GCFGFRE (1 << 4) | ||
105 | #define sCR0_GCFGFIE (1 << 5) | ||
106 | #define sCR0_USFCFG (1 << 10) | ||
107 | #define sCR0_VMIDPNE (1 << 11) | ||
108 | #define sCR0_PTM (1 << 12) | ||
109 | #define sCR0_FB (1 << 13) | ||
110 | #define sCR0_BSU_SHIFT 14 | ||
111 | #define sCR0_BSU_MASK 0x3 | ||
112 | |||
113 | /* Identification registers */ | ||
114 | #define ARM_SMMU_GR0_ID0 0x20 | ||
115 | #define ARM_SMMU_GR0_ID1 0x24 | ||
116 | #define ARM_SMMU_GR0_ID2 0x28 | ||
117 | #define ARM_SMMU_GR0_ID3 0x2c | ||
118 | #define ARM_SMMU_GR0_ID4 0x30 | ||
119 | #define ARM_SMMU_GR0_ID5 0x34 | ||
120 | #define ARM_SMMU_GR0_ID6 0x38 | ||
121 | #define ARM_SMMU_GR0_ID7 0x3c | ||
122 | #define ARM_SMMU_GR0_sGFSR 0x48 | ||
123 | #define ARM_SMMU_GR0_sGFSYNR0 0x50 | ||
124 | #define ARM_SMMU_GR0_sGFSYNR1 0x54 | ||
125 | #define ARM_SMMU_GR0_sGFSYNR2 0x58 | ||
126 | #define ARM_SMMU_GR0_PIDR0 0xfe0 | ||
127 | #define ARM_SMMU_GR0_PIDR1 0xfe4 | ||
128 | #define ARM_SMMU_GR0_PIDR2 0xfe8 | ||
129 | |||
130 | #define ID0_S1TS (1 << 30) | ||
131 | #define ID0_S2TS (1 << 29) | ||
132 | #define ID0_NTS (1 << 28) | ||
133 | #define ID0_SMS (1 << 27) | ||
134 | #define ID0_PTFS_SHIFT 24 | ||
135 | #define ID0_PTFS_MASK 0x2 | ||
136 | #define ID0_PTFS_V8_ONLY 0x2 | ||
137 | #define ID0_CTTW (1 << 14) | ||
138 | #define ID0_NUMIRPT_SHIFT 16 | ||
139 | #define ID0_NUMIRPT_MASK 0xff | ||
140 | #define ID0_NUMSMRG_SHIFT 0 | ||
141 | #define ID0_NUMSMRG_MASK 0xff | ||
142 | |||
143 | #define ID1_PAGESIZE (1 << 31) | ||
144 | #define ID1_NUMPAGENDXB_SHIFT 28 | ||
145 | #define ID1_NUMPAGENDXB_MASK 7 | ||
146 | #define ID1_NUMS2CB_SHIFT 16 | ||
147 | #define ID1_NUMS2CB_MASK 0xff | ||
148 | #define ID1_NUMCB_SHIFT 0 | ||
149 | #define ID1_NUMCB_MASK 0xff | ||
150 | |||
151 | #define ID2_OAS_SHIFT 4 | ||
152 | #define ID2_OAS_MASK 0xf | ||
153 | #define ID2_IAS_SHIFT 0 | ||
154 | #define ID2_IAS_MASK 0xf | ||
155 | #define ID2_UBS_SHIFT 8 | ||
156 | #define ID2_UBS_MASK 0xf | ||
157 | #define ID2_PTFS_4K (1 << 12) | ||
158 | #define ID2_PTFS_16K (1 << 13) | ||
159 | #define ID2_PTFS_64K (1 << 14) | ||
160 | |||
161 | #define PIDR2_ARCH_SHIFT 4 | ||
162 | #define PIDR2_ARCH_MASK 0xf | ||
163 | |||
164 | /* Global TLB invalidation */ | ||
165 | #define ARM_SMMU_GR0_STLBIALL 0x60 | ||
166 | #define ARM_SMMU_GR0_TLBIVMID 0x64 | ||
167 | #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 | ||
168 | #define ARM_SMMU_GR0_TLBIALLH 0x6c | ||
169 | #define ARM_SMMU_GR0_sTLBGSYNC 0x70 | ||
170 | #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 | ||
171 | #define sTLBGSTATUS_GSACTIVE (1 << 0) | ||
172 | #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ | ||
173 | |||
174 | /* Stream mapping registers */ | ||
175 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) | ||
176 | #define SMR_VALID (1 << 31) | ||
177 | #define SMR_MASK_SHIFT 16 | ||
178 | #define SMR_MASK_MASK 0x7fff | ||
179 | #define SMR_ID_SHIFT 0 | ||
180 | #define SMR_ID_MASK 0x7fff | ||
181 | |||
182 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) | ||
183 | #define S2CR_CBNDX_SHIFT 0 | ||
184 | #define S2CR_CBNDX_MASK 0xff | ||
185 | #define S2CR_TYPE_SHIFT 16 | ||
186 | #define S2CR_TYPE_MASK 0x3 | ||
187 | #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) | ||
188 | #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) | ||
189 | #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) | ||
190 | |||
191 | /* Context bank attribute registers */ | ||
192 | #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) | ||
193 | #define CBAR_VMID_SHIFT 0 | ||
194 | #define CBAR_VMID_MASK 0xff | ||
195 | #define CBAR_S1_MEMATTR_SHIFT 12 | ||
196 | #define CBAR_S1_MEMATTR_MASK 0xf | ||
197 | #define CBAR_S1_MEMATTR_WB 0xf | ||
198 | #define CBAR_TYPE_SHIFT 16 | ||
199 | #define CBAR_TYPE_MASK 0x3 | ||
200 | #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) | ||
201 | #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) | ||
202 | #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) | ||
203 | #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) | ||
204 | #define CBAR_IRPTNDX_SHIFT 24 | ||
205 | #define CBAR_IRPTNDX_MASK 0xff | ||
206 | |||
207 | #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) | ||
208 | #define CBA2R_RW64_32BIT (0 << 0) | ||
209 | #define CBA2R_RW64_64BIT (1 << 0) | ||
210 | |||
211 | /* Translation context bank */ | ||
212 | #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) | ||
213 | #define ARM_SMMU_CB(smmu, n) ((n) * (smmu)->pagesize) | ||
214 | |||
215 | #define ARM_SMMU_CB_SCTLR 0x0 | ||
216 | #define ARM_SMMU_CB_RESUME 0x8 | ||
217 | #define ARM_SMMU_CB_TTBCR2 0x10 | ||
218 | #define ARM_SMMU_CB_TTBR0_LO 0x20 | ||
219 | #define ARM_SMMU_CB_TTBR0_HI 0x24 | ||
220 | #define ARM_SMMU_CB_TTBCR 0x30 | ||
221 | #define ARM_SMMU_CB_S1_MAIR0 0x38 | ||
222 | #define ARM_SMMU_CB_FSR 0x58 | ||
223 | #define ARM_SMMU_CB_FAR_LO 0x60 | ||
224 | #define ARM_SMMU_CB_FAR_HI 0x64 | ||
225 | #define ARM_SMMU_CB_FSYNR0 0x68 | ||
226 | |||
227 | #define SCTLR_S1_ASIDPNE (1 << 12) | ||
228 | #define SCTLR_CFCFG (1 << 7) | ||
229 | #define SCTLR_CFIE (1 << 6) | ||
230 | #define SCTLR_CFRE (1 << 5) | ||
231 | #define SCTLR_E (1 << 4) | ||
232 | #define SCTLR_AFE (1 << 2) | ||
233 | #define SCTLR_TRE (1 << 1) | ||
234 | #define SCTLR_M (1 << 0) | ||
235 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) | ||
236 | |||
237 | #define RESUME_RETRY (0 << 0) | ||
238 | #define RESUME_TERMINATE (1 << 0) | ||
239 | |||
240 | #define TTBCR_EAE (1 << 31) | ||
241 | |||
242 | #define TTBCR_PASIZE_SHIFT 16 | ||
243 | #define TTBCR_PASIZE_MASK 0x7 | ||
244 | |||
245 | #define TTBCR_TG0_4K (0 << 14) | ||
246 | #define TTBCR_TG0_64K (1 << 14) | ||
247 | |||
248 | #define TTBCR_SH0_SHIFT 12 | ||
249 | #define TTBCR_SH0_MASK 0x3 | ||
250 | #define TTBCR_SH_NS 0 | ||
251 | #define TTBCR_SH_OS 2 | ||
252 | #define TTBCR_SH_IS 3 | ||
253 | |||
254 | #define TTBCR_ORGN0_SHIFT 10 | ||
255 | #define TTBCR_IRGN0_SHIFT 8 | ||
256 | #define TTBCR_RGN_MASK 0x3 | ||
257 | #define TTBCR_RGN_NC 0 | ||
258 | #define TTBCR_RGN_WBWA 1 | ||
259 | #define TTBCR_RGN_WT 2 | ||
260 | #define TTBCR_RGN_WB 3 | ||
261 | |||
262 | #define TTBCR_SL0_SHIFT 6 | ||
263 | #define TTBCR_SL0_MASK 0x3 | ||
264 | #define TTBCR_SL0_LVL_2 0 | ||
265 | #define TTBCR_SL0_LVL_1 1 | ||
266 | |||
267 | #define TTBCR_T1SZ_SHIFT 16 | ||
268 | #define TTBCR_T0SZ_SHIFT 0 | ||
269 | #define TTBCR_SZ_MASK 0xf | ||
270 | |||
271 | #define TTBCR2_SEP_SHIFT 15 | ||
272 | #define TTBCR2_SEP_MASK 0x7 | ||
273 | |||
274 | #define TTBCR2_PASIZE_SHIFT 0 | ||
275 | #define TTBCR2_PASIZE_MASK 0x7 | ||
276 | |||
277 | /* Common definitions for PASize and SEP fields */ | ||
278 | #define TTBCR2_ADDR_32 0 | ||
279 | #define TTBCR2_ADDR_36 1 | ||
280 | #define TTBCR2_ADDR_40 2 | ||
281 | #define TTBCR2_ADDR_42 3 | ||
282 | #define TTBCR2_ADDR_44 4 | ||
283 | #define TTBCR2_ADDR_48 5 | ||
284 | |||
285 | #define MAIR_ATTR_SHIFT(n) ((n) << 3) | ||
286 | #define MAIR_ATTR_MASK 0xff | ||
287 | #define MAIR_ATTR_DEVICE 0x04 | ||
288 | #define MAIR_ATTR_NC 0x44 | ||
289 | #define MAIR_ATTR_WBRWA 0xff | ||
290 | #define MAIR_ATTR_IDX_NC 0 | ||
291 | #define MAIR_ATTR_IDX_CACHE 1 | ||
292 | #define MAIR_ATTR_IDX_DEV 2 | ||
293 | |||
294 | #define FSR_MULTI (1 << 31) | ||
295 | #define FSR_SS (1 << 30) | ||
296 | #define FSR_UUT (1 << 8) | ||
297 | #define FSR_ASF (1 << 7) | ||
298 | #define FSR_TLBLKF (1 << 6) | ||
299 | #define FSR_TLBMCF (1 << 5) | ||
300 | #define FSR_EF (1 << 4) | ||
301 | #define FSR_PF (1 << 3) | ||
302 | #define FSR_AFF (1 << 2) | ||
303 | #define FSR_TF (1 << 1) | ||
304 | |||
305 | #define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | \ | ||
306 | FSR_TLBLKF) | ||
307 | #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ | ||
308 | FSR_EF | FSR_PF | FSR_TF) | ||
309 | |||
310 | #define FSYNR0_WNR (1 << 4) | ||
311 | |||
312 | struct arm_smmu_smr { | ||
313 | u8 idx; | ||
314 | u16 mask; | ||
315 | u16 id; | ||
316 | }; | ||
317 | |||
318 | struct arm_smmu_master { | ||
319 | struct device_node *of_node; | ||
320 | |||
321 | /* | ||
322 | * The following is specific to the master's position in the | ||
323 | * SMMU chain. | ||
324 | */ | ||
325 | struct rb_node node; | ||
326 | int num_streamids; | ||
327 | u16 streamids[MAX_MASTER_STREAMIDS]; | ||
328 | |||
329 | /* | ||
330 | * We only need to allocate these on the root SMMU, as we | ||
331 | * configure unmatched streams to bypass translation. | ||
332 | */ | ||
333 | struct arm_smmu_smr *smrs; | ||
334 | }; | ||
335 | |||
336 | struct arm_smmu_device { | ||
337 | struct device *dev; | ||
338 | struct device_node *parent_of_node; | ||
339 | |||
340 | void __iomem *base; | ||
341 | unsigned long size; | ||
342 | unsigned long pagesize; | ||
343 | |||
344 | #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) | ||
345 | #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) | ||
346 | #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) | ||
347 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) | ||
348 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) | ||
349 | u32 features; | ||
350 | int version; | ||
351 | |||
352 | u32 num_context_banks; | ||
353 | u32 num_s2_context_banks; | ||
354 | DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); | ||
355 | atomic_t irptndx; | ||
356 | |||
357 | u32 num_mapping_groups; | ||
358 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); | ||
359 | |||
360 | unsigned long input_size; | ||
361 | unsigned long s1_output_size; | ||
362 | unsigned long s2_output_size; | ||
363 | |||
364 | u32 num_global_irqs; | ||
365 | u32 num_context_irqs; | ||
366 | unsigned int *irqs; | ||
367 | |||
368 | DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS); | ||
369 | |||
370 | struct list_head list; | ||
371 | struct rb_root masters; | ||
372 | }; | ||
373 | |||
374 | struct arm_smmu_cfg { | ||
375 | struct arm_smmu_device *smmu; | ||
376 | u8 vmid; | ||
377 | u8 cbndx; | ||
378 | u8 irptndx; | ||
379 | u32 cbar; | ||
380 | pgd_t *pgd; | ||
381 | }; | ||
382 | |||
383 | struct arm_smmu_domain { | ||
384 | /* | ||
385 | * A domain can span across multiple, chained SMMUs and requires | ||
386 | * all devices within the domain to follow the same translation | ||
387 | * path. | ||
388 | */ | ||
389 | struct arm_smmu_device *leaf_smmu; | ||
390 | struct arm_smmu_cfg root_cfg; | ||
391 | phys_addr_t output_mask; | ||
392 | |||
393 | spinlock_t lock; | ||
394 | }; | ||
395 | |||
396 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | ||
397 | static LIST_HEAD(arm_smmu_devices); | ||
398 | |||
399 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | ||
400 | struct device_node *dev_node) | ||
401 | { | ||
402 | struct rb_node *node = smmu->masters.rb_node; | ||
403 | |||
404 | while (node) { | ||
405 | struct arm_smmu_master *master; | ||
406 | master = container_of(node, struct arm_smmu_master, node); | ||
407 | |||
408 | if (dev_node < master->of_node) | ||
409 | node = node->rb_left; | ||
410 | else if (dev_node > master->of_node) | ||
411 | node = node->rb_right; | ||
412 | else | ||
413 | return master; | ||
414 | } | ||
415 | |||
416 | return NULL; | ||
417 | } | ||
418 | |||
419 | static int insert_smmu_master(struct arm_smmu_device *smmu, | ||
420 | struct arm_smmu_master *master) | ||
421 | { | ||
422 | struct rb_node **new, *parent; | ||
423 | |||
424 | new = &smmu->masters.rb_node; | ||
425 | parent = NULL; | ||
426 | while (*new) { | ||
427 | struct arm_smmu_master *this; | ||
428 | this = container_of(*new, struct arm_smmu_master, node); | ||
429 | |||
430 | parent = *new; | ||
431 | if (master->of_node < this->of_node) | ||
432 | new = &((*new)->rb_left); | ||
433 | else if (master->of_node > this->of_node) | ||
434 | new = &((*new)->rb_right); | ||
435 | else | ||
436 | return -EEXIST; | ||
437 | } | ||
438 | |||
439 | rb_link_node(&master->node, parent, new); | ||
440 | rb_insert_color(&master->node, &smmu->masters); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static int register_smmu_master(struct arm_smmu_device *smmu, | ||
445 | struct device *dev, | ||
446 | struct of_phandle_args *masterspec) | ||
447 | { | ||
448 | int i; | ||
449 | struct arm_smmu_master *master; | ||
450 | |||
451 | master = find_smmu_master(smmu, masterspec->np); | ||
452 | if (master) { | ||
453 | dev_err(dev, | ||
454 | "rejecting multiple registrations for master device %s\n", | ||
455 | masterspec->np->name); | ||
456 | return -EBUSY; | ||
457 | } | ||
458 | |||
459 | if (masterspec->args_count > MAX_MASTER_STREAMIDS) { | ||
460 | dev_err(dev, | ||
461 | "reached maximum number (%d) of stream IDs for master device %s\n", | ||
462 | MAX_MASTER_STREAMIDS, masterspec->np->name); | ||
463 | return -ENOSPC; | ||
464 | } | ||
465 | |||
466 | master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); | ||
467 | if (!master) | ||
468 | return -ENOMEM; | ||
469 | |||
470 | master->of_node = masterspec->np; | ||
471 | master->num_streamids = masterspec->args_count; | ||
472 | |||
473 | for (i = 0; i < master->num_streamids; ++i) | ||
474 | master->streamids[i] = masterspec->args[i]; | ||
475 | |||
476 | return insert_smmu_master(smmu, master); | ||
477 | } | ||
478 | |||
479 | static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu) | ||
480 | { | ||
481 | struct arm_smmu_device *parent; | ||
482 | |||
483 | if (!smmu->parent_of_node) | ||
484 | return NULL; | ||
485 | |||
486 | spin_lock(&arm_smmu_devices_lock); | ||
487 | list_for_each_entry(parent, &arm_smmu_devices, list) | ||
488 | if (parent->dev->of_node == smmu->parent_of_node) | ||
489 | goto out_unlock; | ||
490 | |||
491 | parent = NULL; | ||
492 | dev_warn(smmu->dev, | ||
493 | "Failed to find SMMU parent despite parent in DT\n"); | ||
494 | out_unlock: | ||
495 | spin_unlock(&arm_smmu_devices_lock); | ||
496 | return parent; | ||
497 | } | ||
498 | |||
499 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) | ||
500 | { | ||
501 | int idx; | ||
502 | |||
503 | do { | ||
504 | idx = find_next_zero_bit(map, end, start); | ||
505 | if (idx == end) | ||
506 | return -ENOSPC; | ||
507 | } while (test_and_set_bit(idx, map)); | ||
508 | |||
509 | return idx; | ||
510 | } | ||
511 | |||
512 | static void __arm_smmu_free_bitmap(unsigned long *map, int idx) | ||
513 | { | ||
514 | clear_bit(idx, map); | ||
515 | } | ||
516 | |||
517 | /* Wait for any pending TLB invalidations to complete */ | ||
518 | static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | ||
519 | { | ||
520 | int count = 0; | ||
521 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
522 | |||
523 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); | ||
524 | while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) | ||
525 | & sTLBGSTATUS_GSACTIVE) { | ||
526 | cpu_relax(); | ||
527 | if (++count == TLB_LOOP_TIMEOUT) { | ||
528 | dev_err_ratelimited(smmu->dev, | ||
529 | "TLB sync timed out -- SMMU may be deadlocked\n"); | ||
530 | return; | ||
531 | } | ||
532 | udelay(1); | ||
533 | } | ||
534 | } | ||
535 | |||
536 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | ||
537 | { | ||
538 | int flags, ret; | ||
539 | u32 fsr, far, fsynr, resume; | ||
540 | unsigned long iova; | ||
541 | struct iommu_domain *domain = dev; | ||
542 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
543 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
544 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
545 | void __iomem *cb_base; | ||
546 | |||
547 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | ||
548 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); | ||
549 | |||
550 | if (!(fsr & FSR_FAULT)) | ||
551 | return IRQ_NONE; | ||
552 | |||
553 | if (fsr & FSR_IGN) | ||
554 | dev_err_ratelimited(smmu->dev, | ||
555 | "Unexpected context fault (fsr 0x%u)\n", | ||
556 | fsr); | ||
557 | |||
558 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | ||
559 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
560 | |||
561 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO); | ||
562 | iova = far; | ||
563 | #ifdef CONFIG_64BIT | ||
564 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI); | ||
565 | iova |= ((unsigned long)far << 32); | ||
566 | #endif | ||
567 | |||
568 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | ||
569 | ret = IRQ_HANDLED; | ||
570 | resume = RESUME_RETRY; | ||
571 | } else { | ||
572 | ret = IRQ_NONE; | ||
573 | resume = RESUME_TERMINATE; | ||
574 | } | ||
575 | |||
576 | /* Clear the faulting FSR */ | ||
577 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | ||
578 | |||
579 | /* Retry or terminate any stalled transactions */ | ||
580 | if (fsr & FSR_SS) | ||
581 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | ||
582 | |||
583 | return ret; | ||
584 | } | ||
585 | |||
586 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | ||
587 | { | ||
588 | u32 gfsr, gfsynr0, gfsynr1, gfsynr2; | ||
589 | struct arm_smmu_device *smmu = dev; | ||
590 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
591 | |||
592 | gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); | ||
593 | gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); | ||
594 | gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); | ||
595 | gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); | ||
596 | |||
597 | dev_err_ratelimited(smmu->dev, | ||
598 | "Unexpected global fault, this could be serious\n"); | ||
599 | dev_err_ratelimited(smmu->dev, | ||
600 | "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", | ||
601 | gfsr, gfsynr0, gfsynr1, gfsynr2); | ||
602 | |||
603 | writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); | ||
604 | return IRQ_NONE; | ||
605 | } | ||
606 | |||
607 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | ||
608 | { | ||
609 | u32 reg; | ||
610 | bool stage1; | ||
611 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
612 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
613 | void __iomem *cb_base, *gr0_base, *gr1_base; | ||
614 | |||
615 | gr0_base = ARM_SMMU_GR0(smmu); | ||
616 | gr1_base = ARM_SMMU_GR1(smmu); | ||
617 | stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS; | ||
618 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | ||
619 | |||
620 | /* CBAR */ | ||
621 | reg = root_cfg->cbar | | ||
622 | (root_cfg->vmid << CBAR_VMID_SHIFT); | ||
623 | if (smmu->version == 1) | ||
624 | reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; | ||
625 | |||
626 | /* Use the weakest memory type, so it is overridden by the pte */ | ||
627 | if (stage1) | ||
628 | reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); | ||
629 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); | ||
630 | |||
631 | if (smmu->version > 1) { | ||
632 | /* CBA2R */ | ||
633 | #ifdef CONFIG_64BIT | ||
634 | reg = CBA2R_RW64_64BIT; | ||
635 | #else | ||
636 | reg = CBA2R_RW64_32BIT; | ||
637 | #endif | ||
638 | writel_relaxed(reg, | ||
639 | gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx)); | ||
640 | |||
641 | /* TTBCR2 */ | ||
642 | switch (smmu->input_size) { | ||
643 | case 32: | ||
644 | reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); | ||
645 | break; | ||
646 | case 36: | ||
647 | reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); | ||
648 | break; | ||
649 | case 39: | ||
650 | reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); | ||
651 | break; | ||
652 | case 42: | ||
653 | reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); | ||
654 | break; | ||
655 | case 44: | ||
656 | reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); | ||
657 | break; | ||
658 | case 48: | ||
659 | reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); | ||
660 | break; | ||
661 | } | ||
662 | |||
663 | switch (smmu->s1_output_size) { | ||
664 | case 32: | ||
665 | reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); | ||
666 | break; | ||
667 | case 36: | ||
668 | reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); | ||
669 | break; | ||
670 | case 39: | ||
671 | reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); | ||
672 | break; | ||
673 | case 42: | ||
674 | reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); | ||
675 | break; | ||
676 | case 44: | ||
677 | reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); | ||
678 | break; | ||
679 | case 48: | ||
680 | reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | if (stage1) | ||
685 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); | ||
686 | } | ||
687 | |||
688 | /* TTBR0 */ | ||
689 | reg = __pa(root_cfg->pgd); | ||
690 | #ifndef __BIG_ENDIAN | ||
691 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | ||
692 | reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; | ||
693 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | ||
694 | #else | ||
695 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | ||
696 | reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; | ||
697 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | ||
698 | #endif | ||
699 | |||
700 | /* | ||
701 | * TTBCR | ||
702 | * We use long descriptor, with inner-shareable WBWA tables in TTBR0. | ||
703 | */ | ||
704 | if (smmu->version > 1) { | ||
705 | if (PAGE_SIZE == SZ_4K) | ||
706 | reg = TTBCR_TG0_4K; | ||
707 | else | ||
708 | reg = TTBCR_TG0_64K; | ||
709 | |||
710 | if (!stage1) { | ||
711 | switch (smmu->s2_output_size) { | ||
712 | case 32: | ||
713 | reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); | ||
714 | break; | ||
715 | case 36: | ||
716 | reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); | ||
717 | break; | ||
718 | case 40: | ||
719 | reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); | ||
720 | break; | ||
721 | case 42: | ||
722 | reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); | ||
723 | break; | ||
724 | case 44: | ||
725 | reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); | ||
726 | break; | ||
727 | case 48: | ||
728 | reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); | ||
729 | break; | ||
730 | } | ||
731 | } else { | ||
732 | reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; | ||
733 | } | ||
734 | } else { | ||
735 | reg = 0; | ||
736 | } | ||
737 | |||
738 | reg |= TTBCR_EAE | | ||
739 | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | | ||
740 | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | | ||
741 | (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | | ||
742 | (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); | ||
743 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
744 | |||
745 | /* MAIR0 (stage-1 only) */ | ||
746 | if (stage1) { | ||
747 | reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | | ||
748 | (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | | ||
749 | (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); | ||
750 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); | ||
751 | } | ||
752 | |||
753 | /* Nuke the TLB */ | ||
754 | writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID); | ||
755 | arm_smmu_tlb_sync(smmu); | ||
756 | |||
757 | /* SCTLR */ | ||
758 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | ||
759 | if (stage1) | ||
760 | reg |= SCTLR_S1_ASIDPNE; | ||
761 | #ifdef __BIG_ENDIAN | ||
762 | reg |= SCTLR_E; | ||
763 | #endif | ||
764 | writel(reg, cb_base + ARM_SMMU_CB_SCTLR); | ||
765 | } | ||
766 | |||
767 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, | ||
768 | struct device *dev) | ||
769 | { | ||
770 | int irq, ret, start; | ||
771 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
772 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
773 | struct arm_smmu_device *smmu, *parent; | ||
774 | |||
775 | /* | ||
776 | * Walk the SMMU chain to find the root device for this chain. | ||
777 | * We assume that no masters have translations which terminate | ||
778 | * early, and therefore check that the root SMMU does indeed have | ||
779 | * a StreamID for the master in question. | ||
780 | */ | ||
781 | parent = dev->archdata.iommu; | ||
782 | smmu_domain->output_mask = -1; | ||
783 | do { | ||
784 | smmu = parent; | ||
785 | smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1; | ||
786 | } while ((parent = find_parent_smmu(smmu))); | ||
787 | |||
788 | if (!find_smmu_master(smmu, dev->of_node)) { | ||
789 | dev_err(dev, "unable to find root SMMU for device\n"); | ||
790 | return -ENODEV; | ||
791 | } | ||
792 | |||
793 | ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS); | ||
794 | if (IS_ERR_VALUE(ret)) | ||
795 | return ret; | ||
796 | |||
797 | root_cfg->vmid = ret; | ||
798 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | ||
799 | /* | ||
800 | * We will likely want to change this if/when KVM gets | ||
801 | * involved. | ||
802 | */ | ||
803 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
804 | start = smmu->num_s2_context_banks; | ||
805 | } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) { | ||
806 | root_cfg->cbar = CBAR_TYPE_S2_TRANS; | ||
807 | start = 0; | ||
808 | } else { | ||
809 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
810 | start = smmu->num_s2_context_banks; | ||
811 | } | ||
812 | |||
813 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | ||
814 | smmu->num_context_banks); | ||
815 | if (IS_ERR_VALUE(ret)) | ||
816 | goto out_free_vmid; | ||
817 | |||
818 | root_cfg->cbndx = ret; | ||
819 | |||
820 | if (smmu->version == 1) { | ||
821 | root_cfg->irptndx = atomic_inc_return(&smmu->irptndx); | ||
822 | root_cfg->irptndx %= smmu->num_context_irqs; | ||
823 | } else { | ||
824 | root_cfg->irptndx = root_cfg->cbndx; | ||
825 | } | ||
826 | |||
827 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | ||
828 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | ||
829 | "arm-smmu-context-fault", domain); | ||
830 | if (IS_ERR_VALUE(ret)) { | ||
831 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | ||
832 | root_cfg->irptndx, irq); | ||
833 | root_cfg->irptndx = -1; | ||
834 | goto out_free_context; | ||
835 | } | ||
836 | |||
837 | root_cfg->smmu = smmu; | ||
838 | arm_smmu_init_context_bank(smmu_domain); | ||
839 | return ret; | ||
840 | |||
841 | out_free_context: | ||
842 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | ||
843 | out_free_vmid: | ||
844 | __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid); | ||
845 | return ret; | ||
846 | } | ||
847 | |||
848 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | ||
849 | { | ||
850 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
851 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
852 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
853 | int irq; | ||
854 | |||
855 | if (!smmu) | ||
856 | return; | ||
857 | |||
858 | if (root_cfg->irptndx != -1) { | ||
859 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | ||
860 | free_irq(irq, domain); | ||
861 | } | ||
862 | |||
863 | __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid); | ||
864 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | ||
865 | } | ||
866 | |||
867 | static int arm_smmu_domain_init(struct iommu_domain *domain) | ||
868 | { | ||
869 | struct arm_smmu_domain *smmu_domain; | ||
870 | pgd_t *pgd; | ||
871 | |||
872 | /* | ||
873 | * Allocate the domain and initialise some of its data structures. | ||
874 | * We can't really do anything meaningful until we've added a | ||
875 | * master. | ||
876 | */ | ||
877 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); | ||
878 | if (!smmu_domain) | ||
879 | return -ENOMEM; | ||
880 | |||
881 | pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | ||
882 | if (!pgd) | ||
883 | goto out_free_domain; | ||
884 | smmu_domain->root_cfg.pgd = pgd; | ||
885 | |||
886 | spin_lock_init(&smmu_domain->lock); | ||
887 | domain->priv = smmu_domain; | ||
888 | return 0; | ||
889 | |||
890 | out_free_domain: | ||
891 | kfree(smmu_domain); | ||
892 | return -ENOMEM; | ||
893 | } | ||
894 | |||
895 | static void arm_smmu_free_ptes(pmd_t *pmd) | ||
896 | { | ||
897 | pgtable_t table = pmd_pgtable(*pmd); | ||
898 | pgtable_page_dtor(table); | ||
899 | __free_page(table); | ||
900 | } | ||
901 | |||
902 | static void arm_smmu_free_pmds(pud_t *pud) | ||
903 | { | ||
904 | int i; | ||
905 | pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); | ||
906 | |||
907 | pmd = pmd_base; | ||
908 | for (i = 0; i < PTRS_PER_PMD; ++i) { | ||
909 | if (pmd_none(*pmd)) | ||
910 | continue; | ||
911 | |||
912 | arm_smmu_free_ptes(pmd); | ||
913 | pmd++; | ||
914 | } | ||
915 | |||
916 | pmd_free(NULL, pmd_base); | ||
917 | } | ||
918 | |||
919 | static void arm_smmu_free_puds(pgd_t *pgd) | ||
920 | { | ||
921 | int i; | ||
922 | pud_t *pud, *pud_base = pud_offset(pgd, 0); | ||
923 | |||
924 | pud = pud_base; | ||
925 | for (i = 0; i < PTRS_PER_PUD; ++i) { | ||
926 | if (pud_none(*pud)) | ||
927 | continue; | ||
928 | |||
929 | arm_smmu_free_pmds(pud); | ||
930 | pud++; | ||
931 | } | ||
932 | |||
933 | pud_free(NULL, pud_base); | ||
934 | } | ||
935 | |||
936 | static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) | ||
937 | { | ||
938 | int i; | ||
939 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
940 | pgd_t *pgd, *pgd_base = root_cfg->pgd; | ||
941 | |||
942 | /* | ||
943 | * Recursively free the page tables for this domain. We don't | ||
944 | * care about speculative TLB filling, because the TLB will be | ||
945 | * nuked next time this context bank is re-allocated and no devices | ||
946 | * currently map to these tables. | ||
947 | */ | ||
948 | pgd = pgd_base; | ||
949 | for (i = 0; i < PTRS_PER_PGD; ++i) { | ||
950 | if (pgd_none(*pgd)) | ||
951 | continue; | ||
952 | arm_smmu_free_puds(pgd); | ||
953 | pgd++; | ||
954 | } | ||
955 | |||
956 | kfree(pgd_base); | ||
957 | } | ||
958 | |||
959 | static void arm_smmu_domain_destroy(struct iommu_domain *domain) | ||
960 | { | ||
961 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
962 | arm_smmu_destroy_domain_context(domain); | ||
963 | arm_smmu_free_pgtables(smmu_domain); | ||
964 | kfree(smmu_domain); | ||
965 | } | ||
966 | |||
967 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | ||
968 | struct arm_smmu_master *master) | ||
969 | { | ||
970 | int i; | ||
971 | struct arm_smmu_smr *smrs; | ||
972 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
973 | |||
974 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) | ||
975 | return 0; | ||
976 | |||
977 | if (master->smrs) | ||
978 | return -EEXIST; | ||
979 | |||
980 | smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL); | ||
981 | if (!smrs) { | ||
982 | dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n", | ||
983 | master->num_streamids, master->of_node->name); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | /* Allocate the SMRs on the root SMMU */ | ||
988 | for (i = 0; i < master->num_streamids; ++i) { | ||
989 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, | ||
990 | smmu->num_mapping_groups); | ||
991 | if (IS_ERR_VALUE(idx)) { | ||
992 | dev_err(smmu->dev, "failed to allocate free SMR\n"); | ||
993 | goto err_free_smrs; | ||
994 | } | ||
995 | |||
996 | smrs[i] = (struct arm_smmu_smr) { | ||
997 | .idx = idx, | ||
998 | .mask = 0, /* We don't currently share SMRs */ | ||
999 | .id = master->streamids[i], | ||
1000 | }; | ||
1001 | } | ||
1002 | |||
1003 | /* It worked! Now, poke the actual hardware */ | ||
1004 | for (i = 0; i < master->num_streamids; ++i) { | ||
1005 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | | ||
1006 | smrs[i].mask << SMR_MASK_SHIFT; | ||
1007 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); | ||
1008 | } | ||
1009 | |||
1010 | master->smrs = smrs; | ||
1011 | return 0; | ||
1012 | |||
1013 | err_free_smrs: | ||
1014 | while (--i >= 0) | ||
1015 | __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); | ||
1016 | kfree(smrs); | ||
1017 | return -ENOSPC; | ||
1018 | } | ||
1019 | |||
1020 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | ||
1021 | struct arm_smmu_master *master) | ||
1022 | { | ||
1023 | int i; | ||
1024 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1025 | struct arm_smmu_smr *smrs = master->smrs; | ||
1026 | |||
1027 | /* Invalidate the SMRs before freeing back to the allocator */ | ||
1028 | for (i = 0; i < master->num_streamids; ++i) { | ||
1029 | u8 idx = smrs[i].idx; | ||
1030 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); | ||
1031 | __arm_smmu_free_bitmap(smmu->smr_map, idx); | ||
1032 | } | ||
1033 | |||
1034 | master->smrs = NULL; | ||
1035 | kfree(smrs); | ||
1036 | } | ||
1037 | |||
1038 | static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, | ||
1039 | struct arm_smmu_master *master) | ||
1040 | { | ||
1041 | int i; | ||
1042 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1043 | |||
1044 | for (i = 0; i < master->num_streamids; ++i) { | ||
1045 | u16 sid = master->streamids[i]; | ||
1046 | writel_relaxed(S2CR_TYPE_BYPASS, | ||
1047 | gr0_base + ARM_SMMU_GR0_S2CR(sid)); | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | ||
1052 | struct arm_smmu_master *master) | ||
1053 | { | ||
1054 | int i, ret; | ||
1055 | struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu; | ||
1056 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1057 | |||
1058 | ret = arm_smmu_master_configure_smrs(smmu, master); | ||
1059 | if (ret) | ||
1060 | return ret; | ||
1061 | |||
1062 | /* Bypass the leaves */ | ||
1063 | smmu = smmu_domain->leaf_smmu; | ||
1064 | while ((parent = find_parent_smmu(smmu))) { | ||
1065 | /* | ||
1066 | * We won't have a StreamID match for anything but the root | ||
1067 | * smmu, so we only need to worry about StreamID indexing, | ||
1068 | * where we must install bypass entries in the S2CRs. | ||
1069 | */ | ||
1070 | if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) | ||
1071 | continue; | ||
1072 | |||
1073 | arm_smmu_bypass_stream_mapping(smmu, master); | ||
1074 | smmu = parent; | ||
1075 | } | ||
1076 | |||
1077 | /* Now we're at the root, time to point at our context bank */ | ||
1078 | for (i = 0; i < master->num_streamids; ++i) { | ||
1079 | u32 idx, s2cr; | ||
1080 | idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; | ||
1081 | s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) | | ||
1082 | (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); | ||
1083 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); | ||
1084 | } | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | ||
1090 | struct arm_smmu_master *master) | ||
1091 | { | ||
1092 | struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu; | ||
1093 | |||
1094 | /* | ||
1095 | * We *must* clear the S2CR first, because freeing the SMR means | ||
1096 | * that it can be re-allocated immediately. | ||
1097 | */ | ||
1098 | arm_smmu_bypass_stream_mapping(smmu, master); | ||
1099 | arm_smmu_master_free_smrs(smmu, master); | ||
1100 | } | ||
1101 | |||
1102 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | ||
1103 | { | ||
1104 | int ret = -EINVAL; | ||
1105 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1106 | struct arm_smmu_device *device_smmu = dev->archdata.iommu; | ||
1107 | struct arm_smmu_master *master; | ||
1108 | |||
1109 | if (!device_smmu) { | ||
1110 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); | ||
1111 | return -ENXIO; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Sanity check the domain. We don't currently support domains | ||
1116 | * that cross between different SMMU chains. | ||
1117 | */ | ||
1118 | spin_lock(&smmu_domain->lock); | ||
1119 | if (!smmu_domain->leaf_smmu) { | ||
1120 | /* Now that we have a master, we can finalise the domain */ | ||
1121 | ret = arm_smmu_init_domain_context(domain, dev); | ||
1122 | if (IS_ERR_VALUE(ret)) | ||
1123 | goto err_unlock; | ||
1124 | |||
1125 | smmu_domain->leaf_smmu = device_smmu; | ||
1126 | } else if (smmu_domain->leaf_smmu != device_smmu) { | ||
1127 | dev_err(dev, | ||
1128 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | ||
1129 | dev_name(smmu_domain->leaf_smmu->dev), | ||
1130 | dev_name(device_smmu->dev)); | ||
1131 | goto err_unlock; | ||
1132 | } | ||
1133 | spin_unlock(&smmu_domain->lock); | ||
1134 | |||
1135 | /* Looks ok, so add the device to the domain */ | ||
1136 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | ||
1137 | if (!master) | ||
1138 | return -ENODEV; | ||
1139 | |||
1140 | return arm_smmu_domain_add_master(smmu_domain, master); | ||
1141 | |||
1142 | err_unlock: | ||
1143 | spin_unlock(&smmu_domain->lock); | ||
1144 | return ret; | ||
1145 | } | ||
1146 | |||
1147 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | ||
1148 | { | ||
1149 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1150 | struct arm_smmu_master *master; | ||
1151 | |||
1152 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | ||
1153 | if (master) | ||
1154 | arm_smmu_domain_remove_master(smmu_domain, master); | ||
1155 | } | ||
1156 | |||
1157 | static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, | ||
1158 | size_t size) | ||
1159 | { | ||
1160 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | ||
1161 | |||
1162 | /* | ||
1163 | * If the SMMU can't walk tables in the CPU caches, treat them | ||
1164 | * like non-coherent DMA since we need to flush the new entries | ||
1165 | * all the way out to memory. There's no possibility of recursion | ||
1166 | * here as the SMMU table walker will not be wired through another | ||
1167 | * SMMU. | ||
1168 | */ | ||
1169 | if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) | ||
1170 | dma_map_page(smmu->dev, virt_to_page(addr), offset, size, | ||
1171 | DMA_TO_DEVICE); | ||
1172 | } | ||
1173 | |||
1174 | static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, | ||
1175 | unsigned long end) | ||
1176 | { | ||
1177 | return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && | ||
1178 | (addr + ARM_SMMU_PTE_CONT_SIZE <= end); | ||
1179 | } | ||
1180 | |||
1181 | static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | ||
1182 | unsigned long addr, unsigned long end, | ||
1183 | unsigned long pfn, int flags, int stage) | ||
1184 | { | ||
1185 | pte_t *pte, *start; | ||
1186 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; | ||
1187 | |||
1188 | if (pmd_none(*pmd)) { | ||
1189 | /* Allocate a new set of tables */ | ||
1190 | pgtable_t table = alloc_page(PGALLOC_GFP); | ||
1191 | if (!table) | ||
1192 | return -ENOMEM; | ||
1193 | |||
1194 | arm_smmu_flush_pgtable(smmu, page_address(table), | ||
1195 | ARM_SMMU_PTE_HWTABLE_SIZE); | ||
1196 | pgtable_page_ctor(table); | ||
1197 | pmd_populate(NULL, pmd, table); | ||
1198 | arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); | ||
1199 | } | ||
1200 | |||
1201 | if (stage == 1) { | ||
1202 | pteval |= ARM_SMMU_PTE_AP_UNPRIV; | ||
1203 | if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ)) | ||
1204 | pteval |= ARM_SMMU_PTE_AP_RDONLY; | ||
1205 | |||
1206 | if (flags & IOMMU_CACHE) | ||
1207 | pteval |= (MAIR_ATTR_IDX_CACHE << | ||
1208 | ARM_SMMU_PTE_ATTRINDX_SHIFT); | ||
1209 | } else { | ||
1210 | pteval |= ARM_SMMU_PTE_HAP_FAULT; | ||
1211 | if (flags & IOMMU_READ) | ||
1212 | pteval |= ARM_SMMU_PTE_HAP_READ; | ||
1213 | if (flags & IOMMU_WRITE) | ||
1214 | pteval |= ARM_SMMU_PTE_HAP_WRITE; | ||
1215 | if (flags & IOMMU_CACHE) | ||
1216 | pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; | ||
1217 | else | ||
1218 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; | ||
1219 | } | ||
1220 | |||
1221 | /* If no access, create a faulting entry to avoid TLB fills */ | ||
1222 | if (!(flags & (IOMMU_READ | IOMMU_WRITE))) | ||
1223 | pteval &= ~ARM_SMMU_PTE_PAGE; | ||
1224 | |||
1225 | pteval |= ARM_SMMU_PTE_SH_IS; | ||
1226 | start = pmd_page_vaddr(*pmd) + pte_index(addr); | ||
1227 | pte = start; | ||
1228 | |||
1229 | /* | ||
1230 | * Install the page table entries. This is fairly complicated | ||
1231 | * since we attempt to make use of the contiguous hint in the | ||
1232 | * ptes where possible. The contiguous hint indicates a series | ||
1233 | * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically | ||
1234 | * contiguous region with the following constraints: | ||
1235 | * | ||
1236 | * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE | ||
1237 | * - Each pte in the region has the contiguous hint bit set | ||
1238 | * | ||
1239 | * This complicates unmapping (also handled by this code, when | ||
1240 | * neither IOMMU_READ nor IOMMU_WRITE is set) because it is | ||
1241 | * possible, yet highly unlikely, that a client may unmap only | ||
1242 | * part of a contiguous range. This requires clearing of the | ||
1243 | * contiguous hint bits in the range before installing the new | ||
1244 | * faulting entries. | ||
1245 | * | ||
1246 | * Note that re-mapping an address range without first unmapping | ||
1247 | * it is not supported, so TLB invalidation is not required here | ||
1248 | * and is instead performed at unmap and domain-init time. | ||
1249 | */ | ||
1250 | do { | ||
1251 | int i = 1; | ||
1252 | pteval &= ~ARM_SMMU_PTE_CONT; | ||
1253 | |||
1254 | if (arm_smmu_pte_is_contiguous_range(addr, end)) { | ||
1255 | i = ARM_SMMU_PTE_CONT_ENTRIES; | ||
1256 | pteval |= ARM_SMMU_PTE_CONT; | ||
1257 | } else if (pte_val(*pte) & | ||
1258 | (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { | ||
1259 | int j; | ||
1260 | pte_t *cont_start; | ||
1261 | unsigned long idx = pte_index(addr); | ||
1262 | |||
1263 | idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); | ||
1264 | cont_start = pmd_page_vaddr(*pmd) + idx; | ||
1265 | for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) | ||
1266 | pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT; | ||
1267 | |||
1268 | arm_smmu_flush_pgtable(smmu, cont_start, | ||
1269 | sizeof(*pte) * | ||
1270 | ARM_SMMU_PTE_CONT_ENTRIES); | ||
1271 | } | ||
1272 | |||
1273 | do { | ||
1274 | *pte = pfn_pte(pfn, __pgprot(pteval)); | ||
1275 | } while (pte++, pfn++, addr += PAGE_SIZE, --i); | ||
1276 | } while (addr != end); | ||
1277 | |||
1278 | arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); | ||
1279 | return 0; | ||
1280 | } | ||
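
The contiguous-hint bookkeeping above ultimately rests on the alignment test in arm_smmu_pte_is_contiguous_range(). Below is a minimal standalone sketch of that test, assuming 4K pages and a 16-entry (64K) contiguous span; the real constants are defined elsewhere in arm-smmu.c and may differ.

    #include <stdbool.h>
    #include <stdio.h>

    #define EX_PAGE_SIZE      4096UL                     /* assumed 4K pages */
    #define EX_CONT_ENTRIES   16UL                       /* assumed entries per hint */
    #define EX_CONT_SIZE      (EX_CONT_ENTRIES * EX_PAGE_SIZE)
    #define EX_CONT_MASK      (~(EX_CONT_SIZE - 1))

    /* Mirrors arm_smmu_pte_is_contiguous_range(): the start must be aligned
     * to the contiguous span and the whole span must still fit before 'end'. */
    static bool cont_range_ok(unsigned long addr, unsigned long end)
    {
            return !(addr & ~EX_CONT_MASK) && (addr + EX_CONT_SIZE <= end);
    }

    int main(void)
    {
            printf("%d\n", cont_range_ok(0x10000, 0x20000)); /* 1: aligned, 64K available */
            printf("%d\n", cont_range_ok(0x11000, 0x21000)); /* 0: start not 64K-aligned  */
            printf("%d\n", cont_range_ok(0x10000, 0x18000)); /* 0: only 32K left in range */
            return 0;
    }

Only ranges that pass this test receive the hint; everything else falls back to single-page entries, which is why the partial-unmap path has to scrub the hint from the surrounding entries first.
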
1281 | |||
1282 | static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | ||
1283 | unsigned long addr, unsigned long end, | ||
1284 | phys_addr_t phys, int flags, int stage) | ||
1285 | { | ||
1286 | int ret; | ||
1287 | pmd_t *pmd; | ||
1288 | unsigned long next, pfn = __phys_to_pfn(phys); | ||
1289 | |||
1290 | #ifndef __PAGETABLE_PMD_FOLDED | ||
1291 | if (pud_none(*pud)) { | ||
1292 | pmd = pmd_alloc_one(NULL, addr); | ||
1293 | if (!pmd) | ||
1294 | return -ENOMEM; | ||
1295 | } else | ||
1296 | #endif | ||
1297 | pmd = pmd_offset(pud, addr); | ||
1298 | |||
1299 | do { | ||
1300 | next = pmd_addr_end(addr, end); | ||
1301 | ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, | ||
1302 | flags, stage); | ||
1303 | pud_populate(NULL, pud, pmd); | ||
1304 | arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); | ||
1305 | phys += next - addr; | ||
1306 | } while (pmd++, addr = next, addr < end); | ||
1307 | |||
1308 | return ret; | ||
1309 | } | ||
1310 | |||
1311 | static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | ||
1312 | unsigned long addr, unsigned long end, | ||
1313 | phys_addr_t phys, int flags, int stage) | ||
1314 | { | ||
1315 | int ret = 0; | ||
1316 | pud_t *pud; | ||
1317 | unsigned long next; | ||
1318 | |||
1319 | #ifndef __PAGETABLE_PUD_FOLDED | ||
1320 | if (pgd_none(*pgd)) { | ||
1321 | pud = pud_alloc_one(NULL, addr); | ||
1322 | if (!pud) | ||
1323 | return -ENOMEM; | ||
1324 | } else | ||
1325 | #endif | ||
1326 | pud = pud_offset(pgd, addr); | ||
1327 | |||
1328 | do { | ||
1329 | next = pud_addr_end(addr, end); | ||
1330 | ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, | ||
1331 | flags, stage); | ||
1332 | pgd_populate(NULL, pgd, pud); | ||
1333 | arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); | ||
1334 | phys += next - addr; | ||
1335 | } while (pud++, addr = next, addr < end); | ||
1336 | |||
1337 | return ret; | ||
1338 | } | ||
1339 | |||
1340 | static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | ||
1341 | unsigned long iova, phys_addr_t paddr, | ||
1342 | size_t size, int flags) | ||
1343 | { | ||
1344 | int ret, stage; | ||
1345 | unsigned long end; | ||
1346 | phys_addr_t input_mask, output_mask; | ||
1347 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1348 | pgd_t *pgd = root_cfg->pgd; | ||
1349 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1350 | |||
1351 | if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { | ||
1352 | stage = 2; | ||
1353 | output_mask = (1ULL << smmu->s2_output_size) - 1; | ||
1354 | } else { | ||
1355 | stage = 1; | ||
1356 | output_mask = (1ULL << smmu->s1_output_size) - 1; | ||
1357 | } | ||
1358 | |||
1359 | if (!pgd) | ||
1360 | return -EINVAL; | ||
1361 | |||
1362 | if (size & ~PAGE_MASK) | ||
1363 | return -EINVAL; | ||
1364 | |||
1365 | input_mask = (1ULL << smmu->input_size) - 1; | ||
1366 | if ((phys_addr_t)iova & ~input_mask) | ||
1367 | return -ERANGE; | ||
1368 | |||
1369 | if (paddr & ~output_mask) | ||
1370 | return -ERANGE; | ||
1371 | |||
1372 | spin_lock(&smmu_domain->lock); | ||
1373 | pgd += pgd_index(iova); | ||
1374 | end = iova + size; | ||
1375 | do { | ||
1376 | unsigned long next = pgd_addr_end(iova, end); | ||
1377 | |||
1378 | ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, | ||
1379 | flags, stage); | ||
1380 | if (ret) | ||
1381 | goto out_unlock; | ||
1382 | |||
1383 | paddr += next - iova; | ||
1384 | iova = next; | ||
1385 | } while (pgd++, iova != end); | ||
1386 | |||
1387 | out_unlock: | ||
1388 | spin_unlock(&smmu_domain->lock); | ||
1389 | |||
1390 | /* Ensure new page tables are visible to the hardware walker */ | ||
1391 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | ||
1392 | dsb(); | ||
1393 | |||
1394 | return ret; | ||
1395 | } | ||
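
The -ERANGE checks above compare the requested addresses against masks built from the probed address sizes. A quick standalone illustration with example widths of 32 input bits and 40 output bits (the real values come from arm_smmu_device_cfg_probe() and the domain's output_mask):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int input_size = 32, output_size = 40;  /* example widths */
            uint64_t input_mask  = (1ULL << input_size) - 1;
            uint64_t output_mask = (1ULL << output_size) - 1;

            uint64_t iova  = 0x100000000ULL; /* bit 32 set: outside a 32-bit IOVA space */
            uint64_t paddr = 0x40000000ULL;  /* 1GB: fits comfortably in 40 bits */

            /* Any bits set above the mask mean the address cannot be expressed */
            printf("iova in range:  %d\n", !(iova & ~input_mask));   /* 0 -> -ERANGE */
            printf("paddr in range: %d\n", !(paddr & ~output_mask)); /* 1 -> ok      */
            return 0;
    }
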
1396 | |||
1397 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | ||
1398 | phys_addr_t paddr, size_t size, int flags) | ||
1399 | { | ||
1400 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1401 | struct arm_smmu_device *smmu = smmu_domain->leaf_smmu; | ||
1402 | |||
1403 | if (!smmu_domain || !smmu) | ||
1404 | return -ENODEV; | ||
1405 | |||
1406 | /* Check for silent address truncation up the SMMU chain. */ | ||
1407 | if ((phys_addr_t)iova & ~smmu_domain->output_mask) | ||
1408 | return -ERANGE; | ||
1409 | |||
1410 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags); | ||
1411 | } | ||
1412 | |||
1413 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | ||
1414 | size_t size) | ||
1415 | { | ||
1416 | int ret; | ||
1417 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1418 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1419 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1420 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1421 | |||
1422 | ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); | ||
1423 | writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID); | ||
1424 | arm_smmu_tlb_sync(smmu); | ||
1425 | return ret ? ret : size; | ||
1426 | } | ||
1427 | |||
1428 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | ||
1429 | dma_addr_t iova) | ||
1430 | { | ||
1431 | pgd_t *pgd; | ||
1432 | pud_t *pud; | ||
1433 | pmd_t *pmd; | ||
1434 | pte_t *pte; | ||
1435 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1436 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1437 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1438 | |||
1439 | spin_lock(&smmu_domain->lock); | ||
1440 | pgd = root_cfg->pgd; | ||
1441 | if (!pgd) | ||
1442 | goto err_unlock; | ||
1443 | |||
1444 | pgd += pgd_index(iova); | ||
1445 | if (pgd_none_or_clear_bad(pgd)) | ||
1446 | goto err_unlock; | ||
1447 | |||
1448 | pud = pud_offset(pgd, iova); | ||
1449 | if (pud_none_or_clear_bad(pud)) | ||
1450 | goto err_unlock; | ||
1451 | |||
1452 | pmd = pmd_offset(pud, iova); | ||
1453 | if (pmd_none_or_clear_bad(pmd)) | ||
1454 | goto err_unlock; | ||
1455 | |||
1456 | pte = pmd_page_vaddr(*pmd) + pte_index(iova); | ||
1457 | if (pte_none(*pte)) | ||
1458 | goto err_unlock; | ||
1459 | |||
1460 | spin_unlock(&smmu_domain->lock); | ||
1461 | return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); | ||
1462 | |||
1463 | err_unlock: | ||
1464 | spin_unlock(&smmu_domain->lock); | ||
1465 | dev_warn(smmu->dev, | ||
1466 | "invalid (corrupt?) page tables detected for iova 0x%llx\n", | ||
1467 | (unsigned long long)iova); | ||
1468 | return -EINVAL; | ||
1469 | } | ||
1470 | |||
1471 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, | ||
1472 | unsigned long cap) | ||
1473 | { | ||
1474 | unsigned long caps = 0; | ||
1475 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1476 | |||
1477 | if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | ||
1478 | caps |= IOMMU_CAP_CACHE_COHERENCY; | ||
1479 | |||
1480 | return !!(cap & caps); | ||
1481 | } | ||
1482 | |||
1483 | static int arm_smmu_add_device(struct device *dev) | ||
1484 | { | ||
1485 | struct arm_smmu_device *child, *parent, *smmu; | ||
1486 | struct arm_smmu_master *master = NULL; | ||
1487 | |||
1488 | spin_lock(&arm_smmu_devices_lock); | ||
1489 | list_for_each_entry(parent, &arm_smmu_devices, list) { | ||
1490 | smmu = parent; | ||
1491 | |||
1492 | /* Try to find a child of the current SMMU. */ | ||
1493 | list_for_each_entry(child, &arm_smmu_devices, list) { | ||
1494 | if (child->parent_of_node == parent->dev->of_node) { | ||
1495 | /* Does the child sit above our master? */ | ||
1496 | master = find_smmu_master(child, dev->of_node); | ||
1497 | if (master) { | ||
1498 | smmu = NULL; | ||
1499 | break; | ||
1500 | } | ||
1501 | } | ||
1502 | } | ||
1503 | |||
1504 | /* We found some children, so keep searching. */ | ||
1505 | if (!smmu) { | ||
1506 | master = NULL; | ||
1507 | continue; | ||
1508 | } | ||
1509 | |||
1510 | master = find_smmu_master(smmu, dev->of_node); | ||
1511 | if (master) | ||
1512 | break; | ||
1513 | } | ||
1514 | spin_unlock(&arm_smmu_devices_lock); | ||
1515 | |||
1516 | if (!master) | ||
1517 | return -ENODEV; | ||
1518 | |||
1519 | dev->archdata.iommu = smmu; | ||
1520 | return 0; | ||
1521 | } | ||
1522 | |||
1523 | static void arm_smmu_remove_device(struct device *dev) | ||
1524 | { | ||
1525 | dev->archdata.iommu = NULL; | ||
1526 | } | ||
1527 | |||
1528 | static struct iommu_ops arm_smmu_ops = { | ||
1529 | .domain_init = arm_smmu_domain_init, | ||
1530 | .domain_destroy = arm_smmu_domain_destroy, | ||
1531 | .attach_dev = arm_smmu_attach_dev, | ||
1532 | .detach_dev = arm_smmu_detach_dev, | ||
1533 | .map = arm_smmu_map, | ||
1534 | .unmap = arm_smmu_unmap, | ||
1535 | .iova_to_phys = arm_smmu_iova_to_phys, | ||
1536 | .domain_has_cap = arm_smmu_domain_has_cap, | ||
1537 | .add_device = arm_smmu_add_device, | ||
1538 | .remove_device = arm_smmu_remove_device, | ||
1539 | .pgsize_bitmap = (SECTION_SIZE | | ||
1540 | ARM_SMMU_PTE_CONT_SIZE | | ||
1541 | PAGE_SIZE), | ||
1542 | }; | ||
1543 | |||
1544 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | ||
1545 | { | ||
1546 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1547 | int i = 0; | ||
1548 | u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); | ||
1549 | |||
1550 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | ||
1551 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | ||
1552 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); | ||
1553 | writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); | ||
1554 | } | ||
1555 | |||
1556 | /* Invalidate the TLB, just in case */ | ||
1557 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); | ||
1558 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); | ||
1559 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | ||
1560 | |||
1561 | /* Enable fault reporting */ | ||
1562 | scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); | ||
1563 | |||
1564 | /* Disable TLB broadcasting. */ | ||
1565 | scr0 |= (sCR0_VMIDPNE | sCR0_PTM); | ||
1566 | |||
1567 | /* Enable client access, but bypass when no mapping is found */ | ||
1568 | scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG); | ||
1569 | |||
1570 | /* Disable forced broadcasting */ | ||
1571 | scr0 &= ~sCR0_FB; | ||
1572 | |||
1573 | /* Don't upgrade barriers */ | ||
1574 | scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); | ||
1575 | |||
1576 | /* Push the button */ | ||
1577 | arm_smmu_tlb_sync(smmu); | ||
1578 | writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0); | ||
1579 | } | ||
1580 | |||
1581 | static int arm_smmu_id_size_to_bits(int size) | ||
1582 | { | ||
1583 | switch (size) { | ||
1584 | case 0: | ||
1585 | return 32; | ||
1586 | case 1: | ||
1587 | return 36; | ||
1588 | case 2: | ||
1589 | return 40; | ||
1590 | case 3: | ||
1591 | return 42; | ||
1592 | case 4: | ||
1593 | return 44; | ||
1594 | case 5: | ||
1595 | default: | ||
1596 | return 48; | ||
1597 | } | ||
1598 | } | ||
1599 | |||
1600 | static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | ||
1601 | { | ||
1602 | unsigned long size; | ||
1603 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1604 | u32 id; | ||
1605 | |||
1606 | dev_notice(smmu->dev, "probing hardware configuration...\n"); | ||
1607 | |||
1608 | /* Primecell ID */ | ||
1609 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2); | ||
1610 | smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1; | ||
1611 | dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); | ||
1612 | |||
1613 | /* ID0 */ | ||
1614 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); | ||
1615 | #ifndef CONFIG_64BIT | ||
1616 | if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { | ||
1617 | dev_err(smmu->dev, "\tno v7 descriptor support!\n"); | ||
1618 | return -ENODEV; | ||
1619 | } | ||
1620 | #endif | ||
1621 | if (id & ID0_S1TS) { | ||
1622 | smmu->features |= ARM_SMMU_FEAT_TRANS_S1; | ||
1623 | dev_notice(smmu->dev, "\tstage 1 translation\n"); | ||
1624 | } | ||
1625 | |||
1626 | if (id & ID0_S2TS) { | ||
1627 | smmu->features |= ARM_SMMU_FEAT_TRANS_S2; | ||
1628 | dev_notice(smmu->dev, "\tstage 2 translation\n"); | ||
1629 | } | ||
1630 | |||
1631 | if (id & ID0_NTS) { | ||
1632 | smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; | ||
1633 | dev_notice(smmu->dev, "\tnested translation\n"); | ||
1634 | } | ||
1635 | |||
1636 | if (!(smmu->features & | ||
1637 | (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 | | ||
1638 | ARM_SMMU_FEAT_TRANS_NESTED))) { | ||
1639 | dev_err(smmu->dev, "\tno translation support!\n"); | ||
1640 | return -ENODEV; | ||
1641 | } | ||
1642 | |||
1643 | if (id & ID0_CTTW) { | ||
1644 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; | ||
1645 | dev_notice(smmu->dev, "\tcoherent table walk\n"); | ||
1646 | } | ||
1647 | |||
1648 | if (id & ID0_SMS) { | ||
1649 | u32 smr, sid, mask; | ||
1650 | |||
1651 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | ||
1652 | smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & | ||
1653 | ID0_NUMSMRG_MASK; | ||
1654 | if (smmu->num_mapping_groups == 0) { | ||
1655 | dev_err(smmu->dev, | ||
1656 | "stream-matching supported, but no SMRs present!\n"); | ||
1657 | return -ENODEV; | ||
1658 | } | ||
1659 | |||
1660 | smr = SMR_MASK_MASK << SMR_MASK_SHIFT; | ||
1661 | smr |= (SMR_ID_MASK << SMR_ID_SHIFT); | ||
1662 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1663 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1664 | |||
1665 | mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; | ||
1666 | sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; | ||
1667 | if ((mask & sid) != sid) { | ||
1668 | dev_err(smmu->dev, | ||
1669 | "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", | ||
1670 | mask, sid); | ||
1671 | return -ENODEV; | ||
1672 | } | ||
1673 | |||
1674 | dev_notice(smmu->dev, | ||
1675 | "\tstream matching with %u register groups, mask 0x%x", | ||
1676 | smmu->num_mapping_groups, mask); | ||
1677 | } | ||
1678 | |||
1679 | /* ID1 */ | ||
1680 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); | ||
1681 | smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; | ||
1682 | |||
1683 | /* Check that we ioremapped enough */ | ||
1684 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | ||
1685 | size *= (smmu->pagesize << 1); | ||
1686 | if (smmu->size < size) | ||
1687 | dev_warn(smmu->dev, | ||
1688 | "device is 0x%lx bytes but only mapped 0x%lx!\n", | ||
1689 | size, smmu->size); | ||
1690 | |||
1691 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & | ||
1692 | ID1_NUMS2CB_MASK; | ||
1693 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; | ||
1694 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { | ||
1695 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); | ||
1696 | return -ENODEV; | ||
1697 | } | ||
1698 | dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", | ||
1699 | smmu->num_context_banks, smmu->num_s2_context_banks); | ||
1700 | |||
1701 | /* ID2 */ | ||
1702 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); | ||
1703 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); | ||
1704 | |||
1705 | /* | ||
1706 | * Stage-1 output limited by stage-2 input size due to pgd | ||
1707 | * allocation (PTRS_PER_PGD). | ||
1708 | */ | ||
1709 | #ifdef CONFIG_64BIT | ||
1710 | /* Current maximum output size of 39 bits */ | ||
1711 | smmu->s1_output_size = min(39UL, size); | ||
1712 | #else | ||
1713 | smmu->s1_output_size = min(32UL, size); | ||
1714 | #endif | ||
1715 | |||
1716 | /* The stage-2 output mask is also applied for bypass */ | ||
1717 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | ||
1718 | smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size); | ||
1719 | |||
1720 | if (smmu->version == 1) { | ||
1721 | smmu->input_size = 32; | ||
1722 | } else { | ||
1723 | #ifdef CONFIG_64BIT | ||
1724 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; | ||
1725 | size = min(39, arm_smmu_id_size_to_bits(size)); | ||
1726 | #else | ||
1727 | size = 32; | ||
1728 | #endif | ||
1729 | smmu->input_size = size; | ||
1730 | |||
1731 | if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || | ||
1732 | (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || | ||
1733 | (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { | ||
1734 | dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", | ||
1735 | PAGE_SIZE); | ||
1736 | return -ENODEV; | ||
1737 | } | ||
1738 | } | ||
1739 | |||
1740 | dev_notice(smmu->dev, | ||
1741 | "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", | ||
1742 | smmu->input_size, smmu->s1_output_size, smmu->s2_output_size); | ||
1743 | return 0; | ||
1744 | } | ||
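
The SMR probe above exploits the fact that unimplemented register bits read back as zero: writing all-ones to the ID and MASK fields and reading the register back shows how wide each field really is, and the (mask & sid) != sid test then rejects hardware whose MASK field cannot cover every implemented ID bit. A small standalone sketch of that check, with an assumed field layout and a made-up readback value:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_SMR_ID_SHIFT    0       /* assumed field layout for illustration */
    #define EX_SMR_ID_MASK     0x7fff
    #define EX_SMR_MASK_SHIFT  16
    #define EX_SMR_MASK_MASK   0x7fff

    int main(void)
    {
            /* Pretend the readback shows 15 implemented ID bits but only 10 MASK bits */
            uint32_t readback = (0x03ffu << EX_SMR_MASK_SHIFT) |
                                (0x7fffu << EX_SMR_ID_SHIFT);

            uint32_t mask = (readback >> EX_SMR_MASK_SHIFT) & EX_SMR_MASK_MASK;
            uint32_t sid  = (readback >> EX_SMR_ID_SHIFT) & EX_SMR_ID_MASK;

            if ((mask & sid) != sid)
                    printf("SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
                           mask, sid);
            else
                    printf("stream matching usable, mask 0x%x\n", mask);
            return 0;
    }
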
1745 | |||
1746 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) | ||
1747 | { | ||
1748 | struct resource *res; | ||
1749 | struct arm_smmu_device *smmu; | ||
1750 | struct device_node *dev_node; | ||
1751 | struct device *dev = &pdev->dev; | ||
1752 | struct rb_node *node; | ||
1753 | struct of_phandle_args masterspec; | ||
1754 | int num_irqs, i, err; | ||
1755 | |||
1756 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | ||
1757 | if (!smmu) { | ||
1758 | dev_err(dev, "failed to allocate arm_smmu_device\n"); | ||
1759 | return -ENOMEM; | ||
1760 | } | ||
1761 | smmu->dev = dev; | ||
1762 | |||
1763 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1764 | if (!res) { | ||
1765 | dev_err(dev, "missing base address/size\n"); | ||
1766 | return -ENODEV; | ||
1767 | } | ||
1768 | |||
1769 | smmu->size = resource_size(res); | ||
1770 | smmu->base = devm_request_and_ioremap(dev, res); | ||
1771 | if (!smmu->base) | ||
1772 | return -EADDRNOTAVAIL; | ||
1773 | |||
1774 | if (of_property_read_u32(dev->of_node, "#global-interrupts", | ||
1775 | &smmu->num_global_irqs)) { | ||
1776 | dev_err(dev, "missing #global-interrupts property\n"); | ||
1777 | return -ENODEV; | ||
1778 | } | ||
1779 | |||
1780 | num_irqs = 0; | ||
1781 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { | ||
1782 | num_irqs++; | ||
1783 | if (num_irqs > smmu->num_global_irqs) | ||
1784 | smmu->num_context_irqs++; | ||
1785 | } | ||
1786 | |||
1787 | if (num_irqs < smmu->num_global_irqs) { | ||
1788 | dev_warn(dev, "found %d interrupts but expected at least %d\n", | ||
1789 | num_irqs, smmu->num_global_irqs); | ||
1790 | smmu->num_global_irqs = num_irqs; | ||
1791 | } | ||
1792 | smmu->num_context_irqs = num_irqs - smmu->num_global_irqs; | ||
1793 | |||
1794 | smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, | ||
1795 | GFP_KERNEL); | ||
1796 | if (!smmu->irqs) { | ||
1797 | dev_err(dev, "failed to allocate %d irqs\n", num_irqs); | ||
1798 | return -ENOMEM; | ||
1799 | } | ||
1800 | |||
1801 | for (i = 0; i < num_irqs; ++i) { | ||
1802 | int irq = platform_get_irq(pdev, i); | ||
1803 | if (irq < 0) { | ||
1804 | dev_err(dev, "failed to get irq index %d\n", i); | ||
1805 | return -ENODEV; | ||
1806 | } | ||
1807 | smmu->irqs[i] = irq; | ||
1808 | } | ||
1809 | |||
1810 | i = 0; | ||
1811 | smmu->masters = RB_ROOT; | ||
1812 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", | ||
1813 | "#stream-id-cells", i, | ||
1814 | &masterspec)) { | ||
1815 | err = register_smmu_master(smmu, dev, &masterspec); | ||
1816 | if (err) { | ||
1817 | dev_err(dev, "failed to add master %s\n", | ||
1818 | masterspec.np->name); | ||
1819 | goto out_put_masters; | ||
1820 | } | ||
1821 | |||
1822 | i++; | ||
1823 | } | ||
1824 | dev_notice(dev, "registered %d master devices\n", i); | ||
1825 | |||
1826 | if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0))) | ||
1827 | smmu->parent_of_node = dev_node; | ||
1828 | |||
1829 | err = arm_smmu_device_cfg_probe(smmu); | ||
1830 | if (err) | ||
1831 | goto out_put_parent; | ||
1832 | |||
1833 | if (smmu->version > 1 && | ||
1834 | smmu->num_context_banks != smmu->num_context_irqs) { | ||
1835 | dev_err(dev, | ||
1836 | "found only %d context interrupt(s) but %d required\n", | ||
1837 | smmu->num_context_irqs, smmu->num_context_banks); | ||
1838 | goto out_put_parent; | ||
1839 | } | ||
1840 | |||
1841 | arm_smmu_device_reset(smmu); | ||
1842 | |||
1843 | for (i = 0; i < smmu->num_global_irqs; ++i) { | ||
1844 | err = request_irq(smmu->irqs[i], | ||
1845 | arm_smmu_global_fault, | ||
1846 | IRQF_SHARED, | ||
1847 | "arm-smmu global fault", | ||
1848 | smmu); | ||
1849 | if (err) { | ||
1850 | dev_err(dev, "failed to request global IRQ %d (%u)\n", | ||
1851 | i, smmu->irqs[i]); | ||
1852 | goto out_free_irqs; | ||
1853 | } | ||
1854 | } | ||
1855 | |||
1856 | INIT_LIST_HEAD(&smmu->list); | ||
1857 | spin_lock(&arm_smmu_devices_lock); | ||
1858 | list_add(&smmu->list, &arm_smmu_devices); | ||
1859 | spin_unlock(&arm_smmu_devices_lock); | ||
1860 | return 0; | ||
1861 | |||
1862 | out_free_irqs: | ||
1863 | while (i--) | ||
1864 | free_irq(smmu->irqs[i], smmu); | ||
1865 | |||
1866 | out_put_parent: | ||
1867 | if (smmu->parent_of_node) | ||
1868 | of_node_put(smmu->parent_of_node); | ||
1869 | |||
1870 | out_put_masters: | ||
1871 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | ||
1872 | struct arm_smmu_master *master; | ||
1873 | master = container_of(node, struct arm_smmu_master, node); | ||
1874 | of_node_put(master->of_node); | ||
1875 | } | ||
1876 | |||
1877 | return err; | ||
1878 | } | ||
1879 | |||
1880 | static int arm_smmu_device_remove(struct platform_device *pdev) | ||
1881 | { | ||
1882 | int i; | ||
1883 | struct device *dev = &pdev->dev; | ||
1884 | struct arm_smmu_device *curr, *smmu = NULL; | ||
1885 | struct rb_node *node; | ||
1886 | |||
1887 | spin_lock(&arm_smmu_devices_lock); | ||
1888 | list_for_each_entry(curr, &arm_smmu_devices, list) { | ||
1889 | if (curr->dev == dev) { | ||
1890 | smmu = curr; | ||
1891 | list_del(&smmu->list); | ||
1892 | break; | ||
1893 | } | ||
1894 | } | ||
1895 | spin_unlock(&arm_smmu_devices_lock); | ||
1896 | |||
1897 | if (!smmu) | ||
1898 | return -ENODEV; | ||
1899 | |||
1900 | if (smmu->parent_of_node) | ||
1901 | of_node_put(smmu->parent_of_node); | ||
1902 | |||
1903 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | ||
1904 | struct arm_smmu_master *master; | ||
1905 | master = container_of(node, struct arm_smmu_master, node); | ||
1906 | of_node_put(master->of_node); | ||
1907 | } | ||
1908 | |||
1909 | if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS)) | ||
1910 | dev_err(dev, "removing device with active domains!\n"); | ||
1911 | |||
1912 | for (i = 0; i < smmu->num_global_irqs; ++i) | ||
1913 | free_irq(smmu->irqs[i], smmu); | ||
1914 | |||
1915 | /* Turn the thing off */ | ||
1916 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0); | ||
1917 | return 0; | ||
1918 | } | ||
1919 | |||
1920 | #ifdef CONFIG_OF | ||
1921 | static struct of_device_id arm_smmu_of_match[] = { | ||
1922 | { .compatible = "arm,smmu-v1", }, | ||
1923 | { .compatible = "arm,smmu-v2", }, | ||
1924 | { .compatible = "arm,mmu-400", }, | ||
1925 | { .compatible = "arm,mmu-500", }, | ||
1926 | { }, | ||
1927 | }; | ||
1928 | MODULE_DEVICE_TABLE(of, arm_smmu_of_match); | ||
1929 | #endif | ||
1930 | |||
1931 | static struct platform_driver arm_smmu_driver = { | ||
1932 | .driver = { | ||
1933 | .owner = THIS_MODULE, | ||
1934 | .name = "arm-smmu", | ||
1935 | .of_match_table = of_match_ptr(arm_smmu_of_match), | ||
1936 | }, | ||
1937 | .probe = arm_smmu_device_dt_probe, | ||
1938 | .remove = arm_smmu_device_remove, | ||
1939 | }; | ||
1940 | |||
1941 | static int __init arm_smmu_init(void) | ||
1942 | { | ||
1943 | int ret; | ||
1944 | |||
1945 | ret = platform_driver_register(&arm_smmu_driver); | ||
1946 | if (ret) | ||
1947 | return ret; | ||
1948 | |||
1949 | /* Oh, for a proper bus abstraction */ | ||
1950 | if (!iommu_present(&platform_bus_type)) | ||
1951 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
1952 | |||
1953 | if (!iommu_present(&amba_bustype)) | ||
1954 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
1955 | |||
1956 | return 0; | ||
1957 | } | ||
1958 | |||
1959 | static void __exit arm_smmu_exit(void) | ||
1960 | { | ||
1961 | return platform_driver_unregister(&arm_smmu_driver); | ||
1962 | } | ||
1963 | |||
1964 | module_init(arm_smmu_init); | ||
1965 | module_exit(arm_smmu_exit); | ||
1966 | |||
1967 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | ||
1968 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | ||
1969 | MODULE_LICENSE("GPL v2"); | ||
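
The VMID, context-bank and SMR allocations in this driver all go through a pair of small bitmap helpers, __arm_smmu_alloc_bitmap() and __arm_smmu_free_bitmap(), whose definitions fall outside the hunk shown here. A sketch of what such helpers typically look like on top of the kernel's generic bitmap primitives; the actual definitions earlier in the file may differ in detail:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    /* Find a free index in [start, end), claim it atomically and return it. */
    static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
    {
            int idx;

            do {
                    idx = find_next_zero_bit(map, end, start);
                    if (idx == end)
                            return -ENOSPC;
            } while (test_and_set_bit(idx, map));

            return idx;
    }

    /* Release a previously claimed index. */
    static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
    {
            clear_bit(idx, map);
    }

Callers such as arm_smmu_init_domain_context() then treat a negative return as an allocation failure via IS_ERR_VALUE() and unwind in reverse order, as shown above.
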
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index a7967ceb79e6..785675a56a10 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -309,6 +309,7 @@ parse_dmar_table(void) | |||
309 | struct acpi_table_dmar *dmar; | 309 | struct acpi_table_dmar *dmar; |
310 | struct acpi_dmar_header *entry_header; | 310 | struct acpi_dmar_header *entry_header; |
311 | int ret = 0; | 311 | int ret = 0; |
312 | int drhd_count = 0; | ||
312 | 313 | ||
313 | /* | 314 | /* |
314 | * Do it again, earlier dmar_tbl mapping could be mapped with | 315 | * Do it again, earlier dmar_tbl mapping could be mapped with |
@@ -347,6 +348,7 @@ parse_dmar_table(void) | |||
347 | 348 | ||
348 | switch (entry_header->type) { | 349 | switch (entry_header->type) { |
349 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: | 350 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
351 | drhd_count++; | ||
350 | ret = dmar_parse_one_drhd(entry_header); | 352 | ret = dmar_parse_one_drhd(entry_header); |
351 | break; | 353 | break; |
352 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 354 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
@@ -371,6 +373,8 @@ parse_dmar_table(void) | |||
371 | 373 | ||
372 | entry_header = ((void *)entry_header + entry_header->length); | 374 | entry_header = ((void *)entry_header + entry_header->length); |
373 | } | 375 | } |
376 | if (drhd_count == 0) | ||
377 | pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); | ||
374 | return ret; | 378 | return ret; |
375 | } | 379 | } |
376 | 380 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b4f0e28dfa41..eec0d3e04bf5 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev) | |||
4182 | 4182 | ||
4183 | /* | 4183 | /* |
4184 | * If it's a multifunction device that does not support our | 4184 | * If it's a multifunction device that does not support our |
4185 | * required ACS flags, add to the same group as function 0. | 4185 | * required ACS flags, add to the same group as lowest numbered |
4186 | * function that also does not suport the required ACS flags. | ||
4186 | */ | 4187 | */ |
4187 | if (dma_pdev->multifunction && | 4188 | if (dma_pdev->multifunction && |
4188 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) | 4189 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { |
4189 | swap_pci_ref(&dma_pdev, | 4190 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); |
4190 | pci_get_slot(dma_pdev->bus, | 4191 | |
4191 | PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), | 4192 | for (i = 0; i < 8; i++) { |
4192 | 0))); | 4193 | struct pci_dev *tmp; |
4194 | |||
4195 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
4196 | if (!tmp) | ||
4197 | continue; | ||
4198 | |||
4199 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
4200 | swap_pci_ref(&dma_pdev, tmp); | ||
4201 | break; | ||
4202 | } | ||
4203 | pci_dev_put(tmp); | ||
4204 | } | ||
4205 | } | ||
4193 | 4206 | ||
4194 | /* | 4207 | /* |
4195 | * Devices on the root bus go through the iommu. If that's not us, | 4208 | * Devices on the root bus go through the iommu. If that's not us, |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 5b19b2d6ec2d..f71673dbb23d 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -664,8 +664,7 @@ error: | |||
664 | */ | 664 | */ |
665 | 665 | ||
666 | if (x2apic_present) | 666 | if (x2apic_present) |
667 | WARN(1, KERN_WARNING | 667 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); |
668 | "Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); | ||
669 | 668 | ||
670 | return -1; | 669 | return -1; |
671 | } | 670 | } |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d8f98b14e2fe..fbe9ca734f8f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain, | |||
754 | } | 754 | } |
755 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); | 755 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); |
756 | 756 | ||
757 | static size_t iommu_pgsize(struct iommu_domain *domain, | ||
758 | unsigned long addr_merge, size_t size) | ||
759 | { | ||
760 | unsigned int pgsize_idx; | ||
761 | size_t pgsize; | ||
762 | |||
763 | /* Max page size that still fits into 'size' */ | ||
764 | pgsize_idx = __fls(size); | ||
765 | |||
766 | /* need to consider alignment requirements ? */ | ||
767 | if (likely(addr_merge)) { | ||
768 | /* Max page size allowed by address */ | ||
769 | unsigned int align_pgsize_idx = __ffs(addr_merge); | ||
770 | pgsize_idx = min(pgsize_idx, align_pgsize_idx); | ||
771 | } | ||
772 | |||
773 | /* build a mask of acceptable page sizes */ | ||
774 | pgsize = (1UL << (pgsize_idx + 1)) - 1; | ||
775 | |||
776 | /* throw away page sizes not supported by the hardware */ | ||
777 | pgsize &= domain->ops->pgsize_bitmap; | ||
778 | |||
779 | /* make sure we're still sane */ | ||
780 | BUG_ON(!pgsize); | ||
781 | |||
782 | /* pick the biggest page */ | ||
783 | pgsize_idx = __fls(pgsize); | ||
784 | pgsize = 1UL << pgsize_idx; | ||
785 | |||
786 | return pgsize; | ||
787 | } | ||
788 | |||
757 | int iommu_map(struct iommu_domain *domain, unsigned long iova, | 789 | int iommu_map(struct iommu_domain *domain, unsigned long iova, |
758 | phys_addr_t paddr, size_t size, int prot) | 790 | phys_addr_t paddr, size_t size, int prot) |
759 | { | 791 | { |
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
775 | * size of the smallest page supported by the hardware | 807 | * size of the smallest page supported by the hardware |
776 | */ | 808 | */ |
777 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { | 809 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { |
778 | pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz " | 810 | pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n", |
779 | "0x%x\n", iova, (unsigned long)paddr, | 811 | iova, &paddr, size, min_pagesz); |
780 | (unsigned long)size, min_pagesz); | ||
781 | return -EINVAL; | 812 | return -EINVAL; |
782 | } | 813 | } |
783 | 814 | ||
784 | pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova, | 815 | pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size); |
785 | (unsigned long)paddr, (unsigned long)size); | ||
786 | 816 | ||
787 | while (size) { | 817 | while (size) { |
788 | unsigned long pgsize, addr_merge = iova | paddr; | 818 | size_t pgsize = iommu_pgsize(domain, iova | paddr, size); |
789 | unsigned int pgsize_idx; | ||
790 | |||
791 | /* Max page size that still fits into 'size' */ | ||
792 | pgsize_idx = __fls(size); | ||
793 | |||
794 | /* need to consider alignment requirements ? */ | ||
795 | if (likely(addr_merge)) { | ||
796 | /* Max page size allowed by both iova and paddr */ | ||
797 | unsigned int align_pgsize_idx = __ffs(addr_merge); | ||
798 | |||
799 | pgsize_idx = min(pgsize_idx, align_pgsize_idx); | ||
800 | } | ||
801 | |||
802 | /* build a mask of acceptable page sizes */ | ||
803 | pgsize = (1UL << (pgsize_idx + 1)) - 1; | ||
804 | |||
805 | /* throw away page sizes not supported by the hardware */ | ||
806 | pgsize &= domain->ops->pgsize_bitmap; | ||
807 | |||
808 | /* make sure we're still sane */ | ||
809 | BUG_ON(!pgsize); | ||
810 | |||
811 | /* pick the biggest page */ | ||
812 | pgsize_idx = __fls(pgsize); | ||
813 | pgsize = 1UL << pgsize_idx; | ||
814 | 819 | ||
815 | pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova, | 820 | pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n", |
816 | (unsigned long)paddr, pgsize); | 821 | iova, &paddr, pgsize); |
817 | 822 | ||
818 | ret = domain->ops->map(domain, iova, paddr, pgsize, prot); | 823 | ret = domain->ops->map(domain, iova, paddr, pgsize, prot); |
819 | if (ret) | 824 | if (ret) |
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) | |||
850 | * by the hardware | 855 | * by the hardware |
851 | */ | 856 | */ |
852 | if (!IS_ALIGNED(iova | size, min_pagesz)) { | 857 | if (!IS_ALIGNED(iova | size, min_pagesz)) { |
853 | pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n", | 858 | pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", |
854 | iova, (unsigned long)size, min_pagesz); | 859 | iova, size, min_pagesz); |
855 | return -EINVAL; | 860 | return -EINVAL; |
856 | } | 861 | } |
857 | 862 | ||
858 | pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova, | 863 | pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); |
859 | (unsigned long)size); | ||
860 | 864 | ||
861 | /* | 865 | /* |
862 | * Keep iterating until we either unmap 'size' bytes (or more) | 866 | * Keep iterating until we either unmap 'size' bytes (or more) |
863 | * or we hit an area that isn't mapped. | 867 | * or we hit an area that isn't mapped. |
864 | */ | 868 | */ |
865 | while (unmapped < size) { | 869 | while (unmapped < size) { |
866 | size_t left = size - unmapped; | 870 | size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); |
867 | 871 | ||
868 | unmapped_page = domain->ops->unmap(domain, iova, left); | 872 | unmapped_page = domain->ops->unmap(domain, iova, pgsize); |
869 | if (!unmapped_page) | 873 | if (!unmapped_page) |
870 | break; | 874 | break; |
871 | 875 | ||
872 | pr_debug("unmapped: iova 0x%lx size %lx\n", iova, | 876 | pr_debug("unmapped: iova 0x%lx size 0x%zx\n", |
873 | (unsigned long)unmapped_page); | 877 | iova, unmapped_page); |
874 | 878 | ||
875 | iova += unmapped_page; | 879 | iova += unmapped_page; |
876 | unmapped += unmapped_page; | 880 | unmapped += unmapped_page; |
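
The new iommu_pgsize() helper picks the largest page size that (a) still fits in the remaining length, (b) matches the alignment of the merged iova|paddr value, and (c) is supported by the driver's pgsize_bitmap. A userspace rework of the same arithmetic, assuming a bitmap of 4K | 2M | 1G pages and using __builtin_clzl()/__builtin_ctzl() in place of the kernel's __fls()/__ffs():

    #include <stdio.h>
    #include <stddef.h>

    static size_t pick_pgsize(unsigned long pgsize_bitmap,
                              unsigned long addr_merge, size_t size)
    {
            /* Max page size that still fits into 'size' (the __fls() step) */
            unsigned int pgsize_idx = 8 * sizeof(unsigned long) - 1
                                      - __builtin_clzl(size);

            /* Max page size allowed by the address alignment (the __ffs() step) */
            if (addr_merge) {
                    unsigned int align_idx = __builtin_ctzl(addr_merge);
                    if (align_idx < pgsize_idx)
                            pgsize_idx = align_idx;
            }

            /* Acceptable sizes, restricted to what the IOMMU supports */
            unsigned long pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

            /* Pick the biggest page size that survived (assumes one did) */
            pgsize_idx = 8 * sizeof(unsigned long) - 1 - __builtin_clzl(pgsize);
            return (size_t)1 << pgsize_idx;
    }

    int main(void)
    {
            unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

            /* 2M-aligned iova and paddr with 3M to go: a 2M page is chosen */
            printf("0x%zx\n", pick_pgsize(bitmap, 0x40200000UL | 0x80200000UL,
                                          0x300000));
            /* Only 4K alignment available: falls back to a 4K page */
            printf("0x%zx\n", pick_pgsize(bitmap, 0x40201000UL, 0x300000));
            return 0;
    }

Both iommu_map() and the reworked iommu_unmap() loop now call this helper once per iteration, so mapping and unmapping a well-aligned region use the same page-size selection logic.
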
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e02e5d71745b..0ba3766240d5 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
833 | iopgd = iopgd_offset(obj, da); | 833 | iopgd = iopgd_offset(obj, da); |
834 | 834 | ||
835 | if (!iopgd_is_table(*iopgd)) { | 835 | if (!iopgd_is_table(*iopgd)) { |
836 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " | 836 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n", |
837 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); | 837 | obj->name, errs, da, iopgd, *iopgd); |
838 | return IRQ_NONE; | 838 | return IRQ_NONE; |
839 | } | 839 | } |
840 | 840 | ||
841 | iopte = iopte_offset(iopgd, da); | 841 | iopte = iopte_offset(iopgd, da); |
842 | 842 | ||
843 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " | 843 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n", |
844 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, | 844 | obj->name, errs, da, iopgd, *iopgd, iopte, *iopte); |
845 | iopte, *iopte); | ||
846 | 845 | ||
847 | return IRQ_NONE; | 846 | return IRQ_NONE; |
848 | } | 847 | } |
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | |||
1235 | else if (iopte_is_large(*pte)) | 1234 | else if (iopte_is_large(*pte)) |
1236 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); | 1235 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); |
1237 | else | 1236 | else |
1238 | dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da); | 1237 | dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte, |
1238 | (unsigned long long)da); | ||
1239 | } else { | 1239 | } else { |
1240 | if (iopgd_is_section(*pgd)) | 1240 | if (iopgd_is_section(*pgd)) |
1241 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); | 1241 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); |
1242 | else if (iopgd_is_super(*pgd)) | 1242 | else if (iopgd_is_super(*pgd)) |
1243 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); | 1243 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); |
1244 | else | 1244 | else |
1245 | dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da); | 1245 | dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd, |
1246 | (unsigned long long)da); | ||
1246 | } | 1247 | } |
1247 | 1248 | ||
1248 | return ret; | 1249 | return ret; |
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h index cd4ae9e5b0c6..f4003d568a92 100644 --- a/drivers/iommu/omap-iopgtable.h +++ b/drivers/iommu/omap-iopgtable.h | |||
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) | |||
95 | #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) | 95 | #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) |
96 | 96 | ||
97 | #define to_iommu(dev) \ | 97 | #define to_iommu(dev) \ |
98 | (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) | 98 | ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) |
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c index 46d875690739..d14725984153 100644 --- a/drivers/iommu/omap-iovmm.c +++ b/drivers/iommu/omap-iovmm.c | |||
@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | if (i && sg->offset) { | 104 | if (i && sg->offset) { |
105 | pr_err("%s: sg[%d] offset not allowed in internal " | 105 | pr_err("%s: sg[%d] offset not allowed in internal entries\n", |
106 | "entries\n", __func__, i); | 106 | __func__, i); |
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
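
The omap-iovmm hunk (and the first omap-iommu hunk further up) is the same cleanup: a user-visible string that had been split across two literals is joined onto one line, even though it exceeds 80 columns, so the whole message can be found with a single grep. This follows the coding-style rule about never breaking printk strings, and checkpatch warns about the split form. Roughly:

    /* Before: grepping for the full message finds nothing. */
    pr_err("%s: sg[%d] offset not allowed in internal "
           "entries\n", __func__, i);

    /* After: one literal, greppable as a whole. */
    pr_err("%s: sg[%d] offset not allowed in internal entries\n",
           __func__, i);
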
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 42d670a468f8..3d2a90a62649 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -902,7 +902,6 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) | |||
902 | struct scatterlist sg; | 902 | struct scatterlist sg; |
903 | struct virtio_net_ctrl_mq s; | 903 | struct virtio_net_ctrl_mq s; |
904 | struct net_device *dev = vi->dev; | 904 | struct net_device *dev = vi->dev; |
905 | int i; | ||
906 | 905 | ||
907 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | 906 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
908 | return 0; | 907 | return 0; |
@@ -916,10 +915,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) | |||
916 | queue_pairs); | 915 | queue_pairs); |
917 | return -EINVAL; | 916 | return -EINVAL; |
918 | } else { | 917 | } else { |
919 | for (i = vi->curr_queue_pairs; i < queue_pairs; i++) | ||
920 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | ||
921 | schedule_delayed_work(&vi->refill, 0); | ||
922 | vi->curr_queue_pairs = queue_pairs; | 918 | vi->curr_queue_pairs = queue_pairs; |
919 | schedule_delayed_work(&vi->refill, 0); | ||
923 | } | 920 | } |
924 | 921 | ||
925 | return 0; | 922 | return 0; |
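
In virtnet_set_queues() the per-queue try_fill_recv() loop is dropped; the function now just records the new queue count and kicks the refill work. The worker already runs in process context, walks every active receive queue, and reschedules itself if buffers could not be allocated, so deferring to it avoids duplicating that loop and its retry handling in the control path. A rough sketch of the pattern with simplified, hypothetical names (fill_one_queue() stands in for try_fill_recv()):

    #include <linux/gfp.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct rxq {
            int placeholder;                /* receive ring state elided */
    };

    bool fill_one_queue(struct rxq *rq, gfp_t gfp);  /* assumed helper */

    struct nic {
            struct delayed_work refill;     /* INIT_DELAYED_WORK() at probe */
            struct rxq *rq;
            u16 curr_queue_pairs;
    };

    /* Runs in process context; may sleep in the allocator and retry later. */
    static void refill_work(struct work_struct *work)
    {
            struct nic *n = container_of(work, struct nic, refill.work);
            bool again = false;
            int i;

            for (i = 0; i < n->curr_queue_pairs; i++)
                    if (!fill_one_queue(&n->rq[i], GFP_KERNEL))
                            again = true;

            if (again)
                    schedule_delayed_work(&n->refill, HZ / 2);
    }

    /* Control path: record the new count and let the worker do the filling. */
    static void set_queues(struct nic *n, u16 queue_pairs)
    {
            n->curr_queue_pairs = queue_pairs;
            schedule_delayed_work(&n->refill, 0);
    }
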
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 259ad282ae5d..c488da5db7c7 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
@@ -76,6 +76,7 @@ struct vfio_group { | |||
76 | struct notifier_block nb; | 76 | struct notifier_block nb; |
77 | struct list_head vfio_next; | 77 | struct list_head vfio_next; |
78 | struct list_head container_next; | 78 | struct list_head container_next; |
79 | atomic_t opened; | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | struct vfio_device { | 82 | struct vfio_device { |
@@ -206,6 +207,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) | |||
206 | INIT_LIST_HEAD(&group->device_list); | 207 | INIT_LIST_HEAD(&group->device_list); |
207 | mutex_init(&group->device_lock); | 208 | mutex_init(&group->device_lock); |
208 | atomic_set(&group->container_users, 0); | 209 | atomic_set(&group->container_users, 0); |
210 | atomic_set(&group->opened, 0); | ||
209 | group->iommu_group = iommu_group; | 211 | group->iommu_group = iommu_group; |
210 | 212 | ||
211 | group->nb.notifier_call = vfio_iommu_group_notifier; | 213 | group->nb.notifier_call = vfio_iommu_group_notifier; |
@@ -1236,12 +1238,22 @@ static long vfio_group_fops_compat_ioctl(struct file *filep, | |||
1236 | static int vfio_group_fops_open(struct inode *inode, struct file *filep) | 1238 | static int vfio_group_fops_open(struct inode *inode, struct file *filep) |
1237 | { | 1239 | { |
1238 | struct vfio_group *group; | 1240 | struct vfio_group *group; |
1241 | int opened; | ||
1239 | 1242 | ||
1240 | group = vfio_group_get_from_minor(iminor(inode)); | 1243 | group = vfio_group_get_from_minor(iminor(inode)); |
1241 | if (!group) | 1244 | if (!group) |
1242 | return -ENODEV; | 1245 | return -ENODEV; |
1243 | 1246 | ||
1247 | /* Do we need multiple instances of the group open? Seems not. */ | ||
1248 | opened = atomic_cmpxchg(&group->opened, 0, 1); | ||
1249 | if (opened) { | ||
1250 | vfio_group_put(group); | ||
1251 | return -EBUSY; | ||
1252 | } | ||
1253 | |||
1254 | /* Is something still in use from a previous open? */ | ||
1244 | if (group->container) { | 1255 | if (group->container) { |
1256 | atomic_dec(&group->opened); | ||
1245 | vfio_group_put(group); | 1257 | vfio_group_put(group); |
1246 | return -EBUSY; | 1258 | return -EBUSY; |
1247 | } | 1259 | } |
@@ -1259,6 +1271,8 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) | |||
1259 | 1271 | ||
1260 | vfio_group_try_dissolve_container(group); | 1272 | vfio_group_try_dissolve_container(group); |
1261 | 1273 | ||
1274 | atomic_dec(&group->opened); | ||
1275 | |||
1262 | vfio_group_put(group); | 1276 | vfio_group_put(group); |
1263 | 1277 | ||
1264 | return 0; | 1278 | return 0; |
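
The vfio change adds an 'opened' counter so the group character device can only be held open once: the open path claims it with an atomic compare-and-exchange and every failure or release path gives it back. A minimal sketch of the idiom, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    struct one_shot {
            atomic_t opened;        /* 0 = free, 1 = held by an open file */
    };

    static int one_shot_claim(struct one_shot *s)
    {
            /* atomic_cmpxchg() returns the old value: non-zero means we lost the race. */
            if (atomic_cmpxchg(&s->opened, 0, 1))
                    return -EBUSY;
            return 0;
    }

    static void one_shot_release(struct one_shot *s)
    {
            atomic_dec(&s->opened);
    }

As in the hunk above, any error taken after a successful claim (here, a stale container left over from a previous open) must drop the count again before returning, or the device stays busy forever.
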
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 6f3fbc48a6c7..a9807dea3887 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/pci.h> /* pci_bus_type */ | 33 | #include <linux/pci.h> /* pci_bus_type */ |
34 | #include <linux/rbtree.h> | ||
34 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
36 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
@@ -47,19 +48,25 @@ module_param_named(allow_unsafe_interrupts, | |||
47 | MODULE_PARM_DESC(allow_unsafe_interrupts, | 48 | MODULE_PARM_DESC(allow_unsafe_interrupts, |
48 | "Enable VFIO IOMMU support for on platforms without interrupt remapping support."); | 49 | "Enable VFIO IOMMU support for on platforms without interrupt remapping support."); |
49 | 50 | ||
51 | static bool disable_hugepages; | ||
52 | module_param_named(disable_hugepages, | ||
53 | disable_hugepages, bool, S_IRUGO | S_IWUSR); | ||
54 | MODULE_PARM_DESC(disable_hugepages, | ||
55 | "Disable VFIO IOMMU support for IOMMU hugepages."); | ||
56 | |||
50 | struct vfio_iommu { | 57 | struct vfio_iommu { |
51 | struct iommu_domain *domain; | 58 | struct iommu_domain *domain; |
52 | struct mutex lock; | 59 | struct mutex lock; |
53 | struct list_head dma_list; | 60 | struct rb_root dma_list; |
54 | struct list_head group_list; | 61 | struct list_head group_list; |
55 | bool cache; | 62 | bool cache; |
56 | }; | 63 | }; |
57 | 64 | ||
58 | struct vfio_dma { | 65 | struct vfio_dma { |
59 | struct list_head next; | 66 | struct rb_node node; |
60 | dma_addr_t iova; /* Device address */ | 67 | dma_addr_t iova; /* Device address */ |
61 | unsigned long vaddr; /* Process virtual addr */ | 68 | unsigned long vaddr; /* Process virtual addr */ |
62 | long npage; /* Number of pages */ | 69 | size_t size; /* Map size (bytes) */ |
63 | int prot; /* IOMMU_READ/WRITE */ | 70 | int prot; /* IOMMU_READ/WRITE */ |
64 | }; | 71 | }; |
65 | 72 | ||
@@ -73,7 +80,48 @@ struct vfio_group { | |||
73 | * into DMA'ble space using the IOMMU | 80 | * into DMA'ble space using the IOMMU |
74 | */ | 81 | */ |
75 | 82 | ||
76 | #define NPAGE_TO_SIZE(npage) ((size_t)(npage) << PAGE_SHIFT) | 83 | static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, |
84 | dma_addr_t start, size_t size) | ||
85 | { | ||
86 | struct rb_node *node = iommu->dma_list.rb_node; | ||
87 | |||
88 | while (node) { | ||
89 | struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); | ||
90 | |||
91 | if (start + size <= dma->iova) | ||
92 | node = node->rb_left; | ||
93 | else if (start >= dma->iova + dma->size) | ||
94 | node = node->rb_right; | ||
95 | else | ||
96 | return dma; | ||
97 | } | ||
98 | |||
99 | return NULL; | ||
100 | } | ||
101 | |||
102 | static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new) | ||
103 | { | ||
104 | struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; | ||
105 | struct vfio_dma *dma; | ||
106 | |||
107 | while (*link) { | ||
108 | parent = *link; | ||
109 | dma = rb_entry(parent, struct vfio_dma, node); | ||
110 | |||
111 | if (new->iova + new->size <= dma->iova) | ||
112 | link = &(*link)->rb_left; | ||
113 | else | ||
114 | link = &(*link)->rb_right; | ||
115 | } | ||
116 | |||
117 | rb_link_node(&new->node, parent, link); | ||
118 | rb_insert_color(&new->node, &iommu->dma_list); | ||
119 | } | ||
120 | |||
121 | static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old) | ||
122 | { | ||
123 | rb_erase(&old->node, &iommu->dma_list); | ||
124 | } | ||
77 | 125 | ||
78 | struct vwork { | 126 | struct vwork { |
79 | struct mm_struct *mm; | 127 | struct mm_struct *mm; |
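
The dma_list moves from a linear list to an rb-tree keyed by IOVA. Because the tracked ranges never overlap one another, the lookup can treat its (start, size) argument as a window: descend left when the window ends at or before an entry, right when it begins at or after the entry's end, and return anything else as an intersection. Callers that want every intersecting entry simply call vfio_find_dma() in a loop and shrink or remove whatever it hands back, which is how the reworked unmap path below consumes it. A condensed illustration of that consuming loop (unpinning and error handling omitted):

    /* Sketch: tear down everything that intersects [iova, iova + size). */
    static void drop_overlaps(struct vfio_iommu *iommu,
                              dma_addr_t iova, size_t size)
    {
            struct vfio_dma *dma;

            while ((dma = vfio_find_dma(iommu, iova, size))) {
                    /* unpin and iommu-unmap dma's pages here, then forget it */
                    vfio_remove_dma(iommu, dma);
                    kfree(dma);
            }
    }
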
@@ -100,8 +148,8 @@ static void vfio_lock_acct(long npage) | |||
100 | struct vwork *vwork; | 148 | struct vwork *vwork; |
101 | struct mm_struct *mm; | 149 | struct mm_struct *mm; |
102 | 150 | ||
103 | if (!current->mm) | 151 | if (!current->mm || !npage) |
104 | return; /* process exited */ | 152 | return; /* process exited or nothing to do */ |
105 | 153 | ||
106 | if (down_write_trylock(¤t->mm->mmap_sem)) { | 154 | if (down_write_trylock(¤t->mm->mmap_sem)) { |
107 | current->mm->locked_vm += npage; | 155 | current->mm->locked_vm += npage; |
@@ -173,33 +221,6 @@ static int put_pfn(unsigned long pfn, int prot) | |||
173 | return 0; | 221 | return 0; |
174 | } | 222 | } |
175 | 223 | ||
176 | /* Unmap DMA region */ | ||
177 | static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova, | ||
178 | long npage, int prot) | ||
179 | { | ||
180 | long i, unlocked = 0; | ||
181 | |||
182 | for (i = 0; i < npage; i++, iova += PAGE_SIZE) { | ||
183 | unsigned long pfn; | ||
184 | |||
185 | pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT; | ||
186 | if (pfn) { | ||
187 | iommu_unmap(iommu->domain, iova, PAGE_SIZE); | ||
188 | unlocked += put_pfn(pfn, prot); | ||
189 | } | ||
190 | } | ||
191 | return unlocked; | ||
192 | } | ||
193 | |||
194 | static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova, | ||
195 | long npage, int prot) | ||
196 | { | ||
197 | long unlocked; | ||
198 | |||
199 | unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot); | ||
200 | vfio_lock_acct(-unlocked); | ||
201 | } | ||
202 | |||
203 | static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) | 224 | static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) |
204 | { | 225 | { |
205 | struct page *page[1]; | 226 | struct page *page[1]; |
@@ -226,198 +247,306 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) | |||
226 | return ret; | 247 | return ret; |
227 | } | 248 | } |
228 | 249 | ||
229 | /* Map DMA region */ | 250 | /* |
230 | static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova, | 251 | * Attempt to pin pages. We really don't want to track all the pfns and |
231 | unsigned long vaddr, long npage, int prot) | 252 | * the iommu can only map chunks of consecutive pfns anyway, so get the |
253 | * first page and all consecutive pages with the same locking. | ||
254 | */ | ||
255 | static long vfio_pin_pages(unsigned long vaddr, long npage, | ||
256 | int prot, unsigned long *pfn_base) | ||
232 | { | 257 | { |
233 | dma_addr_t start = iova; | 258 | unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
234 | long i, locked = 0; | 259 | bool lock_cap = capable(CAP_IPC_LOCK); |
235 | int ret; | 260 | long ret, i; |
236 | 261 | ||
237 | /* Verify that pages are not already mapped */ | 262 | if (!current->mm) |
238 | for (i = 0; i < npage; i++, iova += PAGE_SIZE) | 263 | return -ENODEV; |
239 | if (iommu_iova_to_phys(iommu->domain, iova)) | ||
240 | return -EBUSY; | ||
241 | 264 | ||
242 | iova = start; | 265 | ret = vaddr_get_pfn(vaddr, prot, pfn_base); |
266 | if (ret) | ||
267 | return ret; | ||
243 | 268 | ||
244 | if (iommu->cache) | 269 | if (is_invalid_reserved_pfn(*pfn_base)) |
245 | prot |= IOMMU_CACHE; | 270 | return 1; |
246 | 271 | ||
247 | /* | 272 | if (!lock_cap && current->mm->locked_vm + 1 > limit) { |
248 | * XXX We break mappings into pages and use get_user_pages_fast to | 273 | put_pfn(*pfn_base, prot); |
249 | * pin the pages in memory. It's been suggested that mlock might | 274 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, |
250 | * provide a more efficient mechanism, but nothing prevents the | 275 | limit << PAGE_SHIFT); |
251 | * user from munlocking the pages, which could then allow the user | 276 | return -ENOMEM; |
252 | * access to random host memory. We also have no guarantee from the | 277 | } |
253 | * IOMMU API that the iommu driver can unmap sub-pages of previous | 278 | |
254 | * mappings. This means we might lose an entire range if a single | 279 | if (unlikely(disable_hugepages)) { |
255 | * page within it is unmapped. Single page mappings are inefficient, | 280 | vfio_lock_acct(1); |
256 | * but provide the most flexibility for now. | 281 | return 1; |
257 | */ | 282 | } |
258 | for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) { | 283 | |
284 | /* Lock all the consecutive pages from pfn_base */ | ||
285 | for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { | ||
259 | unsigned long pfn = 0; | 286 | unsigned long pfn = 0; |
260 | 287 | ||
261 | ret = vaddr_get_pfn(vaddr, prot, &pfn); | 288 | ret = vaddr_get_pfn(vaddr, prot, &pfn); |
262 | if (ret) { | 289 | if (ret) |
263 | __vfio_dma_do_unmap(iommu, start, i, prot); | 290 | break; |
264 | return ret; | ||
265 | } | ||
266 | 291 | ||
267 | /* | 292 | if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) { |
268 | * Only add actual locked pages to accounting | 293 | put_pfn(pfn, prot); |
269 | * XXX We're effectively marking a page locked for every | 294 | break; |
270 | * IOVA page even though it's possible the user could be | 295 | } |
271 | * backing multiple IOVAs with the same vaddr. This over- | ||
272 | * penalizes the user process, but we currently have no | ||
273 | * easy way to do this properly. | ||
274 | */ | ||
275 | if (!is_invalid_reserved_pfn(pfn)) | ||
276 | locked++; | ||
277 | 296 | ||
278 | ret = iommu_map(iommu->domain, iova, | 297 | if (!lock_cap && current->mm->locked_vm + i + 1 > limit) { |
279 | (phys_addr_t)pfn << PAGE_SHIFT, | ||
280 | PAGE_SIZE, prot); | ||
281 | if (ret) { | ||
282 | /* Back out mappings on error */ | ||
283 | put_pfn(pfn, prot); | 298 | put_pfn(pfn, prot); |
284 | __vfio_dma_do_unmap(iommu, start, i, prot); | 299 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", |
285 | return ret; | 300 | __func__, limit << PAGE_SHIFT); |
301 | break; | ||
286 | } | 302 | } |
287 | } | 303 | } |
288 | vfio_lock_acct(locked); | 304 | |
289 | return 0; | 305 | vfio_lock_acct(i); |
306 | |||
307 | return i; | ||
290 | } | 308 | } |
291 | 309 | ||
292 | static inline bool ranges_overlap(dma_addr_t start1, size_t size1, | 310 | static long vfio_unpin_pages(unsigned long pfn, long npage, |
293 | dma_addr_t start2, size_t size2) | 311 | int prot, bool do_accounting) |
294 | { | 312 | { |
295 | if (start1 < start2) | 313 | unsigned long unlocked = 0; |
296 | return (start2 - start1 < size1); | 314 | long i; |
297 | else if (start2 < start1) | 315 | |
298 | return (start1 - start2 < size2); | 316 | for (i = 0; i < npage; i++) |
299 | return (size1 > 0 && size2 > 0); | 317 | unlocked += put_pfn(pfn++, prot); |
318 | |||
319 | if (do_accounting) | ||
320 | vfio_lock_acct(-unlocked); | ||
321 | |||
322 | return unlocked; | ||
300 | } | 323 | } |
301 | 324 | ||
302 | static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, | 325 | static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, |
303 | dma_addr_t start, size_t size) | 326 | dma_addr_t iova, size_t *size) |
304 | { | 327 | { |
305 | struct vfio_dma *dma; | 328 | dma_addr_t start = iova, end = iova + *size; |
329 | long unlocked = 0; | ||
306 | 330 | ||
307 | list_for_each_entry(dma, &iommu->dma_list, next) { | 331 | while (iova < end) { |
308 | if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage), | 332 | size_t unmapped; |
309 | start, size)) | 333 | phys_addr_t phys; |
310 | return dma; | 334 | |
335 | /* | ||
336 | * We use the IOMMU to track the physical address. This | ||
337 | * saves us from having a lot more entries in our mapping | ||
338 | * tree. The downside is that we don't track the size | ||
339 | * used to do the mapping. We request unmap of a single | ||
340 | * page, but expect IOMMUs that support large pages to | ||
341 | * unmap a larger chunk. | ||
342 | */ | ||
343 | phys = iommu_iova_to_phys(iommu->domain, iova); | ||
344 | if (WARN_ON(!phys)) { | ||
345 | iova += PAGE_SIZE; | ||
346 | continue; | ||
347 | } | ||
348 | |||
349 | unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE); | ||
350 | if (!unmapped) | ||
351 | break; | ||
352 | |||
353 | unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT, | ||
354 | unmapped >> PAGE_SHIFT, | ||
355 | dma->prot, false); | ||
356 | iova += unmapped; | ||
311 | } | 357 | } |
312 | return NULL; | 358 | |
359 | vfio_lock_acct(-unlocked); | ||
360 | |||
361 | *size = iova - start; | ||
362 | |||
363 | return 0; | ||
313 | } | 364 | } |
314 | 365 | ||
315 | static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start, | 366 | static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start, |
316 | size_t size, struct vfio_dma *dma) | 367 | size_t *size, struct vfio_dma *dma) |
317 | { | 368 | { |
369 | size_t offset, overlap, tmp; | ||
318 | struct vfio_dma *split; | 370 | struct vfio_dma *split; |
319 | long npage_lo, npage_hi; | 371 | int ret; |
320 | 372 | ||
321 | /* Existing dma region is completely covered, unmap all */ | 373 | if (!*size) |
322 | if (start <= dma->iova && | 374 | return 0; |
323 | start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) { | 375 | |
324 | vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot); | 376 | /* |
325 | list_del(&dma->next); | 377 | * Existing dma region is completely covered, unmap all. This is |
326 | npage_lo = dma->npage; | 378 | * the likely case since userspace tends to map and unmap buffers |
379 | * in one shot rather than multiple mappings within a buffer. | ||
380 | */ | ||
381 | if (likely(start <= dma->iova && | ||
382 | start + *size >= dma->iova + dma->size)) { | ||
383 | *size = dma->size; | ||
384 | ret = vfio_unmap_unpin(iommu, dma, dma->iova, size); | ||
385 | if (ret) | ||
386 | return ret; | ||
387 | |||
388 | /* | ||
389 | * Did we remove more than we have? Should never happen | ||
390 | * since a vfio_dma is contiguous in iova and vaddr. | ||
391 | */ | ||
392 | WARN_ON(*size != dma->size); | ||
393 | |||
394 | vfio_remove_dma(iommu, dma); | ||
327 | kfree(dma); | 395 | kfree(dma); |
328 | return npage_lo; | 396 | return 0; |
329 | } | 397 | } |
330 | 398 | ||
331 | /* Overlap low address of existing range */ | 399 | /* Overlap low address of existing range */ |
332 | if (start <= dma->iova) { | 400 | if (start <= dma->iova) { |
333 | size_t overlap; | 401 | overlap = start + *size - dma->iova; |
402 | ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap); | ||
403 | if (ret) | ||
404 | return ret; | ||
334 | 405 | ||
335 | overlap = start + size - dma->iova; | 406 | vfio_remove_dma(iommu, dma); |
336 | npage_lo = overlap >> PAGE_SHIFT; | ||
337 | 407 | ||
338 | vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot); | 408 | /* |
339 | dma->iova += overlap; | 409 | * Check, we may have removed the whole vfio_dma. If not, |
340 | dma->vaddr += overlap; | 410 | * fix up and re-insert. |
341 | dma->npage -= npage_lo; | 411 | */ |
342 | return npage_lo; | 412 | if (overlap < dma->size) { |
413 | dma->iova += overlap; | ||
414 | dma->vaddr += overlap; | ||
415 | dma->size -= overlap; | ||
416 | vfio_insert_dma(iommu, dma); | ||
417 | } else | ||
418 | kfree(dma); | ||
419 | |||
420 | *size = overlap; | ||
421 | return 0; | ||
343 | } | 422 | } |
344 | 423 | ||
345 | /* Overlap high address of existing range */ | 424 | /* Overlap high address of existing range */ |
346 | if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) { | 425 | if (start + *size >= dma->iova + dma->size) { |
347 | size_t overlap; | 426 | offset = start - dma->iova; |
427 | overlap = dma->size - offset; | ||
348 | 428 | ||
349 | overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start; | 429 | ret = vfio_unmap_unpin(iommu, dma, start, &overlap); |
350 | npage_hi = overlap >> PAGE_SHIFT; | 430 | if (ret) |
431 | return ret; | ||
351 | 432 | ||
352 | vfio_dma_unmap(iommu, start, npage_hi, dma->prot); | 433 | dma->size -= overlap; |
353 | dma->npage -= npage_hi; | 434 | *size = overlap; |
354 | return npage_hi; | 435 | return 0; |
355 | } | 436 | } |
356 | 437 | ||
357 | /* Split existing */ | 438 | /* Split existing */ |
358 | npage_lo = (start - dma->iova) >> PAGE_SHIFT; | ||
359 | npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo; | ||
360 | 439 | ||
361 | split = kzalloc(sizeof *split, GFP_KERNEL); | 440 | /* |
441 | * Allocate our tracking structure early even though it may not | ||
442 | * be used. An allocation failure later loses track of pages and | ||
443 | * is more difficult to unwind. | ||
444 | */ | ||
445 | split = kzalloc(sizeof(*split), GFP_KERNEL); | ||
362 | if (!split) | 446 | if (!split) |
363 | return -ENOMEM; | 447 | return -ENOMEM; |
364 | 448 | ||
365 | vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot); | 449 | offset = start - dma->iova; |
450 | |||
451 | ret = vfio_unmap_unpin(iommu, dma, start, size); | ||
452 | if (ret || !*size) { | ||
453 | kfree(split); | ||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | tmp = dma->size; | ||
366 | 458 | ||
367 | dma->npage = npage_lo; | 459 | /* Resize the lower vfio_dma in place, before the below insert */ |
460 | dma->size = offset; | ||
368 | 461 | ||
369 | split->npage = npage_hi; | 462 | /* Insert new for remainder, assuming it didn't all get unmapped */ |
370 | split->iova = start + size; | 463 | if (likely(offset + *size < tmp)) { |
371 | split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size; | 464 | split->size = tmp - offset - *size; |
372 | split->prot = dma->prot; | 465 | split->iova = dma->iova + offset + *size; |
373 | list_add(&split->next, &iommu->dma_list); | 466 | split->vaddr = dma->vaddr + offset + *size; |
374 | return size >> PAGE_SHIFT; | 467 | split->prot = dma->prot; |
468 | vfio_insert_dma(iommu, split); | ||
469 | } else | ||
470 | kfree(split); | ||
471 | |||
472 | return 0; | ||
375 | } | 473 | } |
376 | 474 | ||
377 | static int vfio_dma_do_unmap(struct vfio_iommu *iommu, | 475 | static int vfio_dma_do_unmap(struct vfio_iommu *iommu, |
378 | struct vfio_iommu_type1_dma_unmap *unmap) | 476 | struct vfio_iommu_type1_dma_unmap *unmap) |
379 | { | 477 | { |
380 | long ret = 0, npage = unmap->size >> PAGE_SHIFT; | ||
381 | struct vfio_dma *dma, *tmp; | ||
382 | uint64_t mask; | 478 | uint64_t mask; |
479 | struct vfio_dma *dma; | ||
480 | size_t unmapped = 0, size; | ||
481 | int ret = 0; | ||
383 | 482 | ||
384 | mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1; | 483 | mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1; |
385 | 484 | ||
386 | if (unmap->iova & mask) | 485 | if (unmap->iova & mask) |
387 | return -EINVAL; | 486 | return -EINVAL; |
388 | if (unmap->size & mask) | 487 | if (!unmap->size || unmap->size & mask) |
389 | return -EINVAL; | 488 | return -EINVAL; |
390 | 489 | ||
391 | /* XXX We still break these down into PAGE_SIZE */ | ||
392 | WARN_ON(mask & PAGE_MASK); | 490 | WARN_ON(mask & PAGE_MASK); |
393 | 491 | ||
394 | mutex_lock(&iommu->lock); | 492 | mutex_lock(&iommu->lock); |
395 | 493 | ||
396 | list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) { | 494 | while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) { |
397 | if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage), | 495 | size = unmap->size; |
398 | unmap->iova, unmap->size)) { | 496 | ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma); |
399 | ret = vfio_remove_dma_overlap(iommu, unmap->iova, | 497 | if (ret || !size) |
400 | unmap->size, dma); | 498 | break; |
401 | if (ret > 0) | 499 | unmapped += size; |
402 | npage -= ret; | ||
403 | if (ret < 0 || npage == 0) | ||
404 | break; | ||
405 | } | ||
406 | } | 500 | } |
501 | |||
407 | mutex_unlock(&iommu->lock); | 502 | mutex_unlock(&iommu->lock); |
408 | return ret > 0 ? 0 : (int)ret; | 503 | |
504 | /* | ||
505 | * We may unmap more than requested, update the unmap struct so | ||
506 | * userspace can know. | ||
507 | */ | ||
508 | unmap->size = unmapped; | ||
509 | |||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * Turns out AMD IOMMU has a page table bug where it won't map large pages | ||
515 | * to a region that previously mapped smaller pages. This should be fixed | ||
516 | * soon, so this is just a temporary workaround to break mappings down into | ||
517 | * PAGE_SIZE. Better to map smaller pages than nothing. | ||
518 | */ | ||
519 | static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova, | ||
520 | unsigned long pfn, long npage, int prot) | ||
521 | { | ||
522 | long i; | ||
523 | int ret; | ||
524 | |||
525 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { | ||
526 | ret = iommu_map(iommu->domain, iova, | ||
527 | (phys_addr_t)pfn << PAGE_SHIFT, | ||
528 | PAGE_SIZE, prot); | ||
529 | if (ret) | ||
530 | break; | ||
531 | } | ||
532 | |||
533 | for (; i < npage && i > 0; i--, iova -= PAGE_SIZE) | ||
534 | iommu_unmap(iommu->domain, iova, PAGE_SIZE); | ||
535 | |||
536 | return ret; | ||
409 | } | 537 | } |
410 | 538 | ||
411 | static int vfio_dma_do_map(struct vfio_iommu *iommu, | 539 | static int vfio_dma_do_map(struct vfio_iommu *iommu, |
412 | struct vfio_iommu_type1_dma_map *map) | 540 | struct vfio_iommu_type1_dma_map *map) |
413 | { | 541 | { |
414 | struct vfio_dma *dma, *pdma = NULL; | 542 | dma_addr_t end, iova; |
415 | dma_addr_t iova = map->iova; | 543 | unsigned long vaddr = map->vaddr; |
416 | unsigned long locked, lock_limit, vaddr = map->vaddr; | ||
417 | size_t size = map->size; | 544 | size_t size = map->size; |
545 | long npage; | ||
418 | int ret = 0, prot = 0; | 546 | int ret = 0, prot = 0; |
419 | uint64_t mask; | 547 | uint64_t mask; |
420 | long npage; | 548 | |
549 | end = map->iova + map->size; | ||
421 | 550 | ||
422 | mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1; | 551 | mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1; |
423 | 552 | ||
@@ -430,104 +559,144 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
430 | if (!prot) | 559 | if (!prot) |
431 | return -EINVAL; /* No READ/WRITE? */ | 560 | return -EINVAL; /* No READ/WRITE? */ |
432 | 561 | ||
562 | if (iommu->cache) | ||
563 | prot |= IOMMU_CACHE; | ||
564 | |||
433 | if (vaddr & mask) | 565 | if (vaddr & mask) |
434 | return -EINVAL; | 566 | return -EINVAL; |
435 | if (iova & mask) | 567 | if (map->iova & mask) |
436 | return -EINVAL; | 568 | return -EINVAL; |
437 | if (size & mask) | 569 | if (!map->size || map->size & mask) |
438 | return -EINVAL; | 570 | return -EINVAL; |
439 | 571 | ||
440 | /* XXX We still break these down into PAGE_SIZE */ | ||
441 | WARN_ON(mask & PAGE_MASK); | 572 | WARN_ON(mask & PAGE_MASK); |
442 | 573 | ||
443 | /* Don't allow IOVA wrap */ | 574 | /* Don't allow IOVA wrap */ |
444 | if (iova + size && iova + size < iova) | 575 | if (end && end < map->iova) |
445 | return -EINVAL; | 576 | return -EINVAL; |
446 | 577 | ||
447 | /* Don't allow virtual address wrap */ | 578 | /* Don't allow virtual address wrap */ |
448 | if (vaddr + size && vaddr + size < vaddr) | 579 | if (vaddr + map->size && vaddr + map->size < vaddr) |
449 | return -EINVAL; | ||
450 | |||
451 | npage = size >> PAGE_SHIFT; | ||
452 | if (!npage) | ||
453 | return -EINVAL; | 580 | return -EINVAL; |
454 | 581 | ||
455 | mutex_lock(&iommu->lock); | 582 | mutex_lock(&iommu->lock); |
456 | 583 | ||
457 | if (vfio_find_dma(iommu, iova, size)) { | 584 | if (vfio_find_dma(iommu, map->iova, map->size)) { |
458 | ret = -EBUSY; | 585 | mutex_unlock(&iommu->lock); |
459 | goto out_lock; | 586 | return -EEXIST; |
460 | } | 587 | } |
461 | 588 | ||
462 | /* account for locked pages */ | 589 | for (iova = map->iova; iova < end; iova += size, vaddr += size) { |
463 | locked = current->mm->locked_vm + npage; | 590 | struct vfio_dma *dma = NULL; |
464 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | 591 | unsigned long pfn; |
465 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { | 592 | long i; |
466 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", | 593 | |
467 | __func__, rlimit(RLIMIT_MEMLOCK)); | 594 | /* Pin a contiguous chunk of memory */ |
468 | ret = -ENOMEM; | 595 | npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT, |
469 | goto out_lock; | 596 | prot, &pfn); |
470 | } | 597 | if (npage <= 0) { |
598 | WARN_ON(!npage); | ||
599 | ret = (int)npage; | ||
600 | break; | ||
601 | } | ||
471 | 602 | ||
472 | ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot); | 603 | /* Verify pages are not already mapped */ |
473 | if (ret) | 604 | for (i = 0; i < npage; i++) { |
474 | goto out_lock; | 605 | if (iommu_iova_to_phys(iommu->domain, |
606 | iova + (i << PAGE_SHIFT))) { | ||
607 | vfio_unpin_pages(pfn, npage, prot, true); | ||
608 | ret = -EBUSY; | ||
609 | break; | ||
610 | } | ||
611 | } | ||
475 | 612 | ||
476 | /* Check if we abut a region below - nothing below 0 */ | 613 | ret = iommu_map(iommu->domain, iova, |
477 | if (iova) { | 614 | (phys_addr_t)pfn << PAGE_SHIFT, |
478 | dma = vfio_find_dma(iommu, iova - 1, 1); | 615 | npage << PAGE_SHIFT, prot); |
479 | if (dma && dma->prot == prot && | 616 | if (ret) { |
480 | dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) { | 617 | if (ret != -EBUSY || |
618 | map_try_harder(iommu, iova, pfn, npage, prot)) { | ||
619 | vfio_unpin_pages(pfn, npage, prot, true); | ||
620 | break; | ||
621 | } | ||
622 | } | ||
481 | 623 | ||
482 | dma->npage += npage; | 624 | size = npage << PAGE_SHIFT; |
483 | iova = dma->iova; | ||
484 | vaddr = dma->vaddr; | ||
485 | npage = dma->npage; | ||
486 | size = NPAGE_TO_SIZE(npage); | ||
487 | 625 | ||
488 | pdma = dma; | 626 | /* |
627 | * Check if we abut a region below - nothing below 0. | ||
628 | * This is the most likely case when mapping chunks of | ||
629 | * physically contiguous regions within a virtual address | ||
630 | * range. Update the abutting entry in place since iova | ||
631 | * doesn't change. | ||
632 | */ | ||
633 | if (likely(iova)) { | ||
634 | struct vfio_dma *tmp; | ||
635 | tmp = vfio_find_dma(iommu, iova - 1, 1); | ||
636 | if (tmp && tmp->prot == prot && | ||
637 | tmp->vaddr + tmp->size == vaddr) { | ||
638 | tmp->size += size; | ||
639 | iova = tmp->iova; | ||
640 | size = tmp->size; | ||
641 | vaddr = tmp->vaddr; | ||
642 | dma = tmp; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | /* | ||
647 | * Check if we abut a region above - nothing above ~0 + 1. | ||
648 | * If we abut above and below, remove and free. If only | ||
649 | * abut above, remove, modify, reinsert. | ||
650 | */ | ||
651 | if (likely(iova + size)) { | ||
652 | struct vfio_dma *tmp; | ||
653 | tmp = vfio_find_dma(iommu, iova + size, 1); | ||
654 | if (tmp && tmp->prot == prot && | ||
655 | tmp->vaddr == vaddr + size) { | ||
656 | vfio_remove_dma(iommu, tmp); | ||
657 | if (dma) { | ||
658 | dma->size += tmp->size; | ||
659 | kfree(tmp); | ||
660 | } else { | ||
661 | size += tmp->size; | ||
662 | tmp->size = size; | ||
663 | tmp->iova = iova; | ||
664 | tmp->vaddr = vaddr; | ||
665 | vfio_insert_dma(iommu, tmp); | ||
666 | dma = tmp; | ||
667 | } | ||
668 | } | ||
489 | } | 669 | } |
490 | } | ||
491 | 670 | ||
492 | /* Check if we abut a region above - nothing above ~0 + 1 */ | 671 | if (!dma) { |
493 | if (iova + size) { | 672 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); |
494 | dma = vfio_find_dma(iommu, iova + size, 1); | 673 | if (!dma) { |
495 | if (dma && dma->prot == prot && | 674 | iommu_unmap(iommu->domain, iova, size); |
496 | dma->vaddr == vaddr + size) { | 675 | vfio_unpin_pages(pfn, npage, prot, true); |
676 | ret = -ENOMEM; | ||
677 | break; | ||
678 | } | ||
497 | 679 | ||
498 | dma->npage += npage; | 680 | dma->size = size; |
499 | dma->iova = iova; | 681 | dma->iova = iova; |
500 | dma->vaddr = vaddr; | 682 | dma->vaddr = vaddr; |
501 | 683 | dma->prot = prot; | |
502 | /* | 684 | vfio_insert_dma(iommu, dma); |
503 | * If merged above and below, remove previously | ||
504 | * merged entry. New entry covers it. | ||
505 | */ | ||
506 | if (pdma) { | ||
507 | list_del(&pdma->next); | ||
508 | kfree(pdma); | ||
509 | } | ||
510 | pdma = dma; | ||
511 | } | 685 | } |
512 | } | 686 | } |
513 | 687 | ||
514 | /* Isolated, new region */ | 688 | if (ret) { |
515 | if (!pdma) { | 689 | struct vfio_dma *tmp; |
516 | dma = kzalloc(sizeof *dma, GFP_KERNEL); | 690 | iova = map->iova; |
517 | if (!dma) { | 691 | size = map->size; |
518 | ret = -ENOMEM; | 692 | while ((tmp = vfio_find_dma(iommu, iova, size))) { |
519 | vfio_dma_unmap(iommu, iova, npage, prot); | 693 | int r = vfio_remove_dma_overlap(iommu, iova, |
520 | goto out_lock; | 694 | &size, tmp); |
695 | if (WARN_ON(r || !size)) | ||
696 | break; | ||
521 | } | 697 | } |
522 | |||
523 | dma->npage = npage; | ||
524 | dma->iova = iova; | ||
525 | dma->vaddr = vaddr; | ||
526 | dma->prot = prot; | ||
527 | list_add(&dma->next, &iommu->dma_list); | ||
528 | } | 698 | } |
529 | 699 | ||
530 | out_lock: | ||
531 | mutex_unlock(&iommu->lock); | 700 | mutex_unlock(&iommu->lock); |
532 | return ret; | 701 | return ret; |
533 | } | 702 | } |
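
From userspace, nothing about the map ioctl changes except that an already-mapped range now fails with -EEXIST rather than -EBUSY; the hugepage pinning, the rb-tree bookkeeping and the merging of abutting ranges are all internal. A minimal usage sketch against a type1 container file descriptor (the IOVA and buffer size are arbitrary examples):

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    static int map_buffer(int container_fd)
    {
            struct vfio_iommu_type1_dma_map map;
            void *buf;

            /* With THP or hugetlbfs backing, the kernel can now pin and map
             * this in large contiguous chunks and track it as one entry. */
            buf = mmap(NULL, 16 << 20, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return -1;

            memset(&map, 0, sizeof(map));
            map.argsz = sizeof(map);
            map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
            map.vaddr = (uintptr_t)buf;
            map.iova  = 0x100000;
            map.size  = 16 << 20;

            return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
    }
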
@@ -606,7 +775,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) | |||
606 | return ERR_PTR(-ENOMEM); | 775 | return ERR_PTR(-ENOMEM); |
607 | 776 | ||
608 | INIT_LIST_HEAD(&iommu->group_list); | 777 | INIT_LIST_HEAD(&iommu->group_list); |
609 | INIT_LIST_HEAD(&iommu->dma_list); | 778 | iommu->dma_list = RB_ROOT; |
610 | mutex_init(&iommu->lock); | 779 | mutex_init(&iommu->lock); |
611 | 780 | ||
612 | /* | 781 | /* |
@@ -640,7 +809,7 @@ static void vfio_iommu_type1_release(void *iommu_data) | |||
640 | { | 809 | { |
641 | struct vfio_iommu *iommu = iommu_data; | 810 | struct vfio_iommu *iommu = iommu_data; |
642 | struct vfio_group *group, *group_tmp; | 811 | struct vfio_group *group, *group_tmp; |
643 | struct vfio_dma *dma, *dma_tmp; | 812 | struct rb_node *node; |
644 | 813 | ||
645 | list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) { | 814 | list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) { |
646 | iommu_detach_group(iommu->domain, group->iommu_group); | 815 | iommu_detach_group(iommu->domain, group->iommu_group); |
@@ -648,10 +817,12 @@ static void vfio_iommu_type1_release(void *iommu_data) | |||
648 | kfree(group); | 817 | kfree(group); |
649 | } | 818 | } |
650 | 819 | ||
651 | list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) { | 820 | while ((node = rb_first(&iommu->dma_list))) { |
652 | vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot); | 821 | struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); |
653 | list_del(&dma->next); | 822 | size_t size = dma->size; |
654 | kfree(dma); | 823 | vfio_remove_dma_overlap(iommu, dma->iova, &size, dma); |
824 | if (WARN_ON(!size)) | ||
825 | break; | ||
655 | } | 826 | } |
656 | 827 | ||
657 | iommu_domain_free(iommu->domain); | 828 | iommu_domain_free(iommu->domain); |
@@ -706,6 +877,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, | |||
706 | 877 | ||
707 | } else if (cmd == VFIO_IOMMU_UNMAP_DMA) { | 878 | } else if (cmd == VFIO_IOMMU_UNMAP_DMA) { |
708 | struct vfio_iommu_type1_dma_unmap unmap; | 879 | struct vfio_iommu_type1_dma_unmap unmap; |
880 | long ret; | ||
709 | 881 | ||
710 | minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); | 882 | minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); |
711 | 883 | ||
@@ -715,7 +887,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, | |||
715 | if (unmap.argsz < minsz || unmap.flags) | 887 | if (unmap.argsz < minsz || unmap.flags) |
716 | return -EINVAL; | 888 | return -EINVAL; |
717 | 889 | ||
718 | return vfio_dma_do_unmap(iommu, &unmap); | 890 | ret = vfio_dma_do_unmap(iommu, &unmap); |
891 | if (ret) | ||
892 | return ret; | ||
893 | |||
894 | return copy_to_user((void __user *)arg, &unmap, minsz); | ||
719 | } | 895 | } |
720 | 896 | ||
721 | return -ENOTTY; | 897 | return -ENOTTY; |
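
The unmap ioctl now copies the structure back to userspace with the 'size' field updated to what was actually torn down, which may differ from the request (typically more, when the IOMMU had mapped larger pages). A short sketch of reading it back:

    #include <linux/vfio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Returns 0 and stores the number of bytes really unmapped in *done. */
    static int unmap_range(int container_fd, __u64 iova, __u64 size,
                           __u64 *done)
    {
            struct vfio_iommu_type1_dma_unmap unmap;

            memset(&unmap, 0, sizeof(unmap));
            unmap.argsz = sizeof(unmap);
            unmap.iova = iova;
            unmap.size = size;

            if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
                    return -1;

            *done = unmap.size;
            return 0;
    }
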
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 8b9226da3f54..017a1e8a8f6f 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config VHOST_NET | 1 | config VHOST_NET |
2 | tristate "Host kernel accelerator for virtio net" | 2 | tristate "Host kernel accelerator for virtio net" |
3 | depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) | 3 | depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) |
4 | select VHOST | ||
4 | select VHOST_RING | 5 | select VHOST_RING |
5 | ---help--- | 6 | ---help--- |
6 | This kernel module can be loaded in host kernel to accelerate | 7 | This kernel module can be loaded in host kernel to accelerate |
@@ -13,6 +14,7 @@ config VHOST_NET | |||
13 | config VHOST_SCSI | 14 | config VHOST_SCSI |
14 | tristate "VHOST_SCSI TCM fabric driver" | 15 | tristate "VHOST_SCSI TCM fabric driver" |
15 | depends on TARGET_CORE && EVENTFD && m | 16 | depends on TARGET_CORE && EVENTFD && m |
17 | select VHOST | ||
16 | select VHOST_RING | 18 | select VHOST_RING |
17 | default n | 19 | default n |
18 | ---help--- | 20 | ---help--- |
@@ -24,3 +26,9 @@ config VHOST_RING | |||
24 | ---help--- | 26 | ---help--- |
25 | This option is selected by any driver which needs to access | 27 | This option is selected by any driver which needs to access |
26 | the host side of a virtio ring. | 28 | the host side of a virtio ring. |
29 | |||
30 | config VHOST | ||
31 | tristate | ||
32 | ---help--- | ||
33 | This option is selected by any driver which needs to access | ||
34 | the core of vhost. | ||
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile index 654e9afb11f5..e0441c34db1c 100644 --- a/drivers/vhost/Makefile +++ b/drivers/vhost/Makefile | |||
@@ -1,7 +1,8 @@ | |||
1 | obj-$(CONFIG_VHOST_NET) += vhost_net.o | 1 | obj-$(CONFIG_VHOST_NET) += vhost_net.o |
2 | vhost_net-y := vhost.o net.o | 2 | vhost_net-y := net.o |
3 | 3 | ||
4 | obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o | 4 | obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o |
5 | vhost_scsi-y := scsi.o | 5 | vhost_scsi-y := scsi.o |
6 | 6 | ||
7 | obj-$(CONFIG_VHOST_RING) += vringh.o | 7 | obj-$(CONFIG_VHOST_RING) += vringh.o |
8 | obj-$(CONFIG_VHOST) += vhost.o | ||
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 8ca5ac71b845..027be91db139 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -168,7 +168,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n) | |||
168 | } | 168 | } |
169 | } | 169 | } |
170 | 170 | ||
171 | int vhost_net_set_ubuf_info(struct vhost_net *n) | 171 | static int vhost_net_set_ubuf_info(struct vhost_net *n) |
172 | { | 172 | { |
173 | bool zcopy; | 173 | bool zcopy; |
174 | int i; | 174 | int i; |
@@ -189,7 +189,7 @@ err: | |||
189 | return -ENOMEM; | 189 | return -ENOMEM; |
190 | } | 190 | } |
191 | 191 | ||
192 | void vhost_net_vq_reset(struct vhost_net *n) | 192 | static void vhost_net_vq_reset(struct vhost_net *n) |
193 | { | 193 | { |
194 | int i; | 194 | int i; |
195 | 195 | ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 701420297225..4264840ef7dc 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/llist.h> | 49 | #include <linux/llist.h> |
50 | #include <linux/bitmap.h> | 50 | #include <linux/bitmap.h> |
51 | 51 | ||
52 | #include "vhost.c" | ||
53 | #include "vhost.h" | 52 | #include "vhost.h" |
54 | 53 | ||
55 | #define TCM_VHOST_VERSION "v0.1" | 54 | #define TCM_VHOST_VERSION "v0.1" |
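
Dropping the '#include "vhost.c"' is the visible half of the module split: the shared vhost core is now built once as vhost.ko (the new CONFIG_VHOST option selected by both VHOST_NET and VHOST_SCSI) and the two drivers resolve its entry points at load time. A rough pattern sketch, not the actual vhost.c change (which is not part of this diff); the exported function is one example of the core API declared in vhost.h:

    /* drivers/vhost/vhost.c -- compiled once, as vhost.ko */
    #include <linux/module.h>
    #include "vhost.h"

    void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
    {
            /* ...shared implementation lives here, in the core module... */
    }
    EXPORT_SYMBOL_GPL(vhost_work_queue);

    MODULE_LICENSE("GPL v2");
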
@@ -116,7 +115,6 @@ struct tcm_vhost_nacl { | |||
116 | struct se_node_acl se_node_acl; | 115 | struct se_node_acl se_node_acl; |
117 | }; | 116 | }; |
118 | 117 | ||
119 | struct vhost_scsi; | ||
120 | struct tcm_vhost_tpg { | 118 | struct tcm_vhost_tpg { |
121 | /* Vhost port target portal group tag for TCM */ | 119 | /* Vhost port target portal group tag for TCM */ |
122 | u16 tport_tpgt; | 120 | u16 tport_tpgt; |
@@ -218,7 +216,7 @@ static int iov_num_pages(struct iovec *iov) | |||
218 | ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; | 216 | ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; |
219 | } | 217 | } |
220 | 218 | ||
221 | void tcm_vhost_done_inflight(struct kref *kref) | 219 | static void tcm_vhost_done_inflight(struct kref *kref) |
222 | { | 220 | { |
223 | struct vhost_scsi_inflight *inflight; | 221 | struct vhost_scsi_inflight *inflight; |
224 | 222 | ||
@@ -329,11 +327,12 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) | |||
329 | return 1; | 327 | return 1; |
330 | } | 328 | } |
331 | 329 | ||
332 | static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, | 330 | static u32 |
333 | struct se_node_acl *se_nacl, | 331 | tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, |
334 | struct t10_pr_registration *pr_reg, | 332 | struct se_node_acl *se_nacl, |
335 | int *format_code, | 333 | struct t10_pr_registration *pr_reg, |
336 | unsigned char *buf) | 334 | int *format_code, |
335 | unsigned char *buf) | ||
337 | { | 336 | { |
338 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 337 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
339 | struct tcm_vhost_tpg, se_tpg); | 338 | struct tcm_vhost_tpg, se_tpg); |
@@ -359,10 +358,11 @@ static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, | |||
359 | format_code, buf); | 358 | format_code, buf); |
360 | } | 359 | } |
361 | 360 | ||
362 | static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, | 361 | static u32 |
363 | struct se_node_acl *se_nacl, | 362 | tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, |
364 | struct t10_pr_registration *pr_reg, | 363 | struct se_node_acl *se_nacl, |
365 | int *format_code) | 364 | struct t10_pr_registration *pr_reg, |
365 | int *format_code) | ||
366 | { | 366 | { |
367 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 367 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
368 | struct tcm_vhost_tpg, se_tpg); | 368 | struct tcm_vhost_tpg, se_tpg); |
@@ -388,10 +388,11 @@ static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, | |||
388 | format_code); | 388 | format_code); |
389 | } | 389 | } |
390 | 390 | ||
391 | static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | 391 | static char * |
392 | const char *buf, | 392 | tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, |
393 | u32 *out_tid_len, | 393 | const char *buf, |
394 | char **port_nexus_ptr) | 394 | u32 *out_tid_len, |
395 | char **port_nexus_ptr) | ||
395 | { | 396 | { |
396 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 397 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
397 | struct tcm_vhost_tpg, se_tpg); | 398 | struct tcm_vhost_tpg, se_tpg); |
@@ -417,8 +418,8 @@ static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | |||
417 | port_nexus_ptr); | 418 | port_nexus_ptr); |
418 | } | 419 | } |
419 | 420 | ||
420 | static struct se_node_acl *tcm_vhost_alloc_fabric_acl( | 421 | static struct se_node_acl * |
421 | struct se_portal_group *se_tpg) | 422 | tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) |
422 | { | 423 | { |
423 | struct tcm_vhost_nacl *nacl; | 424 | struct tcm_vhost_nacl *nacl; |
424 | 425 | ||
@@ -431,8 +432,9 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl( | |||
431 | return &nacl->se_node_acl; | 432 | return &nacl->se_node_acl; |
432 | } | 433 | } |
433 | 434 | ||
434 | static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, | 435 | static void |
435 | struct se_node_acl *se_nacl) | 436 | tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, |
437 | struct se_node_acl *se_nacl) | ||
436 | { | 438 | { |
437 | struct tcm_vhost_nacl *nacl = container_of(se_nacl, | 439 | struct tcm_vhost_nacl *nacl = container_of(se_nacl, |
438 | struct tcm_vhost_nacl, se_node_acl); | 440 | struct tcm_vhost_nacl, se_node_acl); |
@@ -491,28 +493,28 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) | |||
491 | return 0; | 493 | return 0; |
492 | } | 494 | } |
493 | 495 | ||
494 | static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) | 496 | static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) |
495 | { | 497 | { |
496 | struct vhost_scsi *vs = tv_cmd->tvc_vhost; | 498 | struct vhost_scsi *vs = cmd->tvc_vhost; |
497 | 499 | ||
498 | llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); | 500 | llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); |
499 | 501 | ||
500 | vhost_work_queue(&vs->dev, &vs->vs_completion_work); | 502 | vhost_work_queue(&vs->dev, &vs->vs_completion_work); |
501 | } | 503 | } |
502 | 504 | ||
503 | static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) | 505 | static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) |
504 | { | 506 | { |
505 | struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, | 507 | struct tcm_vhost_cmd *cmd = container_of(se_cmd, |
506 | struct tcm_vhost_cmd, tvc_se_cmd); | 508 | struct tcm_vhost_cmd, tvc_se_cmd); |
507 | vhost_scsi_complete_cmd(tv_cmd); | 509 | vhost_scsi_complete_cmd(cmd); |
508 | return 0; | 510 | return 0; |
509 | } | 511 | } |
510 | 512 | ||
511 | static int tcm_vhost_queue_status(struct se_cmd *se_cmd) | 513 | static int tcm_vhost_queue_status(struct se_cmd *se_cmd) |
512 | { | 514 | { |
513 | struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, | 515 | struct tcm_vhost_cmd *cmd = container_of(se_cmd, |
514 | struct tcm_vhost_cmd, tvc_se_cmd); | 516 | struct tcm_vhost_cmd, tvc_se_cmd); |
515 | vhost_scsi_complete_cmd(tv_cmd); | 517 | vhost_scsi_complete_cmd(cmd); |
516 | return 0; | 518 | return 0; |
517 | } | 519 | } |
518 | 520 | ||
@@ -527,8 +529,9 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) | |||
527 | kfree(evt); | 529 | kfree(evt); |
528 | } | 530 | } |
529 | 531 | ||
530 | static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs, | 532 | static struct tcm_vhost_evt * |
531 | u32 event, u32 reason) | 533 | tcm_vhost_allocate_evt(struct vhost_scsi *vs, |
534 | u32 event, u32 reason) | ||
532 | { | 535 | { |
533 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 536 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
534 | struct tcm_vhost_evt *evt; | 537 | struct tcm_vhost_evt *evt; |
@@ -552,28 +555,28 @@ static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs, | |||
552 | return evt; | 555 | return evt; |
553 | } | 556 | } |
554 | 557 | ||
555 | static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) | 558 | static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd) |
556 | { | 559 | { |
557 | struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; | 560 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; |
558 | 561 | ||
559 | /* TODO locking against target/backend threads? */ | 562 | /* TODO locking against target/backend threads? */ |
560 | transport_generic_free_cmd(se_cmd, 1); | 563 | transport_generic_free_cmd(se_cmd, 1); |
561 | 564 | ||
562 | if (tv_cmd->tvc_sgl_count) { | 565 | if (cmd->tvc_sgl_count) { |
563 | u32 i; | 566 | u32 i; |
564 | for (i = 0; i < tv_cmd->tvc_sgl_count; i++) | 567 | for (i = 0; i < cmd->tvc_sgl_count; i++) |
565 | put_page(sg_page(&tv_cmd->tvc_sgl[i])); | 568 | put_page(sg_page(&cmd->tvc_sgl[i])); |
566 | 569 | ||
567 | kfree(tv_cmd->tvc_sgl); | 570 | kfree(cmd->tvc_sgl); |
568 | } | 571 | } |
569 | 572 | ||
570 | tcm_vhost_put_inflight(tv_cmd->inflight); | 573 | tcm_vhost_put_inflight(cmd->inflight); |
571 | 574 | ||
572 | kfree(tv_cmd); | 575 | kfree(cmd); |
573 | } | 576 | } |
574 | 577 | ||
575 | static void tcm_vhost_do_evt_work(struct vhost_scsi *vs, | 578 | static void |
576 | struct tcm_vhost_evt *evt) | 579 | tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) |
577 | { | 580 | { |
578 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 581 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
579 | struct virtio_scsi_event *event = &evt->event; | 582 | struct virtio_scsi_event *event = &evt->event; |
@@ -652,7 +655,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
652 | vs_completion_work); | 655 | vs_completion_work); |
653 | DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); | 656 | DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); |
654 | struct virtio_scsi_cmd_resp v_rsp; | 657 | struct virtio_scsi_cmd_resp v_rsp; |
655 | struct tcm_vhost_cmd *tv_cmd; | 658 | struct tcm_vhost_cmd *cmd; |
656 | struct llist_node *llnode; | 659 | struct llist_node *llnode; |
657 | struct se_cmd *se_cmd; | 660 | struct se_cmd *se_cmd; |
658 | int ret, vq; | 661 | int ret, vq; |
@@ -660,32 +663,32 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
660 | bitmap_zero(signal, VHOST_SCSI_MAX_VQ); | 663 | bitmap_zero(signal, VHOST_SCSI_MAX_VQ); |
661 | llnode = llist_del_all(&vs->vs_completion_list); | 664 | llnode = llist_del_all(&vs->vs_completion_list); |
662 | while (llnode) { | 665 | while (llnode) { |
663 | tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd, | 666 | cmd = llist_entry(llnode, struct tcm_vhost_cmd, |
664 | tvc_completion_list); | 667 | tvc_completion_list); |
665 | llnode = llist_next(llnode); | 668 | llnode = llist_next(llnode); |
666 | se_cmd = &tv_cmd->tvc_se_cmd; | 669 | se_cmd = &cmd->tvc_se_cmd; |
667 | 670 | ||
668 | pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, | 671 | pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, |
669 | tv_cmd, se_cmd->residual_count, se_cmd->scsi_status); | 672 | cmd, se_cmd->residual_count, se_cmd->scsi_status); |
670 | 673 | ||
671 | memset(&v_rsp, 0, sizeof(v_rsp)); | 674 | memset(&v_rsp, 0, sizeof(v_rsp)); |
672 | v_rsp.resid = se_cmd->residual_count; | 675 | v_rsp.resid = se_cmd->residual_count; |
673 | /* TODO is status_qualifier field needed? */ | 676 | /* TODO is status_qualifier field needed? */ |
674 | v_rsp.status = se_cmd->scsi_status; | 677 | v_rsp.status = se_cmd->scsi_status; |
675 | v_rsp.sense_len = se_cmd->scsi_sense_length; | 678 | v_rsp.sense_len = se_cmd->scsi_sense_length; |
676 | memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf, | 679 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, |
677 | v_rsp.sense_len); | 680 | v_rsp.sense_len); |
678 | ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); | 681 | ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); |
679 | if (likely(ret == 0)) { | 682 | if (likely(ret == 0)) { |
680 | struct vhost_scsi_virtqueue *q; | 683 | struct vhost_scsi_virtqueue *q; |
681 | vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0); | 684 | vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); |
682 | q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); | 685 | q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); |
683 | vq = q - vs->vqs; | 686 | vq = q - vs->vqs; |
684 | __set_bit(vq, signal); | 687 | __set_bit(vq, signal); |
685 | } else | 688 | } else |
686 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | 689 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); |
687 | 690 | ||
688 | vhost_scsi_free_cmd(tv_cmd); | 691 | vhost_scsi_free_cmd(cmd); |
689 | } | 692 | } |
690 | 693 | ||
691 | vq = -1; | 694 | vq = -1; |
@@ -694,35 +697,35 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
694 | vhost_signal(&vs->dev, &vs->vqs[vq].vq); | 697 | vhost_signal(&vs->dev, &vs->vqs[vq].vq); |
695 | } | 698 | } |
696 | 699 | ||
697 | static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( | 700 | static struct tcm_vhost_cmd * |
698 | struct vhost_virtqueue *vq, | 701 | vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq, |
699 | struct tcm_vhost_tpg *tv_tpg, | 702 | struct tcm_vhost_tpg *tpg, |
700 | struct virtio_scsi_cmd_req *v_req, | 703 | struct virtio_scsi_cmd_req *v_req, |
701 | u32 exp_data_len, | 704 | u32 exp_data_len, |
702 | int data_direction) | 705 | int data_direction) |
703 | { | 706 | { |
704 | struct tcm_vhost_cmd *tv_cmd; | 707 | struct tcm_vhost_cmd *cmd; |
705 | struct tcm_vhost_nexus *tv_nexus; | 708 | struct tcm_vhost_nexus *tv_nexus; |
706 | 709 | ||
707 | tv_nexus = tv_tpg->tpg_nexus; | 710 | tv_nexus = tpg->tpg_nexus; |
708 | if (!tv_nexus) { | 711 | if (!tv_nexus) { |
709 | pr_err("Unable to locate active struct tcm_vhost_nexus\n"); | 712 | pr_err("Unable to locate active struct tcm_vhost_nexus\n"); |
710 | return ERR_PTR(-EIO); | 713 | return ERR_PTR(-EIO); |
711 | } | 714 | } |
712 | 715 | ||
713 | tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); | 716 | cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); |
714 | if (!tv_cmd) { | 717 | if (!cmd) { |
715 | pr_err("Unable to allocate struct tcm_vhost_cmd\n"); | 718 | pr_err("Unable to allocate struct tcm_vhost_cmd\n"); |
716 | return ERR_PTR(-ENOMEM); | 719 | return ERR_PTR(-ENOMEM); |
717 | } | 720 | } |
718 | tv_cmd->tvc_tag = v_req->tag; | 721 | cmd->tvc_tag = v_req->tag; |
719 | tv_cmd->tvc_task_attr = v_req->task_attr; | 722 | cmd->tvc_task_attr = v_req->task_attr; |
720 | tv_cmd->tvc_exp_data_len = exp_data_len; | 723 | cmd->tvc_exp_data_len = exp_data_len; |
721 | tv_cmd->tvc_data_direction = data_direction; | 724 | cmd->tvc_data_direction = data_direction; |
722 | tv_cmd->tvc_nexus = tv_nexus; | 725 | cmd->tvc_nexus = tv_nexus; |
723 | tv_cmd->inflight = tcm_vhost_get_inflight(vq); | 726 | cmd->inflight = tcm_vhost_get_inflight(vq); |
724 | 727 | ||
725 | return tv_cmd; | 728 | return cmd; |
726 | } | 729 | } |
727 | 730 | ||
728 | /* | 731 | /* |
@@ -730,8 +733,11 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( | |||
730 | * | 733 | * |
731 | * Returns the number of scatterlist entries used or -errno on error. | 734 | * Returns the number of scatterlist entries used or -errno on error. |
732 | */ | 735 | */ |
733 | static int vhost_scsi_map_to_sgl(struct scatterlist *sgl, | 736 | static int |
734 | unsigned int sgl_count, struct iovec *iov, int write) | 737 | vhost_scsi_map_to_sgl(struct scatterlist *sgl, |
738 | unsigned int sgl_count, | ||
739 | struct iovec *iov, | ||
740 | int write) | ||
735 | { | 741 | { |
736 | unsigned int npages = 0, pages_nr, offset, nbytes; | 742 | unsigned int npages = 0, pages_nr, offset, nbytes; |
737 | struct scatterlist *sg = sgl; | 743 | struct scatterlist *sg = sgl; |
@@ -775,8 +781,11 @@ out: | |||
775 | return ret; | 781 | return ret; |
776 | } | 782 | } |
777 | 783 | ||
778 | static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, | 784 | static int |
779 | struct iovec *iov, unsigned int niov, int write) | 785 | vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, |
786 | struct iovec *iov, | ||
787 | unsigned int niov, | ||
788 | int write) | ||
780 | { | 789 | { |
781 | int ret; | 790 | int ret; |
782 | unsigned int i; | 791 | unsigned int i; |
@@ -792,25 +801,25 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, | |||
792 | 801 | ||
793 | /* TODO overflow checking */ | 802 | /* TODO overflow checking */ |
794 | 803 | ||
795 | sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); | 804 | sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); |
796 | if (!sg) | 805 | if (!sg) |
797 | return -ENOMEM; | 806 | return -ENOMEM; |
798 | pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, | 807 | pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, |
799 | sg, sgl_count, !sg); | 808 | sg, sgl_count, !sg); |
800 | sg_init_table(sg, sgl_count); | 809 | sg_init_table(sg, sgl_count); |
801 | 810 | ||
802 | tv_cmd->tvc_sgl = sg; | 811 | cmd->tvc_sgl = sg; |
803 | tv_cmd->tvc_sgl_count = sgl_count; | 812 | cmd->tvc_sgl_count = sgl_count; |
804 | 813 | ||
805 | pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); | 814 | pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); |
806 | for (i = 0; i < niov; i++) { | 815 | for (i = 0; i < niov; i++) { |
807 | ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write); | 816 | ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write); |
808 | if (ret < 0) { | 817 | if (ret < 0) { |
809 | for (i = 0; i < tv_cmd->tvc_sgl_count; i++) | 818 | for (i = 0; i < cmd->tvc_sgl_count; i++) |
810 | put_page(sg_page(&tv_cmd->tvc_sgl[i])); | 819 | put_page(sg_page(&cmd->tvc_sgl[i])); |
811 | kfree(tv_cmd->tvc_sgl); | 820 | kfree(cmd->tvc_sgl); |
812 | tv_cmd->tvc_sgl = NULL; | 821 | cmd->tvc_sgl = NULL; |
813 | tv_cmd->tvc_sgl_count = 0; | 822 | cmd->tvc_sgl_count = 0; |
814 | return ret; | 823 | return ret; |
815 | } | 824 | } |
816 | 825 | ||
@@ -822,15 +831,15 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, | |||
822 | 831 | ||
823 | static void tcm_vhost_submission_work(struct work_struct *work) | 832 | static void tcm_vhost_submission_work(struct work_struct *work) |
824 | { | 833 | { |
825 | struct tcm_vhost_cmd *tv_cmd = | 834 | struct tcm_vhost_cmd *cmd = |
826 | container_of(work, struct tcm_vhost_cmd, work); | 835 | container_of(work, struct tcm_vhost_cmd, work); |
827 | struct tcm_vhost_nexus *tv_nexus; | 836 | struct tcm_vhost_nexus *tv_nexus; |
828 | struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; | 837 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; |
829 | struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; | 838 | struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; |
830 | int rc, sg_no_bidi = 0; | 839 | int rc, sg_no_bidi = 0; |
831 | 840 | ||
832 | if (tv_cmd->tvc_sgl_count) { | 841 | if (cmd->tvc_sgl_count) { |
833 | sg_ptr = tv_cmd->tvc_sgl; | 842 | sg_ptr = cmd->tvc_sgl; |
834 | /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ | 843 | /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ |
835 | #if 0 | 844 | #if 0 |
836 | if (se_cmd->se_cmd_flags & SCF_BIDI) { | 845 | if (se_cmd->se_cmd_flags & SCF_BIDI) { |
@@ -841,13 +850,13 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
841 | } else { | 850 | } else { |
842 | sg_ptr = NULL; | 851 | sg_ptr = NULL; |
843 | } | 852 | } |
844 | tv_nexus = tv_cmd->tvc_nexus; | 853 | tv_nexus = cmd->tvc_nexus; |
845 | 854 | ||
846 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, | 855 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, |
847 | tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0], | 856 | cmd->tvc_cdb, &cmd->tvc_sense_buf[0], |
848 | tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len, | 857 | cmd->tvc_lun, cmd->tvc_exp_data_len, |
849 | tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction, | 858 | cmd->tvc_task_attr, cmd->tvc_data_direction, |
850 | 0, sg_ptr, tv_cmd->tvc_sgl_count, | 859 | 0, sg_ptr, cmd->tvc_sgl_count, |
851 | sg_bidi_ptr, sg_no_bidi); | 860 | sg_bidi_ptr, sg_no_bidi); |
852 | if (rc < 0) { | 861 | if (rc < 0) { |
853 | transport_send_check_condition_and_sense(se_cmd, | 862 | transport_send_check_condition_and_sense(se_cmd, |
@@ -856,8 +865,10 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
856 | } | 865 | } |
857 | } | 866 | } |
858 | 867 | ||
859 | static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, | 868 | static void |
860 | struct vhost_virtqueue *vq, int head, unsigned out) | 869 | vhost_scsi_send_bad_target(struct vhost_scsi *vs, |
870 | struct vhost_virtqueue *vq, | ||
871 | int head, unsigned out) | ||
861 | { | 872 | { |
862 | struct virtio_scsi_cmd_resp __user *resp; | 873 | struct virtio_scsi_cmd_resp __user *resp; |
863 | struct virtio_scsi_cmd_resp rsp; | 874 | struct virtio_scsi_cmd_resp rsp; |
@@ -873,13 +884,13 @@ static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, | |||
873 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | 884 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); |
874 | } | 885 | } |
875 | 886 | ||
876 | static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | 887 | static void |
877 | struct vhost_virtqueue *vq) | 888 | vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) |
878 | { | 889 | { |
879 | struct tcm_vhost_tpg **vs_tpg; | 890 | struct tcm_vhost_tpg **vs_tpg; |
880 | struct virtio_scsi_cmd_req v_req; | 891 | struct virtio_scsi_cmd_req v_req; |
881 | struct tcm_vhost_tpg *tv_tpg; | 892 | struct tcm_vhost_tpg *tpg; |
882 | struct tcm_vhost_cmd *tv_cmd; | 893 | struct tcm_vhost_cmd *cmd; |
883 | u32 exp_data_len, data_first, data_num, data_direction; | 894 | u32 exp_data_len, data_first, data_num, data_direction; |
884 | unsigned out, in, i; | 895 | unsigned out, in, i; |
885 | int head, ret; | 896 | int head, ret; |
@@ -964,10 +975,10 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
964 | 975 | ||
965 | /* Extract the tpgt */ | 976 | /* Extract the tpgt */ |
966 | target = v_req.lun[1]; | 977 | target = v_req.lun[1]; |
967 | tv_tpg = ACCESS_ONCE(vs_tpg[target]); | 978 | tpg = ACCESS_ONCE(vs_tpg[target]); |
968 | 979 | ||
969 | /* Target does not exist, fail the request */ | 980 | /* Target does not exist, fail the request */ |
970 | if (unlikely(!tv_tpg)) { | 981 | if (unlikely(!tpg)) { |
971 | vhost_scsi_send_bad_target(vs, vq, head, out); | 982 | vhost_scsi_send_bad_target(vs, vq, head, out); |
972 | continue; | 983 | continue; |
973 | } | 984 | } |
@@ -976,46 +987,46 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
976 | for (i = 0; i < data_num; i++) | 987 | for (i = 0; i < data_num; i++) |
977 | exp_data_len += vq->iov[data_first + i].iov_len; | 988 | exp_data_len += vq->iov[data_first + i].iov_len; |
978 | 989 | ||
979 | tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req, | 990 | cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req, |
980 | exp_data_len, data_direction); | 991 | exp_data_len, data_direction); |
981 | if (IS_ERR(tv_cmd)) { | 992 | if (IS_ERR(cmd)) { |
982 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", | 993 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", |
983 | PTR_ERR(tv_cmd)); | 994 | PTR_ERR(cmd)); |
984 | goto err_cmd; | 995 | goto err_cmd; |
985 | } | 996 | } |
986 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" | 997 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" |
987 | ": %d\n", tv_cmd, exp_data_len, data_direction); | 998 | ": %d\n", cmd, exp_data_len, data_direction); |
988 | 999 | ||
989 | tv_cmd->tvc_vhost = vs; | 1000 | cmd->tvc_vhost = vs; |
990 | tv_cmd->tvc_vq = vq; | 1001 | cmd->tvc_vq = vq; |
991 | tv_cmd->tvc_resp = vq->iov[out].iov_base; | 1002 | cmd->tvc_resp = vq->iov[out].iov_base; |
992 | 1003 | ||
993 | /* | 1004 | /* |
994 | * Copy in the received CDB descriptor into tv_cmd->tvc_cdb | 1005 | * Copy in the received CDB descriptor into cmd->tvc_cdb |
995 | * that will be used by tcm_vhost_new_cmd_map() and down into | 1006 | * that will be used by tcm_vhost_new_cmd_map() and down into |
996 | * target_setup_cmd_from_cdb() | 1007 | * target_setup_cmd_from_cdb() |
997 | */ | 1008 | */ |
998 | memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); | 1009 | memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); |
999 | /* | 1010 | /* |
1000 | * Check that the received CDB size does not exceed our | 1011 | * Check that the received CDB size does not exceed our |
1001 | * hardcoded max for tcm_vhost | 1012 | * hardcoded max for tcm_vhost |
1002 | */ | 1013 | */ |
1003 | /* TODO what if cdb was too small for varlen cdb header? */ | 1014 | /* TODO what if cdb was too small for varlen cdb header? */ |
1004 | if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) > | 1015 | if (unlikely(scsi_command_size(cmd->tvc_cdb) > |
1005 | TCM_VHOST_MAX_CDB_SIZE)) { | 1016 | TCM_VHOST_MAX_CDB_SIZE)) { |
1006 | vq_err(vq, "Received SCSI CDB with command_size: %d that" | 1017 | vq_err(vq, "Received SCSI CDB with command_size: %d that" |
1007 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1018 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1008 | scsi_command_size(tv_cmd->tvc_cdb), | 1019 | scsi_command_size(cmd->tvc_cdb), |
1009 | TCM_VHOST_MAX_CDB_SIZE); | 1020 | TCM_VHOST_MAX_CDB_SIZE); |
1010 | goto err_free; | 1021 | goto err_free; |
1011 | } | 1022 | } |
1012 | tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | 1023 | cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; |
1013 | 1024 | ||
1014 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1025 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
1015 | tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun); | 1026 | cmd->tvc_cdb[0], cmd->tvc_lun); |
1016 | 1027 | ||
1017 | if (data_direction != DMA_NONE) { | 1028 | if (data_direction != DMA_NONE) { |
1018 | ret = vhost_scsi_map_iov_to_sgl(tv_cmd, | 1029 | ret = vhost_scsi_map_iov_to_sgl(cmd, |
1019 | &vq->iov[data_first], data_num, | 1030 | &vq->iov[data_first], data_num, |
1020 | data_direction == DMA_TO_DEVICE); | 1031 | data_direction == DMA_TO_DEVICE); |
1021 | if (unlikely(ret)) { | 1032 | if (unlikely(ret)) { |
@@ -1029,22 +1040,22 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
1029 | * complete the virtio-scsi request in TCM callback context via | 1040 | * complete the virtio-scsi request in TCM callback context via |
1030 | * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() | 1041 | * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() |
1031 | */ | 1042 | */ |
1032 | tv_cmd->tvc_vq_desc = head; | 1043 | cmd->tvc_vq_desc = head; |
1033 | /* | 1044 | /* |
1034 | * Dispatch tv_cmd descriptor for cmwq execution in process | 1045 | * Dispatch tv_cmd descriptor for cmwq execution in process |
1035 | * context provided by tcm_vhost_workqueue. This also ensures | 1046 | * context provided by tcm_vhost_workqueue. This also ensures |
1036 | * tv_cmd is executed on the same kworker CPU as this vhost | 1047 | * tv_cmd is executed on the same kworker CPU as this vhost |
1037 | * thread to gain positive L2 cache locality effects.. | 1048 | * thread to gain positive L2 cache locality effects.. |
1038 | */ | 1049 | */ |
1039 | INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work); | 1050 | INIT_WORK(&cmd->work, tcm_vhost_submission_work); |
1040 | queue_work(tcm_vhost_workqueue, &tv_cmd->work); | 1051 | queue_work(tcm_vhost_workqueue, &cmd->work); |
1041 | } | 1052 | } |
1042 | 1053 | ||
1043 | mutex_unlock(&vq->mutex); | 1054 | mutex_unlock(&vq->mutex); |
1044 | return; | 1055 | return; |
1045 | 1056 | ||
1046 | err_free: | 1057 | err_free: |
1047 | vhost_scsi_free_cmd(tv_cmd); | 1058 | vhost_scsi_free_cmd(cmd); |
1048 | err_cmd: | 1059 | err_cmd: |
1049 | vhost_scsi_send_bad_target(vs, vq, head, out); | 1060 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1050 | mutex_unlock(&vq->mutex); | 1061 | mutex_unlock(&vq->mutex); |
@@ -1055,8 +1066,12 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | |||
1055 | pr_debug("%s: The handling func for control queue.\n", __func__); | 1066 | pr_debug("%s: The handling func for control queue.\n", __func__); |
1056 | } | 1067 | } |
1057 | 1068 | ||
1058 | static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg, | 1069 | static void |
1059 | struct se_lun *lun, u32 event, u32 reason) | 1070 | tcm_vhost_send_evt(struct vhost_scsi *vs, |
1071 | struct tcm_vhost_tpg *tpg, | ||
1072 | struct se_lun *lun, | ||
1073 | u32 event, | ||
1074 | u32 reason) | ||
1060 | { | 1075 | { |
1061 | struct tcm_vhost_evt *evt; | 1076 | struct tcm_vhost_evt *evt; |
1062 | 1077 | ||
@@ -1146,12 +1161,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
1146 | * The lock nesting rule is: | 1161 | * The lock nesting rule is: |
1147 | * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex | 1162 | * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex |
1148 | */ | 1163 | */ |
1149 | static int vhost_scsi_set_endpoint( | 1164 | static int |
1150 | struct vhost_scsi *vs, | 1165 | vhost_scsi_set_endpoint(struct vhost_scsi *vs, |
1151 | struct vhost_scsi_target *t) | 1166 | struct vhost_scsi_target *t) |
1152 | { | 1167 | { |
1153 | struct tcm_vhost_tport *tv_tport; | 1168 | struct tcm_vhost_tport *tv_tport; |
1154 | struct tcm_vhost_tpg *tv_tpg; | 1169 | struct tcm_vhost_tpg *tpg; |
1155 | struct tcm_vhost_tpg **vs_tpg; | 1170 | struct tcm_vhost_tpg **vs_tpg; |
1156 | struct vhost_virtqueue *vq; | 1171 | struct vhost_virtqueue *vq; |
1157 | int index, ret, i, len; | 1172 | int index, ret, i, len; |
@@ -1178,32 +1193,32 @@ static int vhost_scsi_set_endpoint( | |||
1178 | if (vs->vs_tpg) | 1193 | if (vs->vs_tpg) |
1179 | memcpy(vs_tpg, vs->vs_tpg, len); | 1194 | memcpy(vs_tpg, vs->vs_tpg, len); |
1180 | 1195 | ||
1181 | list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { | 1196 | list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { |
1182 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1197 | mutex_lock(&tpg->tv_tpg_mutex); |
1183 | if (!tv_tpg->tpg_nexus) { | 1198 | if (!tpg->tpg_nexus) { |
1184 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1199 | mutex_unlock(&tpg->tv_tpg_mutex); |
1185 | continue; | 1200 | continue; |
1186 | } | 1201 | } |
1187 | if (tv_tpg->tv_tpg_vhost_count != 0) { | 1202 | if (tpg->tv_tpg_vhost_count != 0) { |
1188 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1203 | mutex_unlock(&tpg->tv_tpg_mutex); |
1189 | continue; | 1204 | continue; |
1190 | } | 1205 | } |
1191 | tv_tport = tv_tpg->tport; | 1206 | tv_tport = tpg->tport; |
1192 | 1207 | ||
1193 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { | 1208 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { |
1194 | if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { | 1209 | if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { |
1195 | kfree(vs_tpg); | 1210 | kfree(vs_tpg); |
1196 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1211 | mutex_unlock(&tpg->tv_tpg_mutex); |
1197 | ret = -EEXIST; | 1212 | ret = -EEXIST; |
1198 | goto out; | 1213 | goto out; |
1199 | } | 1214 | } |
1200 | tv_tpg->tv_tpg_vhost_count++; | 1215 | tpg->tv_tpg_vhost_count++; |
1201 | tv_tpg->vhost_scsi = vs; | 1216 | tpg->vhost_scsi = vs; |
1202 | vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; | 1217 | vs_tpg[tpg->tport_tpgt] = tpg; |
1203 | smp_mb__after_atomic_inc(); | 1218 | smp_mb__after_atomic_inc(); |
1204 | match = true; | 1219 | match = true; |
1205 | } | 1220 | } |
1206 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1221 | mutex_unlock(&tpg->tv_tpg_mutex); |
1207 | } | 1222 | } |
1208 | 1223 | ||
1209 | if (match) { | 1224 | if (match) { |
@@ -1236,12 +1251,12 @@ out: | |||
1236 | return ret; | 1251 | return ret; |
1237 | } | 1252 | } |
1238 | 1253 | ||
1239 | static int vhost_scsi_clear_endpoint( | 1254 | static int |
1240 | struct vhost_scsi *vs, | 1255 | vhost_scsi_clear_endpoint(struct vhost_scsi *vs, |
1241 | struct vhost_scsi_target *t) | 1256 | struct vhost_scsi_target *t) |
1242 | { | 1257 | { |
1243 | struct tcm_vhost_tport *tv_tport; | 1258 | struct tcm_vhost_tport *tv_tport; |
1244 | struct tcm_vhost_tpg *tv_tpg; | 1259 | struct tcm_vhost_tpg *tpg; |
1245 | struct vhost_virtqueue *vq; | 1260 | struct vhost_virtqueue *vq; |
1246 | bool match = false; | 1261 | bool match = false; |
1247 | int index, ret, i; | 1262 | int index, ret, i; |
@@ -1264,30 +1279,30 @@ static int vhost_scsi_clear_endpoint( | |||
1264 | 1279 | ||
1265 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { | 1280 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { |
1266 | target = i; | 1281 | target = i; |
1267 | tv_tpg = vs->vs_tpg[target]; | 1282 | tpg = vs->vs_tpg[target]; |
1268 | if (!tv_tpg) | 1283 | if (!tpg) |
1269 | continue; | 1284 | continue; |
1270 | 1285 | ||
1271 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1286 | mutex_lock(&tpg->tv_tpg_mutex); |
1272 | tv_tport = tv_tpg->tport; | 1287 | tv_tport = tpg->tport; |
1273 | if (!tv_tport) { | 1288 | if (!tv_tport) { |
1274 | ret = -ENODEV; | 1289 | ret = -ENODEV; |
1275 | goto err_tpg; | 1290 | goto err_tpg; |
1276 | } | 1291 | } |
1277 | 1292 | ||
1278 | if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { | 1293 | if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { |
1279 | pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" | 1294 | pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu" |
1280 | " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", | 1295 | " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", |
1281 | tv_tport->tport_name, tv_tpg->tport_tpgt, | 1296 | tv_tport->tport_name, tpg->tport_tpgt, |
1282 | t->vhost_wwpn, t->vhost_tpgt); | 1297 | t->vhost_wwpn, t->vhost_tpgt); |
1283 | ret = -EINVAL; | 1298 | ret = -EINVAL; |
1284 | goto err_tpg; | 1299 | goto err_tpg; |
1285 | } | 1300 | } |
1286 | tv_tpg->tv_tpg_vhost_count--; | 1301 | tpg->tv_tpg_vhost_count--; |
1287 | tv_tpg->vhost_scsi = NULL; | 1302 | tpg->vhost_scsi = NULL; |
1288 | vs->vs_tpg[target] = NULL; | 1303 | vs->vs_tpg[target] = NULL; |
1289 | match = true; | 1304 | match = true; |
1290 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1305 | mutex_unlock(&tpg->tv_tpg_mutex); |
1291 | } | 1306 | } |
1292 | if (match) { | 1307 | if (match) { |
1293 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | 1308 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
@@ -1311,7 +1326,7 @@ static int vhost_scsi_clear_endpoint( | |||
1311 | return 0; | 1326 | return 0; |
1312 | 1327 | ||
1313 | err_tpg: | 1328 | err_tpg: |
1314 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1329 | mutex_unlock(&tpg->tv_tpg_mutex); |
1315 | err_dev: | 1330 | err_dev: |
1316 | mutex_unlock(&vs->dev.mutex); | 1331 | mutex_unlock(&vs->dev.mutex); |
1317 | mutex_unlock(&tcm_vhost_mutex); | 1332 | mutex_unlock(&tcm_vhost_mutex); |
@@ -1338,68 +1353,70 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | |||
1338 | 1353 | ||
1339 | static int vhost_scsi_open(struct inode *inode, struct file *f) | 1354 | static int vhost_scsi_open(struct inode *inode, struct file *f) |
1340 | { | 1355 | { |
1341 | struct vhost_scsi *s; | 1356 | struct vhost_scsi *vs; |
1342 | struct vhost_virtqueue **vqs; | 1357 | struct vhost_virtqueue **vqs; |
1343 | int r, i; | 1358 | int r, i; |
1344 | 1359 | ||
1345 | s = kzalloc(sizeof(*s), GFP_KERNEL); | 1360 | vs = kzalloc(sizeof(*vs), GFP_KERNEL); |
1346 | if (!s) | 1361 | if (!vs) |
1347 | return -ENOMEM; | 1362 | return -ENOMEM; |
1348 | 1363 | ||
1349 | vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); | 1364 | vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); |
1350 | if (!vqs) { | 1365 | if (!vqs) { |
1351 | kfree(s); | 1366 | kfree(vs); |
1352 | return -ENOMEM; | 1367 | return -ENOMEM; |
1353 | } | 1368 | } |
1354 | 1369 | ||
1355 | vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); | 1370 | vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); |
1356 | vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work); | 1371 | vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); |
1357 | 1372 | ||
1358 | s->vs_events_nr = 0; | 1373 | vs->vs_events_nr = 0; |
1359 | s->vs_events_missed = false; | 1374 | vs->vs_events_missed = false; |
1360 | 1375 | ||
1361 | vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq; | 1376 | vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; |
1362 | vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq; | 1377 | vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
1363 | s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; | 1378 | vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; |
1364 | s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; | 1379 | vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; |
1365 | for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { | 1380 | for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { |
1366 | vqs[i] = &s->vqs[i].vq; | 1381 | vqs[i] = &vs->vqs[i].vq; |
1367 | s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; | 1382 | vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; |
1368 | } | 1383 | } |
1369 | r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ); | 1384 | r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); |
1370 | 1385 | ||
1371 | tcm_vhost_init_inflight(s, NULL); | 1386 | tcm_vhost_init_inflight(vs, NULL); |
1372 | 1387 | ||
1373 | if (r < 0) { | 1388 | if (r < 0) { |
1374 | kfree(vqs); | 1389 | kfree(vqs); |
1375 | kfree(s); | 1390 | kfree(vs); |
1376 | return r; | 1391 | return r; |
1377 | } | 1392 | } |
1378 | 1393 | ||
1379 | f->private_data = s; | 1394 | f->private_data = vs; |
1380 | return 0; | 1395 | return 0; |
1381 | } | 1396 | } |
1382 | 1397 | ||
1383 | static int vhost_scsi_release(struct inode *inode, struct file *f) | 1398 | static int vhost_scsi_release(struct inode *inode, struct file *f) |
1384 | { | 1399 | { |
1385 | struct vhost_scsi *s = f->private_data; | 1400 | struct vhost_scsi *vs = f->private_data; |
1386 | struct vhost_scsi_target t; | 1401 | struct vhost_scsi_target t; |
1387 | 1402 | ||
1388 | mutex_lock(&s->dev.mutex); | 1403 | mutex_lock(&vs->dev.mutex); |
1389 | memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); | 1404 | memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); |
1390 | mutex_unlock(&s->dev.mutex); | 1405 | mutex_unlock(&vs->dev.mutex); |
1391 | vhost_scsi_clear_endpoint(s, &t); | 1406 | vhost_scsi_clear_endpoint(vs, &t); |
1392 | vhost_dev_stop(&s->dev); | 1407 | vhost_dev_stop(&vs->dev); |
1393 | vhost_dev_cleanup(&s->dev, false); | 1408 | vhost_dev_cleanup(&vs->dev, false); |
1394 | /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ | 1409 | /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ |
1395 | vhost_scsi_flush(s); | 1410 | vhost_scsi_flush(vs); |
1396 | kfree(s->dev.vqs); | 1411 | kfree(vs->dev.vqs); |
1397 | kfree(s); | 1412 | kfree(vs); |
1398 | return 0; | 1413 | return 0; |
1399 | } | 1414 | } |
1400 | 1415 | ||
1401 | static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, | 1416 | static long |
1402 | unsigned long arg) | 1417 | vhost_scsi_ioctl(struct file *f, |
1418 | unsigned int ioctl, | ||
1419 | unsigned long arg) | ||
1403 | { | 1420 | { |
1404 | struct vhost_scsi *vs = f->private_data; | 1421 | struct vhost_scsi *vs = f->private_data; |
1405 | struct vhost_scsi_target backend; | 1422 | struct vhost_scsi_target backend; |
@@ -1515,8 +1532,9 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) | |||
1515 | return "Unknown"; | 1532 | return "Unknown"; |
1516 | } | 1533 | } |
1517 | 1534 | ||
1518 | static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, | 1535 | static void |
1519 | struct se_lun *lun, bool plug) | 1536 | tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, |
1537 | struct se_lun *lun, bool plug) | ||
1520 | { | 1538 | { |
1521 | 1539 | ||
1522 | struct vhost_scsi *vs = tpg->vhost_scsi; | 1540 | struct vhost_scsi *vs = tpg->vhost_scsi; |
@@ -1556,18 +1574,18 @@ static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) | |||
1556 | } | 1574 | } |
1557 | 1575 | ||
1558 | static int tcm_vhost_port_link(struct se_portal_group *se_tpg, | 1576 | static int tcm_vhost_port_link(struct se_portal_group *se_tpg, |
1559 | struct se_lun *lun) | 1577 | struct se_lun *lun) |
1560 | { | 1578 | { |
1561 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1579 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
1562 | struct tcm_vhost_tpg, se_tpg); | 1580 | struct tcm_vhost_tpg, se_tpg); |
1563 | 1581 | ||
1564 | mutex_lock(&tcm_vhost_mutex); | 1582 | mutex_lock(&tcm_vhost_mutex); |
1565 | 1583 | ||
1566 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1584 | mutex_lock(&tpg->tv_tpg_mutex); |
1567 | tv_tpg->tv_tpg_port_count++; | 1585 | tpg->tv_tpg_port_count++; |
1568 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1586 | mutex_unlock(&tpg->tv_tpg_mutex); |
1569 | 1587 | ||
1570 | tcm_vhost_hotplug(tv_tpg, lun); | 1588 | tcm_vhost_hotplug(tpg, lun); |
1571 | 1589 | ||
1572 | mutex_unlock(&tcm_vhost_mutex); | 1590 | mutex_unlock(&tcm_vhost_mutex); |
1573 | 1591 | ||
@@ -1575,26 +1593,26 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg, | |||
1575 | } | 1593 | } |
1576 | 1594 | ||
1577 | static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, | 1595 | static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, |
1578 | struct se_lun *lun) | 1596 | struct se_lun *lun) |
1579 | { | 1597 | { |
1580 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1598 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
1581 | struct tcm_vhost_tpg, se_tpg); | 1599 | struct tcm_vhost_tpg, se_tpg); |
1582 | 1600 | ||
1583 | mutex_lock(&tcm_vhost_mutex); | 1601 | mutex_lock(&tcm_vhost_mutex); |
1584 | 1602 | ||
1585 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1603 | mutex_lock(&tpg->tv_tpg_mutex); |
1586 | tv_tpg->tv_tpg_port_count--; | 1604 | tpg->tv_tpg_port_count--; |
1587 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1605 | mutex_unlock(&tpg->tv_tpg_mutex); |
1588 | 1606 | ||
1589 | tcm_vhost_hotunplug(tv_tpg, lun); | 1607 | tcm_vhost_hotunplug(tpg, lun); |
1590 | 1608 | ||
1591 | mutex_unlock(&tcm_vhost_mutex); | 1609 | mutex_unlock(&tcm_vhost_mutex); |
1592 | } | 1610 | } |
1593 | 1611 | ||
1594 | static struct se_node_acl *tcm_vhost_make_nodeacl( | 1612 | static struct se_node_acl * |
1595 | struct se_portal_group *se_tpg, | 1613 | tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, |
1596 | struct config_group *group, | 1614 | struct config_group *group, |
1597 | const char *name) | 1615 | const char *name) |
1598 | { | 1616 | { |
1599 | struct se_node_acl *se_nacl, *se_nacl_new; | 1617 | struct se_node_acl *se_nacl, *se_nacl_new; |
1600 | struct tcm_vhost_nacl *nacl; | 1618 | struct tcm_vhost_nacl *nacl; |
@@ -1635,23 +1653,23 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) | |||
1635 | kfree(nacl); | 1653 | kfree(nacl); |
1636 | } | 1654 | } |
1637 | 1655 | ||
1638 | static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, | 1656 | static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, |
1639 | const char *name) | 1657 | const char *name) |
1640 | { | 1658 | { |
1641 | struct se_portal_group *se_tpg; | 1659 | struct se_portal_group *se_tpg; |
1642 | struct tcm_vhost_nexus *tv_nexus; | 1660 | struct tcm_vhost_nexus *tv_nexus; |
1643 | 1661 | ||
1644 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1662 | mutex_lock(&tpg->tv_tpg_mutex); |
1645 | if (tv_tpg->tpg_nexus) { | 1663 | if (tpg->tpg_nexus) { |
1646 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1664 | mutex_unlock(&tpg->tv_tpg_mutex); |
1647 | pr_debug("tv_tpg->tpg_nexus already exists\n"); | 1665 | pr_debug("tpg->tpg_nexus already exists\n"); |
1648 | return -EEXIST; | 1666 | return -EEXIST; |
1649 | } | 1667 | } |
1650 | se_tpg = &tv_tpg->se_tpg; | 1668 | se_tpg = &tpg->se_tpg; |
1651 | 1669 | ||
1652 | tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); | 1670 | tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); |
1653 | if (!tv_nexus) { | 1671 | if (!tv_nexus) { |
1654 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1672 | mutex_unlock(&tpg->tv_tpg_mutex); |
1655 | pr_err("Unable to allocate struct tcm_vhost_nexus\n"); | 1673 | pr_err("Unable to allocate struct tcm_vhost_nexus\n"); |
1656 | return -ENOMEM; | 1674 | return -ENOMEM; |
1657 | } | 1675 | } |
@@ -1660,7 +1678,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, | |||
1660 | */ | 1678 | */ |
1661 | tv_nexus->tvn_se_sess = transport_init_session(); | 1679 | tv_nexus->tvn_se_sess = transport_init_session(); |
1662 | if (IS_ERR(tv_nexus->tvn_se_sess)) { | 1680 | if (IS_ERR(tv_nexus->tvn_se_sess)) { |
1663 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1681 | mutex_unlock(&tpg->tv_tpg_mutex); |
1664 | kfree(tv_nexus); | 1682 | kfree(tv_nexus); |
1665 | return -ENOMEM; | 1683 | return -ENOMEM; |
1666 | } | 1684 | } |
@@ -1672,7 +1690,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, | |||
1672 | tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( | 1690 | tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( |
1673 | se_tpg, (unsigned char *)name); | 1691 | se_tpg, (unsigned char *)name); |
1674 | if (!tv_nexus->tvn_se_sess->se_node_acl) { | 1692 | if (!tv_nexus->tvn_se_sess->se_node_acl) { |
1675 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1693 | mutex_unlock(&tpg->tv_tpg_mutex); |
1676 | pr_debug("core_tpg_check_initiator_node_acl() failed" | 1694 | pr_debug("core_tpg_check_initiator_node_acl() failed" |
1677 | " for %s\n", name); | 1695 | " for %s\n", name); |
1678 | transport_free_session(tv_nexus->tvn_se_sess); | 1696 | transport_free_session(tv_nexus->tvn_se_sess); |
@@ -1685,9 +1703,9 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, | |||
1685 | */ | 1703 | */ |
1686 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1704 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1687 | tv_nexus->tvn_se_sess, tv_nexus); | 1705 | tv_nexus->tvn_se_sess, tv_nexus); |
1688 | tv_tpg->tpg_nexus = tv_nexus; | 1706 | tpg->tpg_nexus = tv_nexus; |
1689 | 1707 | ||
1690 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1708 | mutex_unlock(&tpg->tv_tpg_mutex); |
1691 | return 0; | 1709 | return 0; |
1692 | } | 1710 | } |
1693 | 1711 | ||
@@ -1740,40 +1758,40 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) | |||
1740 | } | 1758 | } |
1741 | 1759 | ||
1742 | static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, | 1760 | static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, |
1743 | char *page) | 1761 | char *page) |
1744 | { | 1762 | { |
1745 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1763 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
1746 | struct tcm_vhost_tpg, se_tpg); | 1764 | struct tcm_vhost_tpg, se_tpg); |
1747 | struct tcm_vhost_nexus *tv_nexus; | 1765 | struct tcm_vhost_nexus *tv_nexus; |
1748 | ssize_t ret; | 1766 | ssize_t ret; |
1749 | 1767 | ||
1750 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1768 | mutex_lock(&tpg->tv_tpg_mutex); |
1751 | tv_nexus = tv_tpg->tpg_nexus; | 1769 | tv_nexus = tpg->tpg_nexus; |
1752 | if (!tv_nexus) { | 1770 | if (!tv_nexus) { |
1753 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1771 | mutex_unlock(&tpg->tv_tpg_mutex); |
1754 | return -ENODEV; | 1772 | return -ENODEV; |
1755 | } | 1773 | } |
1756 | ret = snprintf(page, PAGE_SIZE, "%s\n", | 1774 | ret = snprintf(page, PAGE_SIZE, "%s\n", |
1757 | tv_nexus->tvn_se_sess->se_node_acl->initiatorname); | 1775 | tv_nexus->tvn_se_sess->se_node_acl->initiatorname); |
1758 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1776 | mutex_unlock(&tpg->tv_tpg_mutex); |
1759 | 1777 | ||
1760 | return ret; | 1778 | return ret; |
1761 | } | 1779 | } |
1762 | 1780 | ||
1763 | static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, | 1781 | static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, |
1764 | const char *page, | 1782 | const char *page, |
1765 | size_t count) | 1783 | size_t count) |
1766 | { | 1784 | { |
1767 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1785 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, |
1768 | struct tcm_vhost_tpg, se_tpg); | 1786 | struct tcm_vhost_tpg, se_tpg); |
1769 | struct tcm_vhost_tport *tport_wwn = tv_tpg->tport; | 1787 | struct tcm_vhost_tport *tport_wwn = tpg->tport; |
1770 | unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; | 1788 | unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; |
1771 | int ret; | 1789 | int ret; |
1772 | /* | 1790 | /* |
1773 | * Shutdown the active I_T nexus if 'NULL' is passed.. | 1791 | * Shutdown the active I_T nexus if 'NULL' is passed.. |
1774 | */ | 1792 | */ |
1775 | if (!strncmp(page, "NULL", 4)) { | 1793 | if (!strncmp(page, "NULL", 4)) { |
1776 | ret = tcm_vhost_drop_nexus(tv_tpg); | 1794 | ret = tcm_vhost_drop_nexus(tpg); |
1777 | return (!ret) ? count : ret; | 1795 | return (!ret) ? count : ret; |
1778 | } | 1796 | } |
1779 | /* | 1797 | /* |
@@ -1831,7 +1849,7 @@ check_newline: | |||
1831 | if (i_port[strlen(i_port)-1] == '\n') | 1849 | if (i_port[strlen(i_port)-1] == '\n') |
1832 | i_port[strlen(i_port)-1] = '\0'; | 1850 | i_port[strlen(i_port)-1] = '\0'; |
1833 | 1851 | ||
1834 | ret = tcm_vhost_make_nexus(tv_tpg, port_ptr); | 1852 | ret = tcm_vhost_make_nexus(tpg, port_ptr); |
1835 | if (ret < 0) | 1853 | if (ret < 0) |
1836 | return ret; | 1854 | return ret; |
1837 | 1855 | ||
@@ -1845,9 +1863,10 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { | |||
1845 | NULL, | 1863 | NULL, |
1846 | }; | 1864 | }; |
1847 | 1865 | ||
1848 | static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn, | 1866 | static struct se_portal_group * |
1849 | struct config_group *group, | 1867 | tcm_vhost_make_tpg(struct se_wwn *wwn, |
1850 | const char *name) | 1868 | struct config_group *group, |
1869 | const char *name) | ||
1851 | { | 1870 | { |
1852 | struct tcm_vhost_tport *tport = container_of(wwn, | 1871 | struct tcm_vhost_tport *tport = container_of(wwn, |
1853 | struct tcm_vhost_tport, tport_wwn); | 1872 | struct tcm_vhost_tport, tport_wwn); |
@@ -1903,9 +1922,10 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) | |||
1903 | kfree(tpg); | 1922 | kfree(tpg); |
1904 | } | 1923 | } |
1905 | 1924 | ||
1906 | static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf, | 1925 | static struct se_wwn * |
1907 | struct config_group *group, | 1926 | tcm_vhost_make_tport(struct target_fabric_configfs *tf, |
1908 | const char *name) | 1927 | struct config_group *group, |
1928 | const char *name) | ||
1909 | { | 1929 | { |
1910 | struct tcm_vhost_tport *tport; | 1930 | struct tcm_vhost_tport *tport; |
1911 | char *ptr; | 1931 | char *ptr; |
@@ -1975,9 +1995,9 @@ static void tcm_vhost_drop_tport(struct se_wwn *wwn) | |||
1975 | kfree(tport); | 1995 | kfree(tport); |
1976 | } | 1996 | } |
1977 | 1997 | ||
1978 | static ssize_t tcm_vhost_wwn_show_attr_version( | 1998 | static ssize_t |
1979 | struct target_fabric_configfs *tf, | 1999 | tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, |
1980 | char *page) | 2000 | char *page) |
1981 | { | 2001 | { |
1982 | return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" | 2002 | return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" |
1983 | "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, | 2003 | "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, |
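
The submission path touched in the vhost/scsi.c hunks above relies on the standard concurrency-managed workqueue (cmwq) pattern that the in-code comment describes: the vq kick handler fills in the work item embedded in each command and queues it to tcm_vhost_workqueue, and the heavyweight SCSI submission then runs later in process context on a kworker on the same CPU. A minimal sketch of that dispatch pattern, using illustrative demo_* names rather than the driver's own types:

    #include <linux/workqueue.h>

    struct demo_cmd {
            struct work_struct work;        /* embedded work item */
            /* ... per-command state ... */
    };

    static void demo_submission_work(struct work_struct *work)
    {
            struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

            /* Runs in process context on a kworker thread; may sleep. */
            /* ... hand cmd to the target core / backend here ... */
    }

    /* Called from the virtqueue kick handler for each parsed request. */
    static void demo_dispatch(struct workqueue_struct *wq, struct demo_cmd *cmd)
    {
            INIT_WORK(&cmd->work, demo_submission_work);
            queue_work(wq, &cmd->work);     /* normally executes on the queueing CPU */
    }
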
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 1ee45bc85f67..a73ea217f24d 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | 19 | ||
20 | #include "test.h" | 20 | #include "test.h" |
21 | #include "vhost.c" | 21 | #include "vhost.h" |
22 | 22 | ||
23 | /* Max number of bytes transferred before requeueing the job. | 23 | /* Max number of bytes transferred before requeueing the job. |
24 | * Using this limit prevents one virtqueue from starving others. */ | 24 | * Using this limit prevents one virtqueue from starving others. */ |
@@ -38,17 +38,19 @@ struct vhost_test { | |||
38 | * read-size critical section for our kind of RCU. */ | 38 | * read-size critical section for our kind of RCU. */ |
39 | static void handle_vq(struct vhost_test *n) | 39 | static void handle_vq(struct vhost_test *n) |
40 | { | 40 | { |
41 | struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ]; | 41 | struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; |
42 | unsigned out, in; | 42 | unsigned out, in; |
43 | int head; | 43 | int head; |
44 | size_t len, total_len = 0; | 44 | size_t len, total_len = 0; |
45 | void *private; | 45 | void *private; |
46 | 46 | ||
47 | private = rcu_dereference_check(vq->private_data, 1); | 47 | mutex_lock(&vq->mutex); |
48 | if (!private) | 48 | private = vq->private_data; |
49 | if (!private) { | ||
50 | mutex_unlock(&vq->mutex); | ||
49 | return; | 51 | return; |
52 | } | ||
50 | 53 | ||
51 | mutex_lock(&vq->mutex); | ||
52 | vhost_disable_notify(&n->dev, vq); | 54 | vhost_disable_notify(&n->dev, vq); |
53 | 55 | ||
54 | for (;;) { | 56 | for (;;) { |
@@ -102,15 +104,23 @@ static int vhost_test_open(struct inode *inode, struct file *f) | |||
102 | { | 104 | { |
103 | struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL); | 105 | struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL); |
104 | struct vhost_dev *dev; | 106 | struct vhost_dev *dev; |
107 | struct vhost_virtqueue **vqs; | ||
105 | int r; | 108 | int r; |
106 | 109 | ||
107 | if (!n) | 110 | if (!n) |
108 | return -ENOMEM; | 111 | return -ENOMEM; |
112 | vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL); | ||
113 | if (!vqs) { | ||
114 | kfree(n); | ||
115 | return -ENOMEM; | ||
116 | } | ||
109 | 117 | ||
110 | dev = &n->dev; | 118 | dev = &n->dev; |
119 | vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; | ||
111 | n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; | 120 | n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; |
112 | r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX); | 121 | r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); |
113 | if (r < 0) { | 122 | if (r < 0) { |
123 | kfree(vqs); | ||
114 | kfree(n); | 124 | kfree(n); |
115 | return r; | 125 | return r; |
116 | } | 126 | } |
@@ -126,9 +136,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n, | |||
126 | void *private; | 136 | void *private; |
127 | 137 | ||
128 | mutex_lock(&vq->mutex); | 138 | mutex_lock(&vq->mutex); |
129 | private = rcu_dereference_protected(vq->private_data, | 139 | private = vq->private_data; |
130 | lockdep_is_held(&vq->mutex)); | 140 | vq->private_data = NULL; |
131 | rcu_assign_pointer(vq->private_data, NULL); | ||
132 | mutex_unlock(&vq->mutex); | 141 | mutex_unlock(&vq->mutex); |
133 | return private; | 142 | return private; |
134 | } | 143 | } |
@@ -140,7 +149,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep) | |||
140 | 149 | ||
141 | static void vhost_test_flush_vq(struct vhost_test *n, int index) | 150 | static void vhost_test_flush_vq(struct vhost_test *n, int index) |
142 | { | 151 | { |
143 | vhost_poll_flush(&n->dev.vqs[index].poll); | 152 | vhost_poll_flush(&n->vqs[index].poll); |
144 | } | 153 | } |
145 | 154 | ||
146 | static void vhost_test_flush(struct vhost_test *n) | 155 | static void vhost_test_flush(struct vhost_test *n) |
@@ -268,14 +277,14 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl, | |||
268 | return -EFAULT; | 277 | return -EFAULT; |
269 | return vhost_test_run(n, test); | 278 | return vhost_test_run(n, test); |
270 | case VHOST_GET_FEATURES: | 279 | case VHOST_GET_FEATURES: |
271 | features = VHOST_NET_FEATURES; | 280 | features = VHOST_FEATURES; |
272 | if (copy_to_user(featurep, &features, sizeof features)) | 281 | if (copy_to_user(featurep, &features, sizeof features)) |
273 | return -EFAULT; | 282 | return -EFAULT; |
274 | return 0; | 283 | return 0; |
275 | case VHOST_SET_FEATURES: | 284 | case VHOST_SET_FEATURES: |
276 | if (copy_from_user(&features, featurep, sizeof features)) | 285 | if (copy_from_user(&features, featurep, sizeof features)) |
277 | return -EFAULT; | 286 | return -EFAULT; |
278 | if (features & ~VHOST_NET_FEATURES) | 287 | if (features & ~VHOST_FEATURES) |
279 | return -EOPNOTSUPP; | 288 | return -EOPNOTSUPP; |
280 | return vhost_test_set_features(n, features); | 289 | return vhost_test_set_features(n, features); |
281 | case VHOST_RESET_OWNER: | 290 | case VHOST_RESET_OWNER: |
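
The test-device changes above follow the convention being rolled out across vhost in this series: vq->private_data is only touched while vq->mutex is held, so the rcu_dereference*()/rcu_assign_pointer() accessors become plain loads and stores. A simplified sketch of the resulting access pattern (not the exact driver code):

    /* Reading the backend pointer in a handler: take the vq mutex first. */
    static void demo_handle_vq(struct vhost_virtqueue *vq)
    {
            void *private;

            mutex_lock(&vq->mutex);
            private = vq->private_data;     /* protected by vq->mutex, no RCU */
            if (!private) {
                    mutex_unlock(&vq->mutex);
                    return;
            }
            /* ... process the ring ... */
            mutex_unlock(&vq->mutex);
    }

    /* Detaching the backend: plain store under the same mutex. */
    static void *demo_stop_vq(struct vhost_virtqueue *vq)
    {
            void *private;

            mutex_lock(&vq->mutex);
            private = vq->private_data;
            vq->private_data = NULL;
            mutex_unlock(&vq->mutex);
            return private;
    }
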
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 60aa5ad09a2f..e58cf0001cee 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/kthread.h> | 26 | #include <linux/kthread.h> |
27 | #include <linux/cgroup.h> | 27 | #include <linux/cgroup.h> |
28 | #include <linux/module.h> | ||
28 | 29 | ||
29 | #include "vhost.h" | 30 | #include "vhost.h" |
30 | 31 | ||
@@ -66,6 +67,7 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) | |||
66 | work->flushing = 0; | 67 | work->flushing = 0; |
67 | work->queue_seq = work->done_seq = 0; | 68 | work->queue_seq = work->done_seq = 0; |
68 | } | 69 | } |
70 | EXPORT_SYMBOL_GPL(vhost_work_init); | ||
69 | 71 | ||
70 | /* Init poll structure */ | 72 | /* Init poll structure */ |
71 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | 73 | void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, |
@@ -79,6 +81,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | |||
79 | 81 | ||
80 | vhost_work_init(&poll->work, fn); | 82 | vhost_work_init(&poll->work, fn); |
81 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(vhost_poll_init); | ||
82 | 85 | ||
83 | /* Start polling a file. We add ourselves to file's wait queue. The caller must | 86 | /* Start polling a file. We add ourselves to file's wait queue. The caller must |
84 | * keep a reference to a file until after vhost_poll_stop is called. */ | 87 | * keep a reference to a file until after vhost_poll_stop is called. */ |
@@ -101,6 +104,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) | |||
101 | 104 | ||
102 | return ret; | 105 | return ret; |
103 | } | 106 | } |
107 | EXPORT_SYMBOL_GPL(vhost_poll_start); | ||
104 | 108 | ||
105 | /* Stop polling a file. After this function returns, it becomes safe to drop the | 109 | /* Stop polling a file. After this function returns, it becomes safe to drop the |
106 | * file reference. You must also flush afterwards. */ | 110 | * file reference. You must also flush afterwards. */ |
@@ -111,6 +115,7 @@ void vhost_poll_stop(struct vhost_poll *poll) | |||
111 | poll->wqh = NULL; | 115 | poll->wqh = NULL; |
112 | } | 116 | } |
113 | } | 117 | } |
118 | EXPORT_SYMBOL_GPL(vhost_poll_stop); | ||
114 | 119 | ||
115 | static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, | 120 | static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, |
116 | unsigned seq) | 121 | unsigned seq) |
@@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, | |||
123 | return left <= 0; | 128 | return left <= 0; |
124 | } | 129 | } |
125 | 130 | ||
126 | static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) | 131 | void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) |
127 | { | 132 | { |
128 | unsigned seq; | 133 | unsigned seq; |
129 | int flushing; | 134 | int flushing; |
@@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) | |||
138 | spin_unlock_irq(&dev->work_lock); | 143 | spin_unlock_irq(&dev->work_lock); |
139 | BUG_ON(flushing < 0); | 144 | BUG_ON(flushing < 0); |
140 | } | 145 | } |
146 | EXPORT_SYMBOL_GPL(vhost_work_flush); | ||
141 | 147 | ||
142 | /* Flush any work that has been scheduled. When calling this, don't hold any | 148 | /* Flush any work that has been scheduled. When calling this, don't hold any |
143 | * locks that are also used by the callback. */ | 149 | * locks that are also used by the callback. */ |
@@ -145,6 +151,7 @@ void vhost_poll_flush(struct vhost_poll *poll) | |||
145 | { | 151 | { |
146 | vhost_work_flush(poll->dev, &poll->work); | 152 | vhost_work_flush(poll->dev, &poll->work); |
147 | } | 153 | } |
154 | EXPORT_SYMBOL_GPL(vhost_poll_flush); | ||
148 | 155 | ||
149 | void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) | 156 | void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) |
150 | { | 157 | { |
@@ -158,11 +165,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) | |||
158 | } | 165 | } |
159 | spin_unlock_irqrestore(&dev->work_lock, flags); | 166 | spin_unlock_irqrestore(&dev->work_lock, flags); |
160 | } | 167 | } |
168 | EXPORT_SYMBOL_GPL(vhost_work_queue); | ||
161 | 169 | ||
162 | void vhost_poll_queue(struct vhost_poll *poll) | 170 | void vhost_poll_queue(struct vhost_poll *poll) |
163 | { | 171 | { |
164 | vhost_work_queue(poll->dev, &poll->work); | 172 | vhost_work_queue(poll->dev, &poll->work); |
165 | } | 173 | } |
174 | EXPORT_SYMBOL_GPL(vhost_poll_queue); | ||
166 | 175 | ||
167 | static void vhost_vq_reset(struct vhost_dev *dev, | 176 | static void vhost_vq_reset(struct vhost_dev *dev, |
168 | struct vhost_virtqueue *vq) | 177 | struct vhost_virtqueue *vq) |
@@ -251,17 +260,16 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) | |||
251 | /* Helper to allocate iovec buffers for all vqs. */ | 260 | /* Helper to allocate iovec buffers for all vqs. */ |
252 | static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) | 261 | static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) |
253 | { | 262 | { |
263 | struct vhost_virtqueue *vq; | ||
254 | int i; | 264 | int i; |
255 | 265 | ||
256 | for (i = 0; i < dev->nvqs; ++i) { | 266 | for (i = 0; i < dev->nvqs; ++i) { |
257 | dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect * | 267 | vq = dev->vqs[i]; |
258 | UIO_MAXIOV, GFP_KERNEL); | 268 | vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV, |
259 | dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV, | 269 | GFP_KERNEL); |
260 | GFP_KERNEL); | 270 | vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL); |
261 | dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads * | 271 | vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL); |
262 | UIO_MAXIOV, GFP_KERNEL); | 272 | if (!vq->indirect || !vq->log || !vq->heads) |
263 | if (!dev->vqs[i]->indirect || !dev->vqs[i]->log || | ||
264 | !dev->vqs[i]->heads) | ||
265 | goto err_nomem; | 273 | goto err_nomem; |
266 | } | 274 | } |
267 | return 0; | 275 | return 0; |
@@ -283,6 +291,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev) | |||
283 | long vhost_dev_init(struct vhost_dev *dev, | 291 | long vhost_dev_init(struct vhost_dev *dev, |
284 | struct vhost_virtqueue **vqs, int nvqs) | 292 | struct vhost_virtqueue **vqs, int nvqs) |
285 | { | 293 | { |
294 | struct vhost_virtqueue *vq; | ||
286 | int i; | 295 | int i; |
287 | 296 | ||
288 | dev->vqs = vqs; | 297 | dev->vqs = vqs; |
@@ -297,19 +306,21 @@ long vhost_dev_init(struct vhost_dev *dev, | |||
297 | dev->worker = NULL; | 306 | dev->worker = NULL; |
298 | 307 | ||
299 | for (i = 0; i < dev->nvqs; ++i) { | 308 | for (i = 0; i < dev->nvqs; ++i) { |
300 | dev->vqs[i]->log = NULL; | 309 | vq = dev->vqs[i]; |
301 | dev->vqs[i]->indirect = NULL; | 310 | vq->log = NULL; |
302 | dev->vqs[i]->heads = NULL; | 311 | vq->indirect = NULL; |
303 | dev->vqs[i]->dev = dev; | 312 | vq->heads = NULL; |
304 | mutex_init(&dev->vqs[i]->mutex); | 313 | vq->dev = dev; |
305 | vhost_vq_reset(dev, dev->vqs[i]); | 314 | mutex_init(&vq->mutex); |
306 | if (dev->vqs[i]->handle_kick) | 315 | vhost_vq_reset(dev, vq); |
307 | vhost_poll_init(&dev->vqs[i]->poll, | 316 | if (vq->handle_kick) |
308 | dev->vqs[i]->handle_kick, POLLIN, dev); | 317 | vhost_poll_init(&vq->poll, vq->handle_kick, |
318 | POLLIN, dev); | ||
309 | } | 319 | } |
310 | 320 | ||
311 | return 0; | 321 | return 0; |
312 | } | 322 | } |
323 | EXPORT_SYMBOL_GPL(vhost_dev_init); | ||
313 | 324 | ||
314 | /* Caller should have device mutex */ | 325 | /* Caller should have device mutex */ |
315 | long vhost_dev_check_owner(struct vhost_dev *dev) | 326 | long vhost_dev_check_owner(struct vhost_dev *dev) |
@@ -317,6 +328,7 @@ long vhost_dev_check_owner(struct vhost_dev *dev) | |||
317 | /* Are you the owner? If not, I don't think you mean to do that */ | 328 | /* Are you the owner? If not, I don't think you mean to do that */ |
318 | return dev->mm == current->mm ? 0 : -EPERM; | 329 | return dev->mm == current->mm ? 0 : -EPERM; |
319 | } | 330 | } |
331 | EXPORT_SYMBOL_GPL(vhost_dev_check_owner); | ||
320 | 332 | ||
321 | struct vhost_attach_cgroups_struct { | 333 | struct vhost_attach_cgroups_struct { |
322 | struct vhost_work work; | 334 | struct vhost_work work; |
@@ -348,6 +360,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev) | |||
348 | { | 360 | { |
349 | return dev->mm; | 361 | return dev->mm; |
350 | } | 362 | } |
363 | EXPORT_SYMBOL_GPL(vhost_dev_has_owner); | ||
351 | 364 | ||
352 | /* Caller should have device mutex */ | 365 | /* Caller should have device mutex */ |
353 | long vhost_dev_set_owner(struct vhost_dev *dev) | 366 | long vhost_dev_set_owner(struct vhost_dev *dev) |
@@ -391,11 +404,13 @@ err_worker: | |||
391 | err_mm: | 404 | err_mm: |
392 | return err; | 405 | return err; |
393 | } | 406 | } |
407 | EXPORT_SYMBOL_GPL(vhost_dev_set_owner); | ||
394 | 408 | ||
395 | struct vhost_memory *vhost_dev_reset_owner_prepare(void) | 409 | struct vhost_memory *vhost_dev_reset_owner_prepare(void) |
396 | { | 410 | { |
397 | return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL); | 411 | return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL); |
398 | } | 412 | } |
413 | EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); | ||
399 | 414 | ||
400 | /* Caller should have device mutex */ | 415 | /* Caller should have device mutex */ |
401 | void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory) | 416 | void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory) |
@@ -406,6 +421,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory) | |||
406 | memory->nregions = 0; | 421 | memory->nregions = 0; |
407 | RCU_INIT_POINTER(dev->memory, memory); | 422 | RCU_INIT_POINTER(dev->memory, memory); |
408 | } | 423 | } |
424 | EXPORT_SYMBOL_GPL(vhost_dev_reset_owner); | ||
409 | 425 | ||
410 | void vhost_dev_stop(struct vhost_dev *dev) | 426 | void vhost_dev_stop(struct vhost_dev *dev) |
411 | { | 427 | { |
@@ -418,6 +434,7 @@ void vhost_dev_stop(struct vhost_dev *dev) | |||
418 | } | 434 | } |
419 | } | 435 | } |
420 | } | 436 | } |
437 | EXPORT_SYMBOL_GPL(vhost_dev_stop); | ||
421 | 438 | ||
422 | /* Caller should have device mutex if and only if locked is set */ | 439 | /* Caller should have device mutex if and only if locked is set */ |
423 | void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) | 440 | void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) |
@@ -458,6 +475,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) | |||
458 | mmput(dev->mm); | 475 | mmput(dev->mm); |
459 | dev->mm = NULL; | 476 | dev->mm = NULL; |
460 | } | 477 | } |
478 | EXPORT_SYMBOL_GPL(vhost_dev_cleanup); | ||
461 | 479 | ||
462 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) | 480 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) |
463 | { | 481 | { |
@@ -543,6 +561,7 @@ int vhost_log_access_ok(struct vhost_dev *dev) | |||
543 | lockdep_is_held(&dev->mutex)); | 561 | lockdep_is_held(&dev->mutex)); |
544 | return memory_access_ok(dev, mp, 1); | 562 | return memory_access_ok(dev, mp, 1); |
545 | } | 563 | } |
564 | EXPORT_SYMBOL_GPL(vhost_log_access_ok); | ||
546 | 565 | ||
547 | /* Verify access for write logging. */ | 566 | /* Verify access for write logging. */ |
548 | /* Caller should have vq mutex and device mutex */ | 567 | /* Caller should have vq mutex and device mutex */ |
@@ -568,6 +587,7 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq) | |||
568 | return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) && | 587 | return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) && |
569 | vq_log_access_ok(vq->dev, vq, vq->log_base); | 588 | vq_log_access_ok(vq->dev, vq, vq->log_base); |
570 | } | 589 | } |
590 | EXPORT_SYMBOL_GPL(vhost_vq_access_ok); | ||
571 | 591 | ||
572 | static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | 592 | static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) |
573 | { | 593 | { |
@@ -797,6 +817,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) | |||
797 | vhost_poll_flush(&vq->poll); | 817 | vhost_poll_flush(&vq->poll); |
798 | return r; | 818 | return r; |
799 | } | 819 | } |
820 | EXPORT_SYMBOL_GPL(vhost_vring_ioctl); | ||
800 | 821 | ||
801 | /* Caller must have device mutex */ | 822 | /* Caller must have device mutex */ |
802 | long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) | 823 | long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) |
@@ -877,6 +898,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) | |||
877 | done: | 898 | done: |
878 | return r; | 899 | return r; |
879 | } | 900 | } |
901 | EXPORT_SYMBOL_GPL(vhost_dev_ioctl); | ||
880 | 902 | ||
881 | static const struct vhost_memory_region *find_region(struct vhost_memory *mem, | 903 | static const struct vhost_memory_region *find_region(struct vhost_memory *mem, |
882 | __u64 addr, __u32 len) | 904 | __u64 addr, __u32 len) |
@@ -968,6 +990,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | |||
968 | BUG(); | 990 | BUG(); |
969 | return 0; | 991 | return 0; |
970 | } | 992 | } |
993 | EXPORT_SYMBOL_GPL(vhost_log_write); | ||
971 | 994 | ||
972 | static int vhost_update_used_flags(struct vhost_virtqueue *vq) | 995 | static int vhost_update_used_flags(struct vhost_virtqueue *vq) |
973 | { | 996 | { |
@@ -1019,6 +1042,7 @@ int vhost_init_used(struct vhost_virtqueue *vq) | |||
1019 | vq->signalled_used_valid = false; | 1042 | vq->signalled_used_valid = false; |
1020 | return get_user(vq->last_used_idx, &vq->used->idx); | 1043 | return get_user(vq->last_used_idx, &vq->used->idx); |
1021 | } | 1044 | } |
1045 | EXPORT_SYMBOL_GPL(vhost_init_used); | ||
1022 | 1046 | ||
1023 | static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, | 1047 | static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, |
1024 | struct iovec iov[], int iov_size) | 1048 | struct iovec iov[], int iov_size) |
@@ -1295,12 +1319,14 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
1295 | BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); | 1319 | BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); |
1296 | return head; | 1320 | return head; |
1297 | } | 1321 | } |
1322 | EXPORT_SYMBOL_GPL(vhost_get_vq_desc); | ||
1298 | 1323 | ||
1299 | /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ | 1324 | /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ |
1300 | void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) | 1325 | void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) |
1301 | { | 1326 | { |
1302 | vq->last_avail_idx -= n; | 1327 | vq->last_avail_idx -= n; |
1303 | } | 1328 | } |
1329 | EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); | ||
1304 | 1330 | ||
1305 | /* After we've used one of their buffers, we tell them about it. We'll then | 1331 | /* After we've used one of their buffers, we tell them about it. We'll then |
1306 | * want to notify the guest, using eventfd. */ | 1332 | * want to notify the guest, using eventfd. */ |
@@ -1349,6 +1375,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) | |||
1349 | vq->signalled_used_valid = false; | 1375 | vq->signalled_used_valid = false; |
1350 | return 0; | 1376 | return 0; |
1351 | } | 1377 | } |
1378 | EXPORT_SYMBOL_GPL(vhost_add_used); | ||
1352 | 1379 | ||
1353 | static int __vhost_add_used_n(struct vhost_virtqueue *vq, | 1380 | static int __vhost_add_used_n(struct vhost_virtqueue *vq, |
1354 | struct vring_used_elem *heads, | 1381 | struct vring_used_elem *heads, |
@@ -1418,6 +1445,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, | |||
1418 | } | 1445 | } |
1419 | return r; | 1446 | return r; |
1420 | } | 1447 | } |
1448 | EXPORT_SYMBOL_GPL(vhost_add_used_n); | ||
1421 | 1449 | ||
1422 | static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | 1450 | static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) |
1423 | { | 1451 | { |
@@ -1462,6 +1490,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
1462 | if (vq->call_ctx && vhost_notify(dev, vq)) | 1490 | if (vq->call_ctx && vhost_notify(dev, vq)) |
1463 | eventfd_signal(vq->call_ctx, 1); | 1491 | eventfd_signal(vq->call_ctx, 1); |
1464 | } | 1492 | } |
1493 | EXPORT_SYMBOL_GPL(vhost_signal); | ||
1465 | 1494 | ||
1466 | /* And here's the combo meal deal. Supersize me! */ | 1495 | /* And here's the combo meal deal. Supersize me! */ |
1467 | void vhost_add_used_and_signal(struct vhost_dev *dev, | 1496 | void vhost_add_used_and_signal(struct vhost_dev *dev, |
@@ -1471,6 +1500,7 @@ void vhost_add_used_and_signal(struct vhost_dev *dev, | |||
1471 | vhost_add_used(vq, head, len); | 1500 | vhost_add_used(vq, head, len); |
1472 | vhost_signal(dev, vq); | 1501 | vhost_signal(dev, vq); |
1473 | } | 1502 | } |
1503 | EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); | ||
1474 | 1504 | ||
1475 | /* multi-buffer version of vhost_add_used_and_signal */ | 1505 | /* multi-buffer version of vhost_add_used_and_signal */ |
1476 | void vhost_add_used_and_signal_n(struct vhost_dev *dev, | 1506 | void vhost_add_used_and_signal_n(struct vhost_dev *dev, |
@@ -1480,6 +1510,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev, | |||
1480 | vhost_add_used_n(vq, heads, count); | 1510 | vhost_add_used_n(vq, heads, count); |
1481 | vhost_signal(dev, vq); | 1511 | vhost_signal(dev, vq); |
1482 | } | 1512 | } |
1513 | EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); | ||
1483 | 1514 | ||
1484 | /* OK, now we need to know about added descriptors. */ | 1515 | /* OK, now we need to know about added descriptors. */ |
1485 | bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | 1516 | bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) |
@@ -1517,6 +1548,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
1517 | 1548 | ||
1518 | return avail_idx != vq->avail_idx; | 1549 | return avail_idx != vq->avail_idx; |
1519 | } | 1550 | } |
1551 | EXPORT_SYMBOL_GPL(vhost_enable_notify); | ||
1520 | 1552 | ||
1521 | /* We don't need to be notified again. */ | 1553 | /* We don't need to be notified again. */ |
1522 | void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | 1554 | void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) |
@@ -1533,3 +1565,21 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
1533 | &vq->used->flags, r); | 1565 | &vq->used->flags, r); |
1534 | } | 1566 | } |
1535 | } | 1567 | } |
1568 | EXPORT_SYMBOL_GPL(vhost_disable_notify); | ||
1569 | |||
1570 | static int __init vhost_init(void) | ||
1571 | { | ||
1572 | return 0; | ||
1573 | } | ||
1574 | |||
1575 | static void __exit vhost_exit(void) | ||
1576 | { | ||
1577 | } | ||
1578 | |||
1579 | module_init(vhost_init); | ||
1580 | module_exit(vhost_exit); | ||
1581 | |||
1582 | MODULE_VERSION("0.0.1"); | ||
1583 | MODULE_LICENSE("GPL v2"); | ||
1584 | MODULE_AUTHOR("Michael S. Tsirkin"); | ||
1585 | MODULE_DESCRIPTION("Host kernel accelerator for virtio"); | ||
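
The hunks above turn the vhost core into a standalone module and export its helpers with EXPORT_SYMBOL_GPL, so that consumers such as vhost-net or vhost-scsi can be built as separate modules that simply link against vhost.ko. As a rough illustration only (not part of the patch), a minimal consumer could look like the sketch below; it assumes it is built from drivers/vhost/ so that "vhost.h" is on the include path, and demo_complete() is a made-up name used purely to show the exported calls.

/*
 * Hypothetical sketch, not part of the patch: a minimal module that links
 * against the newly exported vhost core symbols.
 */
#include <linux/module.h>
#include "vhost.h"

/* Complete one descriptor and notify the guest via the exported helpers. */
static void __maybe_unused demo_complete(struct vhost_dev *dev,
					 struct vhost_virtqueue *vq,
					 unsigned int head, int len)
{
	/* Equivalent to vhost_add_used() followed by vhost_signal(). */
	vhost_add_used_and_signal(dev, vq, head, len);

	/* Ask the guest to notify us about future buffers; if some were
	 * added in the meantime, vhost_enable_notify() returns true and a
	 * real driver would disable notification and keep processing. */
	if (vhost_enable_notify(dev, vq))
		vhost_disable_notify(dev, vq);
}

static int __init demo_init(void)
{
	return 0;	/* loading this module pulls in vhost.ko */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Illustrative consumer of the exported vhost symbols");
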
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 64adcf99ff33..42298cd23c73 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -46,6 +46,8 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file); | |||
46 | void vhost_poll_stop(struct vhost_poll *poll); | 46 | void vhost_poll_stop(struct vhost_poll *poll); |
47 | void vhost_poll_flush(struct vhost_poll *poll); | 47 | void vhost_poll_flush(struct vhost_poll *poll); |
48 | void vhost_poll_queue(struct vhost_poll *poll); | 48 | void vhost_poll_queue(struct vhost_poll *poll); |
49 | void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work); | ||
50 | long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp); | ||
49 | 51 | ||
50 | struct vhost_log { | 52 | struct vhost_log { |
51 | u64 addr; | 53 | u64 addr; |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0098810df69d..1f572c00a1be 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -192,7 +192,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) | |||
192 | * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); | 192 | * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); |
193 | * is true, we *have* to do it in this order | 193 | * is true, we *have* to do it in this order |
194 | */ | 194 | */ |
195 | tell_host(vb, vb->deflate_vq); | 195 | if (vb->num_pfns != 0) |
196 | tell_host(vb, vb->deflate_vq); | ||
196 | mutex_unlock(&vb->balloon_lock); | 197 | mutex_unlock(&vb->balloon_lock); |
197 | release_pages_by_pfn(vb->pfns, vb->num_pfns); | 198 | release_pages_by_pfn(vb->pfns, vb->num_pfns); |
198 | } | 199 | } |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index a7ce73029f59..1aba255b5879 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -289,9 +289,9 @@ static void vp_free_vectors(struct virtio_device *vdev) | |||
289 | 289 | ||
290 | pci_disable_msix(vp_dev->pci_dev); | 290 | pci_disable_msix(vp_dev->pci_dev); |
291 | vp_dev->msix_enabled = 0; | 291 | vp_dev->msix_enabled = 0; |
292 | vp_dev->msix_vectors = 0; | ||
293 | } | 292 | } |
294 | 293 | ||
294 | vp_dev->msix_vectors = 0; | ||
295 | vp_dev->msix_used_vectors = 0; | 295 | vp_dev->msix_used_vectors = 0; |
296 | kfree(vp_dev->msix_names); | 296 | kfree(vp_dev->msix_names); |
297 | vp_dev->msix_names = NULL; | 297 | vp_dev->msix_names = NULL; |
@@ -309,6 +309,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, | |||
309 | unsigned i, v; | 309 | unsigned i, v; |
310 | int err = -ENOMEM; | 310 | int err = -ENOMEM; |
311 | 311 | ||
312 | vp_dev->msix_vectors = nvectors; | ||
313 | |||
312 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, | 314 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, |
313 | GFP_KERNEL); | 315 | GFP_KERNEL); |
314 | if (!vp_dev->msix_entries) | 316 | if (!vp_dev->msix_entries) |
@@ -336,7 +338,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, | |||
336 | err = -ENOSPC; | 338 | err = -ENOSPC; |
337 | if (err) | 339 | if (err) |
338 | goto error; | 340 | goto error; |
339 | vp_dev->msix_vectors = nvectors; | ||
340 | vp_dev->msix_enabled = 1; | 341 | vp_dev->msix_enabled = 1; |
341 | 342 | ||
342 | /* Set the vector used for configuration */ | 343 | /* Set the vector used for configuration */ |
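
The virtio_pci hunks record msix_vectors before the MSI-X request is attempted and clear it unconditionally in vp_free_vectors(), so the teardown path can walk per-vector state even when enabling MSI-X failed partway. The following is a small, hypothetical userspace-style sketch (not from the patch) of that "record the count up front, clear it only in teardown" pattern.

/* Hypothetical sketch of the allocation-ordering pattern used above. */
#include <stdlib.h>

struct dev_state {
	int nvec;      /* recorded before the allocation is attempted */
	char **names;  /* per-vector state indexed by 0..nvec-1       */
};

static void free_vectors(struct dev_state *d)
{
	int i;

	/* Safe on partial setups too, because nvec was set up front. */
	for (i = 0; d->names && i < d->nvec; i++)
		free(d->names[i]);
	free(d->names);
	d->names = NULL;
	d->nvec = 0;   /* cleared unconditionally, as in vp_free_vectors() */
}

static int request_vectors(struct dev_state *d, int nvec)
{
	d->nvec = nvec;                        /* record first */
	d->names = calloc(nvec, sizeof(*d->names));
	if (!d->names) {
		free_vectors(d);               /* sees the right count */
		return -1;
	}
	return 0;
}
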
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 137b4198fc03..27d9da3f86ff 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -439,7 +439,7 @@ extern struct kernel_param_ops param_ops_string; | |||
439 | extern int param_set_copystring(const char *val, const struct kernel_param *); | 439 | extern int param_set_copystring(const char *val, const struct kernel_param *); |
440 | extern int param_get_string(char *buffer, const struct kernel_param *kp); | 440 | extern int param_get_string(char *buffer, const struct kernel_param *kp); |
441 | 441 | ||
442 | /* for exporting parameters in /sys/parameters */ | 442 | /* for exporting parameters in /sys/module/.../parameters */ |
443 | 443 | ||
444 | struct module; | 444 | struct module; |
445 | 445 | ||
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index ca3ad41c2c82..b300787af8e0 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _LINUX_VIRTIO_RING_H | 1 | #ifndef _LINUX_VIRTIO_RING_H |
2 | #define _LINUX_VIRTIO_RING_H | 2 | #define _LINUX_VIRTIO_RING_H |
3 | 3 | ||
4 | #include <asm/barrier.h> | ||
4 | #include <linux/irqreturn.h> | 5 | #include <linux/irqreturn.h> |
5 | #include <uapi/linux/virtio_ring.h> | 6 | #include <uapi/linux/virtio_ring.h> |
6 | 7 | ||
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 87ee4f4cff25..916e444e6f74 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h | |||
@@ -362,10 +362,14 @@ struct vfio_iommu_type1_dma_map { | |||
362 | #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13) | 362 | #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13) |
363 | 363 | ||
364 | /** | 364 | /** |
365 | * VFIO_IOMMU_UNMAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 14, struct vfio_dma_unmap) | 365 | * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14, |
366 | * struct vfio_dma_unmap) | ||
366 | * | 367 | * |
367 | * Unmap IO virtual addresses using the provided struct vfio_dma_unmap. | 368 | * Unmap IO virtual addresses using the provided struct vfio_dma_unmap. |
368 | * Caller sets argsz. | 369 | * Caller sets argsz. The actual unmapped size is returned in the size |
370 | * field. No guarantee is made to the user that arbitrary unmaps of iova | ||
371 | * or size different from those used in the original mapping call will | ||
372 | * succeed. | ||
369 | */ | 373 | */ |
370 | struct vfio_iommu_type1_dma_unmap { | 374 | struct vfio_iommu_type1_dma_unmap { |
371 | __u32 argsz; | 375 | __u32 argsz; |
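
The updated comment documents VFIO_IOMMU_UNMAP_DMA as read/write: userspace passes iova and size, and the kernel writes the actually unmapped size back into the size field, with no guarantee that unmaps differing from the original mapping succeed. A hedged userspace sketch of checking for a partial unmap, assuming 'container' is an already-open VFIO container fd with the Type1 IOMMU configured:

/* Hypothetical userspace sketch, not from the patch. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int unmap_range(int container, unsigned long long iova,
		       unsigned long long size)
{
	struct vfio_iommu_type1_dma_unmap unmap;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);	/* caller sets argsz */
	unmap.iova = iova;
	unmap.size = size;

	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/* The kernel reports the actual unmapped size in unmap.size. */
	if (unmap.size != size)
		fprintf(stderr, "partial unmap: %llu of %llu bytes\n",
			(unsigned long long)unmap.size, size);
	return 0;
}
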
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index b7cda390fd00..3ce768c6910d 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h | |||
@@ -51,4 +51,7 @@ | |||
51 | * suppressed them? */ | 51 | * suppressed them? */ |
52 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 | 52 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 |
53 | 53 | ||
54 | /* Can the device handle any descriptor layout? */ | ||
55 | #define VIRTIO_F_ANY_LAYOUT 27 | ||
56 | |||
54 | #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ | 57 | #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ |
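
VIRTIO_F_ANY_LAYOUT (bit 27) advertises that the device does not depend on a fixed framing of descriptors, e.g. a request header sitting in its own descriptor. A driver tests it like any other negotiated feature; the sketch below is illustrative only and assumes the driver also lists the bit in its feature table so negotiation can succeed.

/* Hypothetical driver-side sketch, not from the patch. */
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static bool can_merge_header(struct virtio_device *vdev)
{
	/* If the device accepts any descriptor layout, the driver may put
	 * the request header and payload into a single descriptor instead
	 * of dedicating the first descriptor to the header. */
	return virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT);
}
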
diff --git a/kernel/events/core.c b/kernel/events/core.c index 1db3af933704..1833bc5a84a7 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -182,7 +182,7 @@ void update_perf_cpu_limits(void) | |||
182 | u64 tmp = perf_sample_period_ns; | 182 | u64 tmp = perf_sample_period_ns; |
183 | 183 | ||
184 | tmp *= sysctl_perf_cpu_time_max_percent; | 184 | tmp *= sysctl_perf_cpu_time_max_percent; |
185 | tmp = do_div(tmp, 100); | 185 | do_div(tmp, 100); |
186 | atomic_set(&perf_sample_allowed_ns, tmp); | 186 | atomic_set(&perf_sample_allowed_ns, tmp); |
187 | } | 187 | } |
188 | 188 | ||
@@ -232,7 +232,7 @@ DEFINE_PER_CPU(u64, running_sample_length); | |||
232 | void perf_sample_event_took(u64 sample_len_ns) | 232 | void perf_sample_event_took(u64 sample_len_ns) |
233 | { | 233 | { |
234 | u64 avg_local_sample_len; | 234 | u64 avg_local_sample_len; |
235 | u64 local_samples_len = __get_cpu_var(running_sample_length); | 235 | u64 local_samples_len; |
236 | 236 | ||
237 | if (atomic_read(&perf_sample_allowed_ns) == 0) | 237 | if (atomic_read(&perf_sample_allowed_ns) == 0) |
238 | return; | 238 | return; |
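
The perf fix hinges on do_div() semantics: do_div(n, base) divides n in place and its return value is the remainder, so the old "tmp = do_div(tmp, 100)" discarded the quotient and stored the remainder instead. A short illustrative sketch of the difference:

/* Illustrative only: do_div() divides in place and returns the remainder. */
#include <linux/types.h>
#include <asm/div64.h>

static u64 percent_of(u64 value, unsigned int pct)
{
	u64 tmp = value;

	tmp *= pct;
	do_div(tmp, 100);	/* correct: tmp now holds the quotient   */
	/* tmp = do_div(tmp, 100);  wrong: tmp would hold the remainder */
	return tmp;
}
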
diff --git a/kernel/module.c b/kernel/module.c index cab4bce49c23..206915830d29 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -455,7 +455,7 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
455 | EXPORT_SYMBOL_GPL(find_symbol); | 455 | EXPORT_SYMBOL_GPL(find_symbol); |
456 | 456 | ||
457 | /* Search for module by name: must hold module_mutex. */ | 457 | /* Search for module by name: must hold module_mutex. */ |
458 | static struct module *find_module_all(const char *name, | 458 | static struct module *find_module_all(const char *name, size_t len, |
459 | bool even_unformed) | 459 | bool even_unformed) |
460 | { | 460 | { |
461 | struct module *mod; | 461 | struct module *mod; |
@@ -463,7 +463,7 @@ static struct module *find_module_all(const char *name, | |||
463 | list_for_each_entry(mod, &modules, list) { | 463 | list_for_each_entry(mod, &modules, list) { |
464 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) | 464 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) |
465 | continue; | 465 | continue; |
466 | if (strcmp(mod->name, name) == 0) | 466 | if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) |
467 | return mod; | 467 | return mod; |
468 | } | 468 | } |
469 | return NULL; | 469 | return NULL; |
@@ -471,7 +471,7 @@ static struct module *find_module_all(const char *name, | |||
471 | 471 | ||
472 | struct module *find_module(const char *name) | 472 | struct module *find_module(const char *name) |
473 | { | 473 | { |
474 | return find_module_all(name, false); | 474 | return find_module_all(name, strlen(name), false); |
475 | } | 475 | } |
476 | EXPORT_SYMBOL_GPL(find_module); | 476 | EXPORT_SYMBOL_GPL(find_module); |
477 | 477 | ||
@@ -482,23 +482,28 @@ static inline void __percpu *mod_percpu(struct module *mod) | |||
482 | return mod->percpu; | 482 | return mod->percpu; |
483 | } | 483 | } |
484 | 484 | ||
485 | static int percpu_modalloc(struct module *mod, | 485 | static int percpu_modalloc(struct module *mod, struct load_info *info) |
486 | unsigned long size, unsigned long align) | ||
487 | { | 486 | { |
487 | Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; | ||
488 | unsigned long align = pcpusec->sh_addralign; | ||
489 | |||
490 | if (!pcpusec->sh_size) | ||
491 | return 0; | ||
492 | |||
488 | if (align > PAGE_SIZE) { | 493 | if (align > PAGE_SIZE) { |
489 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | 494 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", |
490 | mod->name, align, PAGE_SIZE); | 495 | mod->name, align, PAGE_SIZE); |
491 | align = PAGE_SIZE; | 496 | align = PAGE_SIZE; |
492 | } | 497 | } |
493 | 498 | ||
494 | mod->percpu = __alloc_reserved_percpu(size, align); | 499 | mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); |
495 | if (!mod->percpu) { | 500 | if (!mod->percpu) { |
496 | printk(KERN_WARNING | 501 | printk(KERN_WARNING |
497 | "%s: Could not allocate %lu bytes percpu data\n", | 502 | "%s: Could not allocate %lu bytes percpu data\n", |
498 | mod->name, size); | 503 | mod->name, (unsigned long)pcpusec->sh_size); |
499 | return -ENOMEM; | 504 | return -ENOMEM; |
500 | } | 505 | } |
501 | mod->percpu_size = size; | 506 | mod->percpu_size = pcpusec->sh_size; |
502 | return 0; | 507 | return 0; |
503 | } | 508 | } |
504 | 509 | ||
@@ -563,10 +568,12 @@ static inline void __percpu *mod_percpu(struct module *mod) | |||
563 | { | 568 | { |
564 | return NULL; | 569 | return NULL; |
565 | } | 570 | } |
566 | static inline int percpu_modalloc(struct module *mod, | 571 | static int percpu_modalloc(struct module *mod, struct load_info *info) |
567 | unsigned long size, unsigned long align) | ||
568 | { | 572 | { |
569 | return -ENOMEM; | 573 | /* UP modules shouldn't have this section: ENOMEM isn't quite right */ |
574 | if (info->sechdrs[info->index.pcpu].sh_size != 0) | ||
575 | return -ENOMEM; | ||
576 | return 0; | ||
570 | } | 577 | } |
571 | static inline void percpu_modfree(struct module *mod) | 578 | static inline void percpu_modfree(struct module *mod) |
572 | { | 579 | { |
@@ -2927,7 +2934,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags) | |||
2927 | { | 2934 | { |
2928 | /* Module within temporary copy. */ | 2935 | /* Module within temporary copy. */ |
2929 | struct module *mod; | 2936 | struct module *mod; |
2930 | Elf_Shdr *pcpusec; | ||
2931 | int err; | 2937 | int err; |
2932 | 2938 | ||
2933 | mod = setup_load_info(info, flags); | 2939 | mod = setup_load_info(info, flags); |
@@ -2942,17 +2948,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags) | |||
2942 | err = module_frob_arch_sections(info->hdr, info->sechdrs, | 2948 | err = module_frob_arch_sections(info->hdr, info->sechdrs, |
2943 | info->secstrings, mod); | 2949 | info->secstrings, mod); |
2944 | if (err < 0) | 2950 | if (err < 0) |
2945 | goto out; | 2951 | return ERR_PTR(err); |
2946 | 2952 | ||
2947 | pcpusec = &info->sechdrs[info->index.pcpu]; | 2953 | /* We will do a special allocation for per-cpu sections later. */ |
2948 | if (pcpusec->sh_size) { | 2954 | info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; |
2949 | /* We have a special allocation for this section. */ | ||
2950 | err = percpu_modalloc(mod, | ||
2951 | pcpusec->sh_size, pcpusec->sh_addralign); | ||
2952 | if (err) | ||
2953 | goto out; | ||
2954 | pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC; | ||
2955 | } | ||
2956 | 2955 | ||
2957 | /* Determine total sizes, and put offsets in sh_entsize. For now | 2956 | /* Determine total sizes, and put offsets in sh_entsize. For now |
2958 | this is done generically; there doesn't appear to be any | 2957 | this is done generically; there doesn't appear to be any |
@@ -2963,17 +2962,12 @@ static struct module *layout_and_allocate(struct load_info *info, int flags) | |||
2963 | /* Allocate and move to the final place */ | 2962 | /* Allocate and move to the final place */ |
2964 | err = move_module(mod, info); | 2963 | err = move_module(mod, info); |
2965 | if (err) | 2964 | if (err) |
2966 | goto free_percpu; | 2965 | return ERR_PTR(err); |
2967 | 2966 | ||
2968 | /* Module has been copied to its final place now: return it. */ | 2967 | /* Module has been copied to its final place now: return it. */ |
2969 | mod = (void *)info->sechdrs[info->index.mod].sh_addr; | 2968 | mod = (void *)info->sechdrs[info->index.mod].sh_addr; |
2970 | kmemleak_load_module(mod, info); | 2969 | kmemleak_load_module(mod, info); |
2971 | return mod; | 2970 | return mod; |
2972 | |||
2973 | free_percpu: | ||
2974 | percpu_modfree(mod); | ||
2975 | out: | ||
2976 | return ERR_PTR(err); | ||
2977 | } | 2971 | } |
2978 | 2972 | ||
2979 | /* mod is no longer valid after this! */ | 2973 | /* mod is no longer valid after this! */ |
@@ -3014,7 +3008,7 @@ static bool finished_loading(const char *name) | |||
3014 | bool ret; | 3008 | bool ret; |
3015 | 3009 | ||
3016 | mutex_lock(&module_mutex); | 3010 | mutex_lock(&module_mutex); |
3017 | mod = find_module_all(name, true); | 3011 | mod = find_module_all(name, strlen(name), true); |
3018 | ret = !mod || mod->state == MODULE_STATE_LIVE | 3012 | ret = !mod || mod->state == MODULE_STATE_LIVE |
3019 | || mod->state == MODULE_STATE_GOING; | 3013 | || mod->state == MODULE_STATE_GOING; |
3020 | mutex_unlock(&module_mutex); | 3014 | mutex_unlock(&module_mutex); |
@@ -3152,7 +3146,8 @@ static int add_unformed_module(struct module *mod) | |||
3152 | 3146 | ||
3153 | again: | 3147 | again: |
3154 | mutex_lock(&module_mutex); | 3148 | mutex_lock(&module_mutex); |
3155 | if ((old = find_module_all(mod->name, true)) != NULL) { | 3149 | old = find_module_all(mod->name, strlen(mod->name), true); |
3150 | if (old != NULL) { | ||
3156 | if (old->state == MODULE_STATE_COMING | 3151 | if (old->state == MODULE_STATE_COMING |
3157 | || old->state == MODULE_STATE_UNFORMED) { | 3152 | || old->state == MODULE_STATE_UNFORMED) { |
3158 | /* Wait in case it fails to load. */ | 3153 | /* Wait in case it fails to load. */ |
@@ -3198,6 +3193,17 @@ out: | |||
3198 | return err; | 3193 | return err; |
3199 | } | 3194 | } |
3200 | 3195 | ||
3196 | static int unknown_module_param_cb(char *param, char *val, const char *modname) | ||
3197 | { | ||
3198 | /* Check for magic 'dyndbg' arg */ | ||
3199 | int ret = ddebug_dyndbg_module_param_cb(param, val, modname); | ||
3200 | if (ret != 0) { | ||
3201 | printk(KERN_WARNING "%s: unknown parameter '%s' ignored\n", | ||
3202 | modname, param); | ||
3203 | } | ||
3204 | return 0; | ||
3205 | } | ||
3206 | |||
3201 | /* Allocate and load the module: note that size of section 0 is always | 3207 | /* Allocate and load the module: note that size of section 0 is always |
3202 | zero, and we rely on this for optional sections. */ | 3208 | zero, and we rely on this for optional sections. */ |
3203 | static int load_module(struct load_info *info, const char __user *uargs, | 3209 | static int load_module(struct load_info *info, const char __user *uargs, |
@@ -3237,6 +3243,11 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3237 | } | 3243 | } |
3238 | #endif | 3244 | #endif |
3239 | 3245 | ||
3246 | /* To avoid stressing percpu allocator, do this once we're unique. */ | ||
3247 | err = percpu_modalloc(mod, info); | ||
3248 | if (err) | ||
3249 | goto unlink_mod; | ||
3250 | |||
3240 | /* Now module is in final location, initialize linked lists, etc. */ | 3251 | /* Now module is in final location, initialize linked lists, etc. */ |
3241 | err = module_unload_init(mod); | 3252 | err = module_unload_init(mod); |
3242 | if (err) | 3253 | if (err) |
@@ -3284,7 +3295,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3284 | 3295 | ||
3285 | /* Module is ready to execute: parsing args may do that. */ | 3296 | /* Module is ready to execute: parsing args may do that. */ |
3286 | err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, | 3297 | err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, |
3287 | -32768, 32767, &ddebug_dyndbg_module_param_cb); | 3298 | -32768, 32767, unknown_module_param_cb); |
3288 | if (err < 0) | 3299 | if (err < 0) |
3289 | goto bug_cleanup; | 3300 | goto bug_cleanup; |
3290 | 3301 | ||
@@ -3563,10 +3574,8 @@ unsigned long module_kallsyms_lookup_name(const char *name) | |||
3563 | /* Don't lock: we're in enough trouble already. */ | 3574 | /* Don't lock: we're in enough trouble already. */ |
3564 | preempt_disable(); | 3575 | preempt_disable(); |
3565 | if ((colon = strchr(name, ':')) != NULL) { | 3576 | if ((colon = strchr(name, ':')) != NULL) { |
3566 | *colon = '\0'; | 3577 | if ((mod = find_module_all(name, colon - name, false)) != NULL) |
3567 | if ((mod = find_module(name)) != NULL) | ||
3568 | ret = mod_find_symname(mod, colon+1); | 3578 | ret = mod_find_symname(mod, colon+1); |
3569 | *colon = ':'; | ||
3570 | } else { | 3579 | } else { |
3571 | list_for_each_entry_rcu(mod, &modules, list) { | 3580 | list_for_each_entry_rcu(mod, &modules, list) { |
3572 | if (mod->state == MODULE_STATE_UNFORMED) | 3581 | if (mod->state == MODULE_STATE_UNFORMED) |
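
Passing an explicit length to find_module_all() lets module_kallsyms_lookup_name() resolve a "module:symbol" query without temporarily writing a NUL over the colon, so the caller's buffer is never modified and "vhost" cannot accidentally match "vhost_net". A hedged userspace-style sketch of the same length-bounded lookup (names and the module list are made up for illustration):

/* Hypothetical sketch, not from the patch. */
#include <stdio.h>
#include <string.h>

static const char *modules[] = { "vhost", "vhost_net", "virtio_balloon" };

static const char *find_module_len(const char *name, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(modules) / sizeof(modules[0]); i++)
		if (strlen(modules[i]) == len &&
		    !memcmp(modules[i], name, len))
			return modules[i];
	return NULL;
}

int main(void)
{
	const char *query = "vhost:vhost_poll_queue";	/* "module:symbol" */
	const char *colon = strchr(query, ':');

	/* No need to overwrite the ':' with a NUL: pass the length instead. */
	printf("module: %s, symbol: %s\n",
	       find_module_len(query, colon - query), colon + 1);
	return 0;
}
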
diff --git a/kernel/params.c b/kernel/params.c index 53b958fcd639..440e65d1a544 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -787,7 +787,7 @@ static void __init kernel_add_sysfs_param(const char *name, | |||
787 | } | 787 | } |
788 | 788 | ||
789 | /* | 789 | /* |
790 | * param_sysfs_builtin - add contents in /sys/parameters for built-in modules | 790 | * param_sysfs_builtin - add sysfs parameters for built-in modules |
791 | * | 791 | * |
792 | * Add module_parameters to sysfs for "modules" built into the kernel. | 792 | * Add module_parameters to sysfs for "modules" built into the kernel. |
793 | * | 793 | * |
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 182084d728c8..8ccf83056a7a 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst | |||
@@ -47,18 +47,24 @@ header-y := $(filter-out $(generic-y), $(header-y)) | |||
47 | all-files := $(header-y) $(genhdr-y) $(wrapper-files) | 47 | all-files := $(header-y) $(genhdr-y) $(wrapper-files) |
48 | output-files := $(addprefix $(installdir)/, $(all-files)) | 48 | output-files := $(addprefix $(installdir)/, $(all-files)) |
49 | 49 | ||
50 | input-files := $(foreach hdr, $(header-y), \ | 50 | input-files1 := $(foreach hdr, $(header-y), \ |
51 | $(if $(wildcard $(srcdir)/$(hdr)), \ | 51 | $(if $(wildcard $(srcdir)/$(hdr)), \ |
52 | $(wildcard $(srcdir)/$(hdr)), \ | 52 | $(wildcard $(srcdir)/$(hdr))) \ |
53 | ) | ||
54 | input-files1-name := $(notdir $(input-files1)) | ||
55 | input-files2 := $(foreach hdr, $(header-y), \ | ||
56 | $(if $(wildcard $(srcdir)/$(hdr)),, \ | ||
53 | $(if $(wildcard $(oldsrcdir)/$(hdr)), \ | 57 | $(if $(wildcard $(oldsrcdir)/$(hdr)), \ |
54 | $(wildcard $(oldsrcdir)/$(hdr)), \ | 58 | $(wildcard $(oldsrcdir)/$(hdr)), \ |
55 | $(error Missing UAPI file $(srcdir)/$(hdr))) \ | 59 | $(error Missing UAPI file $(srcdir)/$(hdr))) \ |
56 | )) \ | 60 | )) |
57 | $(foreach hdr, $(genhdr-y), \ | 61 | input-files2-name := $(notdir $(input-files2)) |
62 | input-files3 := $(foreach hdr, $(genhdr-y), \ | ||
58 | $(if $(wildcard $(gendir)/$(hdr)), \ | 63 | $(if $(wildcard $(gendir)/$(hdr)), \ |
59 | $(wildcard $(gendir)/$(hdr)), \ | 64 | $(wildcard $(gendir)/$(hdr)), \ |
60 | $(error Missing generated UAPI file $(gendir)/$(hdr)) \ | 65 | $(error Missing generated UAPI file $(gendir)/$(hdr)) \ |
61 | )) | 66 | )) |
67 | input-files3-name := $(notdir $(input-files3)) | ||
62 | 68 | ||
63 | # Work out what needs to be removed | 69 | # Work out what needs to be removed |
64 | oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h)) | 70 | oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h)) |
@@ -72,7 +78,9 @@ printdir = $(patsubst $(INSTALL_HDR_PATH)/%/,%,$(dir $@)) | |||
72 | quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\ | 78 | quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\ |
73 | file$(if $(word 2, $(all-files)),s)) | 79 | file$(if $(word 2, $(all-files)),s)) |
74 | cmd_install = \ | 80 | cmd_install = \ |
75 | $(CONFIG_SHELL) $< $(installdir) $(input-files); \ | 81 | $(CONFIG_SHELL) $< $(installdir) $(srcdir) $(input-files1-name); \ |
82 | $(CONFIG_SHELL) $< $(installdir) $(oldsrcdir) $(input-files2-name); \ | ||
83 | $(CONFIG_SHELL) $< $(installdir) $(gendir) $(input-files3-name); \ | ||
76 | for F in $(wrapper-files); do \ | 84 | for F in $(wrapper-files); do \ |
77 | echo "\#include <asm-generic/$$F>" > $(installdir)/$$F; \ | 85 | echo "\#include <asm-generic/$$F>" > $(installdir)/$$F; \ |
78 | done; \ | 86 | done; \ |
@@ -98,7 +106,7 @@ __headersinst: $(subdirs) $(install-file) | |||
98 | @: | 106 | @: |
99 | 107 | ||
100 | targets += $(install-file) | 108 | targets += $(install-file) |
101 | $(install-file): scripts/headers_install.sh $(input-files) FORCE | 109 | $(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE |
102 | $(if $(unwanted),$(call cmd,remove),) | 110 | $(if $(unwanted),$(call cmd,remove),) |
103 | $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@))) | 111 | $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@))) |
104 | $(call if_changed,install) | 112 | $(call if_changed,install) |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 6031e2380638..49392ecbef17 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -63,7 +63,7 @@ multi-objs := $(multi-objs-y) $(multi-objs-m) | |||
63 | subdir-obj-y := $(filter %/built-in.o, $(obj-y)) | 63 | subdir-obj-y := $(filter %/built-in.o, $(obj-y)) |
64 | 64 | ||
65 | # $(obj-dirs) is a list of directories that contain object files | 65 | # $(obj-dirs) is a list of directories that contain object files |
66 | obj-dirs := $(dir $(multi-objs) $(subdir-obj-y)) | 66 | obj-dirs := $(dir $(multi-objs) $(obj-y)) |
67 | 67 | ||
68 | # Replace multi-part objects by their individual parts, look at local dir only | 68 | # Replace multi-part objects by their individual parts, look at local dir only |
69 | real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y) | 69 | real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y) |
@@ -244,7 +244,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ | |||
244 | # --------------------------------------------------------------------------- | 244 | # --------------------------------------------------------------------------- |
245 | 245 | ||
246 | # Generate an assembly file to wrap the output of the device tree compiler | 246 | # Generate an assembly file to wrap the output of the device tree compiler |
247 | quiet_cmd_dt_S_dtb= DTB $@ | 247 | quiet_cmd_dt_S_dtb= DTB $@ |
248 | cmd_dt_S_dtb= \ | 248 | cmd_dt_S_dtb= \ |
249 | ( \ | 249 | ( \ |
250 | echo '\#include <asm-generic/vmlinux.lds.h>'; \ | 250 | echo '\#include <asm-generic/vmlinux.lds.h>'; \ |
diff --git a/scripts/coccicheck b/scripts/coccicheck index 06fcb3333247..bbf901afb606 100755 --- a/scripts/coccicheck +++ b/scripts/coccicheck | |||
@@ -1,17 +1,31 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | 2 | ||
3 | # | ||
4 | # This script requires at least spatch | ||
5 | # version 1.0.0-rc11. | ||
6 | # | ||
7 | |||
3 | SPATCH="`which ${SPATCH:=spatch}`" | 8 | SPATCH="`which ${SPATCH:=spatch}`" |
4 | 9 | ||
10 | trap kill_running SIGTERM SIGINT | ||
11 | declare -a SPATCH_PID | ||
12 | |||
5 | # The verbosity may be set by the environmental parameter V= | 13 | # The verbosity may be set by the environmental parameter V= |
6 | # as for example with 'make V=1 coccicheck' | 14 | # as for example with 'make V=1 coccicheck' |
7 | 15 | ||
8 | if [ -n "$V" -a "$V" != "0" ]; then | 16 | if [ -n "$V" -a "$V" != "0" ]; then |
9 | VERBOSE=1 | 17 | VERBOSE="$V" |
10 | else | 18 | else |
11 | VERBOSE=0 | 19 | VERBOSE=0 |
12 | fi | 20 | fi |
13 | 21 | ||
14 | FLAGS="$SPFLAGS -very_quiet" | 22 | if [ -z "$J" ]; then |
23 | NPROC=$(getconf _NPROCESSORS_ONLN) | ||
24 | else | ||
25 | NPROC="$J" | ||
26 | fi | ||
27 | |||
28 | FLAGS="$SPFLAGS --very-quiet" | ||
15 | 29 | ||
16 | # spatch only allows include directories with the syntax "-I include" | 30 | # spatch only allows include directories with the syntax "-I include" |
17 | # while gcc also allows "-Iinclude" and "-include include" | 31 | # while gcc also allows "-Iinclude" and "-include include" |
@@ -27,14 +41,14 @@ if [ "$C" = "1" -o "$C" = "2" ]; then | |||
27 | else | 41 | else |
28 | ONLINE=0 | 42 | ONLINE=0 |
29 | if [ "$KBUILD_EXTMOD" = "" ] ; then | 43 | if [ "$KBUILD_EXTMOD" = "" ] ; then |
30 | OPTIONS="-dir $srctree $COCCIINCLUDE" | 44 | OPTIONS="--dir $srctree $COCCIINCLUDE" |
31 | else | 45 | else |
32 | OPTIONS="-dir $KBUILD_EXTMOD $COCCIINCLUDE" | 46 | OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE" |
33 | fi | 47 | fi |
34 | fi | 48 | fi |
35 | 49 | ||
36 | if [ "$KBUILD_EXTMOD" != "" ] ; then | 50 | if [ "$KBUILD_EXTMOD" != "" ] ; then |
37 | OPTIONS="-patch $srctree $OPTIONS" | 51 | OPTIONS="--patch $srctree $OPTIONS" |
38 | fi | 52 | fi |
39 | 53 | ||
40 | if [ ! -x "$SPATCH" ]; then | 54 | if [ ! -x "$SPATCH" ]; then |
@@ -44,13 +58,21 @@ fi | |||
44 | 58 | ||
45 | if [ "$MODE" = "" ] ; then | 59 | if [ "$MODE" = "" ] ; then |
46 | if [ "$ONLINE" = "0" ] ; then | 60 | if [ "$ONLINE" = "0" ] ; then |
47 | echo 'You have not explicitly specified the mode to use. Using default "chain" mode.' | 61 | echo 'You have not explicitly specified the mode to use. Using default "report" mode.' |
48 | echo 'All available modes will be tried (in that order): patch, report, context, org' | 62 | echo 'Available modes are the following: patch, report, context, org' |
49 | echo 'You can specify the mode with "make coccicheck MODE=<mode>"' | 63 | echo 'You can specify the mode with "make coccicheck MODE=<mode>"' |
64 | echo 'Note however that some modes are not implemented by some semantic patches.' | ||
65 | fi | ||
66 | MODE="report" | ||
67 | fi | ||
68 | |||
69 | if [ "$MODE" = "chain" ] ; then | ||
70 | if [ "$ONLINE" = "0" ] ; then | ||
71 | echo 'You have selected the "chain" mode.' | ||
72 | echo 'All available modes will be tried (in that order): patch, report, context, org' | ||
50 | fi | 73 | fi |
51 | MODE="chain" | ||
52 | elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then | 74 | elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then |
53 | FLAGS="$FLAGS -no_show_diff" | 75 | FLAGS="$FLAGS --no-show-diff" |
54 | fi | 76 | fi |
55 | 77 | ||
56 | if [ "$ONLINE" = "0" ] ; then | 78 | if [ "$ONLINE" = "0" ] ; then |
@@ -61,19 +83,35 @@ if [ "$ONLINE" = "0" ] ; then | |||
61 | fi | 83 | fi |
62 | 84 | ||
63 | run_cmd() { | 85 | run_cmd() { |
86 | local i | ||
64 | if [ $VERBOSE -ne 0 ] ; then | 87 | if [ $VERBOSE -ne 0 ] ; then |
65 | echo "Running: $@" | 88 | echo "Running ($NPROC in parallel): $@" |
66 | fi | 89 | fi |
67 | eval $@ | 90 | for i in $(seq 0 $(( NPROC - 1)) ); do |
91 | eval "$@ --max $NPROC --index $i &" | ||
92 | SPATCH_PID[$i]=$! | ||
93 | if [ $VERBOSE -eq 2 ] ; then | ||
94 | echo "${SPATCH_PID[$i]} running" | ||
95 | fi | ||
96 | done | ||
97 | wait | ||
68 | } | 98 | } |
69 | 99 | ||
100 | kill_running() { | ||
101 | for i in $(seq 0 $(( NPROC - 1 )) ); do | ||
102 | if [ $VERBOSE -eq 2 ] ; then | ||
103 | echo "Killing ${SPATCH_PID[$i]}" | ||
104 | fi | ||
105 | kill ${SPATCH_PID[$i]} 2>/dev/null | ||
106 | done | ||
107 | } | ||
70 | 108 | ||
71 | coccinelle () { | 109 | coccinelle () { |
72 | COCCI="$1" | 110 | COCCI="$1" |
73 | 111 | ||
74 | OPT=`grep "Option" $COCCI | cut -d':' -f2` | 112 | OPT=`grep "Option" $COCCI | cut -d':' -f2` |
75 | 113 | ||
76 | # The option '-parse_cocci' can be used to syntactically check the SmPL files. | 114 | # The option '--parse-cocci' can be used to syntactically check the SmPL files. |
77 | # | 115 | # |
78 | # $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null | 116 | # $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null |
79 | 117 | ||
@@ -114,20 +152,20 @@ coccinelle () { | |||
114 | 152 | ||
115 | if [ "$MODE" = "chain" ] ; then | 153 | if [ "$MODE" = "chain" ] ; then |
116 | run_cmd $SPATCH -D patch \ | 154 | run_cmd $SPATCH -D patch \ |
117 | $FLAGS -sp_file $COCCI $OPT $OPTIONS || \ | 155 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS || \ |
118 | run_cmd $SPATCH -D report \ | 156 | run_cmd $SPATCH -D report \ |
119 | $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \ | 157 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || \ |
120 | run_cmd $SPATCH -D context \ | 158 | run_cmd $SPATCH -D context \ |
121 | $FLAGS -sp_file $COCCI $OPT $OPTIONS || \ | 159 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS || \ |
122 | run_cmd $SPATCH -D org \ | 160 | run_cmd $SPATCH -D org \ |
123 | $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1 | 161 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || exit 1 |
124 | elif [ "$MODE" = "rep+ctxt" ] ; then | 162 | elif [ "$MODE" = "rep+ctxt" ] ; then |
125 | run_cmd $SPATCH -D report \ | 163 | run_cmd $SPATCH -D report \ |
126 | $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \ | 164 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff && \ |
127 | run_cmd $SPATCH -D context \ | 165 | run_cmd $SPATCH -D context \ |
128 | $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1 | 166 | $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1 |
129 | else | 167 | else |
130 | run_cmd $SPATCH -D $MODE $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1 | 168 | run_cmd $SPATCH -D $MODE $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1 |
131 | fi | 169 | fi |
132 | 170 | ||
133 | } | 171 | } |
diff --git a/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci b/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci index 7d4771d449c3..bd5d08b882ee 100644 --- a/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci +++ b/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci | |||
@@ -5,7 +5,7 @@ | |||
5 | // Confidence: High | 5 | // Confidence: High |
6 | // Copyright: 2009,2010 Nicolas Palix, DIKU. GPLv2. | 6 | // Copyright: 2009,2010 Nicolas Palix, DIKU. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Options: -no_includes -include_headers | 8 | // Options: --no-includes --include-headers |
9 | // | 9 | // |
10 | // Keywords: kmalloc, kzalloc, kcalloc | 10 | // Keywords: kmalloc, kzalloc, kcalloc |
11 | // Version min: < 2.6.12 kmalloc | 11 | // Version min: < 2.6.12 kmalloc |
diff --git a/scripts/coccinelle/api/alloc/kzalloc-simple.cocci b/scripts/coccinelle/api/alloc/kzalloc-simple.cocci index 046b9b16f8f9..52c55e4fa67d 100644 --- a/scripts/coccinelle/api/alloc/kzalloc-simple.cocci +++ b/scripts/coccinelle/api/alloc/kzalloc-simple.cocci | |||
@@ -9,7 +9,7 @@ | |||
9 | // Copyright: (C) 2009-2010 Julia Lawall, Nicolas Palix, DIKU. GPLv2. | 9 | // Copyright: (C) 2009-2010 Julia Lawall, Nicolas Palix, DIKU. GPLv2. |
10 | // Copyright: (C) 2009-2010 Gilles Muller, INRIA/LiP6. GPLv2. | 10 | // Copyright: (C) 2009-2010 Gilles Muller, INRIA/LiP6. GPLv2. |
11 | // URL: http://coccinelle.lip6.fr/rules/kzalloc.html | 11 | // URL: http://coccinelle.lip6.fr/rules/kzalloc.html |
12 | // Options: -no_includes -include_headers | 12 | // Options: --no-includes --include-headers |
13 | // | 13 | // |
14 | // Keywords: kmalloc, kzalloc | 14 | // Keywords: kmalloc, kzalloc |
15 | // Version min: < 2.6.12 kmalloc | 15 | // Version min: < 2.6.12 kmalloc |
diff --git a/scripts/coccinelle/api/d_find_alias.cocci b/scripts/coccinelle/api/d_find_alias.cocci index a9694a8d3e5a..9594c9f7eb8d 100644 --- a/scripts/coccinelle/api/d_find_alias.cocci +++ b/scripts/coccinelle/api/d_find_alias.cocci | |||
@@ -4,7 +4,7 @@ | |||
4 | // | 4 | // |
5 | // Confidence: Moderate | 5 | // Confidence: Moderate |
6 | // URL: http://coccinelle.lip6.fr/ | 6 | // URL: http://coccinelle.lip6.fr/ |
7 | // Options: -include_headers | 7 | // Options: --include-headers |
8 | 8 | ||
9 | virtual context | 9 | virtual context |
10 | virtual org | 10 | virtual org |
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci index 46beb81406ab..562ec88b6352 100644 --- a/scripts/coccinelle/api/devm_request_and_ioremap.cocci +++ b/scripts/coccinelle/api/devm_request_and_ioremap.cocci | |||
@@ -10,7 +10,7 @@ | |||
10 | // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. | 10 | // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. |
11 | // URL: http://coccinelle.lip6.fr/ | 11 | // URL: http://coccinelle.lip6.fr/ |
12 | // Comments: | 12 | // Comments: |
13 | // Options: -no_includes -include_headers | 13 | // Options: --no-includes --include-headers |
14 | 14 | ||
15 | virtual patch | 15 | virtual patch |
16 | virtual org | 16 | virtual org |
diff --git a/scripts/coccinelle/api/kstrdup.cocci b/scripts/coccinelle/api/kstrdup.cocci index 07a74b2c6196..09cba54ed0cf 100644 --- a/scripts/coccinelle/api/kstrdup.cocci +++ b/scripts/coccinelle/api/kstrdup.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual patch | 11 | virtual patch |
12 | virtual context | 12 | virtual context |
diff --git a/scripts/coccinelle/api/memdup.cocci b/scripts/coccinelle/api/memdup.cocci index 4dceab6d54de..3d1aa71b7579 100644 --- a/scripts/coccinelle/api/memdup.cocci +++ b/scripts/coccinelle/api/memdup.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual patch | 11 | virtual patch |
12 | virtual context | 12 | virtual context |
diff --git a/scripts/coccinelle/api/memdup_user.cocci b/scripts/coccinelle/api/memdup_user.cocci index 2b131a8a1306..c606231b0e46 100644 --- a/scripts/coccinelle/api/memdup_user.cocci +++ b/scripts/coccinelle/api/memdup_user.cocci | |||
@@ -7,7 +7,7 @@ | |||
7 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 7 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
8 | // URL: http://coccinelle.lip6.fr/ | 8 | // URL: http://coccinelle.lip6.fr/ |
9 | // Comments: | 9 | // Comments: |
10 | // Options: -no_includes -include_headers | 10 | // Options: --no-includes --include-headers |
11 | 11 | ||
12 | virtual patch | 12 | virtual patch |
13 | virtual context | 13 | virtual context |
diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci index 15f076fdecbe..2274638d005b 100644 --- a/scripts/coccinelle/api/ptr_ret.cocci +++ b/scripts/coccinelle/api/ptr_ret.cocci | |||
@@ -5,7 +5,7 @@ | |||
5 | // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. | 5 | // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. |
6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Options: -no_includes -include_headers | 8 | // Options: --no-includes --include-headers |
9 | // | 9 | // |
10 | // Keywords: ERR_PTR, PTR_ERR, PTR_RET | 10 | // Keywords: ERR_PTR, PTR_ERR, PTR_RET |
11 | // Version min: 2.6.39 | 11 | // Version min: 2.6.39 |
diff --git a/scripts/coccinelle/api/simple_open.cocci b/scripts/coccinelle/api/simple_open.cocci index 05962f7be155..b67e174f3d95 100644 --- a/scripts/coccinelle/api/simple_open.cocci +++ b/scripts/coccinelle/api/simple_open.cocci | |||
@@ -4,7 +4,7 @@ | |||
4 | /// | 4 | /// |
5 | // Confidence: High | 5 | // Confidence: High |
6 | // Comments: | 6 | // Comments: |
7 | // Options: -no_includes -include_headers | 7 | // Options: --no-includes --include-headers |
8 | 8 | ||
9 | virtual patch | 9 | virtual patch |
10 | virtual report | 10 | virtual report |
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci index 0a1e36146d76..3d9349012bb3 100644 --- a/scripts/coccinelle/free/devm_free.cocci +++ b/scripts/coccinelle/free/devm_free.cocci | |||
@@ -18,7 +18,7 @@ | |||
18 | // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. | 18 | // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. |
19 | // URL: http://coccinelle.lip6.fr/ | 19 | // URL: http://coccinelle.lip6.fr/ |
20 | // Comments: | 20 | // Comments: |
21 | // Options: -no_includes -include_headers | 21 | // Options: --no-includes --include-headers |
22 | 22 | ||
23 | virtual org | 23 | virtual org |
24 | virtual report | 24 | virtual report |
diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci index d9ae6d89c2f5..577b78056990 100644 --- a/scripts/coccinelle/free/kfree.cocci +++ b/scripts/coccinelle/free/kfree.cocci | |||
@@ -10,7 +10,7 @@ | |||
10 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 10 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
11 | // URL: http://coccinelle.lip6.fr/ | 11 | // URL: http://coccinelle.lip6.fr/ |
12 | // Comments: | 12 | // Comments: |
13 | // Options: -no_includes -include_headers | 13 | // Options: --no-includes --include-headers |
14 | 14 | ||
15 | virtual org | 15 | virtual org |
16 | virtual report | 16 | virtual report |
diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci new file mode 100644 index 000000000000..ce8aacc314cb --- /dev/null +++ b/scripts/coccinelle/free/kfreeaddr.cocci | |||
@@ -0,0 +1,32 @@ | |||
1 | /// Free of a structure field | ||
2 | /// | ||
3 | // Confidence: High | ||
4 | // Copyright: (C) 2013 Julia Lawall, INRIA/LIP6. GPLv2. | ||
5 | // URL: http://coccinelle.lip6.fr/ | ||
6 | // Comments: | ||
7 | // Options: --no-includes --include-headers | ||
8 | |||
9 | virtual org | ||
10 | virtual report | ||
11 | virtual context | ||
12 | |||
13 | @r depends on context || report || org @ | ||
14 | expression e; | ||
15 | identifier f; | ||
16 | position p; | ||
17 | @@ | ||
18 | |||
19 | * kfree@p(&e->f) | ||
20 | |||
21 | @script:python depends on org@ | ||
22 | p << r.p; | ||
23 | @@ | ||
24 | |||
25 | cocci.print_main("kfree",p) | ||
26 | |||
27 | @script:python depends on report@ | ||
28 | p << r.p; | ||
29 | @@ | ||
30 | |||
31 | msg = "ERROR: kfree of structure field" | ||
32 | coccilib.report.print_report(p[0],msg) | ||
diff --git a/scripts/coccinelle/free/pci_free_consistent.cocci b/scripts/coccinelle/free/pci_free_consistent.cocci new file mode 100644 index 000000000000..43600ccb62a8 --- /dev/null +++ b/scripts/coccinelle/free/pci_free_consistent.cocci | |||
@@ -0,0 +1,52 @@ | |||
1 | /// Find missing pci_free_consistent for every pci_alloc_consistent. | ||
2 | /// | ||
3 | // Confidence: Moderate | ||
4 | // Copyright: (C) 2013 Petr Strnad. GPLv2. | ||
5 | // URL: http://coccinelle.lip6.fr/ | ||
6 | // Keywords: pci_free_consistent, pci_alloc_consistent | ||
7 | // Options: --no-includes --include-headers | ||
8 | |||
9 | virtual report | ||
10 | virtual org | ||
11 | |||
12 | @search@ | ||
13 | local idexpression id; | ||
14 | expression x,y,z,e; | ||
15 | position p1,p2; | ||
16 | type T; | ||
17 | @@ | ||
18 | |||
19 | id = pci_alloc_consistent@p1(x,y,&z) | ||
20 | ... when != e = id | ||
21 | if (id == NULL || ...) { ... return ...; } | ||
22 | ... when != pci_free_consistent(x,y,id,z) | ||
23 | when != if (id) { ... pci_free_consistent(x,y,id,z) ... } | ||
24 | when != if (y) { ... pci_free_consistent(x,y,id,z) ... } | ||
25 | when != e = (T)id | ||
26 | when exists | ||
27 | ( | ||
28 | return 0; | ||
29 | | | ||
30 | return 1; | ||
31 | | | ||
32 | return id; | ||
33 | | | ||
34 | return@p2 ...; | ||
35 | ) | ||
36 | |||
37 | @script:python depends on report@ | ||
38 | p1 << search.p1; | ||
39 | p2 << search.p2; | ||
40 | @@ | ||
41 | |||
42 | msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line) | ||
43 | coccilib.report.print_report(p2[0],msg) | ||
44 | |||
45 | @script:python depends on org@ | ||
46 | p1 << search.p1; | ||
47 | p2 << search.p2; | ||
48 | @@ | ||
49 | |||
50 | msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line) | ||
51 | cocci.print_main(msg,p1) | ||
52 | cocci.print_secs("",p2) | ||
diff --git a/scripts/coccinelle/iterators/fen.cocci b/scripts/coccinelle/iterators/fen.cocci index 0a40af828c43..48c152f224e1 100644 --- a/scripts/coccinelle/iterators/fen.cocci +++ b/scripts/coccinelle/iterators/fen.cocci | |||
@@ -7,7 +7,7 @@ | |||
7 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 7 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
8 | // URL: http://coccinelle.lip6.fr/ | 8 | // URL: http://coccinelle.lip6.fr/ |
9 | // Comments: | 9 | // Comments: |
10 | // Options: -no_includes -include_headers | 10 | // Options: --no-includes --include-headers |
11 | 11 | ||
12 | virtual patch | 12 | virtual patch |
13 | virtual context | 13 | virtual context |
diff --git a/scripts/coccinelle/iterators/itnull.cocci b/scripts/coccinelle/iterators/itnull.cocci index 259899f6838e..f58732b56a40 100644 --- a/scripts/coccinelle/iterators/itnull.cocci +++ b/scripts/coccinelle/iterators/itnull.cocci | |||
@@ -11,7 +11,7 @@ | |||
11 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 11 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
12 | // URL: http://coccinelle.lip6.fr/ | 12 | // URL: http://coccinelle.lip6.fr/ |
13 | // Comments: | 13 | // Comments: |
14 | // Options: -no_includes -include_headers | 14 | // Options: --no-includes --include-headers |
15 | 15 | ||
16 | virtual patch | 16 | virtual patch |
17 | virtual context | 17 | virtual context |
diff --git a/scripts/coccinelle/iterators/list_entry_update.cocci b/scripts/coccinelle/iterators/list_entry_update.cocci index b2967475679b..873f444e7137 100644 --- a/scripts/coccinelle/iterators/list_entry_update.cocci +++ b/scripts/coccinelle/iterators/list_entry_update.cocci | |||
@@ -9,7 +9,7 @@ | |||
9 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 9 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
10 | // URL: http://coccinelle.lip6.fr/ | 10 | // URL: http://coccinelle.lip6.fr/ |
11 | // Comments: | 11 | // Comments: |
12 | // Options: -no_includes -include_headers | 12 | // Options: --no-includes --include-headers |
13 | 13 | ||
14 | virtual context | 14 | virtual context |
15 | virtual org | 15 | virtual org |
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci index 06284c57a951..f085f5968c52 100644 --- a/scripts/coccinelle/iterators/use_after_iter.cocci +++ b/scripts/coccinelle/iterators/use_after_iter.cocci | |||
@@ -11,7 +11,7 @@ | |||
11 | // Copyright: (C) 2012 Gilles Muller, INRIA/LIP6. GPLv2. | 11 | // Copyright: (C) 2012 Gilles Muller, INRIA/LIP6. GPLv2. |
12 | // URL: http://coccinelle.lip6.fr/ | 12 | // URL: http://coccinelle.lip6.fr/ |
13 | // Comments: | 13 | // Comments: |
14 | // Options: -no_includes -include_headers | 14 | // Options: --no-includes --include-headers |
15 | 15 | ||
16 | virtual context | 16 | virtual context |
17 | virtual org | 17 | virtual org |
diff --git a/scripts/coccinelle/locks/call_kern.cocci b/scripts/coccinelle/locks/call_kern.cocci index 8f10b49603c3..669b24436248 100644 --- a/scripts/coccinelle/locks/call_kern.cocci +++ b/scripts/coccinelle/locks/call_kern.cocci | |||
@@ -9,7 +9,7 @@ | |||
9 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 9 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
10 | // URL: http://coccinelle.lip6.fr/ | 10 | // URL: http://coccinelle.lip6.fr/ |
11 | // Comments: | 11 | // Comments: |
12 | // Options: -no_includes -include_headers | 12 | // Options: --no-includes --include-headers |
13 | 13 | ||
14 | virtual patch | 14 | virtual patch |
15 | virtual context | 15 | virtual context |
diff --git a/scripts/coccinelle/locks/double_lock.cocci b/scripts/coccinelle/locks/double_lock.cocci index 63b24e682fad..002752f97dca 100644 --- a/scripts/coccinelle/locks/double_lock.cocci +++ b/scripts/coccinelle/locks/double_lock.cocci | |||
@@ -8,7 +8,7 @@ | |||
8 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 8 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
9 | // URL: http://coccinelle.lip6.fr/ | 9 | // URL: http://coccinelle.lip6.fr/ |
10 | // Comments: | 10 | // Comments: |
11 | // Options: -no_includes -include_headers | 11 | // Options: --no-includes --include-headers |
12 | 12 | ||
13 | virtual org | 13 | virtual org |
14 | virtual report | 14 | virtual report |
diff --git a/scripts/coccinelle/locks/flags.cocci b/scripts/coccinelle/locks/flags.cocci index 1c4ffe6fd846..debd70e46267 100644 --- a/scripts/coccinelle/locks/flags.cocci +++ b/scripts/coccinelle/locks/flags.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual context | 11 | virtual context |
12 | virtual org | 12 | virtual org |
diff --git a/scripts/coccinelle/locks/mini_lock.cocci b/scripts/coccinelle/locks/mini_lock.cocci index 3267d7410bd5..47f649b0ea87 100644 --- a/scripts/coccinelle/locks/mini_lock.cocci +++ b/scripts/coccinelle/locks/mini_lock.cocci | |||
@@ -11,7 +11,7 @@ | |||
11 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 11 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
12 | // URL: http://coccinelle.lip6.fr/ | 12 | // URL: http://coccinelle.lip6.fr/ |
13 | // Comments: | 13 | // Comments: |
14 | // Options: -no_includes -include_headers | 14 | // Options: --no-includes --include-headers |
15 | 15 | ||
16 | virtual context | 16 | virtual context |
17 | virtual org | 17 | virtual org |
diff --git a/scripts/coccinelle/misc/boolinit.cocci b/scripts/coccinelle/misc/boolinit.cocci index 97ce41ce8135..b9abed49cd95 100644 --- a/scripts/coccinelle/misc/boolinit.cocci +++ b/scripts/coccinelle/misc/boolinit.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. | 6 | // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. |
7 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 7 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
8 | // URL: http://coccinelle.lip6.fr/ | 8 | // URL: http://coccinelle.lip6.fr/ |
9 | // Options: -include_headers | 9 | // Options: --include-headers |
10 | 10 | ||
11 | virtual patch | 11 | virtual patch |
12 | virtual context | 12 | virtual context |
diff --git a/scripts/coccinelle/misc/cstptr.cocci b/scripts/coccinelle/misc/cstptr.cocci index d42564484528..f0368b3d4563 100644 --- a/scripts/coccinelle/misc/cstptr.cocci +++ b/scripts/coccinelle/misc/cstptr.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual org | 11 | virtual org |
12 | virtual report | 12 | virtual report |
diff --git a/scripts/coccinelle/misc/doubleinit.cocci b/scripts/coccinelle/misc/doubleinit.cocci index cf74a00cf597..c0c3371d25e0 100644 --- a/scripts/coccinelle/misc/doubleinit.cocci +++ b/scripts/coccinelle/misc/doubleinit.cocci | |||
@@ -8,7 +8,7 @@ | |||
8 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 8 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
9 | // URL: http://coccinelle.lip6.fr/ | 9 | // URL: http://coccinelle.lip6.fr/ |
10 | // Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise | 10 | // Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise |
11 | // Options: -no_includes -include_headers | 11 | // Options: --no-includes --include-headers |
12 | 12 | ||
13 | virtual org | 13 | virtual org |
14 | virtual report | 14 | virtual report |
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci index 3e4089a77000..8aebd1875e75 100644 --- a/scripts/coccinelle/misc/ifaddr.cocci +++ b/scripts/coccinelle/misc/ifaddr.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual org | 11 | virtual org |
12 | virtual report | 12 | virtual report |
diff --git a/scripts/coccinelle/misc/ifcol.cocci b/scripts/coccinelle/misc/ifcol.cocci index b7ed91dbeb95..d0d00ef1f12a 100644 --- a/scripts/coccinelle/misc/ifcol.cocci +++ b/scripts/coccinelle/misc/ifcol.cocci | |||
@@ -13,7 +13,7 @@ | |||
13 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 13 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
14 | // URL: http://coccinelle.lip6.fr/ | 14 | // URL: http://coccinelle.lip6.fr/ |
15 | // Comments: | 15 | // Comments: |
16 | // Options: -no_includes -include_headers | 16 | // Options: --no-includes --include-headers |
17 | 17 | ||
18 | virtual org | 18 | virtual org |
19 | virtual report | 19 | virtual report |
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci index c1707214e602..80a831c91161 100644 --- a/scripts/coccinelle/misc/noderef.cocci +++ b/scripts/coccinelle/misc/noderef.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual org | 11 | virtual org |
12 | virtual report | 12 | virtual report |
diff --git a/scripts/coccinelle/misc/orplus.cocci b/scripts/coccinelle/misc/orplus.cocci index 4a28cef1484e..81fabf379390 100644 --- a/scripts/coccinelle/misc/orplus.cocci +++ b/scripts/coccinelle/misc/orplus.cocci | |||
@@ -7,7 +7,7 @@ | |||
7 | // Copyright: (C) 2013 Gilles Muller, INRIA/LIP6. GPLv2. | 7 | // Copyright: (C) 2013 Gilles Muller, INRIA/LIP6. GPLv2. |
8 | // URL: http://coccinelle.lip6.fr/ | 8 | // URL: http://coccinelle.lip6.fr/ |
9 | // Comments: | 9 | // Comments: |
10 | // Options: -no_includes -include_headers | 10 | // Options: --no-includes --include-headers |
11 | 11 | ||
12 | virtual org | 12 | virtual org |
13 | virtual report | 13 | virtual report |
diff --git a/scripts/coccinelle/misc/warn.cocci b/scripts/coccinelle/misc/warn.cocci index fda8c3558e4f..d2e5b6cedb84 100644 --- a/scripts/coccinelle/misc/warn.cocci +++ b/scripts/coccinelle/misc/warn.cocci | |||
@@ -5,7 +5,7 @@ | |||
5 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. | 5 | // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. |
6 | // URL: http://coccinelle.lip6.fr/ | 6 | // URL: http://coccinelle.lip6.fr/ |
7 | // Comments: | 7 | // Comments: |
8 | // Options: -no_includes -include_headers | 8 | // Options: --no-includes --include-headers |
9 | 9 | ||
10 | virtual patch | 10 | virtual patch |
11 | virtual context | 11 | virtual context |
diff --git a/scripts/coccinelle/null/eno.cocci b/scripts/coccinelle/null/eno.cocci index ed961a1f7d11..9bd29aa83399 100644 --- a/scripts/coccinelle/null/eno.cocci +++ b/scripts/coccinelle/null/eno.cocci | |||
@@ -6,7 +6,7 @@ | |||
6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. | 6 | // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. |
7 | // URL: http://coccinelle.lip6.fr/ | 7 | // URL: http://coccinelle.lip6.fr/ |
8 | // Comments: | 8 | // Comments: |
9 | // Options: -no_includes -include_headers | 9 | // Options: --no-includes --include-headers |
10 | 10 | ||
11 | virtual patch | 11 | virtual patch |
12 | virtual context | 12 | virtual context |
diff --git a/scripts/coccinelle/null/kmerr.cocci b/scripts/coccinelle/null/kmerr.cocci index 949bf656c64c..5354a7903ccb 100644 --- a/scripts/coccinelle/null/kmerr.cocci +++ b/scripts/coccinelle/null/kmerr.cocci | |||
@@ -10,7 +10,7 @@ | |||
10 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 10 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
11 | // URL: http://coccinelle.lip6.fr/ | 11 | // URL: http://coccinelle.lip6.fr/ |
12 | // Comments: | 12 | // Comments: |
13 | // Options: -no_includes -include_headers | 13 | // Options: --no-includes --include-headers |
14 | 14 | ||
15 | virtual context | 15 | virtual context |
16 | virtual org | 16 | virtual org |
diff --git a/scripts/coccinelle/tests/doublebitand.cocci b/scripts/coccinelle/tests/doublebitand.cocci index 9ba73d05a77e..72f1572aaec3 100644 --- a/scripts/coccinelle/tests/doublebitand.cocci +++ b/scripts/coccinelle/tests/doublebitand.cocci | |||
@@ -10,7 +10,7 @@ | |||
10 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 10 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
11 | // URL: http://coccinelle.lip6.fr/ | 11 | // URL: http://coccinelle.lip6.fr/ |
12 | // Comments: | 12 | // Comments: |
13 | // Options: -no_includes -include_headers | 13 | // Options: --no-includes --include-headers |
14 | 14 | ||
15 | virtual context | 15 | virtual context |
16 | virtual org | 16 | virtual org |
diff --git a/scripts/coccinelle/tests/doubletest.cocci b/scripts/coccinelle/tests/doubletest.cocci index 13a2c0e8a4bf..78d74c22ca12 100644 --- a/scripts/coccinelle/tests/doubletest.cocci +++ b/scripts/coccinelle/tests/doubletest.cocci | |||
@@ -8,7 +8,7 @@ | |||
8 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. | 8 | // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. |
9 | // URL: http://coccinelle.lip6.fr/ | 9 | // URL: http://coccinelle.lip6.fr/ |
10 | // Comments: | 10 | // Comments: |
11 | // Options: -no_includes -include_headers | 11 | // Options: --no-includes --include-headers |
12 | 12 | ||
13 | virtual context | 13 | virtual context |
14 | virtual org | 14 | virtual org |
diff --git a/scripts/coccinelle/tests/odd_ptr_err.cocci b/scripts/coccinelle/tests/odd_ptr_err.cocci index e8dd8a6b28a2..cfe0a35cf2dd 100644 --- a/scripts/coccinelle/tests/odd_ptr_err.cocci +++ b/scripts/coccinelle/tests/odd_ptr_err.cocci | |||
@@ -7,7 +7,7 @@ | |||
7 | // Copyright: (C) 2012 Gilles Muller, INRIA. GPLv2. | 7 | // Copyright: (C) 2012 Gilles Muller, INRIA. GPLv2. |
8 | // URL: http://coccinelle.lip6.fr/ | 8 | // URL: http://coccinelle.lip6.fr/ |
9 | // Comments: | 9 | // Comments: |
10 | // Options: -no_includes -include_headers | 10 | // Options: --no-includes --include-headers |
11 | 11 | ||
12 | virtual patch | 12 | virtual patch |
13 | virtual context | 13 | virtual context |
diff --git a/scripts/config b/scripts/config index a65ecbbdd32a..567120a87c39 100755 --- a/scripts/config +++ b/scripts/config | |||
@@ -1,6 +1,8 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | # Manipulate options in a .config file from the command line | 2 | # Manipulate options in a .config file from the command line |
3 | 3 | ||
4 | myname=${0##*/} | ||
5 | |||
4 | # If no prefix forced, use the default CONFIG_ | 6 | # If no prefix forced, use the default CONFIG_ |
5 | CONFIG_="${CONFIG_-CONFIG_}" | 7 | CONFIG_="${CONFIG_-CONFIG_}" |
6 | 8 | ||
@@ -8,7 +10,7 @@ usage() { | |||
8 | cat >&2 <<EOL | 10 | cat >&2 <<EOL |
9 | Manipulate options in a .config file from the command line. | 11 | Manipulate options in a .config file from the command line. |
10 | Usage: | 12 | Usage: |
11 | config options command ... | 13 | $myname options command ... |
12 | commands: | 14 | commands: |
13 | --enable|-e option Enable option | 15 | --enable|-e option Enable option |
14 | --disable|-d option Disable option | 16 | --disable|-d option Disable option |
@@ -33,14 +35,14 @@ options: | |||
33 | --file config-file .config file to change (default .config) | 35 | --file config-file .config file to change (default .config) |
34 | --keep-case|-k Keep next symbols' case (don't upper-case it) | 36 | --keep-case|-k Keep next symbols' case (don't upper-case it) |
35 | 37 | ||
36 | config doesn't check the validity of the .config file. This is done at next | 38 | $myname doesn't check the validity of the .config file. This is done at next |
37 | make time. | 39 | make time. |
38 | 40 | ||
39 | By default, config will upper-case the given symbol. Use --keep-case to keep | 41 | By default, $myname will upper-case the given symbol. Use --keep-case to keep |
40 | the case of all following symbols unchanged. | 42 | the case of all following symbols unchanged. |
41 | 43 | ||
42 | config uses 'CONFIG_' as the default symbol prefix. Set the environment | 44 | $myname uses 'CONFIG_' as the default symbol prefix. Set the environment |
43 | variable CONFIG_ to the prefix to use. Eg.: CONFIG_="FOO_" config ... | 45 | variable CONFIG_ to the prefix to use. Eg.: CONFIG_="FOO_" $myname ... |
44 | EOL | 46 | EOL |
45 | exit 1 | 47 | exit 1 |
46 | } | 48 | } |
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 643764f53ea7..5de5660cb708 100644 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | if [ $# -lt 1 ] | 3 | if [ $# -lt 1 ] |
4 | then | 4 | then |
5 | echo "Usage: headers_install.sh OUTDIR [FILES...] | 5 | echo "Usage: headers_install.sh OUTDIR SRCDIR [FILES...] |
6 | echo | 6 | echo |
7 | echo "Prepares kernel header files for use by user space, by removing" | 7 | echo "Prepares kernel header files for use by user space, by removing" |
8 | echo "all compiler.h definitions and #includes, removing any" | 8 | echo "all compiler.h definitions and #includes, removing any" |
@@ -10,6 +10,7 @@ then | |||
10 | echo "asm/inline/volatile keywords." | 10 | echo "asm/inline/volatile keywords." |
11 | echo | 11 | echo |
12 | echo "OUTDIR: directory to write each userspace header FILE to." | 12 | echo "OUTDIR: directory to write each userspace header FILE to." |
13 | echo "SRCDIR: source directory where files are picked." | ||
13 | echo "FILES: list of header files to operate on." | 14 | echo "FILES: list of header files to operate on." |
14 | 15 | ||
15 | exit 1 | 16 | exit 1 |
@@ -19,6 +20,8 @@ fi | |||
19 | 20 | ||
20 | OUTDIR="$1" | 21 | OUTDIR="$1" |
21 | shift | 22 | shift |
23 | SRCDIR="$1" | ||
24 | shift | ||
22 | 25 | ||
23 | # Iterate through files listed on command line | 26 | # Iterate through files listed on command line |
24 | 27 | ||
@@ -34,7 +37,7 @@ do | |||
34 | -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ | 37 | -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ |
35 | -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \ | 38 | -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \ |
36 | -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \ | 39 | -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \ |
37 | "$i" > "$OUTDIR/$FILE.sed" || exit 1 | 40 | "$SRCDIR/$i" > "$OUTDIR/$FILE.sed" || exit 1 |
38 | scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ "$OUTDIR/$FILE.sed" \ | 41 | scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ "$OUTDIR/$FILE.sed" \ |
39 | > "$OUTDIR/$FILE" | 42 | > "$OUTDIR/$FILE" |
40 | [ $? -gt 1 ] && exit 1 | 43 | [ $? -gt 1 ] && exit 1 |
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index bde5b95c8c19..d19944f9c3ac 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c | |||
@@ -527,11 +527,12 @@ int main(int ac, char **av) | |||
527 | seed_env = getenv("KCONFIG_SEED"); | 527 | seed_env = getenv("KCONFIG_SEED"); |
528 | if( seed_env && *seed_env ) { | 528 | if( seed_env && *seed_env ) { |
529 | char *endp; | 529 | char *endp; |
530 | int tmp = (int)strtol(seed_env, &endp, 10); | 530 | int tmp = (int)strtol(seed_env, &endp, 0); |
531 | if (*endp == '\0') { | 531 | if (*endp == '\0') { |
532 | seed = tmp; | 532 | seed = tmp; |
533 | } | 533 | } |
534 | } | 534 | } |
535 | fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed ); | ||
535 | srand(seed); | 536 | srand(seed); |
536 | break; | 537 | break; |
537 | } | 538 | } |
@@ -653,7 +654,8 @@ int main(int ac, char **av) | |||
653 | conf_set_all_new_symbols(def_default); | 654 | conf_set_all_new_symbols(def_default); |
654 | break; | 655 | break; |
655 | case randconfig: | 656 | case randconfig: |
656 | conf_set_all_new_symbols(def_random); | 657 | /* Really nothing to do in this loop */ |
658 | while (conf_set_all_new_symbols(def_random)) ; | ||
657 | break; | 659 | break; |
658 | case defconfig: | 660 | case defconfig: |
659 | conf_set_all_new_symbols(def_default); | 661 | conf_set_all_new_symbols(def_default); |
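The conf.c hunk above does two things for randconfig: strtol() now parses KCONFIG_SEED with base 0, so the seed can be given in decimal, hex (0x...) or octal, and the chosen seed is echoed to stderr before srand(). A minimal standalone sketch of that parsing behaviour; parse_seed() is a made-up helper, not part of the kernel sources:

  #include <stdio.h>
  #include <stdlib.h>

  /* Parse a seed the way conf.c now does: base 0 lets strtol() accept
   * "1234", "0x4d2" or "02322" alike; reject trailing garbage. */
  static unsigned int parse_seed(const char *env, unsigned int fallback)
  {
          char *endp;
          long tmp;

          if (!env || !*env)
                  return fallback;
          tmp = strtol(env, &endp, 0);
          if (*endp != '\0')
                  return fallback;        /* not a complete number */
          return (unsigned int)tmp;
  }

  int main(void)
  {
          unsigned int seed = parse_seed(getenv("KCONFIG_SEED"), 42);

          fprintf(stderr, "KCONFIG_SEED=0x%X\n", seed);
          srand(seed);
          return 0;
  }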
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 43eda40c3838..c55c227af463 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -1040,7 +1040,7 @@ void conf_set_changed_callback(void (*fn)(void)) | |||
1040 | conf_changed_callback = fn; | 1040 | conf_changed_callback = fn; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | static void randomize_choice_values(struct symbol *csym) | 1043 | static bool randomize_choice_values(struct symbol *csym) |
1044 | { | 1044 | { |
1045 | struct property *prop; | 1045 | struct property *prop; |
1046 | struct symbol *sym; | 1046 | struct symbol *sym; |
@@ -1053,7 +1053,7 @@ static void randomize_choice_values(struct symbol *csym) | |||
1053 | * In both cases stop. | 1053 | * In both cases stop. |
1054 | */ | 1054 | */ |
1055 | if (csym->curr.tri != yes) | 1055 | if (csym->curr.tri != yes) |
1056 | return; | 1056 | return false; |
1057 | 1057 | ||
1058 | prop = sym_get_choice_prop(csym); | 1058 | prop = sym_get_choice_prop(csym); |
1059 | 1059 | ||
@@ -1077,13 +1077,18 @@ static void randomize_choice_values(struct symbol *csym) | |||
1077 | else { | 1077 | else { |
1078 | sym->def[S_DEF_USER].tri = no; | 1078 | sym->def[S_DEF_USER].tri = no; |
1079 | } | 1079 | } |
1080 | sym->flags |= SYMBOL_DEF_USER; | ||
1081 | /* clear VALID to get value calculated */ | ||
1082 | sym->flags &= ~SYMBOL_VALID; | ||
1080 | } | 1083 | } |
1081 | csym->flags |= SYMBOL_DEF_USER; | 1084 | csym->flags |= SYMBOL_DEF_USER; |
1082 | /* clear VALID to get value calculated */ | 1085 | /* clear VALID to get value calculated */ |
1083 | csym->flags &= ~(SYMBOL_VALID); | 1086 | csym->flags &= ~(SYMBOL_VALID); |
1087 | |||
1088 | return true; | ||
1084 | } | 1089 | } |
1085 | 1090 | ||
1086 | static void set_all_choice_values(struct symbol *csym) | 1091 | void set_all_choice_values(struct symbol *csym) |
1087 | { | 1092 | { |
1088 | struct property *prop; | 1093 | struct property *prop; |
1089 | struct symbol *sym; | 1094 | struct symbol *sym; |
@@ -1100,10 +1105,10 @@ static void set_all_choice_values(struct symbol *csym) | |||
1100 | } | 1105 | } |
1101 | csym->flags |= SYMBOL_DEF_USER; | 1106 | csym->flags |= SYMBOL_DEF_USER; |
1102 | /* clear VALID to get value calculated */ | 1107 | /* clear VALID to get value calculated */ |
1103 | csym->flags &= ~(SYMBOL_VALID); | 1108 | csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES); |
1104 | } | 1109 | } |
1105 | 1110 | ||
1106 | void conf_set_all_new_symbols(enum conf_def_mode mode) | 1111 | bool conf_set_all_new_symbols(enum conf_def_mode mode) |
1107 | { | 1112 | { |
1108 | struct symbol *sym, *csym; | 1113 | struct symbol *sym, *csym; |
1109 | int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y | 1114 | int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y |
@@ -1151,6 +1156,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) | |||
1151 | exit( 1 ); | 1156 | exit( 1 ); |
1152 | } | 1157 | } |
1153 | } | 1158 | } |
1159 | bool has_changed = false; | ||
1154 | 1160 | ||
1155 | for_all_symbols(i, sym) { | 1161 | for_all_symbols(i, sym) { |
1156 | if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) | 1162 | if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) |
@@ -1158,6 +1164,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) | |||
1158 | switch (sym_get_type(sym)) { | 1164 | switch (sym_get_type(sym)) { |
1159 | case S_BOOLEAN: | 1165 | case S_BOOLEAN: |
1160 | case S_TRISTATE: | 1166 | case S_TRISTATE: |
1167 | has_changed = true; | ||
1161 | switch (mode) { | 1168 | switch (mode) { |
1162 | case def_yes: | 1169 | case def_yes: |
1163 | sym->def[S_DEF_USER].tri = yes; | 1170 | sym->def[S_DEF_USER].tri = yes; |
@@ -1202,14 +1209,26 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) | |||
1202 | * selected in a choice block and we set it to yes, | 1209 | * selected in a choice block and we set it to yes, |
1203 | * and the rest to no. | 1210 | * and the rest to no. |
1204 | */ | 1211 | */ |
1212 | if (mode != def_random) { | ||
1213 | for_all_symbols(i, csym) { | ||
1214 | if ((sym_is_choice(csym) && !sym_has_value(csym)) || | ||
1215 | sym_is_choice_value(csym)) | ||
1216 | csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES; | ||
1217 | } | ||
1218 | } | ||
1219 | |||
1205 | for_all_symbols(i, csym) { | 1220 | for_all_symbols(i, csym) { |
1206 | if (sym_has_value(csym) || !sym_is_choice(csym)) | 1221 | if (sym_has_value(csym) || !sym_is_choice(csym)) |
1207 | continue; | 1222 | continue; |
1208 | 1223 | ||
1209 | sym_calc_value(csym); | 1224 | sym_calc_value(csym); |
1210 | if (mode == def_random) | 1225 | if (mode == def_random) |
1211 | randomize_choice_values(csym); | 1226 | has_changed = randomize_choice_values(csym); |
1212 | else | 1227 | else { |
1213 | set_all_choice_values(csym); | 1228 | set_all_choice_values(csym); |
1229 | has_changed = true; | ||
1230 | } | ||
1214 | } | 1231 | } |
1232 | |||
1233 | return has_changed; | ||
1215 | } | 1234 | } |
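conf_set_all_new_symbols() now reports whether the pass actually assigned anything, which is what lets the randconfig case in conf.c loop until a pass makes no further changes. A generic sketch of that fixed-point idiom, using a made-up assign_new_values() stand-in rather than the real kconfig machinery:

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-in for conf_set_all_new_symbols(def_random):
   * pretend three symbols still need a value, one per pass. */
  static int pending = 3;

  static bool assign_new_values(void)
  {
          if (pending == 0)
                  return false;   /* nothing left to set */
          pending--;
          return true;            /* this pass changed something */
  }

  int main(void)
  {
          int passes = 0;

          /* Same shape as the new randconfig loop in conf.c. */
          while (assign_new_values())
                  passes++;
          printf("settled after %d passes\n", passes);
          return 0;
  }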
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index cdd48600e02a..df198a5f4822 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h | |||
@@ -106,6 +106,9 @@ struct symbol { | |||
106 | #define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */ | 106 | #define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */ |
107 | #define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */ | 107 | #define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */ |
108 | 108 | ||
109 | /* choice values need to be set before calculating this symbol value */ | ||
110 | #define SYMBOL_NEED_SET_CHOICE_VALUES 0x100000 | ||
111 | |||
109 | #define SYMBOL_MAXLENGTH 256 | 112 | #define SYMBOL_MAXLENGTH 256 |
110 | #define SYMBOL_HASHSIZE 9973 | 113 | #define SYMBOL_HASHSIZE 9973 |
111 | 114 | ||
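The new SYMBOL_NEED_SET_CHOICE_VALUES bit follows the usual kconfig flag pattern: set it when a choice value must wait for its containing choice, then test-and-clear it at calculation time. A toy sketch of that set/test/clear cycle; the struct and the flag values below are illustrative only, not the real expr.h definitions:

  #include <stdio.h>

  #define SYMBOL_VALID                    0x0080          /* illustrative value */
  #define SYMBOL_NEED_SET_CHOICE_VALUES   0x100000

  struct fake_symbol {
          const char *name;
          int flags;
  };

  int main(void)
  {
          struct fake_symbol s = { "CHOICE_VALUE", 0 };

          /* Mark it: choice values must be set before this one is used. */
          s.flags |= SYMBOL_NEED_SET_CHOICE_VALUES;

          /* Later, test and clear in one place, as sym_calc_value() does. */
          if (s.flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
                  s.flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
                  printf("%s: resolve the containing choice first\n", s.name);
          }
          s.flags |= SYMBOL_VALID;
          return 0;
  }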
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h index f8aee5fc6d5e..09f4edfdc911 100644 --- a/scripts/kconfig/lkc.h +++ b/scripts/kconfig/lkc.h | |||
@@ -86,7 +86,8 @@ const char *conf_get_autoconfig_name(void); | |||
86 | char *conf_get_default_confname(void); | 86 | char *conf_get_default_confname(void); |
87 | void sym_set_change_count(int count); | 87 | void sym_set_change_count(int count); |
88 | void sym_add_change_count(int count); | 88 | void sym_add_change_count(int count); |
89 | void conf_set_all_new_symbols(enum conf_def_mode mode); | 89 | bool conf_set_all_new_symbols(enum conf_def_mode mode); |
90 | void set_all_choice_values(struct symbol *csym); | ||
90 | 91 | ||
91 | struct conf_printer { | 92 | struct conf_printer { |
92 | void (*print_symbol)(FILE *, struct symbol *, const char *, void *); | 93 | void (*print_symbol)(FILE *, struct symbol *, const char *, void *); |
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h index ef1a7381f956..ecdb9659b67d 100644 --- a/scripts/kconfig/lkc_proto.h +++ b/scripts/kconfig/lkc_proto.h | |||
@@ -14,6 +14,7 @@ P(conf_set_message_callback, void,(void (*fn)(const char *fmt, va_list ap))); | |||
14 | /* menu.c */ | 14 | /* menu.c */ |
15 | P(rootmenu,struct menu,); | 15 | P(rootmenu,struct menu,); |
16 | 16 | ||
17 | P(menu_is_empty, bool, (struct menu *menu)); | ||
17 | P(menu_is_visible, bool, (struct menu *menu)); | 18 | P(menu_is_visible, bool, (struct menu *menu)); |
18 | P(menu_has_prompt, bool, (struct menu *menu)); | 19 | P(menu_has_prompt, bool, (struct menu *menu)); |
19 | P(menu_get_prompt,const char *,(struct menu *menu)); | 20 | P(menu_get_prompt,const char *,(struct menu *menu)); |
diff --git a/scripts/kconfig/lxdialog/checklist.c b/scripts/kconfig/lxdialog/checklist.c index a2eb80fbc896..3b15c08ec1fa 100644 --- a/scripts/kconfig/lxdialog/checklist.c +++ b/scripts/kconfig/lxdialog/checklist.c | |||
@@ -132,16 +132,16 @@ int dialog_checklist(const char *title, const char *prompt, int height, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | do_resize: | 134 | do_resize: |
135 | if (getmaxy(stdscr) < (height + 6)) | 135 | if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN)) |
136 | return -ERRDISPLAYTOOSMALL; | 136 | return -ERRDISPLAYTOOSMALL; |
137 | if (getmaxx(stdscr) < (width + 6)) | 137 | if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN)) |
138 | return -ERRDISPLAYTOOSMALL; | 138 | return -ERRDISPLAYTOOSMALL; |
139 | 139 | ||
140 | max_choice = MIN(list_height, item_count()); | 140 | max_choice = MIN(list_height, item_count()); |
141 | 141 | ||
142 | /* center dialog box on screen */ | 142 | /* center dialog box on screen */ |
143 | x = (COLS - width) / 2; | 143 | x = (getmaxx(stdscr) - width) / 2; |
144 | y = (LINES - height) / 2; | 144 | y = (getmaxy(stdscr) - height) / 2; |
145 | 145 | ||
146 | draw_shadow(stdscr, y, x, height, width); | 146 | draw_shadow(stdscr, y, x, height, width); |
147 | 147 | ||
diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h index 1099337079b6..b4343d384926 100644 --- a/scripts/kconfig/lxdialog/dialog.h +++ b/scripts/kconfig/lxdialog/dialog.h | |||
@@ -200,6 +200,20 @@ int item_is_tag(char tag); | |||
200 | int on_key_esc(WINDOW *win); | 200 | int on_key_esc(WINDOW *win); |
201 | int on_key_resize(void); | 201 | int on_key_resize(void); |
202 | 202 | ||
203 | /* minimum (re)size values */ | ||
204 | #define CHECKLIST_HEIGTH_MIN 6 /* For dialog_checklist() */ | ||
205 | #define CHECKLIST_WIDTH_MIN 6 | ||
206 | #define INPUTBOX_HEIGTH_MIN 2 /* For dialog_inputbox() */ | ||
207 | #define INPUTBOX_WIDTH_MIN 2 | ||
208 | #define MENUBOX_HEIGTH_MIN 15 /* For dialog_menu() */ | ||
209 | #define MENUBOX_WIDTH_MIN 65 | ||
210 | #define TEXTBOX_HEIGTH_MIN 8 /* For dialog_textbox() */ | ||
211 | #define TEXTBOX_WIDTH_MIN 8 | ||
212 | #define YESNO_HEIGTH_MIN 4 /* For dialog_yesno() */ | ||
213 | #define YESNO_WIDTH_MIN 4 | ||
214 | #define WINDOW_HEIGTH_MIN 19 /* For init_dialog() */ | ||
215 | #define WINDOW_WIDTH_MIN 80 | ||
216 | |||
203 | int init_dialog(const char *backtitle); | 217 | int init_dialog(const char *backtitle); |
204 | void set_dialog_backtitle(const char *backtitle); | 218 | void set_dialog_backtitle(const char *backtitle); |
205 | void set_dialog_subtitles(struct subtitle_list *subtitles); | 219 | void set_dialog_subtitles(struct subtitle_list *subtitles); |
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c index 21404a04d7c3..447a582198c9 100644 --- a/scripts/kconfig/lxdialog/inputbox.c +++ b/scripts/kconfig/lxdialog/inputbox.c | |||
@@ -56,14 +56,14 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width | |||
56 | strcpy(instr, init); | 56 | strcpy(instr, init); |
57 | 57 | ||
58 | do_resize: | 58 | do_resize: |
59 | if (getmaxy(stdscr) <= (height - 2)) | 59 | if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) |
60 | return -ERRDISPLAYTOOSMALL; | 60 | return -ERRDISPLAYTOOSMALL; |
61 | if (getmaxx(stdscr) <= (width - 2)) | 61 | if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN)) |
62 | return -ERRDISPLAYTOOSMALL; | 62 | return -ERRDISPLAYTOOSMALL; |
63 | 63 | ||
64 | /* center dialog box on screen */ | 64 | /* center dialog box on screen */ |
65 | x = (COLS - width) / 2; | 65 | x = (getmaxx(stdscr) - width) / 2; |
66 | y = (LINES - height) / 2; | 66 | y = (getmaxy(stdscr) - height) / 2; |
67 | 67 | ||
68 | draw_shadow(stdscr, y, x, height, width); | 68 | draw_shadow(stdscr, y, x, height, width); |
69 | 69 | ||
diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c index 38cd69c5660e..c93de0b2faca 100644 --- a/scripts/kconfig/lxdialog/menubox.c +++ b/scripts/kconfig/lxdialog/menubox.c | |||
@@ -193,7 +193,7 @@ int dialog_menu(const char *title, const char *prompt, | |||
193 | do_resize: | 193 | do_resize: |
194 | height = getmaxy(stdscr); | 194 | height = getmaxy(stdscr); |
195 | width = getmaxx(stdscr); | 195 | width = getmaxx(stdscr); |
196 | if (height < 15 || width < 65) | 196 | if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN) |
197 | return -ERRDISPLAYTOOSMALL; | 197 | return -ERRDISPLAYTOOSMALL; |
198 | 198 | ||
199 | height -= 4; | 199 | height -= 4; |
@@ -203,8 +203,8 @@ do_resize: | |||
203 | max_choice = MIN(menu_height, item_count()); | 203 | max_choice = MIN(menu_height, item_count()); |
204 | 204 | ||
205 | /* center dialog box on screen */ | 205 | /* center dialog box on screen */ |
206 | x = (COLS - width) / 2; | 206 | x = (getmaxx(stdscr) - width) / 2; |
207 | y = (LINES - height) / 2; | 207 | y = (getmaxy(stdscr) - height) / 2; |
208 | 208 | ||
209 | draw_shadow(stdscr, y, x, height, width); | 209 | draw_shadow(stdscr, y, x, height, width); |
210 | 210 | ||
diff --git a/scripts/kconfig/lxdialog/textbox.c b/scripts/kconfig/lxdialog/textbox.c index a48bb93e0907..1773319b95e7 100644 --- a/scripts/kconfig/lxdialog/textbox.c +++ b/scripts/kconfig/lxdialog/textbox.c | |||
@@ -80,7 +80,7 @@ int dialog_textbox(const char *title, char *tbuf, int initial_height, | |||
80 | 80 | ||
81 | do_resize: | 81 | do_resize: |
82 | getmaxyx(stdscr, height, width); | 82 | getmaxyx(stdscr, height, width); |
83 | if (height < 8 || width < 8) | 83 | if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN) |
84 | return -ERRDISPLAYTOOSMALL; | 84 | return -ERRDISPLAYTOOSMALL; |
85 | if (initial_height != 0) | 85 | if (initial_height != 0) |
86 | height = initial_height; | 86 | height = initial_height; |
@@ -98,8 +98,8 @@ do_resize: | |||
98 | width = 0; | 98 | width = 0; |
99 | 99 | ||
100 | /* center dialog box on screen */ | 100 | /* center dialog box on screen */ |
101 | x = (COLS - width) / 2; | 101 | x = (getmaxx(stdscr) - width) / 2; |
102 | y = (LINES - height) / 2; | 102 | y = (getmaxy(stdscr) - height) / 2; |
103 | 103 | ||
104 | draw_shadow(stdscr, y, x, height, width); | 104 | draw_shadow(stdscr, y, x, height, width); |
105 | 105 | ||
diff --git a/scripts/kconfig/lxdialog/util.c b/scripts/kconfig/lxdialog/util.c index a0e97c299410..58a8289dd650 100644 --- a/scripts/kconfig/lxdialog/util.c +++ b/scripts/kconfig/lxdialog/util.c | |||
@@ -254,7 +254,12 @@ void attr_clear(WINDOW * win, int height, int width, chtype attr) | |||
254 | 254 | ||
255 | void dialog_clear(void) | 255 | void dialog_clear(void) |
256 | { | 256 | { |
257 | attr_clear(stdscr, LINES, COLS, dlg.screen.atr); | 257 | int lines, columns; |
258 | |||
259 | lines = getmaxy(stdscr); | ||
260 | columns = getmaxx(stdscr); | ||
261 | |||
262 | attr_clear(stdscr, lines, columns, dlg.screen.atr); | ||
258 | /* Display background title if it exists ... - SLH */ | 263 | /* Display background title if it exists ... - SLH */ |
259 | if (dlg.backtitle != NULL) { | 264 | if (dlg.backtitle != NULL) { |
260 | int i, len = 0, skip = 0; | 265 | int i, len = 0, skip = 0; |
@@ -269,10 +274,10 @@ void dialog_clear(void) | |||
269 | } | 274 | } |
270 | 275 | ||
271 | wmove(stdscr, 1, 1); | 276 | wmove(stdscr, 1, 1); |
272 | if (len > COLS - 2) { | 277 | if (len > columns - 2) { |
273 | const char *ellipsis = "[...] "; | 278 | const char *ellipsis = "[...] "; |
274 | waddstr(stdscr, ellipsis); | 279 | waddstr(stdscr, ellipsis); |
275 | skip = len - (COLS - 2 - strlen(ellipsis)); | 280 | skip = len - (columns - 2 - strlen(ellipsis)); |
276 | } | 281 | } |
277 | 282 | ||
278 | for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { | 283 | for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { |
@@ -298,7 +303,7 @@ void dialog_clear(void) | |||
298 | skip--; | 303 | skip--; |
299 | } | 304 | } |
300 | 305 | ||
301 | for (i = len + 1; i < COLS - 1; i++) | 306 | for (i = len + 1; i < columns - 1; i++) |
302 | waddch(stdscr, ACS_HLINE); | 307 | waddch(stdscr, ACS_HLINE); |
303 | } | 308 | } |
304 | wnoutrefresh(stdscr); | 309 | wnoutrefresh(stdscr); |
@@ -317,7 +322,7 @@ int init_dialog(const char *backtitle) | |||
317 | getyx(stdscr, saved_y, saved_x); | 322 | getyx(stdscr, saved_y, saved_x); |
318 | 323 | ||
319 | getmaxyx(stdscr, height, width); | 324 | getmaxyx(stdscr, height, width); |
320 | if (height < 19 || width < 80) { | 325 | if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) { |
321 | endwin(); | 326 | endwin(); |
322 | return -ERRDISPLAYTOOSMALL; | 327 | return -ERRDISPLAYTOOSMALL; |
323 | } | 328 | } |
@@ -371,27 +376,19 @@ void print_title(WINDOW *dialog, const char *title, int width) | |||
371 | /* | 376 | /* |
372 | * Print a string of text in a window, automatically wrap around to the | 377 | * Print a string of text in a window, automatically wrap around to the |
373 | * next line if the string is too long to fit on one line. Newline | 378 | * next line if the string is too long to fit on one line. Newline |
374 | * characters '\n' are replaced by spaces. We start on a new line | 379 | * characters '\n' are properly processed. We start on a new line |
375 | * if there is no room for at least 4 nonblanks following a double-space. | 380 | * if there is no room for at least 4 nonblanks following a double-space. |
376 | */ | 381 | */ |
377 | void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) | 382 | void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) |
378 | { | 383 | { |
379 | int newl, cur_x, cur_y; | 384 | int newl, cur_x, cur_y; |
380 | int i, prompt_len, room, wlen; | 385 | int prompt_len, room, wlen; |
381 | char tempstr[MAX_LEN + 1], *word, *sp, *sp2; | 386 | char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0; |
382 | 387 | ||
383 | strcpy(tempstr, prompt); | 388 | strcpy(tempstr, prompt); |
384 | 389 | ||
385 | prompt_len = strlen(tempstr); | 390 | prompt_len = strlen(tempstr); |
386 | 391 | ||
387 | /* | ||
388 | * Remove newlines | ||
389 | */ | ||
390 | for (i = 0; i < prompt_len; i++) { | ||
391 | if (tempstr[i] == '\n') | ||
392 | tempstr[i] = ' '; | ||
393 | } | ||
394 | |||
395 | if (prompt_len <= width - x * 2) { /* If prompt is short */ | 392 | if (prompt_len <= width - x * 2) { /* If prompt is short */ |
396 | wmove(win, y, (width - prompt_len) / 2); | 393 | wmove(win, y, (width - prompt_len) / 2); |
397 | waddstr(win, tempstr); | 394 | waddstr(win, tempstr); |
@@ -401,7 +398,10 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) | |||
401 | newl = 1; | 398 | newl = 1; |
402 | word = tempstr; | 399 | word = tempstr; |
403 | while (word && *word) { | 400 | while (word && *word) { |
404 | sp = strchr(word, ' '); | 401 | sp = strpbrk(word, "\n "); |
402 | if (sp && *sp == '\n') | ||
403 | newline_separator = sp; | ||
404 | |||
405 | if (sp) | 405 | if (sp) |
406 | *sp++ = 0; | 406 | *sp++ = 0; |
407 | 407 | ||
@@ -413,7 +413,7 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) | |||
413 | if (wlen > room || | 413 | if (wlen > room || |
414 | (newl && wlen < 4 && sp | 414 | (newl && wlen < 4 && sp |
415 | && wlen + 1 + strlen(sp) > room | 415 | && wlen + 1 + strlen(sp) > room |
416 | && (!(sp2 = strchr(sp, ' ')) | 416 | && (!(sp2 = strpbrk(sp, "\n ")) |
417 | || wlen + 1 + (sp2 - sp) > room))) { | 417 | || wlen + 1 + (sp2 - sp) > room))) { |
418 | cur_y++; | 418 | cur_y++; |
419 | cur_x = x; | 419 | cur_x = x; |
@@ -421,7 +421,15 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) | |||
421 | wmove(win, cur_y, cur_x); | 421 | wmove(win, cur_y, cur_x); |
422 | waddstr(win, word); | 422 | waddstr(win, word); |
423 | getyx(win, cur_y, cur_x); | 423 | getyx(win, cur_y, cur_x); |
424 | cur_x++; | 424 | |
425 | /* Move to the next line if the word separator was a newline */ | ||
426 | if (newline_separator) { | ||
427 | cur_y++; | ||
428 | cur_x = x; | ||
429 | newline_separator = 0; | ||
430 | } else | ||
431 | cur_x++; | ||
432 | |||
425 | if (sp && *sp == ' ') { | 433 | if (sp && *sp == ' ') { |
426 | cur_x++; /* double space */ | 434 | cur_x++; /* double space */ |
427 | while (*++sp == ' ') ; | 435 | while (*++sp == ' ') ; |
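The print_autowrap() rework stops flattening '\n' into spaces: words are split with strpbrk(word, "\n ") and a remembered newline separator forces the next word onto a fresh line. A reduced, curses-free sketch of that splitting logic; walk_words() and the sample prompt are invented for illustration:

  #include <stdio.h>
  #include <string.h>

  /* Walk a prompt word by word, honouring embedded newlines the way
   * the reworked print_autowrap() does (output positions simplified). */
  static void walk_words(char *text)
  {
          char *word = text, *sp, *newline_separator = NULL;

          while (word && *word) {
                  sp = strpbrk(word, "\n ");      /* next blank or newline */
                  if (sp && *sp == '\n')
                          newline_separator = sp;
                  if (sp)
                          *sp++ = '\0';

                  printf("[%s]", word);
                  if (newline_separator) {
                          putchar('\n');          /* forced line break */
                          newline_separator = NULL;
                  } else {
                          putchar(' ');
                  }

                  if (sp && *sp == ' ')           /* collapse runs of spaces */
                          while (*++sp == ' ')
                                  ;
                  word = sp;
          }
          putchar('\n');
  }

  int main(void)
  {
          char buf[] = "Do you wish to save?\nPress ESC to continue.";

          walk_words(buf);
          return 0;
  }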
diff --git a/scripts/kconfig/lxdialog/yesno.c b/scripts/kconfig/lxdialog/yesno.c index 4e6e8090c20b..676fb2f824a3 100644 --- a/scripts/kconfig/lxdialog/yesno.c +++ b/scripts/kconfig/lxdialog/yesno.c | |||
@@ -45,14 +45,14 @@ int dialog_yesno(const char *title, const char *prompt, int height, int width) | |||
45 | WINDOW *dialog; | 45 | WINDOW *dialog; |
46 | 46 | ||
47 | do_resize: | 47 | do_resize: |
48 | if (getmaxy(stdscr) < (height + 4)) | 48 | if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN)) |
49 | return -ERRDISPLAYTOOSMALL; | 49 | return -ERRDISPLAYTOOSMALL; |
50 | if (getmaxx(stdscr) < (width + 4)) | 50 | if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN)) |
51 | return -ERRDISPLAYTOOSMALL; | 51 | return -ERRDISPLAYTOOSMALL; |
52 | 52 | ||
53 | /* center dialog box on screen */ | 53 | /* center dialog box on screen */ |
54 | x = (COLS - width) / 2; | 54 | x = (getmaxx(stdscr) - width) / 2; |
55 | y = (LINES - height) / 2; | 55 | y = (getmaxy(stdscr) - height) / 2; |
56 | 56 | ||
57 | draw_shadow(stdscr, y, x, height, width); | 57 | draw_shadow(stdscr, y, x, height, width); |
58 | 58 | ||
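Throughout the lxdialog boxes above, the LINES/COLS globals (frozen at initscr() time) are replaced by getmaxy()/getmaxx() on stdscr, so dialogs stay centred after a terminal resize. A minimal ncurses sketch of that centering pattern; centered_box() and the 10x40 size are arbitrary examples (link with -lncurses):

  #include <curses.h>

  /* Center a height x width box on the current screen size instead of
   * the LINES/COLS snapshot taken at initscr() time. */
  static WINDOW *centered_box(int height, int width)
  {
          int y = (getmaxy(stdscr) - height) / 2;
          int x = (getmaxx(stdscr) - width) / 2;

          if (y < 0 || x < 0)
                  return NULL;    /* terminal too small */
          return newwin(height, width, y, x);
  }

  int main(void)
  {
          WINDOW *win;

          initscr();
          win = centered_box(10, 40);     /* arbitrary demo size */
          if (win) {
                  box(win, 0, 0);
                  wrefresh(win);
                  wgetch(win);
                  delwin(win);
          }
          endwin();
          return 0;
  }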
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index a69cbd78fb38..6c9c45f9fbba 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c | |||
@@ -48,7 +48,7 @@ static const char mconf_readme[] = N_( | |||
48 | "----------\n" | 48 | "----------\n" |
49 | "o Use the Up/Down arrow keys (cursor keys) to highlight the item\n" | 49 | "o Use the Up/Down arrow keys (cursor keys) to highlight the item\n" |
50 | " you wish to change or submenu wish to select and press <Enter>.\n" | 50 | " you wish to change or submenu wish to select and press <Enter>.\n" |
51 | " Submenus are designated by \"--->\".\n" | 51 | " Submenus are designated by \"--->\", empty ones by \"----\".\n" |
52 | "\n" | 52 | "\n" |
53 | " Shortcut: Press the option's highlighted letter (hotkey).\n" | 53 | " Shortcut: Press the option's highlighted letter (hotkey).\n" |
54 | " Pressing a hotkey more than once will sequence\n" | 54 | " Pressing a hotkey more than once will sequence\n" |
@@ -176,7 +176,7 @@ static const char mconf_readme[] = N_( | |||
176 | "\n"), | 176 | "\n"), |
177 | menu_instructions[] = N_( | 177 | menu_instructions[] = N_( |
178 | "Arrow keys navigate the menu. " | 178 | "Arrow keys navigate the menu. " |
179 | "<Enter> selects submenus --->. " | 179 | "<Enter> selects submenus ---> (or empty submenus ----). " |
180 | "Highlighted letters are hotkeys. " | 180 | "Highlighted letters are hotkeys. " |
181 | "Pressing <Y> includes, <N> excludes, <M> modularizes features. " | 181 | "Pressing <Y> includes, <N> excludes, <M> modularizes features. " |
182 | "Press <Esc><Esc> to exit, <?> for Help, </> for Search. " | 182 | "Press <Esc><Esc> to exit, <?> for Help, </> for Search. " |
@@ -401,7 +401,7 @@ static void search_conf(void) | |||
401 | struct subtitle_part stpart; | 401 | struct subtitle_part stpart; |
402 | 402 | ||
403 | title = str_new(); | 403 | title = str_new(); |
404 | str_printf( &title, _("Enter %s (sub)string to search for " | 404 | str_printf( &title, _("Enter %s (sub)string or regexp to search for " |
405 | "(with or without \"%s\")"), CONFIG_, CONFIG_); | 405 | "(with or without \"%s\")"), CONFIG_, CONFIG_); |
406 | 406 | ||
407 | again: | 407 | again: |
@@ -498,8 +498,9 @@ static void build_conf(struct menu *menu) | |||
498 | menu->data ? "-->" : "++>", | 498 | menu->data ? "-->" : "++>", |
499 | indent + 1, ' ', prompt); | 499 | indent + 1, ' ', prompt); |
500 | } else | 500 | } else |
501 | item_make(" %*c%s --->", indent + 1, ' ', prompt); | 501 | item_make(" %*c%s %s", |
502 | 502 | indent + 1, ' ', prompt, | |
503 | menu_is_empty(menu) ? "----" : "--->"); | ||
503 | item_set_tag('m'); | 504 | item_set_tag('m'); |
504 | item_set_data(menu); | 505 | item_set_data(menu); |
505 | if (single_menu_mode && menu->data) | 506 | if (single_menu_mode && menu->data) |
@@ -630,7 +631,7 @@ static void build_conf(struct menu *menu) | |||
630 | (sym_has_value(sym) || !sym_is_changable(sym)) ? | 631 | (sym_has_value(sym) || !sym_is_changable(sym)) ? |
631 | "" : _(" (NEW)")); | 632 | "" : _(" (NEW)")); |
632 | if (menu->prompt->type == P_MENU) { | 633 | if (menu->prompt->type == P_MENU) { |
633 | item_add_str(" --->"); | 634 | item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); |
634 | return; | 635 | return; |
635 | } | 636 | } |
636 | } | 637 | } |
@@ -826,7 +827,9 @@ static void conf_choice(struct menu *menu) | |||
826 | dialog_clear(); | 827 | dialog_clear(); |
827 | res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"), | 828 | res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"), |
828 | _(radiolist_instructions), | 829 | _(radiolist_instructions), |
829 | 15, 70, 6); | 830 | MENUBOX_HEIGTH_MIN, |
831 | MENUBOX_WIDTH_MIN, | ||
832 | CHECKLIST_HEIGTH_MIN); | ||
830 | selected = item_activate_selected(); | 833 | selected = item_activate_selected(); |
831 | switch (res) { | 834 | switch (res) { |
832 | case 0: | 835 | case 0: |
@@ -957,8 +960,8 @@ static int handle_exit(void) | |||
957 | dialog_clear(); | 960 | dialog_clear(); |
958 | if (conf_get_changed()) | 961 | if (conf_get_changed()) |
959 | res = dialog_yesno(NULL, | 962 | res = dialog_yesno(NULL, |
960 | _("Do you wish to save your new configuration ?\n" | 963 | _("Do you wish to save your new configuration?\n" |
961 | "<ESC><ESC> to continue."), | 964 | "(Press <ESC><ESC> to continue kernel configuration.)"), |
962 | 6, 60); | 965 | 6, 60); |
963 | else | 966 | else |
964 | res = -1; | 967 | res = -1; |
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index fd3f0180e08f..7e233a6ca64e 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c | |||
@@ -443,6 +443,22 @@ bool menu_has_prompt(struct menu *menu) | |||
443 | return true; | 443 | return true; |
444 | } | 444 | } |
445 | 445 | ||
446 | /* | ||
447 | * Determine if a menu is empty. | ||
448 | * A menu is considered empty if it contains no or only | ||
449 | * invisible entries. | ||
450 | */ | ||
451 | bool menu_is_empty(struct menu *menu) | ||
452 | { | ||
453 | struct menu *child; | ||
454 | |||
455 | for (child = menu->list; child; child = child->next) { | ||
456 | if (menu_is_visible(child)) | ||
457 | return(false); | ||
458 | } | ||
459 | return(true); | ||
460 | } | ||
461 | |||
446 | bool menu_is_visible(struct menu *menu) | 462 | bool menu_is_visible(struct menu *menu) |
447 | { | 463 | { |
448 | struct menu *child; | 464 | struct menu *child; |
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index dbf31edd22b2..7975d8d258c3 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c | |||
@@ -45,8 +45,8 @@ static const char nconf_global_help[] = N_( | |||
45 | "<n> to remove it. You may press the <Space> key to cycle through the\n" | 45 | "<n> to remove it. You may press the <Space> key to cycle through the\n" |
46 | "available options.\n" | 46 | "available options.\n" |
47 | "\n" | 47 | "\n" |
48 | "A trailing \"--->\" designates a submenu.\n" | 48 | "A trailing \"--->\" designates a submenu, a trailing \"----\" an\n" |
49 | "\n" | 49 | "empty submenu.\n" |
50 | "\n" | 50 | "\n" |
51 | "Menu navigation keys\n" | 51 | "Menu navigation keys\n" |
52 | "----------------------------------------------------------------------\n" | 52 | "----------------------------------------------------------------------\n" |
@@ -131,7 +131,7 @@ static const char nconf_global_help[] = N_( | |||
131 | "\n"), | 131 | "\n"), |
132 | menu_no_f_instructions[] = N_( | 132 | menu_no_f_instructions[] = N_( |
133 | "Legend: [*] built-in [ ] excluded <M> module < > module capable.\n" | 133 | "Legend: [*] built-in [ ] excluded <M> module < > module capable.\n" |
134 | "Submenus are designated by a trailing \"--->\".\n" | 134 | "Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" |
135 | "\n" | 135 | "\n" |
136 | "Use the following keys to navigate the menus:\n" | 136 | "Use the following keys to navigate the menus:\n" |
137 | "Move up or down with <Up> and <Down>.\n" | 137 | "Move up or down with <Up> and <Down>.\n" |
@@ -148,7 +148,7 @@ menu_no_f_instructions[] = N_( | |||
148 | "For help related to the current menu entry press <?> or <h>.\n"), | 148 | "For help related to the current menu entry press <?> or <h>.\n"), |
149 | menu_instructions[] = N_( | 149 | menu_instructions[] = N_( |
150 | "Legend: [*] built-in [ ] excluded <M> module < > module capable.\n" | 150 | "Legend: [*] built-in [ ] excluded <M> module < > module capable.\n" |
151 | "Submenus are designated by a trailing \"--->\".\n" | 151 | "Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" |
152 | "\n" | 152 | "\n" |
153 | "Use the following keys to navigate the menus:\n" | 153 | "Use the following keys to navigate the menus:\n" |
154 | "Move up or down with <Up> or <Down>.\n" | 154 | "Move up or down with <Up> or <Down>.\n" |
@@ -365,15 +365,16 @@ static void print_function_line(void) | |||
365 | int i; | 365 | int i; |
366 | int offset = 1; | 366 | int offset = 1; |
367 | const int skip = 1; | 367 | const int skip = 1; |
368 | int lines = getmaxy(stdscr); | ||
368 | 369 | ||
369 | for (i = 0; i < function_keys_num; i++) { | 370 | for (i = 0; i < function_keys_num; i++) { |
370 | (void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]); | 371 | (void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]); |
371 | mvwprintw(main_window, LINES-3, offset, | 372 | mvwprintw(main_window, lines-3, offset, |
372 | "%s", | 373 | "%s", |
373 | function_keys[i].key_str); | 374 | function_keys[i].key_str); |
374 | (void) wattrset(main_window, attributes[FUNCTION_TEXT]); | 375 | (void) wattrset(main_window, attributes[FUNCTION_TEXT]); |
375 | offset += strlen(function_keys[i].key_str); | 376 | offset += strlen(function_keys[i].key_str); |
376 | mvwprintw(main_window, LINES-3, | 377 | mvwprintw(main_window, lines-3, |
377 | offset, "%s", | 378 | offset, "%s", |
378 | function_keys[i].func); | 379 | function_keys[i].func); |
379 | offset += strlen(function_keys[i].func) + skip; | 380 | offset += strlen(function_keys[i].func) + skip; |
@@ -694,7 +695,7 @@ static void search_conf(void) | |||
694 | int dres; | 695 | int dres; |
695 | 696 | ||
696 | title = str_new(); | 697 | title = str_new(); |
697 | str_printf( &title, _("Enter %s (sub)string to search for " | 698 | str_printf( &title, _("Enter %s (sub)string or regexp to search for " |
698 | "(with or without \"%s\")"), CONFIG_, CONFIG_); | 699 | "(with or without \"%s\")"), CONFIG_, CONFIG_); |
699 | 700 | ||
700 | again: | 701 | again: |
@@ -759,9 +760,9 @@ static void build_conf(struct menu *menu) | |||
759 | indent + 1, ' ', prompt); | 760 | indent + 1, ' ', prompt); |
760 | } else | 761 | } else |
761 | item_make(menu, 'm', | 762 | item_make(menu, 'm', |
762 | " %*c%s --->", | 763 | " %*c%s %s", |
763 | indent + 1, | 764 | indent + 1, ' ', prompt, |
764 | ' ', prompt); | 765 | menu_is_empty(menu) ? "----" : "--->"); |
765 | 766 | ||
766 | if (single_menu_mode && menu->data) | 767 | if (single_menu_mode && menu->data) |
767 | goto conf_childs; | 768 | goto conf_childs; |
@@ -903,7 +904,7 @@ static void build_conf(struct menu *menu) | |||
903 | (sym_has_value(sym) || !sym_is_changable(sym)) ? | 904 | (sym_has_value(sym) || !sym_is_changable(sym)) ? |
904 | "" : _(" (NEW)")); | 905 | "" : _(" (NEW)")); |
905 | if (menu->prompt && menu->prompt->type == P_MENU) { | 906 | if (menu->prompt && menu->prompt->type == P_MENU) { |
906 | item_add_str(" --->"); | 907 | item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); |
907 | return; | 908 | return; |
908 | } | 909 | } |
909 | } | 910 | } |
@@ -954,7 +955,7 @@ static void show_menu(const char *prompt, const char *instructions, | |||
954 | 955 | ||
955 | clear(); | 956 | clear(); |
956 | (void) wattrset(main_window, attributes[NORMAL]); | 957 | (void) wattrset(main_window, attributes[NORMAL]); |
957 | print_in_middle(stdscr, 1, 0, COLS, | 958 | print_in_middle(stdscr, 1, 0, getmaxx(stdscr), |
958 | menu_backtitle, | 959 | menu_backtitle, |
959 | attributes[MAIN_HEADING]); | 960 | attributes[MAIN_HEADING]); |
960 | 961 | ||
@@ -1455,14 +1456,18 @@ static void conf_save(void) | |||
1455 | 1456 | ||
1456 | void setup_windows(void) | 1457 | void setup_windows(void) |
1457 | { | 1458 | { |
1459 | int lines, columns; | ||
1460 | |||
1461 | getmaxyx(stdscr, lines, columns); | ||
1462 | |||
1458 | if (main_window != NULL) | 1463 | if (main_window != NULL) |
1459 | delwin(main_window); | 1464 | delwin(main_window); |
1460 | 1465 | ||
1461 | /* set up the menu and menu window */ | 1466 | /* set up the menu and menu window */ |
1462 | main_window = newwin(LINES-2, COLS-2, 2, 1); | 1467 | main_window = newwin(lines-2, columns-2, 2, 1); |
1463 | keypad(main_window, TRUE); | 1468 | keypad(main_window, TRUE); |
1464 | mwin_max_lines = LINES-7; | 1469 | mwin_max_lines = lines-7; |
1465 | mwin_max_cols = COLS-6; | 1470 | mwin_max_cols = columns-6; |
1466 | 1471 | ||
1467 | /* panels order is from bottom to top */ | 1472 | /* panels order is from bottom to top */ |
1468 | new_panel(main_window); | 1473 | new_panel(main_window); |
@@ -1470,6 +1475,7 @@ void setup_windows(void) | |||
1470 | 1475 | ||
1471 | int main(int ac, char **av) | 1476 | int main(int ac, char **av) |
1472 | { | 1477 | { |
1478 | int lines, columns; | ||
1473 | char *mode; | 1479 | char *mode; |
1474 | 1480 | ||
1475 | setlocale(LC_ALL, ""); | 1481 | setlocale(LC_ALL, ""); |
@@ -1495,7 +1501,8 @@ int main(int ac, char **av) | |||
1495 | keypad(stdscr, TRUE); | 1501 | keypad(stdscr, TRUE); |
1496 | curs_set(0); | 1502 | curs_set(0); |
1497 | 1503 | ||
1498 | if (COLS < 75 || LINES < 20) { | 1504 | getmaxyx(stdscr, lines, columns); |
1505 | if (columns < 75 || lines < 20) { | ||
1499 | endwin(); | 1506 | endwin(); |
1500 | printf("Your terminal should have at " | 1507 | printf("Your terminal should have at " |
1501 | "least 20 lines and 75 columns\n"); | 1508 | "least 20 lines and 75 columns\n"); |
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 9f8c44ecc703..8275f0e55106 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c | |||
@@ -276,8 +276,8 @@ int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...) | |||
276 | 276 | ||
277 | total_width = max(msg_width, btns_width); | 277 | total_width = max(msg_width, btns_width); |
278 | /* place dialog in middle of screen */ | 278 | /* place dialog in middle of screen */ |
279 | y = (LINES-(msg_lines+4))/2; | 279 | y = (getmaxy(stdscr)-(msg_lines+4))/2; |
280 | x = (COLS-(total_width+4))/2; | 280 | x = (getmaxx(stdscr)-(total_width+4))/2; |
281 | 281 | ||
282 | 282 | ||
283 | /* create the windows */ | 283 | /* create the windows */ |
@@ -387,8 +387,8 @@ int dialog_inputbox(WINDOW *main_window, | |||
387 | prompt_width = max(prompt_width, strlen(title)); | 387 | prompt_width = max(prompt_width, strlen(title)); |
388 | 388 | ||
389 | /* place dialog in middle of screen */ | 389 | /* place dialog in middle of screen */ |
390 | y = (LINES-(prompt_lines+4))/2; | 390 | y = (getmaxy(stdscr)-(prompt_lines+4))/2; |
391 | x = (COLS-(prompt_width+4))/2; | 391 | x = (getmaxx(stdscr)-(prompt_width+4))/2; |
392 | 392 | ||
393 | strncpy(result, init, *result_len); | 393 | strncpy(result, init, *result_len); |
394 | 394 | ||
@@ -545,7 +545,7 @@ void show_scroll_win(WINDOW *main_window, | |||
545 | { | 545 | { |
546 | int res; | 546 | int res; |
547 | int total_lines = get_line_no(text); | 547 | int total_lines = get_line_no(text); |
548 | int x, y; | 548 | int x, y, lines, columns; |
549 | int start_x = 0, start_y = 0; | 549 | int start_x = 0, start_y = 0; |
550 | int text_lines = 0, text_cols = 0; | 550 | int text_lines = 0, text_cols = 0; |
551 | int total_cols = 0; | 551 | int total_cols = 0; |
@@ -556,6 +556,8 @@ void show_scroll_win(WINDOW *main_window, | |||
556 | WINDOW *pad; | 556 | WINDOW *pad; |
557 | PANEL *panel; | 557 | PANEL *panel; |
558 | 558 | ||
559 | getmaxyx(stdscr, lines, columns); | ||
560 | |||
559 | /* find the widest line of msg: */ | 561 | /* find the widest line of msg: */ |
560 | total_lines = get_line_no(text); | 562 | total_lines = get_line_no(text); |
561 | for (i = 0; i < total_lines; i++) { | 563 | for (i = 0; i < total_lines; i++) { |
@@ -569,14 +571,14 @@ void show_scroll_win(WINDOW *main_window, | |||
569 | (void) wattrset(pad, attributes[SCROLLWIN_TEXT]); | 571 | (void) wattrset(pad, attributes[SCROLLWIN_TEXT]); |
570 | fill_window(pad, text); | 572 | fill_window(pad, text); |
571 | 573 | ||
572 | win_lines = min(total_lines+4, LINES-2); | 574 | win_lines = min(total_lines+4, lines-2); |
573 | win_cols = min(total_cols+2, COLS-2); | 575 | win_cols = min(total_cols+2, columns-2); |
574 | text_lines = max(win_lines-4, 0); | 576 | text_lines = max(win_lines-4, 0); |
575 | text_cols = max(win_cols-2, 0); | 577 | text_cols = max(win_cols-2, 0); |
576 | 578 | ||
577 | /* place window in middle of screen */ | 579 | /* place window in middle of screen */ |
578 | y = (LINES-win_lines)/2; | 580 | y = (lines-win_lines)/2; |
579 | x = (COLS-win_cols)/2; | 581 | x = (columns-win_cols)/2; |
580 | 582 | ||
581 | win = newwin(win_lines, win_cols, y, x); | 583 | win = newwin(win_lines, win_cols, y, x); |
582 | keypad(win, TRUE); | 584 | keypad(win, TRUE); |
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index ecc5aa5f865d..d550300ec00c 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c | |||
@@ -136,7 +136,7 @@ static struct property *sym_get_range_prop(struct symbol *sym) | |||
136 | return NULL; | 136 | return NULL; |
137 | } | 137 | } |
138 | 138 | ||
139 | static int sym_get_range_val(struct symbol *sym, int base) | 139 | static long sym_get_range_val(struct symbol *sym, int base) |
140 | { | 140 | { |
141 | sym_calc_value(sym); | 141 | sym_calc_value(sym); |
142 | switch (sym->type) { | 142 | switch (sym->type) { |
@@ -155,7 +155,7 @@ static int sym_get_range_val(struct symbol *sym, int base) | |||
155 | static void sym_validate_range(struct symbol *sym) | 155 | static void sym_validate_range(struct symbol *sym) |
156 | { | 156 | { |
157 | struct property *prop; | 157 | struct property *prop; |
158 | int base, val, val2; | 158 | long base, val, val2; |
159 | char str[64]; | 159 | char str[64]; |
160 | 160 | ||
161 | switch (sym->type) { | 161 | switch (sym->type) { |
@@ -179,9 +179,9 @@ static void sym_validate_range(struct symbol *sym) | |||
179 | return; | 179 | return; |
180 | } | 180 | } |
181 | if (sym->type == S_INT) | 181 | if (sym->type == S_INT) |
182 | sprintf(str, "%d", val2); | 182 | sprintf(str, "%ld", val2); |
183 | else | 183 | else |
184 | sprintf(str, "0x%x", val2); | 184 | sprintf(str, "0x%lx", val2); |
185 | sym->curr.val = strdup(str); | 185 | sym->curr.val = strdup(str); |
186 | } | 186 | } |
187 | 187 | ||
@@ -300,6 +300,14 @@ void sym_calc_value(struct symbol *sym) | |||
300 | 300 | ||
301 | if (sym->flags & SYMBOL_VALID) | 301 | if (sym->flags & SYMBOL_VALID) |
302 | return; | 302 | return; |
303 | |||
304 | if (sym_is_choice_value(sym) && | ||
305 | sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) { | ||
306 | sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES; | ||
307 | prop = sym_get_choice_prop(sym); | ||
308 | sym_calc_value(prop_get_symbol(prop)); | ||
309 | } | ||
310 | |||
303 | sym->flags |= SYMBOL_VALID; | 311 | sym->flags |= SYMBOL_VALID; |
304 | 312 | ||
305 | oldval = sym->curr; | 313 | oldval = sym->curr; |
@@ -425,6 +433,9 @@ void sym_calc_value(struct symbol *sym) | |||
425 | 433 | ||
426 | if (sym->flags & SYMBOL_AUTO) | 434 | if (sym->flags & SYMBOL_AUTO) |
427 | sym->flags &= ~SYMBOL_WRITE; | 435 | sym->flags &= ~SYMBOL_WRITE; |
436 | |||
437 | if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) | ||
438 | set_all_choice_values(sym); | ||
428 | } | 439 | } |
429 | 440 | ||
430 | void sym_clear_all_valid(void) | 441 | void sym_clear_all_valid(void) |
@@ -583,7 +594,7 @@ bool sym_string_valid(struct symbol *sym, const char *str) | |||
583 | bool sym_string_within_range(struct symbol *sym, const char *str) | 594 | bool sym_string_within_range(struct symbol *sym, const char *str) |
584 | { | 595 | { |
585 | struct property *prop; | 596 | struct property *prop; |
586 | int val; | 597 | long val; |
587 | 598 | ||
588 | switch (sym->type) { | 599 | switch (sym->type) { |
589 | case S_STRING: | 600 | case S_STRING: |
@@ -943,38 +954,98 @@ const char *sym_escape_string_value(const char *in) | |||
943 | return res; | 954 | return res; |
944 | } | 955 | } |
945 | 956 | ||
957 | struct sym_match { | ||
958 | struct symbol *sym; | ||
959 | off_t so, eo; | ||
960 | }; | ||
961 | |||
962 | /* Compare matched symbols as thus: | ||
963 | * - first, symbols that match exactly | ||
964 | * - then, alphabetical sort | ||
965 | */ | ||
966 | static int sym_rel_comp( const void *sym1, const void *sym2 ) | ||
967 | { | ||
968 | struct sym_match *s1 = *(struct sym_match **)sym1; | ||
969 | struct sym_match *s2 = *(struct sym_match **)sym2; | ||
970 | int l1, l2; | ||
971 | |||
972 | /* Exact match: | ||
973 | * - if matched length on symbol s1 is the length of that symbol, | ||
974 | * then this symbol should come first; | ||
975 | * - if matched length on symbol s2 is the length of that symbol, | ||
976 | * then this symbol should come first. | ||
977 | * Note: since the search can be a regexp, both symbols may match | ||
978 | * exactly; if this is the case, we can't decide which comes first, | ||
979 | * and we fallback to sorting alphabetically. | ||
980 | */ | ||
981 | l1 = s1->eo - s1->so; | ||
982 | l2 = s2->eo - s2->so; | ||
983 | if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name)) | ||
984 | return -1; | ||
985 | if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name)) | ||
986 | return 1; | ||
987 | |||
988 | /* As a fallback, sort symbols alphabetically */ | ||
989 | return strcmp(s1->sym->name, s2->sym->name); | ||
990 | } | ||
991 | |||
946 | struct symbol **sym_re_search(const char *pattern) | 992 | struct symbol **sym_re_search(const char *pattern) |
947 | { | 993 | { |
948 | struct symbol *sym, **sym_arr = NULL; | 994 | struct symbol *sym, **sym_arr = NULL; |
995 | struct sym_match **sym_match_arr = NULL; | ||
949 | int i, cnt, size; | 996 | int i, cnt, size; |
950 | regex_t re; | 997 | regex_t re; |
998 | regmatch_t match[1]; | ||
951 | 999 | ||
952 | cnt = size = 0; | 1000 | cnt = size = 0; |
953 | /* Skip if empty */ | 1001 | /* Skip if empty */ |
954 | if (strlen(pattern) == 0) | 1002 | if (strlen(pattern) == 0) |
955 | return NULL; | 1003 | return NULL; |
956 | if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE)) | 1004 | if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE)) |
957 | return NULL; | 1005 | return NULL; |
958 | 1006 | ||
959 | for_all_symbols(i, sym) { | 1007 | for_all_symbols(i, sym) { |
1008 | struct sym_match *tmp_sym_match; | ||
960 | if (sym->flags & SYMBOL_CONST || !sym->name) | 1009 | if (sym->flags & SYMBOL_CONST || !sym->name) |
961 | continue; | 1010 | continue; |
962 | if (regexec(&re, sym->name, 0, NULL, 0)) | 1011 | if (regexec(&re, sym->name, 1, match, 0)) |
963 | continue; | 1012 | continue; |
964 | if (cnt + 1 >= size) { | 1013 | if (cnt + 1 >= size) { |
965 | void *tmp = sym_arr; | 1014 | void *tmp; |
966 | size += 16; | 1015 | size += 16; |
967 | sym_arr = realloc(sym_arr, size * sizeof(struct symbol *)); | 1016 | tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *)); |
968 | if (!sym_arr) { | 1017 | if (!tmp) { |
969 | free(tmp); | 1018 | goto sym_re_search_free; |
970 | return NULL; | ||
971 | } | 1019 | } |
1020 | sym_match_arr = tmp; | ||
972 | } | 1021 | } |
973 | sym_calc_value(sym); | 1022 | sym_calc_value(sym); |
974 | sym_arr[cnt++] = sym; | 1023 | tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match)); |
1024 | if (!tmp_sym_match) | ||
1025 | goto sym_re_search_free; | ||
1026 | tmp_sym_match->sym = sym; | ||
1027 | /* As regexec return 0, we know we have a match, so | ||
1028 | * we can use match[0].rm_[se]o without further checks | ||
1029 | */ | ||
1030 | tmp_sym_match->so = match[0].rm_so; | ||
1031 | tmp_sym_match->eo = match[0].rm_eo; | ||
1032 | sym_match_arr[cnt++] = tmp_sym_match; | ||
975 | } | 1033 | } |
976 | if (sym_arr) | 1034 | if (sym_match_arr) { |
1035 | qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp); | ||
1036 | sym_arr = malloc((cnt+1) * sizeof(struct symbol)); | ||
1037 | if (!sym_arr) | ||
1038 | goto sym_re_search_free; | ||
1039 | for (i = 0; i < cnt; i++) | ||
1040 | sym_arr[i] = sym_match_arr[i]->sym; | ||
977 | sym_arr[cnt] = NULL; | 1041 | sym_arr[cnt] = NULL; |
1042 | } | ||
1043 | sym_re_search_free: | ||
1044 | if (sym_match_arr) { | ||
1045 | for (i = 0; i < cnt; i++) | ||
1046 | free(sym_match_arr[i]); | ||
1047 | free(sym_match_arr); | ||
1048 | } | ||
978 | regfree(&re); | 1049 | regfree(&re); |
979 | 1050 | ||
980 | return sym_arr; | 1051 | return sym_arr; |
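sym_re_search() now compiles the pattern without REG_NOSUB, keeps the regexec() match offsets for each symbol, and sorts the hits with sym_rel_comp() so exact matches land before partial ones. A condensed sketch of that match-and-rank idea over a plain list of strings; the names array and match_cmp() are illustrative, not kernel code:

  #include <regex.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct match {
          const char *name;
          regoff_t so, eo;        /* match offsets from regexec() */
  };

  /* Exact matches (the pattern covered the whole name) sort first,
   * everything else falls back to alphabetical order. */
  static int match_cmp(const void *a, const void *b)
  {
          const struct match *m1 = a, *m2 = b;
          int e1 = (size_t)(m1->eo - m1->so) == strlen(m1->name);
          int e2 = (size_t)(m2->eo - m2->so) == strlen(m2->name);

          if (e1 != e2)
                  return e2 - e1;
          return strcmp(m1->name, m2->name);
  }

  int main(void)
  {
          const char *names[] = { "USB_STORAGE", "USB", "SUSPEND" };
          struct match hits[3];
          regmatch_t m[1];
          regex_t re;
          size_t i, cnt = 0;

          if (regcomp(&re, "USB", REG_EXTENDED | REG_ICASE))
                  return 1;
          for (i = 0; i < 3; i++) {
                  if (regexec(&re, names[i], 1, m, 0))
                          continue;       /* no match */
                  hits[cnt].name = names[i];
                  hits[cnt].so = m[0].rm_so;
                  hits[cnt].eo = m[0].rm_eo;
                  cnt++;
          }
          qsort(hits, cnt, sizeof(hits[0]), match_cmp);
          for (i = 0; i < cnt; i++)
                  printf("%s\n", hits[i].name);   /* USB before USB_STORAGE */
          regfree(&re);
          return 0;
  }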
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile index 75d59fcd48b8..c11212ff3510 100644 --- a/scripts/mod/Makefile +++ b/scripts/mod/Makefile | |||
@@ -15,8 +15,8 @@ endef | |||
15 | quiet_cmd_offsets = GEN $@ | 15 | quiet_cmd_offsets = GEN $@ |
16 | define cmd_offsets | 16 | define cmd_offsets |
17 | (set -e; \ | 17 | (set -e; \ |
18 | echo "#ifndef __DEVICEVTABLE_OFFSETS_H__"; \ | 18 | echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \ |
19 | echo "#define __DEVICEVTABLE_OFFSETS_H__"; \ | 19 | echo "#define __DEVICETABLE_OFFSETS_H__"; \ |
20 | echo "/*"; \ | 20 | echo "/*"; \ |
21 | echo " * DO NOT MODIFY."; \ | 21 | echo " * DO NOT MODIFY."; \ |
22 | echo " *"; \ | 22 | echo " *"; \ |
@@ -29,15 +29,10 @@ define cmd_offsets | |||
29 | echo "#endif" ) > $@ | 29 | echo "#endif" ) > $@ |
30 | endef | 30 | endef |
31 | 31 | ||
32 | # We use internal kbuild rules to avoid the "is up to date" message from make | 32 | $(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s |
33 | scripts/mod/devicetable-offsets.s: scripts/mod/devicetable-offsets.c FORCE | 33 | $(call if_changed,offsets) |
34 | $(Q)mkdir -p $(dir $@) | ||
35 | $(call if_changed_dep,cc_s_c) | ||
36 | 34 | ||
37 | $(obj)/$(devicetable-offsets-file): scripts/mod/devicetable-offsets.s | 35 | targets += $(devicetable-offsets-file) devicetable-offsets.s |
38 | $(call cmd,offsets) | ||
39 | |||
40 | targets += $(devicetable-offsets-file) | ||
41 | 36 | ||
42 | # dependencies on generated files need to be listed explicitly | 37 | # dependencies on generated files need to be listed explicitly |
43 | 38 | ||
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index d9e67b719f08..23708636b05c 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -79,10 +79,12 @@ struct devtable **__start___devtable, **__stop___devtable; | |||
79 | extern struct devtable *__start___devtable[], *__stop___devtable[]; | 79 | extern struct devtable *__start___devtable[], *__stop___devtable[]; |
80 | #endif /* __MACH__ */ | 80 | #endif /* __MACH__ */ |
81 | 81 | ||
82 | #if __GNUC__ == 3 && __GNUC_MINOR__ < 3 | 82 | #if !defined(__used) |
83 | # define __used __attribute__((__unused__)) | 83 | # if __GNUC__ == 3 && __GNUC_MINOR__ < 3 |
84 | #else | 84 | # define __used __attribute__((__unused__)) |
85 | # define __used __attribute__((__used__)) | 85 | # else |
86 | # define __used __attribute__((__used__)) | ||
87 | # endif | ||
86 | #endif | 88 | #endif |
87 | 89 | ||
88 | /* Define a variable f that holds the value of field f of struct devid | 90 | /* Define a variable f that holds the value of field f of struct devid |
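file2alias.c now defines its __used fallback only when no shared header already provides one, keeping the old gcc < 3.3 spelling as the fallback. A small standalone sketch of that guard and of what __used buys (the object is kept in the output even when nothing references it by name); version_tag is a made-up example:

  /* Define __used only if nothing else (e.g. a shared compiler header)
   * already did; keep the pre-gcc-3.3 spelling as a fallback. */
  #if !defined(__used)
  # if defined(__GNUC__) && __GNUC__ == 3 && __GNUC_MINOR__ < 3
  #  define __used __attribute__((__unused__))
  # else
  #  define __used __attribute__((__used__))
  # endif
  #endif

  /* Kept in the object file even though nothing references it by name. */
  static const char version_tag[] __used = "example-1.0";

  int main(void)
  {
          return 0;
  }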
diff --git a/scripts/package/mkspec b/scripts/package/mkspec index fbbfd08853d3..fdd3fbf4d4a4 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec | |||
@@ -74,6 +74,7 @@ echo "" | |||
74 | fi | 74 | fi |
75 | 75 | ||
76 | echo "%install" | 76 | echo "%install" |
77 | echo 'KBUILD_IMAGE=$(make image_name)' | ||
77 | echo "%ifarch ia64" | 78 | echo "%ifarch ia64" |
78 | echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules' | 79 | echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules' |
79 | echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware' | 80 | echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware' |
diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 84b88f109b80..d105a44b68f6 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion | |||
@@ -71,9 +71,6 @@ scm_version() | |||
71 | printf -- '-svn%s' "`git svn find-rev $head`" | 71 | printf -- '-svn%s' "`git svn find-rev $head`" |
72 | fi | 72 | fi |
73 | 73 | ||
74 | # Update index only on r/w media | ||
75 | [ -w . ] && git update-index --refresh --unmerged > /dev/null | ||
76 | |||
77 | # Check for uncommitted changes | 74 | # Check for uncommitted changes |
78 | if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then | 75 | if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then |
79 | printf '%s' -dirty | 76 | printf '%s' -dirty |
diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h index f4912e2668ba..84c17d836578 100644 --- a/tools/include/tools/be_byteshift.h +++ b/tools/include/tools/be_byteshift.h | |||
@@ -1,68 +1,68 @@ | |||
1 | #ifndef _TOOLS_BE_BYTESHIFT_H | 1 | #ifndef _TOOLS_BE_BYTESHIFT_H |
2 | #define _TOOLS_BE_BYTESHIFT_H | 2 | #define _TOOLS_BE_BYTESHIFT_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <stdint.h> |
5 | 5 | ||
6 | static inline __u16 __get_unaligned_be16(const __u8 *p) | 6 | static inline uint16_t __get_unaligned_be16(const uint8_t *p) |
7 | { | 7 | { |
8 | return p[0] << 8 | p[1]; | 8 | return p[0] << 8 | p[1]; |
9 | } | 9 | } |
10 | 10 | ||
11 | static inline __u32 __get_unaligned_be32(const __u8 *p) | 11 | static inline uint32_t __get_unaligned_be32(const uint8_t *p) |
12 | { | 12 | { |
13 | return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; | 13 | return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; |
14 | } | 14 | } |
15 | 15 | ||
16 | static inline __u64 __get_unaligned_be64(const __u8 *p) | 16 | static inline uint64_t __get_unaligned_be64(const uint8_t *p) |
17 | { | 17 | { |
18 | return (__u64)__get_unaligned_be32(p) << 32 | | 18 | return (uint64_t)__get_unaligned_be32(p) << 32 | |
19 | __get_unaligned_be32(p + 4); | 19 | __get_unaligned_be32(p + 4); |
20 | } | 20 | } |
21 | 21 | ||
22 | static inline void __put_unaligned_be16(__u16 val, __u8 *p) | 22 | static inline void __put_unaligned_be16(uint16_t val, uint8_t *p) |
23 | { | 23 | { |
24 | *p++ = val >> 8; | 24 | *p++ = val >> 8; |
25 | *p++ = val; | 25 | *p++ = val; |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void __put_unaligned_be32(__u32 val, __u8 *p) | 28 | static inline void __put_unaligned_be32(uint32_t val, uint8_t *p) |
29 | { | 29 | { |
30 | __put_unaligned_be16(val >> 16, p); | 30 | __put_unaligned_be16(val >> 16, p); |
31 | __put_unaligned_be16(val, p + 2); | 31 | __put_unaligned_be16(val, p + 2); |
32 | } | 32 | } |
33 | 33 | ||
34 | static inline void __put_unaligned_be64(__u64 val, __u8 *p) | 34 | static inline void __put_unaligned_be64(uint64_t val, uint8_t *p) |
35 | { | 35 | { |
36 | __put_unaligned_be32(val >> 32, p); | 36 | __put_unaligned_be32(val >> 32, p); |
37 | __put_unaligned_be32(val, p + 4); | 37 | __put_unaligned_be32(val, p + 4); |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline __u16 get_unaligned_be16(const void *p) | 40 | static inline uint16_t get_unaligned_be16(const void *p) |
41 | { | 41 | { |
42 | return __get_unaligned_be16((const __u8 *)p); | 42 | return __get_unaligned_be16((const uint8_t *)p); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline __u32 get_unaligned_be32(const void *p) | 45 | static inline uint32_t get_unaligned_be32(const void *p) |
46 | { | 46 | { |
47 | return __get_unaligned_be32((const __u8 *)p); | 47 | return __get_unaligned_be32((const uint8_t *)p); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline __u64 get_unaligned_be64(const void *p) | 50 | static inline uint64_t get_unaligned_be64(const void *p) |
51 | { | 51 | { |
52 | return __get_unaligned_be64((const __u8 *)p); | 52 | return __get_unaligned_be64((const uint8_t *)p); |
53 | } | 53 | } |
54 | 54 | ||
55 | static inline void put_unaligned_be16(__u16 val, void *p) | 55 | static inline void put_unaligned_be16(uint16_t val, void *p) |
56 | { | 56 | { |
57 | __put_unaligned_be16(val, p); | 57 | __put_unaligned_be16(val, p); |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline void put_unaligned_be32(__u32 val, void *p) | 60 | static inline void put_unaligned_be32(uint32_t val, void *p) |
61 | { | 61 | { |
62 | __put_unaligned_be32(val, p); | 62 | __put_unaligned_be32(val, p); |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void put_unaligned_be64(__u64 val, void *p) | 65 | static inline void put_unaligned_be64(uint64_t val, void *p) |
66 | { | 66 | { |
67 | __put_unaligned_be64(val, p); | 67 | __put_unaligned_be64(val, p); |
68 | } | 68 | } |
diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h index c99d45a68bda..8fe9f2488ec7 100644 --- a/tools/include/tools/le_byteshift.h +++ b/tools/include/tools/le_byteshift.h | |||
@@ -1,68 +1,68 @@ | |||
1 | #ifndef _TOOLS_LE_BYTESHIFT_H | 1 | #ifndef _TOOLS_LE_BYTESHIFT_H |
2 | #define _TOOLS_LE_BYTESHIFT_H | 2 | #define _TOOLS_LE_BYTESHIFT_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <stdint.h> |
5 | 5 | ||
6 | static inline __u16 __get_unaligned_le16(const __u8 *p) | 6 | static inline uint16_t __get_unaligned_le16(const uint8_t *p) |
7 | { | 7 | { |
8 | return p[0] | p[1] << 8; | 8 | return p[0] | p[1] << 8; |
9 | } | 9 | } |
10 | 10 | ||
11 | static inline __u32 __get_unaligned_le32(const __u8 *p) | 11 | static inline uint32_t __get_unaligned_le32(const uint8_t *p) |
12 | { | 12 | { |
13 | return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; | 13 | return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; |
14 | } | 14 | } |
15 | 15 | ||
16 | static inline __u64 __get_unaligned_le64(const __u8 *p) | 16 | static inline uint64_t __get_unaligned_le64(const uint8_t *p) |
17 | { | 17 | { |
18 | return (__u64)__get_unaligned_le32(p + 4) << 32 | | 18 | return (uint64_t)__get_unaligned_le32(p + 4) << 32 | |
19 | __get_unaligned_le32(p); | 19 | __get_unaligned_le32(p); |
20 | } | 20 | } |
21 | 21 | ||
22 | static inline void __put_unaligned_le16(__u16 val, __u8 *p) | 22 | static inline void __put_unaligned_le16(uint16_t val, uint8_t *p) |
23 | { | 23 | { |
24 | *p++ = val; | 24 | *p++ = val; |
25 | *p++ = val >> 8; | 25 | *p++ = val >> 8; |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void __put_unaligned_le32(__u32 val, __u8 *p) | 28 | static inline void __put_unaligned_le32(uint32_t val, uint8_t *p) |
29 | { | 29 | { |
30 | __put_unaligned_le16(val >> 16, p + 2); | 30 | __put_unaligned_le16(val >> 16, p + 2); |
31 | __put_unaligned_le16(val, p); | 31 | __put_unaligned_le16(val, p); |
32 | } | 32 | } |
33 | 33 | ||
34 | static inline void __put_unaligned_le64(__u64 val, __u8 *p) | 34 | static inline void __put_unaligned_le64(uint64_t val, uint8_t *p) |
35 | { | 35 | { |
36 | __put_unaligned_le32(val >> 32, p + 4); | 36 | __put_unaligned_le32(val >> 32, p + 4); |
37 | __put_unaligned_le32(val, p); | 37 | __put_unaligned_le32(val, p); |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline __u16 get_unaligned_le16(const void *p) | 40 | static inline uint16_t get_unaligned_le16(const void *p) |
41 | { | 41 | { |
42 | return __get_unaligned_le16((const __u8 *)p); | 42 | return __get_unaligned_le16((const uint8_t *)p); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline __u32 get_unaligned_le32(const void *p) | 45 | static inline uint32_t get_unaligned_le32(const void *p) |
46 | { | 46 | { |
47 | return __get_unaligned_le32((const __u8 *)p); | 47 | return __get_unaligned_le32((const uint8_t *)p); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline __u64 get_unaligned_le64(const void *p) | 50 | static inline uint64_t get_unaligned_le64(const void *p) |
51 | { | 51 | { |
52 | return __get_unaligned_le64((const __u8 *)p); | 52 | return __get_unaligned_le64((const uint8_t *)p); |
53 | } | 53 | } |
54 | 54 | ||
55 | static inline void put_unaligned_le16(__u16 val, void *p) | 55 | static inline void put_unaligned_le16(uint16_t val, void *p) |
56 | { | 56 | { |
57 | __put_unaligned_le16(val, p); | 57 | __put_unaligned_le16(val, p); |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline void put_unaligned_le32(__u32 val, void *p) | 60 | static inline void put_unaligned_le32(uint32_t val, void *p) |
61 | { | 61 | { |
62 | __put_unaligned_le32(val, p); | 62 | __put_unaligned_le32(val, p); |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void put_unaligned_le64(__u64 val, void *p) | 65 | static inline void put_unaligned_le64(uint64_t val, void *p) |
66 | { | 66 | { |
67 | __put_unaligned_le64(val, p); | 67 | __put_unaligned_le64(val, p); |
68 | } | 68 | } |
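With both byteshift headers now built on <stdint.h> instead of <linux/types.h>, any host-side tool can use them without pulling in kernel uapi types. A small usage sketch follows; the -I path and file name are assumptions about building it from the top of a kernel tree:

/* byteshift-demo.c - sketch; build with something like
 *   gcc -I tools/include -o byteshift-demo byteshift-demo.c
 */
#include <stdio.h>
#include <stdint.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

int main(void)
{
        uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

        /* Same four bytes, two interpretations. */
        printf("be32 = 0x%08x\n", (unsigned int)get_unaligned_be32(buf)); /* 0x12345678 */
        printf("le32 = 0x%08x\n", (unsigned int)get_unaligned_le32(buf)); /* 0x78563412 */

        /* Round-trip through the little-endian store helper. */
        put_unaligned_le32(0xdeadbeef, buf);
        printf("roundtrip = 0x%08x\n", (unsigned int)get_unaligned_le32(buf));
        return 0;
}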
diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile index 0ac34206f7a7..97bca4871ea3 100644 --- a/tools/lguest/Makefile +++ b/tools/lguest/Makefile | |||
@@ -1,5 +1,4 @@ | |||
1 | # This creates the demonstration utility "lguest" which runs a Linux guest. | 1 | # This creates the demonstration utility "lguest" which runs a Linux guest. |
2 | # Missing headers? Add "-I../../../include -I../../../arch/x86/include" | ||
3 | CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE | 2 | CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE |
4 | 3 | ||
5 | all: lguest | 4 | all: lguest |
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 07a03452c227..68f67cf3d318 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c | |||
@@ -42,14 +42,6 @@ | |||
42 | #include <pwd.h> | 42 | #include <pwd.h> |
43 | #include <grp.h> | 43 | #include <grp.h> |
44 | 44 | ||
45 | #include <linux/virtio_config.h> | ||
46 | #include <linux/virtio_net.h> | ||
47 | #include <linux/virtio_blk.h> | ||
48 | #include <linux/virtio_console.h> | ||
49 | #include <linux/virtio_rng.h> | ||
50 | #include <linux/virtio_ring.h> | ||
51 | #include <asm/bootparam.h> | ||
52 | #include "../../include/linux/lguest_launcher.h" | ||
53 | /*L:110 | 45 | /*L:110 |
54 | * We can ignore the 43 include files we need for this program, but I do want | 46 | * We can ignore the 43 include files we need for this program, but I do want |
55 | * to draw attention to the use of kernel-style types. | 47 | * to draw attention to the use of kernel-style types. |
@@ -65,6 +57,15 @@ typedef uint16_t u16; | |||
65 | typedef uint8_t u8; | 57 | typedef uint8_t u8; |
66 | /*:*/ | 58 | /*:*/ |
67 | 59 | ||
60 | #include <linux/virtio_config.h> | ||
61 | #include <linux/virtio_net.h> | ||
62 | #include <linux/virtio_blk.h> | ||
63 | #include <linux/virtio_console.h> | ||
64 | #include <linux/virtio_rng.h> | ||
65 | #include <linux/virtio_ring.h> | ||
66 | #include <asm/bootparam.h> | ||
67 | #include "../../include/linux/lguest_launcher.h" | ||
68 | |||
68 | #define BRIDGE_PFX "bridge:" | 69 | #define BRIDGE_PFX "bridge:" |
69 | #ifndef SIOCBRADDIF | 70 | #ifndef SIOCBRADDIF |
70 | #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ | 71 | #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ |
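Moving the virtio and bootparam includes below this point is an ordering fix: the launcher defines the kernel-style u8/u16/u32/u64 aliases first, so anything in those headers, or in code written against them, that spells its types the kernel way compiles. Exactly which header tripped over it is not spelled out here, but the constraint itself is the usual one, sketched below with a made-up structure in place of a real header:

/* typedef-order-demo.c - sketch of the ordering constraint behind the
 * include move; demo_ring_entry is purely illustrative. */
#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Would not compile if it appeared above the typedefs. */
struct demo_ring_entry {
        u32 addr;
        u16 len;
        u8  flags;
};

int main(void)
{
        struct demo_ring_entry e = { .addr = 0x1000, .len = 64, .flags = 1 };
        return e.flags == 1 ? 0 : 1;
}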
@@ -177,7 +178,8 @@ static struct termios orig_term; | |||
177 | * in precise order. | 178 | * in precise order. |
178 | */ | 179 | */ |
179 | #define wmb() __asm__ __volatile__("" : : : "memory") | 180 | #define wmb() __asm__ __volatile__("" : : : "memory") |
180 | #define mb() __asm__ __volatile__("" : : : "memory") | 181 | #define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") |
182 | #define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") | ||
181 | 183 | ||
182 | /* Wrapper for the last available index. Makes it easier to change. */ | 184 | /* Wrapper for the last available index. Makes it easier to change. */ |
183 | #define lg_last_avail(vq) ((vq)->last_avail_idx) | 185 | #define lg_last_avail(vq) ((vq)->last_avail_idx) |
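mb() is no longer just a compiler barrier, and a separate rmb() appears alongside it: a lock-prefixed add to a stack slot is the traditional way to get a full CPU ordering fence on 32-bit x86 without requiring SSE2's mfence. The sketch below contrasts the two flavours; it assumes a GCC-compatible compiler and a 32-bit x86 build (matching the launcher's -m32), since the inline asm touches %esp directly:

/* barrier-demo.c - sketch of the two barrier flavours used above;
 * build with something like `gcc -m32 barrier-demo.c`. */

/* Compiler-only: stops the compiler from reordering memory accesses
 * across this point, but tells the CPU nothing. */
#define compiler_barrier() __asm__ __volatile__("" : : : "memory")

/* Compiler + CPU: the locked add forces the processor to order earlier
 * loads/stores before later ones, and works on pre-SSE2 parts too. */
#define full_barrier() \
        __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")

int main(void)
{
        volatile int flag = 0;

        compiler_barrier();
        flag = 1;
        full_barrier();
        return flag - 1;
}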
@@ -676,6 +678,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
676 | errx(1, "Guest moved used index from %u to %u", | 678 | errx(1, "Guest moved used index from %u to %u", |
677 | last_avail, vq->vring.avail->idx); | 679 | last_avail, vq->vring.avail->idx); |
678 | 680 | ||
681 | /* | ||
682 | * Make sure we read the descriptor number *after* we read the ring | ||
683 | * update; don't let the cpu or compiler change the order. | ||
684 | */ | ||
685 | rmb(); | ||
686 | |||
679 | /* | 687 | /* |
680 | * Grab the next descriptor number they're advertising, and increment | 688 | * Grab the next descriptor number they're advertising, and increment |
681 | * the index we've seen. | 689 | * the index we've seen. |
@@ -695,6 +703,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
695 | i = head; | 703 | i = head; |
696 | 704 | ||
697 | /* | 705 | /* |
706 | * We have to read the descriptor after we read the descriptor number, | ||
707 | * but there's a data dependency there so the CPU shouldn't reorder | ||
708 | * that: no rmb() required. | ||
709 | */ | ||
710 | |||
711 | /* | ||
698 | * If this is an indirect entry, then this buffer contains a descriptor | 712 | * If this is an indirect entry, then this buffer contains a descriptor |
699 | * table which we handle as if it's any normal descriptor chain. | 713 | * table which we handle as if it's any normal descriptor chain. |
700 | */ | 714 | */ |
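The placement of the new rmb() is the whole point: the available index is the guest's publish signal, so the launcher must not use a descriptor number, or the descriptor it names, that was fetched before that index. Below is a stripped-down consumer loop showing where the barrier sits; the ring layout and names are simplified stand-ins for the real vring, and the GCC __sync_synchronize() builtin stands in for the x86-specific rmb() above:

/* ring-consumer-demo.c - sketch of the publish/consume ordering fixed above. */
#include <stdint.h>

#define RING_SIZE 256

/* Toy ring: the producer fills ring[] and then bumps avail_idx; the
 * consumer must read avail_idx first, then the entry it guards. */
struct toy_ring {
        volatile uint16_t avail_idx;
        uint16_t ring[RING_SIZE];
};

/* Full barrier via a GCC builtin; stronger than strictly needed here. */
#define rmb() __sync_synchronize()

static uint16_t next_entry(struct toy_ring *r, uint16_t *last_seen)
{
        while (r->avail_idx == *last_seen)
                ;       /* spin until something has been published */

        /*
         * Read the index before the slot it guards; without the barrier
         * the CPU (or compiler) could hoist the ring[] load above the
         * avail_idx check.
         */
        rmb();

        return r->ring[(*last_seen)++ % RING_SIZE];
}

int main(void)
{
        static struct toy_ring r;
        uint16_t last = 0;

        r.ring[0] = 42;         /* producer work, done up front for the demo */
        r.avail_idx = 1;

        return next_entry(&r, &last) == 42 ? 0 : 1;
}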
diff --git a/tools/virtio/linux/module.h b/tools/virtio/linux/module.h index 3039a7e972b6..28ce95a05997 100644 --- a/tools/virtio/linux/module.h +++ b/tools/virtio/linux/module.h | |||
@@ -1 +1,6 @@ | |||
1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
2 | |||
3 | #define MODULE_LICENSE(__MODULE_LICENSE_value) \ | ||
4 | static __attribute__((unused)) const char *__MODULE_LICENSE_name = \ | ||
5 | __MODULE_LICENSE_value | ||
6 | |||
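The stub moves out of virtio.h (dropped in the hunk below) into the harness's own module.h, so kernel source pulled into the userspace virtio tests can keep its MODULE_LICENSE() line untouched; making it static and unused-attributed presumably avoids duplicate symbols and unused-variable warnings across test objects. A minimal sketch of the stub in use; the file name is made up, and the MODULE_LICENSE line is the one that would normally live in the kernel .c file under test:

/* module-stub-demo.c - sketch of the userspace MODULE_LICENSE stub. */
#define MODULE_LICENSE(__MODULE_LICENSE_value) \
        static __attribute__((unused)) const char *__MODULE_LICENSE_name = \
                __MODULE_LICENSE_value

MODULE_LICENSE("GPL");

int main(void)
{
        return 0;
}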
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h index cd801838156f..844783040703 100644 --- a/tools/virtio/linux/virtio.h +++ b/tools/virtio/linux/virtio.h | |||
@@ -45,9 +45,6 @@ struct virtqueue { | |||
45 | void *priv; | 45 | void *priv; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define MODULE_LICENSE(__MODULE_LICENSE_value) \ | ||
49 | const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value | ||
50 | |||
51 | /* Interfaces exported by virtio_ring. */ | 48 | /* Interfaces exported by virtio_ring. */ |
52 | int virtqueue_add_sgs(struct virtqueue *vq, | 49 | int virtqueue_add_sgs(struct virtqueue *vq, |
53 | struct scatterlist *sgs[], | 50 | struct scatterlist *sgs[], |