1015 files changed, 10259 insertions, 5367 deletions
@@ -655,6 +655,11 @@ S: Stanford University | |||
655 | S: Stanford, California 94305 | 655 | S: Stanford, California 94305 |
656 | S: USA | 656 | S: USA |
657 | 657 | ||
658 | N: Carlos Chinea | ||
659 | E: carlos.chinea@nokia.com | ||
660 | E: cch.devel@gmail.com | ||
661 | D: Author of HSI Subsystem | ||
662 | |||
658 | N: Randolph Chung | 663 | N: Randolph Chung |
659 | E: tausq@debian.org | 664 | E: tausq@debian.org |
660 | D: Linux/PA-RISC hacker | 665 | D: Linux/PA-RISC hacker |
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml index e287c8fc803b..4165e7bfa4ff 100644 --- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml +++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml | |||
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active | |||
73 | format. For the single-planar API, applications must set <structfield> plane | 73 | format. For the single-planar API, applications must set <structfield> plane |
74 | </structfield> to zero. Additional flags may be posted in the <structfield> | 74 | </structfield> to zero. Additional flags may be posted in the <structfield> |
75 | flags </structfield> field. Refer to a manual for open() for details. | 75 | flags </structfield> field. Refer to a manual for open() for details. |
76 | Currently only O_CLOEXEC is supported. All other fields must be set to zero. | 76 | Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All |
77 | other fields must be set to zero. | ||
77 | In the case of multi-planar API, every plane is exported separately using | 78 | In the case of multi-planar API, every plane is exported separately using |
78 | multiple <constant> VIDIOC_EXPBUF </constant> calls. </para> | 79 | multiple <constant> VIDIOC_EXPBUF </constant> calls. </para> |
79 | 80 | ||
@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry> | |||
170 | <entry>__u32</entry> | 171 | <entry>__u32</entry> |
171 | <entry><structfield>flags</structfield></entry> | 172 | <entry><structfield>flags</structfield></entry> |
172 | <entry>Flags for the newly created file, currently only <constant> | 173 | <entry>Flags for the newly created file, currently only <constant> |
173 | O_CLOEXEC </constant> is supported, refer to the manual of open() for more | 174 | O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY |
174 | details.</entry> | 175 | </constant>, and <constant>O_RDWR</constant> are supported, refer to the manual |
176 | of open() for more details.</entry> | ||
175 | </row> | 177 | </row> |
176 | <row> | 178 | <row> |
177 | <entry>__s32</entry> | 179 | <entry>__s32</entry> |
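The VIDIOC_EXPBUF documentation change above extends the accepted flags from O_CLOEXEC alone to O_CLOEXEC, O_RDONLY, O_WRONLY and O_RDWR. A minimal sketch of exporting a buffer under the new rules (device setup, VIDIOC_REQBUFS and error reporting are omitted; the capture buffer type and buffer index are assumptions):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Export buffer 0 of a single-planar capture queue as a DMABUF fd
     * that is readable, writable and closed on exec. All other fields
     * of struct v4l2_exportbuffer must be zero. */
    int export_first_buffer(int vfd)
    {
            struct v4l2_exportbuffer expbuf;

            memset(&expbuf, 0, sizeof(expbuf));
            expbuf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            expbuf.index = 0;
            expbuf.plane = 0;                     /* single-planar API: must be zero */
            expbuf.flags = O_CLOEXEC | O_RDWR;    /* combination now permitted */

            if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) == -1)
                    return -1;

            return expbuf.fd;                     /* DMABUF file descriptor */
    }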
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt index f4faec0f66e4..2f2c6cdd73c0 100644 --- a/Documentation/assoc_array.txt +++ b/Documentation/assoc_array.txt | |||
@@ -164,10 +164,10 @@ This points to a number of methods, all of which need to be provided: | |||
164 | 164 | ||
165 | (4) Diff the index keys of two objects. | 165 | (4) Diff the index keys of two objects. |
166 | 166 | ||
167 | int (*diff_objects)(const void *a, const void *b); | 167 | int (*diff_objects)(const void *object, const void *index_key); |
168 | 168 | ||
169 | Return the bit position at which the index keys of two objects differ or | 169 | Return the bit position at which the index key of the specified object |
170 | -1 if they are the same. | 170 | differs from the given index key or -1 if they are the same. |
171 | 171 | ||
172 | 172 | ||
173 | (5) Free an object. | 173 | (5) Free an object. |
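Under the new diff_objects() signature documented above, a minimal sketch of an implementation, assuming a hypothetical object type whose index key is a single 32-bit value (real users typically compare longer key blobs):

    #include <linux/bitops.h>
    #include <linux/types.h>

    struct foo {                    /* hypothetical object type */
            u32 key;
    };

    static int foo_diff_objects(const void *object, const void *index_key)
    {
            u32 diff = ((const struct foo *)object)->key ^ *(const u32 *)index_key;

            if (!diff)
                    return -1;      /* index keys are identical */
            return __ffs(diff);     /* bit position of the first difference */
    }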
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt new file mode 100644 index 000000000000..b2830b435895 --- /dev/null +++ b/Documentation/block/null_blk.txt | |||
@@ -0,0 +1,72 @@ | |||
1 | Null block device driver | ||
2 | ================================================================================ | ||
3 | |||
4 | I. Overview | ||
5 | |||
6 | The null block device (/dev/nullb*) is used for benchmarking the various | ||
7 | block-layer implementations. It emulates a block device of X gigabytes in size. | ||
8 | The following instances are possible: | ||
9 | |||
10 | Single-queue block-layer | ||
11 | - Request-based. | ||
12 | - Single submission queue per device. | ||
13 | - Implements IO scheduling algorithms (CFQ, Deadline, noop). | ||
14 | Multi-queue block-layer | ||
15 | - Request-based. | ||
16 | - Configurable submission queues per device. | ||
17 | No block-layer (Known as bio-based) | ||
18 | - Bio-based. IO requests are submitted directly to the device driver. | ||
19 | - Directly accepts bio data structure and returns them. | ||
20 | |||
21 | All of them have a completion queue for each core in the system. | ||
22 | |||
23 | II. Module parameters applicable for all instances: | ||
24 | |||
25 | queue_mode=[0-2]: Default: 2-Multi-queue | ||
26 | Selects which block-layer the module should instantiate with. | ||
27 | |||
28 | 0: Bio-based. | ||
29 | 1: Single-queue. | ||
30 | 2: Multi-queue. | ||
31 | |||
32 | home_node=[0--nr_nodes]: Default: NUMA_NO_NODE | ||
33 | Selects what CPU node the data structures are allocated from. | ||
34 | |||
35 | gb=[Size in GB]: Default: 250GB | ||
36 | The size of the device reported to the system. | ||
37 | |||
38 | bs=[Block size (in bytes)]: Default: 512 bytes | ||
39 | The block size reported to the system. | ||
40 | |||
41 | nr_devices=[Number of devices]: Default: 2 | ||
42 | Number of block devices instantiated. They are instantiated as /dev/nullb0, | ||
43 | etc. | ||
44 | |||
45 | irq_mode=[0-2]: Default: 1-Soft-irq | ||
46 | The completion mode used for completing IOs to the block-layer. | ||
47 | |||
48 | 0: None. | ||
49 | 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead | ||
50 | when IOs are issued from another CPU node than the home the device is | ||
51 | connected to. | ||
52 | 2: Timer: Waits a specific period (completion_nsec) for each IO before | ||
53 | completion. | ||
54 | |||
55 | completion_nsec=[ns]: Default: 10.000ns | ||
56 | Combined with irq_mode=2 (timer). The time each completion event must wait. | ||
57 | |||
58 | submit_queues=[0..nr_cpus]: | ||
59 | The number of submission queues attached to the device driver. If unset, it | ||
60 | defaults to 1 on single-queue and bio-based instances. For multi-queue, | ||
61 | it is ignored when use_per_node_hctx module parameter is 1. | ||
62 | |||
63 | hw_queue_depth=[0..qdepth]: Default: 64 | ||
64 | The hardware queue depth of the device. | ||
65 | |||
66 | III: Multi-queue specific parameters | ||
67 | |||
68 | use_per_node_hctx=[0/1]: Default: 0 | ||
69 | 0: The number of submit queues are set to the value of the submit_queues | ||
70 | parameter. | ||
71 | 1: The multi-queue block layer is instantiated with a hardware dispatch | ||
72 | queue for each CPU node in the system. | ||
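As a usage illustration only (all values below are arbitrary, not recommendations), a multi-queue instance with two 4 GB devices, a 4096-byte block size and four submission queues could be loaded with:

    modprobe null_blk queue_mode=2 gb=4 bs=4096 nr_devices=2 submit_queues=4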
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt index 274752f8bdf9..719320b5ed3f 100644 --- a/Documentation/device-mapper/cache.txt +++ b/Documentation/device-mapper/cache.txt | |||
@@ -266,10 +266,12 @@ E.g. | |||
266 | Invalidation is removing an entry from the cache without writing it | 266 | Invalidation is removing an entry from the cache without writing it |
267 | back. Cache blocks can be invalidated via the invalidate_cblocks | 267 | back. Cache blocks can be invalidated via the invalidate_cblocks |
268 | message, which takes an arbitrary number of cblock ranges. Each cblock | 268 | message, which takes an arbitrary number of cblock ranges. Each cblock |
269 | must be expressed as a decimal value, in the future a variant message | 269 | range's end value is "one past the end", meaning 5-10 expresses a range |
270 | that takes cblock ranges expressed in hexidecimal may be needed to | 270 | of values from 5 to 9. Each cblock must be expressed as a decimal |
271 | better support efficient invalidation of larger caches. The cache must | 271 | value, in the future a variant message that takes cblock ranges |
272 | be in passthrough mode when invalidate_cblocks is used. | 272 | expressed in hexidecimal may be needed to better support efficient |
273 | invalidation of larger caches. The cache must be in passthrough mode | ||
274 | when invalidate_cblocks is used. | ||
273 | 275 | ||
274 | invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* | 276 | invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* |
275 | 277 | ||
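Assuming a cache target named "cache" that is already in passthrough mode (the target name is illustrative), invalidating single cblock 100 together with the half-open range 5-10, i.e. cblocks 5 through 9, would be sent as:

    dmsetup message cache 0 invalidate_cblocks 100 5-10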
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt index 46f5c791ea0d..0f2f920e8734 100644 --- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt | |||
@@ -159,6 +159,8 @@ clock which they consume. | |||
159 | mixer 343 | 159 | mixer 343 |
160 | hdmi 344 | 160 | hdmi 344 |
161 | g2d 345 | 161 | g2d 345 |
162 | mdma0 346 | ||
163 | smmu_mdma0 347 | ||
162 | 164 | ||
163 | 165 | ||
164 | [Clock Muxes] | 166 | [Clock Muxes] |
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt index 48b259e29e87..bad381faf036 100644 --- a/Documentation/devicetree/bindings/net/davinci_emac.txt +++ b/Documentation/devicetree/bindings/net/davinci_emac.txt | |||
@@ -4,7 +4,7 @@ This file provides information, what the device node | |||
4 | for the davinci_emac interface contains. | 4 | for the davinci_emac interface contains. |
5 | 5 | ||
6 | Required properties: | 6 | Required properties: |
7 | - compatible: "ti,davinci-dm6467-emac"; | 7 | - compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac" |
8 | - reg: Offset and length of the register set for the device | 8 | - reg: Offset and length of the register set for the device |
9 | - ti,davinci-ctrl-reg-offset: offset to control register | 9 | - ti,davinci-ctrl-reg-offset: offset to control register |
10 | - ti,davinci-ctrl-mod-reg-offset: offset to control module register | 10 | - ti,davinci-ctrl-mod-reg-offset: offset to control module register |
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt index 953049b4248a..5a41a8658daa 100644 --- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt +++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt | |||
@@ -8,3 +8,7 @@ Required properties: | |||
8 | Optional properties: | 8 | Optional properties: |
9 | - phy-device : phandle to Ethernet phy | 9 | - phy-device : phandle to Ethernet phy |
10 | - local-mac-address : Ethernet mac address to use | 10 | - local-mac-address : Ethernet mac address to use |
11 | - reg-io-width : Mask of sizes (in bytes) of the IO accesses that | ||
12 | are supported on the device. Valid value for SMSC LAN91c111 are | ||
13 | 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning | ||
14 | 16-bit access only. | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 50680a59a2ff..b9e9bd854298 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1529 | 1529 | ||
1530 | * atapi_dmadir: Enable ATAPI DMADIR bridge support | 1530 | * atapi_dmadir: Enable ATAPI DMADIR bridge support |
1531 | 1531 | ||
1532 | * disable: Disable this device. | ||
1533 | |||
1532 | If there are multiple matching configurations changing | 1534 | If there are multiple matching configurations changing |
1533 | the same attribute, the last one is used. | 1535 | the same attribute, the last one is used. |
1534 | 1536 | ||
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c index 0c980ad40b17..4d17487d5ad9 100644 --- a/Documentation/mic/mpssd/mpssd.c +++ b/Documentation/mic/mpssd/mpssd.c | |||
@@ -313,7 +313,7 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type) | |||
313 | int i; | 313 | int i; |
314 | void *dp = get_dp(mic, type); | 314 | void *dp = get_dp(mic, type); |
315 | 315 | ||
316 | for (i = mic_aligned_size(struct mic_bootparam); i < PAGE_SIZE; | 316 | for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE; |
317 | i += mic_total_desc_size(d)) { | 317 | i += mic_total_desc_size(d)) { |
318 | d = dp + i; | 318 | d = dp + i; |
319 | 319 | ||
@@ -445,8 +445,8 @@ init_vr(struct mic_info *mic, int fd, int type, | |||
445 | __func__, mic->name, vr0->va, vr0->info, vr_size, | 445 | __func__, mic->name, vr0->va, vr0->info, vr_size, |
446 | vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); | 446 | vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); |
447 | mpsslog("magic 0x%x expected 0x%x\n", | 447 | mpsslog("magic 0x%x expected 0x%x\n", |
448 | vr0->info->magic, MIC_MAGIC + type); | 448 | le32toh(vr0->info->magic), MIC_MAGIC + type); |
449 | assert(vr0->info->magic == MIC_MAGIC + type); | 449 | assert(le32toh(vr0->info->magic) == MIC_MAGIC + type); |
450 | if (vr1) { | 450 | if (vr1) { |
451 | vr1->va = (struct mic_vring *) | 451 | vr1->va = (struct mic_vring *) |
452 | &va[MIC_DEVICE_PAGE_END + vr_size]; | 452 | &va[MIC_DEVICE_PAGE_END + vr_size]; |
@@ -458,8 +458,8 @@ init_vr(struct mic_info *mic, int fd, int type, | |||
458 | __func__, mic->name, vr1->va, vr1->info, vr_size, | 458 | __func__, mic->name, vr1->va, vr1->info, vr_size, |
459 | vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); | 459 | vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN)); |
460 | mpsslog("magic 0x%x expected 0x%x\n", | 460 | mpsslog("magic 0x%x expected 0x%x\n", |
461 | vr1->info->magic, MIC_MAGIC + type + 1); | 461 | le32toh(vr1->info->magic), MIC_MAGIC + type + 1); |
462 | assert(vr1->info->magic == MIC_MAGIC + type + 1); | 462 | assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1); |
463 | } | 463 | } |
464 | done: | 464 | done: |
465 | return va; | 465 | return va; |
@@ -520,7 +520,7 @@ static void * | |||
520 | virtio_net(void *arg) | 520 | virtio_net(void *arg) |
521 | { | 521 | { |
522 | static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; | 522 | static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)]; |
523 | static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __aligned(64); | 523 | static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64))); |
524 | struct iovec vnet_iov[2][2] = { | 524 | struct iovec vnet_iov[2][2] = { |
525 | { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, | 525 | { { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) }, |
526 | { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, | 526 | { .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } }, |
@@ -1412,6 +1412,12 @@ mic_config(void *arg) | |||
1412 | } | 1412 | } |
1413 | 1413 | ||
1414 | do { | 1414 | do { |
1415 | ret = lseek(fd, 0, SEEK_SET); | ||
1416 | if (ret < 0) { | ||
1417 | mpsslog("%s: Failed to seek to file start '%s': %s\n", | ||
1418 | mic->name, pathname, strerror(errno)); | ||
1419 | goto close_error1; | ||
1420 | } | ||
1415 | ret = read(fd, value, sizeof(value)); | 1421 | ret = read(fd, value, sizeof(value)); |
1416 | if (ret < 0) { | 1422 | if (ret < 0) { |
1417 | mpsslog("%s: Failed to read sysfs entry '%s': %s\n", | 1423 | mpsslog("%s: Failed to read sysfs entry '%s': %s\n", |
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt new file mode 100644 index 000000000000..2b40e04d3c49 --- /dev/null +++ b/Documentation/module-signing.txt | |||
@@ -0,0 +1,240 @@ | |||
1 | ============================== | ||
2 | KERNEL MODULE SIGNING FACILITY | ||
3 | ============================== | ||
4 | |||
5 | CONTENTS | ||
6 | |||
7 | - Overview. | ||
8 | - Configuring module signing. | ||
9 | - Generating signing keys. | ||
10 | - Public keys in the kernel. | ||
11 | - Manually signing modules. | ||
12 | - Signed modules and stripping. | ||
13 | - Loading signed modules. | ||
14 | - Non-valid signatures and unsigned modules. | ||
15 | - Administering/protecting the private key. | ||
16 | |||
17 | |||
18 | ======== | ||
19 | OVERVIEW | ||
20 | ======== | ||
21 | |||
22 | The kernel module signing facility cryptographically signs modules during | ||
23 | installation and then checks the signature upon loading the module. This | ||
24 | allows increased kernel security by disallowing the loading of unsigned modules | ||
25 | or modules signed with an invalid key. Module signing increases security by | ||
26 | making it harder to load a malicious module into the kernel. The module | ||
27 | signature checking is done by the kernel so that it is not necessary to have | ||
28 | trusted userspace bits. | ||
29 | |||
30 | This facility uses X.509 ITU-T standard certificates to encode the public keys | ||
31 | involved. The signatures are not themselves encoded in any industrial standard | ||
32 | type. The facility currently only supports the RSA public key encryption | ||
33 | standard (though it is pluggable and permits others to be used). The possible | ||
34 | hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and | ||
35 | SHA-512 (the algorithm is selected by data in the signature). | ||
36 | |||
37 | |||
38 | ========================== | ||
39 | CONFIGURING MODULE SIGNING | ||
40 | ========================== | ||
41 | |||
42 | The module signing facility is enabled by going to the "Enable Loadable Module | ||
43 | Support" section of the kernel configuration and turning on | ||
44 | |||
45 | CONFIG_MODULE_SIG "Module signature verification" | ||
46 | |||
47 | This has a number of options available: | ||
48 | |||
49 | (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE) | ||
50 | |||
51 | This specifies how the kernel should deal with a module that has a | ||
52 | signature for which the key is not known or a module that is unsigned. | ||
53 | |||
54 | If this is off (ie. "permissive"), then modules for which the key is not | ||
55 | available and modules that are unsigned are permitted, but the kernel will | ||
56 | be marked as being tainted. | ||
57 | |||
58 | If this is on (ie. "restrictive"), only modules that have a valid | ||
59 | signature that can be verified by a public key in the kernel's possession | ||
60 | will be loaded. All other modules will generate an error. | ||
61 | |||
62 | Irrespective of the setting here, if the module has a signature block that | ||
63 | cannot be parsed, it will be rejected out of hand. | ||
64 | |||
65 | |||
66 | (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL) | ||
67 | |||
68 | If this is on then modules will be automatically signed during the | ||
69 | modules_install phase of a build. If this is off, then the modules must | ||
70 | be signed manually using: | ||
71 | |||
72 | scripts/sign-file | ||
73 | |||
74 | |||
75 | (3) "Which hash algorithm should modules be signed with?" | ||
76 | |||
77 | This presents a choice of which hash algorithm the installation phase will | ||
78 | sign the modules with: | ||
79 | |||
80 | CONFIG_SIG_SHA1 "Sign modules with SHA-1" | ||
81 | CONFIG_SIG_SHA224 "Sign modules with SHA-224" | ||
82 | CONFIG_SIG_SHA256 "Sign modules with SHA-256" | ||
83 | CONFIG_SIG_SHA384 "Sign modules with SHA-384" | ||
84 | CONFIG_SIG_SHA512 "Sign modules with SHA-512" | ||
85 | |||
86 | The algorithm selected here will also be built into the kernel (rather | ||
87 | than being a module) so that modules signed with that algorithm can have | ||
88 | their signatures checked without causing a dependency loop. | ||
89 | |||
90 | |||
91 | ======================= | ||
92 | GENERATING SIGNING KEYS | ||
93 | ======================= | ||
94 | |||
95 | Cryptographic keypairs are required to generate and check signatures. A | ||
96 | private key is used to generate a signature and the corresponding public key is | ||
97 | used to check it. The private key is only needed during the build, after which | ||
98 | it can be deleted or stored securely. The public key gets built into the | ||
99 | kernel so that it can be used to check the signatures as the modules are | ||
100 | loaded. | ||
101 | |||
102 | Under normal conditions, the kernel build will automatically generate a new | ||
103 | keypair using openssl if one does not exist in the files: | ||
104 | |||
105 | signing_key.priv | ||
106 | signing_key.x509 | ||
107 | |||
108 | during the building of vmlinux (the public part of the key needs to be built | ||
109 | into vmlinux) using parameters in the: | ||
110 | |||
111 | x509.genkey | ||
112 | |||
113 | file (which is also generated if it does not already exist). | ||
114 | |||
115 | It is strongly recommended that you provide your own x509.genkey file. | ||
116 | |||
117 | Most notably, in the x509.genkey file, the req_distinguished_name section | ||
118 | should be altered from the default: | ||
119 | |||
120 | [ req_distinguished_name ] | ||
121 | O = Magrathea | ||
122 | CN = Glacier signing key | ||
123 | emailAddress = slartibartfast@magrathea.h2g2 | ||
124 | |||
125 | The generated RSA key size can also be set with: | ||
126 | |||
127 | [ req ] | ||
128 | default_bits = 4096 | ||
129 | |||
130 | |||
131 | It is also possible to manually generate the key private/public files using the | ||
132 | x509.genkey key generation configuration file in the root node of the Linux | ||
133 | kernel sources tree and the openssl command. The following is an example to | ||
134 | generate the public/private key files: | ||
135 | |||
136 | openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \ | ||
137 | -config x509.genkey -outform DER -out signing_key.x509 \ | ||
138 | -keyout signing_key.priv | ||
139 | |||
140 | |||
141 | ========================= | ||
142 | PUBLIC KEYS IN THE KERNEL | ||
143 | ========================= | ||
144 | |||
145 | The kernel contains a ring of public keys that can be viewed by root. They're | ||
146 | in a keyring called ".system_keyring" that can be seen by: | ||
147 | |||
148 | [root@deneb ~]# cat /proc/keys | ||
149 | ... | ||
150 | 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1 | ||
151 | 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 [] | ||
152 | ... | ||
153 | |||
154 | Beyond the public key generated specifically for module signing, any file | ||
155 | placed in the kernel source root directory or the kernel build root directory | ||
156 | whose name is suffixed with ".x509" will be assumed to be an X.509 public key | ||
157 | and will be added to the keyring. | ||
158 | |||
159 | Further, the architecture code may take public keys from a hardware store and | ||
160 | add those in also (e.g. from the UEFI key database). | ||
161 | |||
162 | Finally, it is possible to add additional public keys by doing: | ||
163 | |||
164 | keyctl padd asymmetric "" [.system_keyring-ID] <[key-file] | ||
165 | |||
166 | e.g.: | ||
167 | |||
168 | keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509 | ||
169 | |||
170 | Note, however, that the kernel will only permit keys to be added to | ||
171 | .system_keyring _if_ the new key's X.509 wrapper is validly signed by a key | ||
172 | that is already resident in the .system_keyring at the time the key was added. | ||
173 | |||
174 | |||
175 | ========================= | ||
176 | MANUALLY SIGNING MODULES | ||
177 | ========================= | ||
178 | |||
179 | To manually sign a module, use the scripts/sign-file tool available in | ||
180 | the Linux kernel source tree. The script requires 4 arguments: | ||
181 | |||
182 | 1. The hash algorithm (e.g., sha256) | ||
183 | 2. The private key filename | ||
184 | 3. The public key filename | ||
185 | 4. The kernel module to be signed | ||
186 | |||
187 | The following is an example to sign a kernel module: | ||
188 | |||
189 | scripts/sign-file sha512 kernel-signkey.priv \ | ||
190 | kernel-signkey.x509 module.ko | ||
191 | |||
192 | The hash algorithm used does not have to match the one configured, but if it | ||
193 | doesn't, you should make sure that hash algorithm is either built into the | ||
194 | kernel or can be loaded without requiring itself. | ||
195 | |||
196 | |||
197 | ============================ | ||
198 | SIGNED MODULES AND STRIPPING | ||
199 | ============================ | ||
200 | |||
201 | A signed module has a digital signature simply appended at the end. The string | ||
202 | "~Module signature appended~." at the end of the module's file confirms that a | ||
203 | signature is present but it does not confirm that the signature is valid! | ||
204 | |||
205 | Signed modules are BRITTLE as the signature is outside of the defined ELF | ||
206 | container. Thus they MAY NOT be stripped once the signature is computed and | ||
207 | attached. Note the entire module is the signed payload, including any and all | ||
208 | debug information present at the time of signing. | ||
209 | |||
210 | |||
211 | ====================== | ||
212 | LOADING SIGNED MODULES | ||
213 | ====================== | ||
214 | |||
215 | Modules are loaded with insmod, modprobe, init_module() or finit_module(), | ||
216 | exactly as for unsigned modules as no processing is done in userspace. The | ||
217 | signature checking is all done within the kernel. | ||
218 | |||
219 | |||
220 | ========================================= | ||
221 | NON-VALID SIGNATURES AND UNSIGNED MODULES | ||
222 | ========================================= | ||
223 | |||
224 | If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on | ||
225 | the kernel command line, the kernel will only load validly signed modules | ||
226 | for which it has a public key. Otherwise, it will also load modules that are | ||
227 | unsigned. Any module for which the kernel has a key, but which proves to have | ||
228 | a signature mismatch will not be permitted to load. | ||
229 | |||
230 | Any module that has an unparseable signature will be rejected. | ||
231 | |||
232 | |||
233 | ========================================= | ||
234 | ADMINISTERING/PROTECTING THE PRIVATE KEY | ||
235 | ========================================= | ||
236 | |||
237 | Since the private key is used to sign modules, viruses and malware could use | ||
238 | the private key to sign modules and compromise the operating system. The | ||
239 | private key must be either destroyed or moved to a secure location and not kept | ||
240 | in the root node of the kernel source tree. | ||
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 3c12d9a7ed00..8a984e994e61 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER | |||
16 | Default: 64 (as recommended by RFC1700) | 16 | Default: 64 (as recommended by RFC1700) |
17 | 17 | ||
18 | ip_no_pmtu_disc - BOOLEAN | 18 | ip_no_pmtu_disc - BOOLEAN |
19 | Disable Path MTU Discovery. | 19 | Disable Path MTU Discovery. If enabled and a |
20 | default FALSE | 20 | fragmentation-required ICMP is received, the PMTU to this |
21 | destination will be set to min_pmtu (see below). You will need | ||
22 | to raise min_pmtu to the smallest interface MTU on your system | ||
23 | manually if you want to avoid locally generated fragments. | ||
24 | Default: FALSE | ||
21 | 25 | ||
22 | min_pmtu - INTEGER | 26 | min_pmtu - INTEGER |
23 | default 552 - minimum discovered Path MTU | 27 | default 552 - minimum discovered Path MTU |
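A hedged example of the tuning described above, assuming a smallest interface MTU of 1500 (min_pmtu is exposed under net.ipv4.route):

    sysctl -w net.ipv4.ip_no_pmtu_disc=1
    sysctl -w net.ipv4.route.min_pmtu=1500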
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index c01223628a87..8e48e3b14227 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt | |||
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below. | |||
123 | [shutdown] close() --------> destruction of the transmission socket and | 123 | [shutdown] close() --------> destruction of the transmission socket and |
124 | deallocation of all associated resources. | 124 | deallocation of all associated resources. |
125 | 125 | ||
126 | Socket creation and destruction is also straight forward, and is done | ||
127 | the same way as in capturing described in the previous paragraph: | ||
128 | |||
129 | int fd = socket(PF_PACKET, mode, 0); | ||
130 | |||
131 | The protocol can optionally be 0 in case we only want to transmit | ||
132 | via this socket, which avoids an expensive call to packet_rcv(). | ||
133 | In this case, you also need to bind(2) the TX_RING with sll_protocol = 0 | ||
134 | set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example. | ||
135 | |||
126 | Binding the socket to your network interface is mandatory (with zero copy) to | 136 | Binding the socket to your network interface is mandatory (with zero copy) to |
127 | know the header size of frames used in the circular buffer. | 137 | know the header size of frames used in the circular buffer. |
128 | 138 | ||
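A minimal sketch of the TX-only setup described above, with the interface name as an assumption and the TX_RING configuration itself omitted:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <net/if.h>

    /* Protocol 0 means the socket never receives, so packet_rcv() is
     * skipped; sll_protocol must then also be 0 when binding. */
    int open_tx_only_socket(const char *ifname)
    {
            struct sockaddr_ll ll;
            int fd = socket(PF_PACKET, SOCK_RAW, 0);

            if (fd < 0)
                    return -1;

            memset(&ll, 0, sizeof(ll));
            ll.sll_family   = AF_PACKET;
            ll.sll_protocol = 0;
            ll.sll_ifindex  = if_nametoindex(ifname);

            if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
                    close(fd);
                    return -1;
            }

            return fd;
    }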
diff --git a/MAINTAINERS b/MAINTAINERS index 13c15c83a46e..31a046213274 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts | |||
783 | F: arch/arm/boot/dts/sama*.dtsi | 783 | F: arch/arm/boot/dts/sama*.dtsi |
784 | 784 | ||
785 | ARM/CALXEDA HIGHBANK ARCHITECTURE | 785 | ARM/CALXEDA HIGHBANK ARCHITECTURE |
786 | M: Rob Herring <rob.herring@calxeda.com> | 786 | M: Rob Herring <robh@kernel.org> |
787 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 787 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
788 | S: Maintained | 788 | S: Maintained |
789 | F: arch/arm/mach-highbank/ | 789 | F: arch/arm/mach-highbank/ |
@@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h | |||
893 | F: arch/arm/mach-footbridge/ | 893 | F: arch/arm/mach-footbridge/ |
894 | 894 | ||
895 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE | 895 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE |
896 | M: Shawn Guo <shawn.guo@linaro.org> | ||
896 | M: Sascha Hauer <kernel@pengutronix.de> | 897 | M: Sascha Hauer <kernel@pengutronix.de> |
897 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 898 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
898 | S: Maintained | 899 | S: Maintained |
899 | T: git git://git.pengutronix.de/git/imx/linux-2.6.git | 900 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git |
900 | F: arch/arm/mach-imx/ | 901 | F: arch/arm/mach-imx/ |
902 | F: arch/arm/boot/dts/imx* | ||
901 | F: arch/arm/configs/imx*_defconfig | 903 | F: arch/arm/configs/imx*_defconfig |
902 | 904 | ||
903 | ARM/FREESCALE IMX6 | ||
904 | M: Shawn Guo <shawn.guo@linaro.org> | ||
905 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
906 | S: Maintained | ||
907 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git | ||
908 | F: arch/arm/mach-imx/*imx6* | ||
909 | |||
910 | ARM/FREESCALE MXS ARM ARCHITECTURE | 905 | ARM/FREESCALE MXS ARM ARCHITECTURE |
911 | M: Shawn Guo <shawn.guo@linaro.org> | 906 | M: Shawn Guo <shawn.guo@linaro.org> |
912 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 907 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1013,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com> | |||
1013 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1008 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1014 | S: Maintained | 1009 | S: Maintained |
1015 | F: arch/arm/mach-keystone/ | 1010 | F: arch/arm/mach-keystone/ |
1011 | F: drivers/clk/keystone/ | ||
1012 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git | ||
1016 | 1013 | ||
1017 | ARM/LOGICPD PXA270 MACHINE SUPPORT | 1014 | ARM/LOGICPD PXA270 MACHINE SUPPORT |
1018 | M: Lennert Buytenhek <kernel@wantstofly.org> | 1015 | M: Lennert Buytenhek <kernel@wantstofly.org> |
@@ -1371,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git | |||
1371 | S: Supported | 1368 | S: Supported |
1372 | F: arch/arm/mach-zynq/ | 1369 | F: arch/arm/mach-zynq/ |
1373 | F: drivers/cpuidle/cpuidle-zynq.c | 1370 | F: drivers/cpuidle/cpuidle-zynq.c |
1371 | N: zynq | ||
1372 | N: xilinx | ||
1373 | F: drivers/clocksource/cadence_ttc_timer.c | ||
1374 | 1374 | ||
1375 | ARM SMMU DRIVER | 1375 | ARM SMMU DRIVER |
1376 | M: Will Deacon <will.deacon@arm.com> | 1376 | M: Will Deacon <will.deacon@arm.com> |
@@ -2138,7 +2138,8 @@ S: Maintained | |||
2138 | F: Documentation/zh_CN/ | 2138 | F: Documentation/zh_CN/ |
2139 | 2139 | ||
2140 | CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER | 2140 | CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER |
2141 | M: Alexander Shishkin <alexander.shishkin@linux.intel.com> | 2141 | M: Peter Chen <Peter.Chen@freescale.com> |
2142 | T: git://github.com/hzpeterchen/linux-usb.git | ||
2142 | L: linux-usb@vger.kernel.org | 2143 | L: linux-usb@vger.kernel.org |
2143 | S: Maintained | 2144 | S: Maintained |
2144 | F: drivers/usb/chipidea/ | 2145 | F: drivers/usb/chipidea/ |
@@ -2827,8 +2828,10 @@ F: include/uapi/drm/ | |||
2827 | 2828 | ||
2828 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) | 2829 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) |
2829 | M: Daniel Vetter <daniel.vetter@ffwll.ch> | 2830 | M: Daniel Vetter <daniel.vetter@ffwll.ch> |
2831 | M: Jani Nikula <jani.nikula@linux.intel.com> | ||
2830 | L: intel-gfx@lists.freedesktop.org | 2832 | L: intel-gfx@lists.freedesktop.org |
2831 | L: dri-devel@lists.freedesktop.org | 2833 | L: dri-devel@lists.freedesktop.org |
2834 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ | ||
2832 | T: git git://people.freedesktop.org/~danvet/drm-intel | 2835 | T: git git://people.freedesktop.org/~danvet/drm-intel |
2833 | S: Supported | 2836 | S: Supported |
2834 | F: drivers/gpu/drm/i915/ | 2837 | F: drivers/gpu/drm/i915/ |
@@ -3765,9 +3768,11 @@ F: include/uapi/linux/gigaset_dev.h | |||
3765 | 3768 | ||
3766 | GPIO SUBSYSTEM | 3769 | GPIO SUBSYSTEM |
3767 | M: Linus Walleij <linus.walleij@linaro.org> | 3770 | M: Linus Walleij <linus.walleij@linaro.org> |
3768 | S: Maintained | 3771 | M: Alexandre Courbot <gnurou@gmail.com> |
3769 | L: linux-gpio@vger.kernel.org | 3772 | L: linux-gpio@vger.kernel.org |
3770 | F: Documentation/gpio.txt | 3773 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git |
3774 | S: Maintained | ||
3775 | F: Documentation/gpio/ | ||
3771 | F: drivers/gpio/ | 3776 | F: drivers/gpio/ |
3772 | F: include/linux/gpio* | 3777 | F: include/linux/gpio* |
3773 | F: include/asm-generic/gpio.h | 3778 | F: include/asm-generic/gpio.h |
@@ -3835,6 +3840,12 @@ T: git git://linuxtv.org/media_tree.git | |||
3835 | S: Maintained | 3840 | S: Maintained |
3836 | F: drivers/media/usb/gspca/ | 3841 | F: drivers/media/usb/gspca/ |
3837 | 3842 | ||
3843 | GUID PARTITION TABLE (GPT) | ||
3844 | M: Davidlohr Bueso <davidlohr@hp.com> | ||
3845 | L: linux-efi@vger.kernel.org | ||
3846 | S: Maintained | ||
3847 | F: block/partitions/efi.* | ||
3848 | |||
3838 | STK1160 USB VIDEO CAPTURE DRIVER | 3849 | STK1160 USB VIDEO CAPTURE DRIVER |
3839 | M: Ezequiel Garcia <elezegarcia@gmail.com> | 3850 | M: Ezequiel Garcia <elezegarcia@gmail.com> |
3840 | L: linux-media@vger.kernel.org | 3851 | L: linux-media@vger.kernel.org |
@@ -4044,6 +4055,14 @@ W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi | |||
4044 | S: Maintained | 4055 | S: Maintained |
4045 | F: fs/hpfs/ | 4056 | F: fs/hpfs/ |
4046 | 4057 | ||
4058 | HSI SUBSYSTEM | ||
4059 | M: Sebastian Reichel <sre@debian.org> | ||
4060 | S: Maintained | ||
4061 | F: Documentation/ABI/testing/sysfs-bus-hsi | ||
4062 | F: drivers/hsi/ | ||
4063 | F: include/linux/hsi/ | ||
4064 | F: include/uapi/linux/hsi/ | ||
4065 | |||
4047 | HSO 3G MODEM DRIVER | 4066 | HSO 3G MODEM DRIVER |
4048 | M: Jan Dumon <j.dumon@option.com> | 4067 | M: Jan Dumon <j.dumon@option.com> |
4049 | W: http://www.pharscape.org | 4068 | W: http://www.pharscape.org |
@@ -4462,10 +4481,8 @@ M: Bruce Allan <bruce.w.allan@intel.com> | |||
4462 | M: Carolyn Wyborny <carolyn.wyborny@intel.com> | 4481 | M: Carolyn Wyborny <carolyn.wyborny@intel.com> |
4463 | M: Don Skidmore <donald.c.skidmore@intel.com> | 4482 | M: Don Skidmore <donald.c.skidmore@intel.com> |
4464 | M: Greg Rose <gregory.v.rose@intel.com> | 4483 | M: Greg Rose <gregory.v.rose@intel.com> |
4465 | M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> | ||
4466 | M: Alex Duyck <alexander.h.duyck@intel.com> | 4484 | M: Alex Duyck <alexander.h.duyck@intel.com> |
4467 | M: John Ronciak <john.ronciak@intel.com> | 4485 | M: John Ronciak <john.ronciak@intel.com> |
4468 | M: Tushar Dave <tushar.n.dave@intel.com> | ||
4469 | L: e1000-devel@lists.sourceforge.net | 4486 | L: e1000-devel@lists.sourceforge.net |
4470 | W: http://www.intel.com/support/feedback.htm | 4487 | W: http://www.intel.com/support/feedback.htm |
4471 | W: http://e1000.sourceforge.net/ | 4488 | W: http://e1000.sourceforge.net/ |
@@ -5909,12 +5926,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com> | |||
5909 | M: Herbert Xu <herbert@gondor.apana.org.au> | 5926 | M: Herbert Xu <herbert@gondor.apana.org.au> |
5910 | M: "David S. Miller" <davem@davemloft.net> | 5927 | M: "David S. Miller" <davem@davemloft.net> |
5911 | L: netdev@vger.kernel.org | 5928 | L: netdev@vger.kernel.org |
5912 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git | 5929 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git |
5930 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git | ||
5913 | S: Maintained | 5931 | S: Maintained |
5914 | F: net/xfrm/ | 5932 | F: net/xfrm/ |
5915 | F: net/key/ | 5933 | F: net/key/ |
5916 | F: net/ipv4/xfrm* | 5934 | F: net/ipv4/xfrm* |
5935 | F: net/ipv4/esp4.c | ||
5936 | F: net/ipv4/ah4.c | ||
5937 | F: net/ipv4/ipcomp.c | ||
5938 | F: net/ipv4/ip_vti.c | ||
5917 | F: net/ipv6/xfrm* | 5939 | F: net/ipv6/xfrm* |
5940 | F: net/ipv6/esp6.c | ||
5941 | F: net/ipv6/ah6.c | ||
5942 | F: net/ipv6/ipcomp6.c | ||
5943 | F: net/ipv6/ip6_vti.c | ||
5918 | F: include/uapi/linux/xfrm.h | 5944 | F: include/uapi/linux/xfrm.h |
5919 | F: include/net/xfrm.h | 5945 | F: include/net/xfrm.h |
5920 | 5946 | ||
@@ -6235,7 +6261,7 @@ F: drivers/i2c/busses/i2c-ocores.c | |||
6235 | 6261 | ||
6236 | OPEN FIRMWARE AND FLATTENED DEVICE TREE | 6262 | OPEN FIRMWARE AND FLATTENED DEVICE TREE |
6237 | M: Grant Likely <grant.likely@linaro.org> | 6263 | M: Grant Likely <grant.likely@linaro.org> |
6238 | M: Rob Herring <rob.herring@calxeda.com> | 6264 | M: Rob Herring <robh+dt@kernel.org> |
6239 | L: devicetree@vger.kernel.org | 6265 | L: devicetree@vger.kernel.org |
6240 | W: http://fdt.secretlab.ca | 6266 | W: http://fdt.secretlab.ca |
6241 | T: git git://git.secretlab.ca/git/linux-2.6.git | 6267 | T: git git://git.secretlab.ca/git/linux-2.6.git |
@@ -6247,7 +6273,7 @@ K: of_get_property | |||
6247 | K: of_match_table | 6273 | K: of_match_table |
6248 | 6274 | ||
6249 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS | 6275 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS |
6250 | M: Rob Herring <rob.herring@calxeda.com> | 6276 | M: Rob Herring <robh+dt@kernel.org> |
6251 | M: Pawel Moll <pawel.moll@arm.com> | 6277 | M: Pawel Moll <pawel.moll@arm.com> |
6252 | M: Mark Rutland <mark.rutland@arm.com> | 6278 | M: Mark Rutland <mark.rutland@arm.com> |
6253 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> | 6279 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> |
@@ -6461,19 +6487,52 @@ F: drivers/pci/ | |||
6461 | F: include/linux/pci* | 6487 | F: include/linux/pci* |
6462 | F: arch/x86/pci/ | 6488 | F: arch/x86/pci/ |
6463 | 6489 | ||
6490 | PCI DRIVER FOR IMX6 | ||
6491 | M: Richard Zhu <r65037@freescale.com> | ||
6492 | M: Shawn Guo <shawn.guo@linaro.org> | ||
6493 | L: linux-pci@vger.kernel.org | ||
6494 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
6495 | S: Maintained | ||
6496 | F: drivers/pci/host/*imx6* | ||
6497 | |||
6498 | PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) | ||
6499 | M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
6500 | M: Jason Cooper <jason@lakedaemon.net> | ||
6501 | L: linux-pci@vger.kernel.org | ||
6502 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
6503 | S: Maintained | ||
6504 | F: drivers/pci/host/*mvebu* | ||
6505 | |||
6464 | PCI DRIVER FOR NVIDIA TEGRA | 6506 | PCI DRIVER FOR NVIDIA TEGRA |
6465 | M: Thierry Reding <thierry.reding@gmail.com> | 6507 | M: Thierry Reding <thierry.reding@gmail.com> |
6466 | L: linux-tegra@vger.kernel.org | 6508 | L: linux-tegra@vger.kernel.org |
6509 | L: linux-pci@vger.kernel.org | ||
6467 | S: Supported | 6510 | S: Supported |
6468 | F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt | 6511 | F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt |
6469 | F: drivers/pci/host/pci-tegra.c | 6512 | F: drivers/pci/host/pci-tegra.c |
6470 | 6513 | ||
6514 | PCI DRIVER FOR RENESAS R-CAR | ||
6515 | M: Simon Horman <horms@verge.net.au> | ||
6516 | L: linux-pci@vger.kernel.org | ||
6517 | L: linux-sh@vger.kernel.org | ||
6518 | S: Maintained | ||
6519 | F: drivers/pci/host/*rcar* | ||
6520 | |||
6471 | PCI DRIVER FOR SAMSUNG EXYNOS | 6521 | PCI DRIVER FOR SAMSUNG EXYNOS |
6472 | M: Jingoo Han <jg1.han@samsung.com> | 6522 | M: Jingoo Han <jg1.han@samsung.com> |
6473 | L: linux-pci@vger.kernel.org | 6523 | L: linux-pci@vger.kernel.org |
6524 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
6525 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | ||
6474 | S: Maintained | 6526 | S: Maintained |
6475 | F: drivers/pci/host/pci-exynos.c | 6527 | F: drivers/pci/host/pci-exynos.c |
6476 | 6528 | ||
6529 | PCI DRIVER FOR SYNOPSIS DESIGNWARE | ||
6530 | M: Mohit Kumar <mohit.kumar@st.com> | ||
6531 | M: Jingoo Han <jg1.han@samsung.com> | ||
6532 | L: linux-pci@vger.kernel.org | ||
6533 | S: Maintained | ||
6534 | F: drivers/pci/host/*designware* | ||
6535 | |||
6477 | PCMCIA SUBSYSTEM | 6536 | PCMCIA SUBSYSTEM |
6478 | P: Linux PCMCIA Team | 6537 | P: Linux PCMCIA Team |
6479 | L: linux-pcmcia@lists.infradead.org | 6538 | L: linux-pcmcia@lists.infradead.org |
@@ -9536,7 +9595,7 @@ F: drivers/xen/*swiotlb* | |||
9536 | 9595 | ||
9537 | XFS FILESYSTEM | 9596 | XFS FILESYSTEM |
9538 | P: Silicon Graphics Inc | 9597 | P: Silicon Graphics Inc |
9539 | M: Dave Chinner <dchinner@fromorbit.com> | 9598 | M: Dave Chinner <david@fromorbit.com> |
9540 | M: Ben Myers <bpm@sgi.com> | 9599 | M: Ben Myers <bpm@sgi.com> |
9541 | M: xfs@oss.sgi.com | 9600 | M: xfs@oss.sgi.com |
9542 | L: xfs@oss.sgi.com | 9601 | L: xfs@oss.sgi.com |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 13 | 2 | PATCHLEVEL = 13 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc8 |
5 | NAME = One Giant Leap for Frogkind | 5 | NAME = One Giant Leap for Frogkind |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -732,19 +732,15 @@ export mod_strip_cmd | |||
732 | # Select initial ramdisk compression format, default is gzip(1). | 732 | # Select initial ramdisk compression format, default is gzip(1). |
733 | # This shall be used by the dracut(8) tool while creating an initramfs image. | 733 | # This shall be used by the dracut(8) tool while creating an initramfs image. |
734 | # | 734 | # |
735 | INITRD_COMPRESS=gzip | 735 | INITRD_COMPRESS-y := gzip |
736 | ifeq ($(CONFIG_RD_BZIP2), y) | 736 | INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2 |
737 | INITRD_COMPRESS=bzip2 | 737 | INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma |
738 | else ifeq ($(CONFIG_RD_LZMA), y) | 738 | INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz |
739 | INITRD_COMPRESS=lzma | 739 | INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo |
740 | else ifeq ($(CONFIG_RD_XZ), y) | 740 | INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 |
741 | INITRD_COMPRESS=xz | 741 | # do not export INITRD_COMPRESS, since we didn't actually |
742 | else ifeq ($(CONFIG_RD_LZO), y) | 742 | # choose a sane default compression above. |
743 | INITRD_COMPRESS=lzo | 743 | # export INITRD_COMPRESS := $(INITRD_COMPRESS-y) |
744 | else ifeq ($(CONFIG_RD_LZ4), y) | ||
745 | INITRD_COMPRESS=lz4 | ||
746 | endif | ||
747 | export INITRD_COMPRESS | ||
748 | 744 | ||
749 | ifdef CONFIG_MODULE_SIG_ALL | 745 | ifdef CONFIG_MODULE_SIG_ALL |
750 | MODSECKEY = ./signing_key.priv | 746 | MODSECKEY = ./signing_key.priv |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 2ee0c9bfd032..9063ae6553cc 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | config ARC | 9 | config ARC |
10 | def_bool y | 10 | def_bool y |
11 | select BUILDTIME_EXTABLE_SORT | ||
11 | select CLONE_BACKWARDS | 12 | select CLONE_BACKWARDS |
12 | # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev | 13 | # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev |
13 | select DEVTMPFS if !INITRAMFS_SOURCE="" | 14 | select DEVTMPFS if !INITRAMFS_SOURCE="" |
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 6f30484f34b7..39e58d1cdf90 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h | |||
@@ -8,6 +8,13 @@ | |||
8 | 8 | ||
9 | /******** no-legacy-syscalls-ABI *******/ | 9 | /******** no-legacy-syscalls-ABI *******/ |
10 | 10 | ||
11 | /* | ||
12 | * Non-typical guard macro to enable inclusion twice in ARCH sys.c | ||
13 | * That is how the Generic syscall wrapper generator works | ||
14 | */ | ||
15 | #if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) | ||
16 | #define _UAPI_ASM_ARC_UNISTD_H | ||
17 | |||
11 | #define __ARCH_WANT_SYS_EXECVE | 18 | #define __ARCH_WANT_SYS_EXECVE |
12 | #define __ARCH_WANT_SYS_CLONE | 19 | #define __ARCH_WANT_SYS_CLONE |
13 | #define __ARCH_WANT_SYS_VFORK | 20 | #define __ARCH_WANT_SYS_VFORK |
@@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls) | |||
32 | /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ | 39 | /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ |
33 | #define __NR_sysfs (__NR_arch_specific_syscall + 3) | 40 | #define __NR_sysfs (__NR_arch_specific_syscall + 3) |
34 | __SYSCALL(__NR_sysfs, sys_sysfs) | 41 | __SYSCALL(__NR_sysfs, sys_sysfs) |
42 | |||
43 | #undef __SYSCALL | ||
44 | |||
45 | #endif | ||
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index e46d81f70979..63177e4cb66d 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c | |||
@@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config) | |||
79 | cache_result = (config >> 16) & 0xff; | 79 | cache_result = (config >> 16) & 0xff; |
80 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | 80 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) |
81 | return -EINVAL; | 81 | return -EINVAL; |
82 | if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX) | 82 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) |
83 | return -EINVAL; | 83 | return -EINVAL; |
84 | if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 84 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
85 | return -EINVAL; | 85 | return -EINVAL; |
86 | 86 | ||
87 | ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; | 87 | ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; |
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index e99dfaf70052..03fcbf0a88a8 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts | |||
@@ -7,11 +7,11 @@ | |||
7 | */ | 7 | */ |
8 | /dts-v1/; | 8 | /dts-v1/; |
9 | 9 | ||
10 | #include "omap34xx.dtsi" | 10 | #include "am3517.dtsi" |
11 | 11 | ||
12 | / { | 12 | / { |
13 | model = "TI AM3517 EVM (AM3517/05)"; | 13 | model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)"; |
14 | compatible = "ti,am3517-evm", "ti,omap3"; | 14 | compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3"; |
15 | 15 | ||
16 | memory { | 16 | memory { |
17 | device_type = "memory"; | 17 | device_type = "memory"; |
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi new file mode 100644 index 000000000000..2fbe02faa8b1 --- /dev/null +++ b/arch/arm/boot/dts/am3517.dtsi | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Device Tree Source for am3517 SoC | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include "omap3.dtsi" | ||
12 | |||
13 | / { | ||
14 | aliases { | ||
15 | serial3 = &uart4; | ||
16 | }; | ||
17 | |||
18 | ocp { | ||
19 | am35x_otg_hs: am35x_otg_hs@5c040000 { | ||
20 | compatible = "ti,omap3-musb"; | ||
21 | ti,hwmods = "am35x_otg_hs"; | ||
22 | status = "disabled"; | ||
23 | reg = <0x5c040000 0x1000>; | ||
24 | interrupts = <71>; | ||
25 | interrupt-names = "mc"; | ||
26 | }; | ||
27 | |||
28 | davinci_emac: ethernet@0x5c000000 { | ||
29 | compatible = "ti,am3517-emac"; | ||
30 | ti,hwmods = "davinci_emac"; | ||
31 | status = "disabled"; | ||
32 | reg = <0x5c000000 0x30000>; | ||
33 | interrupts = <67 68 69 70>; | ||
34 | ti,davinci-ctrl-reg-offset = <0x10000>; | ||
35 | ti,davinci-ctrl-mod-reg-offset = <0>; | ||
36 | ti,davinci-ctrl-ram-offset = <0x20000>; | ||
37 | ti,davinci-ctrl-ram-size = <0x2000>; | ||
38 | ti,davinci-rmii-en = /bits/ 8 <1>; | ||
39 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
40 | }; | ||
41 | |||
42 | davinci_mdio: ethernet@0x5c030000 { | ||
43 | compatible = "ti,davinci_mdio"; | ||
44 | ti,hwmods = "davinci_mdio"; | ||
45 | status = "disabled"; | ||
46 | reg = <0x5c030000 0x1000>; | ||
47 | bus_freq = <1000000>; | ||
48 | #address-cells = <1>; | ||
49 | #size-cells = <0>; | ||
50 | }; | ||
51 | |||
52 | uart4: serial@4809e000 { | ||
53 | compatible = "ti,omap3-uart"; | ||
54 | ti,hwmods = "uart4"; | ||
55 | status = "disabled"; | ||
56 | reg = <0x4809e000 0x400>; | ||
57 | interrupts = <84>; | ||
58 | dmas = <&sdma 55 &sdma 54>; | ||
59 | dma-names = "tx", "rx"; | ||
60 | clock-frequency = <48000000>; | ||
61 | }; | ||
62 | }; | ||
63 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9db5047812f3..177becde7a26 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
@@ -559,7 +559,7 @@ | |||
559 | compatible = "arm,pl330", "arm,primecell"; | 559 | compatible = "arm,pl330", "arm,primecell"; |
560 | reg = <0x10800000 0x1000>; | 560 | reg = <0x10800000 0x1000>; |
561 | interrupts = <0 33 0>; | 561 | interrupts = <0 33 0>; |
562 | clocks = <&clock 271>; | 562 | clocks = <&clock 346>; |
563 | clock-names = "apb_pclk"; | 563 | clock-names = "apb_pclk"; |
564 | #dma-cells = <1>; | 564 | #dma-cells = <1>; |
565 | #dma-channels = <8>; | 565 | #dma-channels = <8>; |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index c2c306d13b87..6fc85f963530 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | /dts-v1/; | 10 | /dts-v1/; |
11 | 11 | ||
12 | #include "omap34xx.dtsi" | 12 | #include "omap34xx-hs.dtsi" |
13 | 13 | ||
14 | / { | 14 | / { |
15 | model = "Nokia N900"; | 15 | model = "Nokia N900"; |
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 94eb77d3b9dd..5c26c184f2c1 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi | |||
@@ -8,7 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include "omap36xx.dtsi" | 11 | #include "omap36xx-hs.dtsi" |
12 | 12 | ||
13 | / { | 13 | / { |
14 | cpus { | 14 | cpus { |
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi new file mode 100644 index 000000000000..1ff626489546 --- /dev/null +++ b/arch/arm/boot/dts/omap34xx-hs.dtsi | |||
@@ -0,0 +1,16 @@ | |||
1 | /* Disabled modules for secure omaps */ | ||
2 | |||
3 | #include "omap34xx.dtsi" | ||
4 | |||
5 | /* Secure omaps have some devices inaccessible depending on the firmware */ | ||
6 | &aes { | ||
7 | status = "disabled"; | ||
8 | }; | ||
9 | |||
10 | &sham { | ||
11 | status = "disabled"; | ||
12 | }; | ||
13 | |||
14 | &timer12 { | ||
15 | status = "disabled"; | ||
16 | }; | ||
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi new file mode 100644 index 000000000000..2c7febb0e016 --- /dev/null +++ b/arch/arm/boot/dts/omap36xx-hs.dtsi | |||
@@ -0,0 +1,16 @@ | |||
1 | /* Disabled modules for secure omaps */ | ||
2 | |||
3 | #include "omap36xx.dtsi" | ||
4 | |||
5 | /* Secure omaps have some devices inaccessible depending on the firmware */ | ||
6 | &aes { | ||
7 | status = "disabled"; | ||
8 | }; | ||
9 | |||
10 | &sham { | ||
11 | status = "disabled"; | ||
12 | }; | ||
13 | |||
14 | &timer12 { | ||
15 | status = "disabled"; | ||
16 | }; | ||
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index ee845fad939b..9987dd0e9c59 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi | |||
@@ -87,9 +87,9 @@ | |||
87 | interrupts = <1 9 0xf04>; | 87 | interrupts = <1 9 0xf04>; |
88 | }; | 88 | }; |
89 | 89 | ||
90 | gpio0: gpio@ffc40000 { | 90 | gpio0: gpio@e6050000 { |
91 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 91 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
92 | reg = <0 0xffc40000 0 0x2c>; | 92 | reg = <0 0xe6050000 0 0x50>; |
93 | interrupt-parent = <&gic>; | 93 | interrupt-parent = <&gic>; |
94 | interrupts = <0 4 0x4>; | 94 | interrupts = <0 4 0x4>; |
95 | #gpio-cells = <2>; | 95 | #gpio-cells = <2>; |
@@ -99,9 +99,9 @@ | |||
99 | interrupt-controller; | 99 | interrupt-controller; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | gpio1: gpio@ffc41000 { | 102 | gpio1: gpio@e6051000 { |
103 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 103 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
104 | reg = <0 0xffc41000 0 0x2c>; | 104 | reg = <0 0xe6051000 0 0x50>; |
105 | interrupt-parent = <&gic>; | 105 | interrupt-parent = <&gic>; |
106 | interrupts = <0 5 0x4>; | 106 | interrupts = <0 5 0x4>; |
107 | #gpio-cells = <2>; | 107 | #gpio-cells = <2>; |
@@ -111,9 +111,9 @@ | |||
111 | interrupt-controller; | 111 | interrupt-controller; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | gpio2: gpio@ffc42000 { | 114 | gpio2: gpio@e6052000 { |
115 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 115 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
116 | reg = <0 0xffc42000 0 0x2c>; | 116 | reg = <0 0xe6052000 0 0x50>; |
117 | interrupt-parent = <&gic>; | 117 | interrupt-parent = <&gic>; |
118 | interrupts = <0 6 0x4>; | 118 | interrupts = <0 6 0x4>; |
119 | #gpio-cells = <2>; | 119 | #gpio-cells = <2>; |
@@ -123,9 +123,9 @@ | |||
123 | interrupt-controller; | 123 | interrupt-controller; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | gpio3: gpio@ffc43000 { | 126 | gpio3: gpio@e6053000 { |
127 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 127 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
128 | reg = <0 0xffc43000 0 0x2c>; | 128 | reg = <0 0xe6053000 0 0x50>; |
129 | interrupt-parent = <&gic>; | 129 | interrupt-parent = <&gic>; |
130 | interrupts = <0 7 0x4>; | 130 | interrupts = <0 7 0x4>; |
131 | #gpio-cells = <2>; | 131 | #gpio-cells = <2>; |
@@ -135,9 +135,9 @@ | |||
135 | interrupt-controller; | 135 | interrupt-controller; |
136 | }; | 136 | }; |
137 | 137 | ||
138 | gpio4: gpio@ffc44000 { | 138 | gpio4: gpio@e6054000 { |
139 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 139 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
140 | reg = <0 0xffc44000 0 0x2c>; | 140 | reg = <0 0xe6054000 0 0x50>; |
141 | interrupt-parent = <&gic>; | 141 | interrupt-parent = <&gic>; |
142 | interrupts = <0 8 0x4>; | 142 | interrupts = <0 8 0x4>; |
143 | #gpio-cells = <2>; | 143 | #gpio-cells = <2>; |
@@ -147,9 +147,9 @@ | |||
147 | interrupt-controller; | 147 | interrupt-controller; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | gpio5: gpio@ffc45000 { | 150 | gpio5: gpio@e6055000 { |
151 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; | 151 | compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; |
152 | reg = <0 0xffc45000 0 0x2c>; | 152 | reg = <0 0xe6055000 0 0x50>; |
153 | interrupt-parent = <&gic>; | 153 | interrupt-parent = <&gic>; |
154 | interrupts = <0 9 0x4>; | 154 | interrupts = <0 9 0x4>; |
155 | #gpio-cells = <2>; | 155 | #gpio-cells = <2>; |
@@ -241,7 +241,7 @@ | |||
241 | 241 | ||
242 | sdhi0: sdhi@ee100000 { | 242 | sdhi0: sdhi@ee100000 { |
243 | compatible = "renesas,sdhi-r8a7790"; | 243 | compatible = "renesas,sdhi-r8a7790"; |
244 | reg = <0 0xee100000 0 0x100>; | 244 | reg = <0 0xee100000 0 0x200>; |
245 | interrupt-parent = <&gic>; | 245 | interrupt-parent = <&gic>; |
246 | interrupts = <0 165 4>; | 246 | interrupts = <0 165 4>; |
247 | cap-sd-highspeed; | 247 | cap-sd-highspeed; |
@@ -250,7 +250,7 @@ | |||
250 | 250 | ||
251 | sdhi1: sdhi@ee120000 { | 251 | sdhi1: sdhi@ee120000 { |
252 | compatible = "renesas,sdhi-r8a7790"; | 252 | compatible = "renesas,sdhi-r8a7790"; |
253 | reg = <0 0xee120000 0 0x100>; | 253 | reg = <0 0xee120000 0 0x200>; |
254 | interrupt-parent = <&gic>; | 254 | interrupt-parent = <&gic>; |
255 | interrupts = <0 166 4>; | 255 | interrupts = <0 166 4>; |
256 | cap-sd-highspeed; | 256 | cap-sd-highspeed; |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index c1751a64889a..7f5878c2784a 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
@@ -193,7 +193,10 @@ | |||
193 | pio: pinctrl@01c20800 { | 193 | pio: pinctrl@01c20800 { |
194 | compatible = "allwinner,sun6i-a31-pinctrl"; | 194 | compatible = "allwinner,sun6i-a31-pinctrl"; |
195 | reg = <0x01c20800 0x400>; | 195 | reg = <0x01c20800 0x400>; |
196 | interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>; | 196 | interrupts = <0 11 4>, |
197 | <0 15 4>, | ||
198 | <0 16 4>, | ||
199 | <0 17 4>; | ||
197 | clocks = <&apb1_gates 5>; | 200 | clocks = <&apb1_gates 5>; |
198 | gpio-controller; | 201 | gpio-controller; |
199 | interrupt-controller; | 202 | interrupt-controller; |
@@ -212,11 +215,11 @@ | |||
212 | timer@01c20c00 { | 215 | timer@01c20c00 { |
213 | compatible = "allwinner,sun4i-timer"; | 216 | compatible = "allwinner,sun4i-timer"; |
214 | reg = <0x01c20c00 0xa0>; | 217 | reg = <0x01c20c00 0xa0>; |
215 | interrupts = <0 18 1>, | 218 | interrupts = <0 18 4>, |
216 | <0 19 1>, | 219 | <0 19 4>, |
217 | <0 20 1>, | 220 | <0 20 4>, |
218 | <0 21 1>, | 221 | <0 21 4>, |
219 | <0 22 1>; | 222 | <0 22 4>; |
220 | clocks = <&osc24M>; | 223 | clocks = <&osc24M>; |
221 | }; | 224 | }; |
222 | 225 | ||
@@ -228,7 +231,7 @@ | |||
228 | uart0: serial@01c28000 { | 231 | uart0: serial@01c28000 { |
229 | compatible = "snps,dw-apb-uart"; | 232 | compatible = "snps,dw-apb-uart"; |
230 | reg = <0x01c28000 0x400>; | 233 | reg = <0x01c28000 0x400>; |
231 | interrupts = <0 0 1>; | 234 | interrupts = <0 0 4>; |
232 | reg-shift = <2>; | 235 | reg-shift = <2>; |
233 | reg-io-width = <4>; | 236 | reg-io-width = <4>; |
234 | clocks = <&apb2_gates 16>; | 237 | clocks = <&apb2_gates 16>; |
@@ -238,7 +241,7 @@ | |||
238 | uart1: serial@01c28400 { | 241 | uart1: serial@01c28400 { |
239 | compatible = "snps,dw-apb-uart"; | 242 | compatible = "snps,dw-apb-uart"; |
240 | reg = <0x01c28400 0x400>; | 243 | reg = <0x01c28400 0x400>; |
241 | interrupts = <0 1 1>; | 244 | interrupts = <0 1 4>; |
242 | reg-shift = <2>; | 245 | reg-shift = <2>; |
243 | reg-io-width = <4>; | 246 | reg-io-width = <4>; |
244 | clocks = <&apb2_gates 17>; | 247 | clocks = <&apb2_gates 17>; |
@@ -248,7 +251,7 @@ | |||
248 | uart2: serial@01c28800 { | 251 | uart2: serial@01c28800 { |
249 | compatible = "snps,dw-apb-uart"; | 252 | compatible = "snps,dw-apb-uart"; |
250 | reg = <0x01c28800 0x400>; | 253 | reg = <0x01c28800 0x400>; |
251 | interrupts = <0 2 1>; | 254 | interrupts = <0 2 4>; |
252 | reg-shift = <2>; | 255 | reg-shift = <2>; |
253 | reg-io-width = <4>; | 256 | reg-io-width = <4>; |
254 | clocks = <&apb2_gates 18>; | 257 | clocks = <&apb2_gates 18>; |
@@ -258,7 +261,7 @@ | |||
258 | uart3: serial@01c28c00 { | 261 | uart3: serial@01c28c00 { |
259 | compatible = "snps,dw-apb-uart"; | 262 | compatible = "snps,dw-apb-uart"; |
260 | reg = <0x01c28c00 0x400>; | 263 | reg = <0x01c28c00 0x400>; |
261 | interrupts = <0 3 1>; | 264 | interrupts = <0 3 4>; |
262 | reg-shift = <2>; | 265 | reg-shift = <2>; |
263 | reg-io-width = <4>; | 266 | reg-io-width = <4>; |
264 | clocks = <&apb2_gates 19>; | 267 | clocks = <&apb2_gates 19>; |
@@ -268,7 +271,7 @@ | |||
268 | uart4: serial@01c29000 { | 271 | uart4: serial@01c29000 { |
269 | compatible = "snps,dw-apb-uart"; | 272 | compatible = "snps,dw-apb-uart"; |
270 | reg = <0x01c29000 0x400>; | 273 | reg = <0x01c29000 0x400>; |
271 | interrupts = <0 4 1>; | 274 | interrupts = <0 4 4>; |
272 | reg-shift = <2>; | 275 | reg-shift = <2>; |
273 | reg-io-width = <4>; | 276 | reg-io-width = <4>; |
274 | clocks = <&apb2_gates 20>; | 277 | clocks = <&apb2_gates 20>; |
@@ -278,7 +281,7 @@ | |||
278 | uart5: serial@01c29400 { | 281 | uart5: serial@01c29400 { |
279 | compatible = "snps,dw-apb-uart"; | 282 | compatible = "snps,dw-apb-uart"; |
280 | reg = <0x01c29400 0x400>; | 283 | reg = <0x01c29400 0x400>; |
281 | interrupts = <0 5 1>; | 284 | interrupts = <0 5 4>; |
282 | reg-shift = <2>; | 285 | reg-shift = <2>; |
283 | reg-io-width = <4>; | 286 | reg-io-width = <4>; |
284 | clocks = <&apb2_gates 21>; | 287 | clocks = <&apb2_gates 21>; |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e46cfedde74c..367611a0730b 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -170,7 +170,7 @@ | |||
170 | emac: ethernet@01c0b000 { | 170 | emac: ethernet@01c0b000 { |
171 | compatible = "allwinner,sun4i-emac"; | 171 | compatible = "allwinner,sun4i-emac"; |
172 | reg = <0x01c0b000 0x1000>; | 172 | reg = <0x01c0b000 0x1000>; |
173 | interrupts = <0 55 1>; | 173 | interrupts = <0 55 4>; |
174 | clocks = <&ahb_gates 17>; | 174 | clocks = <&ahb_gates 17>; |
175 | status = "disabled"; | 175 | status = "disabled"; |
176 | }; | 176 | }; |
@@ -186,7 +186,7 @@ | |||
186 | pio: pinctrl@01c20800 { | 186 | pio: pinctrl@01c20800 { |
187 | compatible = "allwinner,sun7i-a20-pinctrl"; | 187 | compatible = "allwinner,sun7i-a20-pinctrl"; |
188 | reg = <0x01c20800 0x400>; | 188 | reg = <0x01c20800 0x400>; |
189 | interrupts = <0 28 1>; | 189 | interrupts = <0 28 4>; |
190 | clocks = <&apb0_gates 5>; | 190 | clocks = <&apb0_gates 5>; |
191 | gpio-controller; | 191 | gpio-controller; |
192 | interrupt-controller; | 192 | interrupt-controller; |
@@ -251,12 +251,12 @@ | |||
251 | timer@01c20c00 { | 251 | timer@01c20c00 { |
252 | compatible = "allwinner,sun4i-timer"; | 252 | compatible = "allwinner,sun4i-timer"; |
253 | reg = <0x01c20c00 0x90>; | 253 | reg = <0x01c20c00 0x90>; |
254 | interrupts = <0 22 1>, | 254 | interrupts = <0 22 4>, |
255 | <0 23 1>, | 255 | <0 23 4>, |
256 | <0 24 1>, | 256 | <0 24 4>, |
257 | <0 25 1>, | 257 | <0 25 4>, |
258 | <0 67 1>, | 258 | <0 67 4>, |
259 | <0 68 1>; | 259 | <0 68 4>; |
260 | clocks = <&osc24M>; | 260 | clocks = <&osc24M>; |
261 | }; | 261 | }; |
262 | 262 | ||
@@ -273,7 +273,7 @@ | |||
273 | uart0: serial@01c28000 { | 273 | uart0: serial@01c28000 { |
274 | compatible = "snps,dw-apb-uart"; | 274 | compatible = "snps,dw-apb-uart"; |
275 | reg = <0x01c28000 0x400>; | 275 | reg = <0x01c28000 0x400>; |
276 | interrupts = <0 1 1>; | 276 | interrupts = <0 1 4>; |
277 | reg-shift = <2>; | 277 | reg-shift = <2>; |
278 | reg-io-width = <4>; | 278 | reg-io-width = <4>; |
279 | clocks = <&apb1_gates 16>; | 279 | clocks = <&apb1_gates 16>; |
@@ -283,7 +283,7 @@ | |||
283 | uart1: serial@01c28400 { | 283 | uart1: serial@01c28400 { |
284 | compatible = "snps,dw-apb-uart"; | 284 | compatible = "snps,dw-apb-uart"; |
285 | reg = <0x01c28400 0x400>; | 285 | reg = <0x01c28400 0x400>; |
286 | interrupts = <0 2 1>; | 286 | interrupts = <0 2 4>; |
287 | reg-shift = <2>; | 287 | reg-shift = <2>; |
288 | reg-io-width = <4>; | 288 | reg-io-width = <4>; |
289 | clocks = <&apb1_gates 17>; | 289 | clocks = <&apb1_gates 17>; |
@@ -293,7 +293,7 @@ | |||
293 | uart2: serial@01c28800 { | 293 | uart2: serial@01c28800 { |
294 | compatible = "snps,dw-apb-uart"; | 294 | compatible = "snps,dw-apb-uart"; |
295 | reg = <0x01c28800 0x400>; | 295 | reg = <0x01c28800 0x400>; |
296 | interrupts = <0 3 1>; | 296 | interrupts = <0 3 4>; |
297 | reg-shift = <2>; | 297 | reg-shift = <2>; |
298 | reg-io-width = <4>; | 298 | reg-io-width = <4>; |
299 | clocks = <&apb1_gates 18>; | 299 | clocks = <&apb1_gates 18>; |
@@ -303,7 +303,7 @@ | |||
303 | uart3: serial@01c28c00 { | 303 | uart3: serial@01c28c00 { |
304 | compatible = "snps,dw-apb-uart"; | 304 | compatible = "snps,dw-apb-uart"; |
305 | reg = <0x01c28c00 0x400>; | 305 | reg = <0x01c28c00 0x400>; |
306 | interrupts = <0 4 1>; | 306 | interrupts = <0 4 4>; |
307 | reg-shift = <2>; | 307 | reg-shift = <2>; |
308 | reg-io-width = <4>; | 308 | reg-io-width = <4>; |
309 | clocks = <&apb1_gates 19>; | 309 | clocks = <&apb1_gates 19>; |
@@ -313,7 +313,7 @@ | |||
313 | uart4: serial@01c29000 { | 313 | uart4: serial@01c29000 { |
314 | compatible = "snps,dw-apb-uart"; | 314 | compatible = "snps,dw-apb-uart"; |
315 | reg = <0x01c29000 0x400>; | 315 | reg = <0x01c29000 0x400>; |
316 | interrupts = <0 17 1>; | 316 | interrupts = <0 17 4>; |
317 | reg-shift = <2>; | 317 | reg-shift = <2>; |
318 | reg-io-width = <4>; | 318 | reg-io-width = <4>; |
319 | clocks = <&apb1_gates 20>; | 319 | clocks = <&apb1_gates 20>; |
@@ -323,7 +323,7 @@ | |||
323 | uart5: serial@01c29400 { | 323 | uart5: serial@01c29400 { |
324 | compatible = "snps,dw-apb-uart"; | 324 | compatible = "snps,dw-apb-uart"; |
325 | reg = <0x01c29400 0x400>; | 325 | reg = <0x01c29400 0x400>; |
326 | interrupts = <0 18 1>; | 326 | interrupts = <0 18 4>; |
327 | reg-shift = <2>; | 327 | reg-shift = <2>; |
328 | reg-io-width = <4>; | 328 | reg-io-width = <4>; |
329 | clocks = <&apb1_gates 21>; | 329 | clocks = <&apb1_gates 21>; |
@@ -333,7 +333,7 @@ | |||
333 | uart6: serial@01c29800 { | 333 | uart6: serial@01c29800 { |
334 | compatible = "snps,dw-apb-uart"; | 334 | compatible = "snps,dw-apb-uart"; |
335 | reg = <0x01c29800 0x400>; | 335 | reg = <0x01c29800 0x400>; |
336 | interrupts = <0 19 1>; | 336 | interrupts = <0 19 4>; |
337 | reg-shift = <2>; | 337 | reg-shift = <2>; |
338 | reg-io-width = <4>; | 338 | reg-io-width = <4>; |
339 | clocks = <&apb1_gates 22>; | 339 | clocks = <&apb1_gates 22>; |
@@ -343,7 +343,7 @@ | |||
343 | uart7: serial@01c29c00 { | 343 | uart7: serial@01c29c00 { |
344 | compatible = "snps,dw-apb-uart"; | 344 | compatible = "snps,dw-apb-uart"; |
345 | reg = <0x01c29c00 0x400>; | 345 | reg = <0x01c29c00 0x400>; |
346 | interrupts = <0 20 1>; | 346 | interrupts = <0 20 4>; |
347 | reg-shift = <2>; | 347 | reg-shift = <2>; |
348 | reg-io-width = <4>; | 348 | reg-io-width = <4>; |
349 | clocks = <&apb1_gates 23>; | 349 | clocks = <&apb1_gates 23>; |
@@ -353,7 +353,7 @@ | |||
353 | i2c0: i2c@01c2ac00 { | 353 | i2c0: i2c@01c2ac00 { |
354 | compatible = "allwinner,sun4i-i2c"; | 354 | compatible = "allwinner,sun4i-i2c"; |
355 | reg = <0x01c2ac00 0x400>; | 355 | reg = <0x01c2ac00 0x400>; |
356 | interrupts = <0 7 1>; | 356 | interrupts = <0 7 4>; |
357 | clocks = <&apb1_gates 0>; | 357 | clocks = <&apb1_gates 0>; |
358 | clock-frequency = <100000>; | 358 | clock-frequency = <100000>; |
359 | status = "disabled"; | 359 | status = "disabled"; |
@@ -362,7 +362,7 @@ | |||
362 | i2c1: i2c@01c2b000 { | 362 | i2c1: i2c@01c2b000 { |
363 | compatible = "allwinner,sun4i-i2c"; | 363 | compatible = "allwinner,sun4i-i2c"; |
364 | reg = <0x01c2b000 0x400>; | 364 | reg = <0x01c2b000 0x400>; |
365 | interrupts = <0 8 1>; | 365 | interrupts = <0 8 4>; |
366 | clocks = <&apb1_gates 1>; | 366 | clocks = <&apb1_gates 1>; |
367 | clock-frequency = <100000>; | 367 | clock-frequency = <100000>; |
368 | status = "disabled"; | 368 | status = "disabled"; |
@@ -371,7 +371,7 @@ | |||
371 | i2c2: i2c@01c2b400 { | 371 | i2c2: i2c@01c2b400 { |
372 | compatible = "allwinner,sun4i-i2c"; | 372 | compatible = "allwinner,sun4i-i2c"; |
373 | reg = <0x01c2b400 0x400>; | 373 | reg = <0x01c2b400 0x400>; |
374 | interrupts = <0 9 1>; | 374 | interrupts = <0 9 4>; |
375 | clocks = <&apb1_gates 2>; | 375 | clocks = <&apb1_gates 2>; |
376 | clock-frequency = <100000>; | 376 | clock-frequency = <100000>; |
377 | status = "disabled"; | 377 | status = "disabled"; |
@@ -380,7 +380,7 @@ | |||
380 | i2c3: i2c@01c2b800 { | 380 | i2c3: i2c@01c2b800 { |
381 | compatible = "allwinner,sun4i-i2c"; | 381 | compatible = "allwinner,sun4i-i2c"; |
382 | reg = <0x01c2b800 0x400>; | 382 | reg = <0x01c2b800 0x400>; |
383 | interrupts = <0 88 1>; | 383 | interrupts = <0 88 4>; |
384 | clocks = <&apb1_gates 3>; | 384 | clocks = <&apb1_gates 3>; |
385 | clock-frequency = <100000>; | 385 | clock-frequency = <100000>; |
386 | status = "disabled"; | 386 | status = "disabled"; |
@@ -389,7 +389,7 @@ | |||
389 | i2c4: i2c@01c2bc00 { | 389 | i2c4: i2c@01c2bc00 { |
390 | compatible = "allwinner,sun4i-i2c"; | 390 | compatible = "allwinner,sun4i-i2c"; |
391 | reg = <0x01c2bc00 0x400>; | 391 | reg = <0x01c2bc00 0x400>; |
392 | interrupts = <0 89 1>; | 392 | interrupts = <0 89 4>; |
393 | clocks = <&apb1_gates 15>; | 393 | clocks = <&apb1_gates 15>; |
394 | clock-frequency = <100000>; | 394 | clock-frequency = <100000>; |
395 | status = "disabled"; | 395 | status = "disabled"; |
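
Note on the sun6i-a31 and sun7i-a20 hunks above: the third cell of each interrupt specifier changes from 1 to 4. In the GIC binding that cell is the trigger type, and 4 selects level-high triggering, which is what these SoC peripherals are expected to use, whereas 1 would request rising-edge triggering. For reference, the standard flag values, mirroring the kernel's include/dt-bindings/interrupt-controller/irq.h:

    /* Trigger-type values for the third interrupt-specifier cell. */
    #define IRQ_TYPE_NONE          0
    #define IRQ_TYPE_EDGE_RISING   1
    #define IRQ_TYPE_EDGE_FALLING  2
    #define IRQ_TYPE_EDGE_BOTH     (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)
    #define IRQ_TYPE_LEVEL_HIGH    4
    #define IRQ_TYPE_LEVEL_LOW     8
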
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 64205d453260..71e5fc7cfb18 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped | |||
@@ -58,7 +58,7 @@ | |||
58 | # define VFP_ABI_FRAME 0 | 58 | # define VFP_ABI_FRAME 0 |
59 | # define BSAES_ASM_EXTENDED_KEY | 59 | # define BSAES_ASM_EXTENDED_KEY |
60 | # define XTS_CHAIN_TWEAK | 60 | # define XTS_CHAIN_TWEAK |
61 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ | 61 | # define __ARM_ARCH__ 7 |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifdef __thumb__ | 64 | #ifdef __thumb__ |
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index f3d96d932573..be068db960ee 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl | |||
@@ -701,7 +701,7 @@ $code.=<<___; | |||
701 | # define VFP_ABI_FRAME 0 | 701 | # define VFP_ABI_FRAME 0 |
702 | # define BSAES_ASM_EXTENDED_KEY | 702 | # define BSAES_ASM_EXTENDED_KEY |
703 | # define XTS_CHAIN_TWEAK | 703 | # define XTS_CHAIN_TWEAK |
704 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ | 704 | # define __ARM_ARCH__ 7 |
705 | #endif | 705 | #endif |
706 | 706 | ||
707 | #ifdef __thumb__ | 707 | #ifdef __thumb__ |
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 3c597c222ef2..fbeb39c869e9 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t); | |||
329 | */ | 329 | */ |
330 | #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) | 330 | #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) |
331 | #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) | 331 | #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) |
332 | #define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) | 332 | #define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) |
333 | #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) | 333 | #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) |
334 | #define iounmap __arm_iounmap | 334 | #define iounmap __arm_iounmap |
335 | 335 | ||
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 9ecccc865046..8756e4bcdba0 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -100,23 +100,19 @@ | |||
100 | #define TASK_UNMAPPED_BASE UL(0x00000000) | 100 | #define TASK_UNMAPPED_BASE UL(0x00000000) |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | #ifndef PHYS_OFFSET | ||
104 | #define PHYS_OFFSET UL(CONFIG_DRAM_BASE) | ||
105 | #endif | ||
106 | |||
107 | #ifndef END_MEM | 103 | #ifndef END_MEM |
108 | #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) | 104 | #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) |
109 | #endif | 105 | #endif |
110 | 106 | ||
111 | #ifndef PAGE_OFFSET | 107 | #ifndef PAGE_OFFSET |
112 | #define PAGE_OFFSET (PHYS_OFFSET) | 108 | #define PAGE_OFFSET PLAT_PHYS_OFFSET |
113 | #endif | 109 | #endif |
114 | 110 | ||
115 | /* | 111 | /* |
116 | * The module can be at any place in ram in nommu mode. | 112 | * The module can be at any place in ram in nommu mode. |
117 | */ | 113 | */ |
118 | #define MODULES_END (END_MEM) | 114 | #define MODULES_END (END_MEM) |
119 | #define MODULES_VADDR (PHYS_OFFSET) | 115 | #define MODULES_VADDR PAGE_OFFSET |
120 | 116 | ||
121 | #define XIP_VIRT_ADDR(physaddr) (physaddr) | 117 | #define XIP_VIRT_ADDR(physaddr) (physaddr) |
122 | 118 | ||
@@ -157,6 +153,16 @@ | |||
157 | #endif | 153 | #endif |
158 | #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) | 154 | #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) |
159 | 155 | ||
156 | /* | ||
157 | * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical | ||
158 | * memory. This is used for XIP and NoMMU kernels, or by kernels which | ||
159 | * have their own mach/memory.h. Assembly code must always use | ||
160 | * PLAT_PHYS_OFFSET and not PHYS_OFFSET. | ||
161 | */ | ||
162 | #ifndef PLAT_PHYS_OFFSET | ||
163 | #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) | ||
164 | #endif | ||
165 | |||
160 | #ifndef __ASSEMBLY__ | 166 | #ifndef __ASSEMBLY__ |
161 | 167 | ||
162 | /* | 168 | /* |
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) | |||
239 | 245 | ||
240 | #else | 246 | #else |
241 | 247 | ||
248 | #define PHYS_OFFSET PLAT_PHYS_OFFSET | ||
249 | |||
242 | static inline phys_addr_t __virt_to_phys(unsigned long x) | 250 | static inline phys_addr_t __virt_to_phys(unsigned long x) |
243 | { | 251 | { |
244 | return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; | 252 | return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; |
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) | |||
251 | 259 | ||
252 | #endif | 260 | #endif |
253 | #endif | 261 | #endif |
254 | #endif /* __ASSEMBLY__ */ | ||
255 | |||
256 | #ifndef PHYS_OFFSET | ||
257 | #ifdef PLAT_PHYS_OFFSET | ||
258 | #define PHYS_OFFSET PLAT_PHYS_OFFSET | ||
259 | #else | ||
260 | #define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) | ||
261 | #endif | ||
262 | #endif | ||
263 | |||
264 | #ifndef __ASSEMBLY__ | ||
265 | 262 | ||
266 | /* | 263 | /* |
267 | * PFNs are used to describe any physical page; this means | 264 | * PFNs are used to describe any physical page; this means |
@@ -350,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
350 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET | 347 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET |
351 | 348 | ||
352 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 349 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
353 | #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) | 350 | #define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ |
351 | && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) ) | ||
354 | 352 | ||
355 | #endif | 353 | #endif |
356 | 354 | ||
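
The memory.h rework above splits the physical-memory base into PLAT_PHYS_OFFSET (a build-time constant that assembly and nommu code must use, matching the head.S and head-nommu.S hunks below) and PHYS_OFFSET, and it also tightens virt_addr_valid(): an address inside [PAGE_OFFSET, high_memory) must now additionally map to a pfn_valid() page frame, so lowmem addresses that fall into a hole in the physical map are rejected. A small user-space model of the strengthened check, with illustrative addresses only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_OFFSET  0xc0000000UL   /* illustrative values, not a real board */
    #define PHYS_OFFSET  0x80000000UL
    static const uintptr_t high_memory = 0xf0000000UL;

    /* Stand-in for pfn_valid(): models a memory map with a hole. */
    static bool pfn_valid(unsigned long pfn)
    {
        return pfn < 0xa0000 || pfn >= 0xb0000;
    }

    /* Model of the new check: in the linear map *and* backed by a valid page. */
    static bool virt_addr_valid(uintptr_t kaddr)
    {
        unsigned long pfn = (kaddr - PAGE_OFFSET + PHYS_OFFSET) >> PAGE_SHIFT;

        return kaddr >= PAGE_OFFSET && kaddr < high_memory && pfn_valid(pfn);
    }

    int main(void)
    {
        printf("%d %d\n", virt_addr_valid(0xc0100000UL),
               virt_addr_valid(0xe1000000UL));   /* the second hits the hole */
        return 0;
    }
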
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 75579a9d6f76..3759cacdd7f8 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h | |||
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
117 | return __set_phys_to_machine(pfn, mfn); | 117 | return __set_phys_to_machine(pfn, mfn); |
118 | } | 118 | } |
119 | 119 | ||
120 | #define xen_remap(cookie, size) ioremap_cached((cookie), (size)); | 120 | #define xen_remap(cookie, size) ioremap_cache((cookie), (size)); |
121 | 121 | ||
122 | #endif /* _ASM_ARM_XEN_PAGE_H */ | 122 | #endif /* _ASM_ARM_XEN_PAGE_H */ |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 14235ba64a90..716249cc2ee1 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -68,7 +68,7 @@ ENTRY(stext) | |||
68 | 68 | ||
69 | #ifdef CONFIG_ARM_MPU | 69 | #ifdef CONFIG_ARM_MPU |
70 | /* Calculate the size of a region covering just the kernel */ | 70 | /* Calculate the size of a region covering just the kernel */ |
71 | ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET | 71 | ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET |
72 | ldr r6, =(_end) @ Cover whole kernel | 72 | ldr r6, =(_end) @ Cover whole kernel |
73 | sub r6, r6, r5 @ Minimum size of region to map | 73 | sub r6, r6, r5 @ Minimum size of region to map |
74 | clz r6, r6 @ Region size must be 2^N... | 74 | clz r6, r6 @ Region size must be 2^N... |
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu) | |||
213 | set_region_nr r0, #MPU_RAM_REGION | 213 | set_region_nr r0, #MPU_RAM_REGION |
214 | isb | 214 | isb |
215 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ | 215 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ |
216 | ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET | 216 | ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET |
217 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) | 217 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) |
218 | 218 | ||
219 | setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled | 219 | setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 11d59b32fb8d..32f317e5828a 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -110,7 +110,7 @@ ENTRY(stext) | |||
110 | sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) | 110 | sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) |
111 | add r8, r8, r4 @ PHYS_OFFSET | 111 | add r8, r8, r4 @ PHYS_OFFSET |
112 | #else | 112 | #else |
113 | ldr r8, =PHYS_OFFSET @ always constant in this case | 113 | ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case |
114 | #endif | 114 | #endif |
115 | 115 | ||
116 | /* | 116 | /* |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 94f6b05f9e24..92f7b15dd221 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu); | |||
404 | unsigned long get_wchan(struct task_struct *p) | 404 | unsigned long get_wchan(struct task_struct *p) |
405 | { | 405 | { |
406 | struct stackframe frame; | 406 | struct stackframe frame; |
407 | unsigned long stack_page; | ||
407 | int count = 0; | 408 | int count = 0; |
408 | if (!p || p == current || p->state == TASK_RUNNING) | 409 | if (!p || p == current || p->state == TASK_RUNNING) |
409 | return 0; | 410 | return 0; |
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p) | |||
412 | frame.sp = thread_saved_sp(p); | 413 | frame.sp = thread_saved_sp(p); |
413 | frame.lr = 0; /* recovered from the stack */ | 414 | frame.lr = 0; /* recovered from the stack */ |
414 | frame.pc = thread_saved_pc(p); | 415 | frame.pc = thread_saved_pc(p); |
416 | stack_page = (unsigned long)task_stack_page(p); | ||
415 | do { | 417 | do { |
416 | int ret = unwind_frame(&frame); | 418 | if (frame.sp < stack_page || |
417 | if (ret < 0) | 419 | frame.sp >= stack_page + THREAD_SIZE || |
420 | unwind_frame(&frame) < 0) | ||
418 | return 0; | 421 | return 0; |
419 | if (!in_sched_functions(frame.pc)) | 422 | if (!in_sched_functions(frame.pc)) |
420 | return frame.pc; | 423 | return frame.pc; |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6a1b8a81b1ae..987a7f5bce5f 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p) | |||
873 | machine_desc = mdesc; | 873 | machine_desc = mdesc; |
874 | machine_name = mdesc->name; | 874 | machine_name = mdesc->name; |
875 | 875 | ||
876 | setup_dma_zone(mdesc); | ||
877 | |||
878 | if (mdesc->reboot_mode != REBOOT_HARD) | 876 | if (mdesc->reboot_mode != REBOOT_HARD) |
879 | reboot_mode = mdesc->reboot_mode; | 877 | reboot_mode = mdesc->reboot_mode; |
880 | 878 | ||
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p) | |||
892 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | 890 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); |
893 | 891 | ||
894 | early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); | 892 | early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); |
893 | setup_dma_zone(mdesc); | ||
895 | sanity_check_meminfo(); | 894 | sanity_check_meminfo(); |
896 | arm_memblock_init(&meminfo, mdesc); | 895 | arm_memblock_init(&meminfo, mdesc); |
897 | 896 | ||
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 00f79e59985b..af4e8c8a5422 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c | |||
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame) | |||
31 | high = ALIGN(low, THREAD_SIZE); | 31 | high = ALIGN(low, THREAD_SIZE); |
32 | 32 | ||
33 | /* check current frame pointer is within bounds */ | 33 | /* check current frame pointer is within bounds */ |
34 | if (fp < (low + 12) || fp + 4 >= high) | 34 | if (fp < low + 12 || fp > high - 4) |
35 | return -EINVAL; | 35 | return -EINVAL; |
36 | 36 | ||
37 | /* restore the registers from the stack frame */ | 37 | /* restore the registers from the stack frame */ |
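
The unwind_frame() fix above is an off-by-one in the upper bound: a frame pointer in the last word of the stack (fp == high - 4) is legal, because the saved words the unwinder reads span fp - 12 up to fp in the frame layout assumed here, yet the old test fp + 4 >= high rejected it. The corrected predicate, written out as a sketch:

    #include <stdbool.h>

    /* Accept fp only if the four saved words at fp-12 .. fp all lie inside
     * the [low, high) stack window; equivalent to the new test above. */
    static bool fp_in_bounds(unsigned long fp, unsigned long low, unsigned long high)
    {
        return fp >= low + 12 && fp <= high - 4;
    }
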
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index dbf0923e8d76..6eda3bf85c52 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -36,7 +36,13 @@ | |||
36 | #include <asm/system_misc.h> | 36 | #include <asm/system_misc.h> |
37 | #include <asm/opcodes.h> | 37 | #include <asm/opcodes.h> |
38 | 38 | ||
39 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 39 | static const char *handler[]= { |
40 | "prefetch abort", | ||
41 | "data abort", | ||
42 | "address exception", | ||
43 | "interrupt", | ||
44 | "undefined instruction", | ||
45 | }; | ||
40 | 46 | ||
41 | void *vectors_page; | 47 | void *vectors_page; |
42 | 48 | ||
@@ -509,9 +515,10 @@ static inline int | |||
509 | __do_cache_op(unsigned long start, unsigned long end) | 515 | __do_cache_op(unsigned long start, unsigned long end) |
510 | { | 516 | { |
511 | int ret; | 517 | int ret; |
512 | unsigned long chunk = PAGE_SIZE; | ||
513 | 518 | ||
514 | do { | 519 | do { |
520 | unsigned long chunk = min(PAGE_SIZE, end - start); | ||
521 | |||
515 | if (signal_pending(current)) { | 522 | if (signal_pending(current)) { |
516 | struct thread_info *ti = current_thread_info(); | 523 | struct thread_info *ti = current_thread_info(); |
517 | 524 | ||
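
Besides adding "undefined instruction" to the handler name table, the traps.c hunk clamps each cache-maintenance step to the remaining length, so the last iteration of __do_cache_op() no longer runs past end when the range is not a whole number of pages. The clamping pattern as a stand-alone sketch (do_one_range() is a placeholder, not a kernel API):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Placeholder for the per-chunk work; here it only reports the range. */
    static int do_one_range(unsigned long start, unsigned long end)
    {
        printf("op on [%#lx, %#lx)\n", start, end);
        return 0;
    }

    static int walk_by_page(unsigned long start, unsigned long end)
    {
        while (start < end) {
            unsigned long chunk = end - start;

            if (chunk > PAGE_SIZE)          /* clamp: never overshoot 'end' */
                chunk = PAGE_SIZE;
            if (do_one_range(start, start + chunk))
                return -1;
            start += chunk;
        }
        return 0;
    }

    int main(void)
    {
        return walk_by_page(0x1000, 0x2800);   /* 1.5 pages: last chunk is 0x800 */
    }
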
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index c46eccbbd512..78829c513fdc 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -487,7 +487,7 @@ int __init da8xx_register_emac(void) | |||
487 | 487 | ||
488 | static struct resource da830_mcasp1_resources[] = { | 488 | static struct resource da830_mcasp1_resources[] = { |
489 | { | 489 | { |
490 | .name = "mcasp1", | 490 | .name = "mpu", |
491 | .start = DAVINCI_DA830_MCASP1_REG_BASE, | 491 | .start = DAVINCI_DA830_MCASP1_REG_BASE, |
492 | .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, | 492 | .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, |
493 | .flags = IORESOURCE_MEM, | 493 | .flags = IORESOURCE_MEM, |
@@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = { | |||
515 | 515 | ||
516 | static struct resource da850_mcasp_resources[] = { | 516 | static struct resource da850_mcasp_resources[] = { |
517 | { | 517 | { |
518 | .name = "mcasp", | 518 | .name = "mpu", |
519 | .start = DAVINCI_DA8XX_MCASP0_REG_BASE, | 519 | .start = DAVINCI_DA8XX_MCASP0_REG_BASE, |
520 | .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, | 520 | .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, |
521 | .flags = IORESOURCE_MEM, | 521 | .flags = IORESOURCE_MEM, |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index ef9ff1fb6f52..6117fc644188 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = { | |||
641 | 641 | ||
642 | static struct resource dm355_asp1_resources[] = { | 642 | static struct resource dm355_asp1_resources[] = { |
643 | { | 643 | { |
644 | .name = "mpu", | ||
644 | .start = DAVINCI_ASP1_BASE, | 645 | .start = DAVINCI_ASP1_BASE, |
645 | .end = DAVINCI_ASP1_BASE + SZ_8K - 1, | 646 | .end = DAVINCI_ASP1_BASE + SZ_8K - 1, |
646 | .flags = IORESOURCE_MEM, | 647 | .flags = IORESOURCE_MEM, |
@@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = { | |||
906 | int __init dm355_gpio_register(void) | 907 | int __init dm355_gpio_register(void) |
907 | { | 908 | { |
908 | return davinci_gpio_register(dm355_gpio_resources, | 909 | return davinci_gpio_register(dm355_gpio_resources, |
909 | sizeof(dm355_gpio_resources), | 910 | ARRAY_SIZE(dm355_gpio_resources), |
910 | &dm355_gpio_platform_data); | 911 | &dm355_gpio_platform_data); |
911 | } | 912 | } |
912 | /*----------------------------------------------------------------------*/ | 913 | /*----------------------------------------------------------------------*/ |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 1511a0680f9a..d7c6f85d3fc9 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = { | |||
720 | int __init dm365_gpio_register(void) | 720 | int __init dm365_gpio_register(void) |
721 | { | 721 | { |
722 | return davinci_gpio_register(dm365_gpio_resources, | 722 | return davinci_gpio_register(dm365_gpio_resources, |
723 | sizeof(dm365_gpio_resources), | 723 | ARRAY_SIZE(dm365_gpio_resources), |
724 | &dm365_gpio_platform_data); | 724 | &dm365_gpio_platform_data); |
725 | } | 725 | } |
726 | 726 | ||
@@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = { | |||
942 | 942 | ||
943 | static struct resource dm365_asp_resources[] = { | 943 | static struct resource dm365_asp_resources[] = { |
944 | { | 944 | { |
945 | .name = "mpu", | ||
945 | .start = DAVINCI_DM365_ASP0_BASE, | 946 | .start = DAVINCI_DM365_ASP0_BASE, |
946 | .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, | 947 | .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, |
947 | .flags = IORESOURCE_MEM, | 948 | .flags = IORESOURCE_MEM, |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 143a3217e8ef..3ce47997bb46 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = { | |||
572 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ | 572 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ |
573 | static struct resource dm644x_asp_resources[] = { | 573 | static struct resource dm644x_asp_resources[] = { |
574 | { | 574 | { |
575 | .name = "mpu", | ||
575 | .start = DAVINCI_ASP0_BASE, | 576 | .start = DAVINCI_ASP0_BASE, |
576 | .end = DAVINCI_ASP0_BASE + SZ_8K - 1, | 577 | .end = DAVINCI_ASP0_BASE + SZ_8K - 1, |
577 | .flags = IORESOURCE_MEM, | 578 | .flags = IORESOURCE_MEM, |
@@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = { | |||
792 | int __init dm644x_gpio_register(void) | 793 | int __init dm644x_gpio_register(void) |
793 | { | 794 | { |
794 | return davinci_gpio_register(dm644_gpio_resources, | 795 | return davinci_gpio_register(dm644_gpio_resources, |
795 | sizeof(dm644_gpio_resources), | 796 | ARRAY_SIZE(dm644_gpio_resources), |
796 | &dm644_gpio_platform_data); | 797 | &dm644_gpio_platform_data); |
797 | } | 798 | } |
798 | /*----------------------------------------------------------------------*/ | 799 | /*----------------------------------------------------------------------*/ |
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 2a73f299c1d0..0e81fea65e7f 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
@@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = { | |||
621 | 621 | ||
622 | static struct resource dm646x_mcasp0_resources[] = { | 622 | static struct resource dm646x_mcasp0_resources[] = { |
623 | { | 623 | { |
624 | .name = "mcasp0", | 624 | .name = "mpu", |
625 | .start = DAVINCI_DM646X_MCASP0_REG_BASE, | 625 | .start = DAVINCI_DM646X_MCASP0_REG_BASE, |
626 | .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, | 626 | .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, |
627 | .flags = IORESOURCE_MEM, | 627 | .flags = IORESOURCE_MEM, |
@@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = { | |||
641 | 641 | ||
642 | static struct resource dm646x_mcasp1_resources[] = { | 642 | static struct resource dm646x_mcasp1_resources[] = { |
643 | { | 643 | { |
644 | .name = "mcasp1", | 644 | .name = "mpu", |
645 | .start = DAVINCI_DM646X_MCASP1_REG_BASE, | 645 | .start = DAVINCI_DM646X_MCASP1_REG_BASE, |
646 | .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, | 646 | .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, |
647 | .flags = IORESOURCE_MEM, | 647 | .flags = IORESOURCE_MEM, |
@@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { | |||
769 | int __init dm646x_gpio_register(void) | 769 | int __init dm646x_gpio_register(void) |
770 | { | 770 | { |
771 | return davinci_gpio_register(dm646x_gpio_resources, | 771 | return davinci_gpio_register(dm646x_gpio_resources, |
772 | sizeof(dm646x_gpio_resources), | 772 | ARRAY_SIZE(dm646x_gpio_resources), |
773 | &dm646x_gpio_platform_data); | 773 | &dm646x_gpio_platform_data); |
774 | } | 774 | } |
775 | /*----------------------------------------------------------------------*/ | 775 | /*----------------------------------------------------------------------*/ |
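
The davinci_gpio_register() calls in dm355, dm365, dm644x and dm646x above were passing sizeof(...), the array size in bytes, where the function expects a resource count; ARRAY_SIZE() gives the element count. A minimal stand-alone illustration of the difference (struct resource here is a stripped-down stand-in):

    #include <stdio.h>

    /* Element count, as in the kernel macro minus the __must_be_array() check. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    struct resource { unsigned long start, end, flags; };

    int main(void)
    {
        struct resource res[2] = { { 0 } };

        /* sizeof() is bytes, so it over-reports the number of resources. */
        printf("sizeof = %zu, ARRAY_SIZE = %zu\n",
               sizeof(res), ARRAY_SIZE(res));
        return 0;
    }
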
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 9ee78f7b4990..782f6c71fa0a 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c | |||
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = { | |||
96 | void __init footbridge_timer_init(void) | 96 | void __init footbridge_timer_init(void) |
97 | { | 97 | { |
98 | struct clock_event_device *ce = &ckevt_dc21285; | 98 | struct clock_event_device *ce = &ckevt_dc21285; |
99 | unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16); | ||
99 | 100 | ||
100 | clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); | 101 | clocksource_register_hz(&cksrc_dc21285, rate); |
101 | 102 | ||
102 | setup_irq(ce->irq, &footbridge_timer_irq); | 103 | setup_irq(ce->irq, &footbridge_timer_irq); |
103 | 104 | ||
104 | ce->cpumask = cpumask_of(smp_processor_id()); | 105 | ce->cpumask = cpumask_of(smp_processor_id()); |
105 | clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff); | 106 | clockevents_config_and_register(ce, rate, 0x4, 0xffffff); |
106 | } | 107 | } |
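
In the footbridge timer init above, the clocksource was already registered with a rounded rate, (mem_fclk_21285 + 8) / 16, while the clockevent was registered with the raw mem_fclk_21285; the patch computes the rounded value once with DIV_ROUND_CLOSEST() and uses it for both. For non-negative operands the helper reduces to the familiar add-half-then-divide form:

    #include <stdio.h>

    /* DIV_ROUND_CLOSEST for non-negative values (the kernel macro also
     * handles negative operands, which never occur for a clock rate). */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long fclk = 48000000UL;    /* example rate only */

        printf("%lu\n", DIV_ROUND_CLOSEST(fclk, 16));
        printf("%lu\n", (fclk + 8) / 16);   /* identical result */
        return 0;
    }
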
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index b3d7e5634b83..bd3bf66ce344 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -17,12 +17,15 @@ | |||
17 | #include <linux/clkdev.h> | 17 | #include <linux/clkdev.h> |
18 | #include <linux/clocksource.h> | 18 | #include <linux/clocksource.h> |
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/input.h> | ||
20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
21 | #include <linux/irqchip.h> | 22 | #include <linux/irqchip.h> |
23 | #include <linux/mailbox.h> | ||
22 | #include <linux/of.h> | 24 | #include <linux/of.h> |
23 | #include <linux/of_irq.h> | 25 | #include <linux/of_irq.h> |
24 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
25 | #include <linux/of_address.h> | 27 | #include <linux/of_address.h> |
28 | #include <linux/reboot.h> | ||
26 | #include <linux/amba/bus.h> | 29 | #include <linux/amba/bus.h> |
27 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
28 | 31 | ||
@@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = { | |||
130 | .name = "cpuidle-calxeda", | 133 | .name = "cpuidle-calxeda", |
131 | }; | 134 | }; |
132 | 135 | ||
136 | static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data) | ||
137 | { | ||
138 | u32 key = *(u32 *)data; | ||
139 | |||
140 | if (event != 0x1000) | ||
141 | return 0; | ||
142 | |||
143 | if (key == KEY_POWER) | ||
144 | orderly_poweroff(false); | ||
145 | else if (key == 0xffff) | ||
146 | ctrl_alt_del(); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | static struct notifier_block hb_keys_nb = { | ||
151 | .notifier_call = hb_keys_notifier, | ||
152 | }; | ||
153 | |||
133 | static void __init highbank_init(void) | 154 | static void __init highbank_init(void) |
134 | { | 155 | { |
135 | struct device_node *np; | 156 | struct device_node *np; |
@@ -145,6 +166,8 @@ static void __init highbank_init(void) | |||
145 | bus_register_notifier(&platform_bus_type, &highbank_platform_nb); | 166 | bus_register_notifier(&platform_bus_type, &highbank_platform_nb); |
146 | bus_register_notifier(&amba_bustype, &highbank_amba_nb); | 167 | bus_register_notifier(&amba_bustype, &highbank_amba_nb); |
147 | 168 | ||
169 | pl320_ipc_register_notifier(&hb_keys_nb); | ||
170 | |||
148 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 171 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
149 | 172 | ||
150 | if (psci_ops.cpu_suspend) | 173 | if (psci_ops.cpu_suspend) |
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 19f1652e94cf..8d972ff18c56 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c | |||
@@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)") | |||
131 | .dt_compat = omap3_gp_boards_compat, | 131 | .dt_compat = omap3_gp_boards_compat, |
132 | .restart = omap3xxx_restart, | 132 | .restart = omap3xxx_restart, |
133 | MACHINE_END | 133 | MACHINE_END |
134 | |||
135 | static const char *am3517_boards_compat[] __initdata = { | ||
136 | "ti,am3517", | ||
137 | NULL, | ||
138 | }; | ||
139 | |||
140 | DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)") | ||
141 | .reserve = omap_reserve, | ||
142 | .map_io = omap3_map_io, | ||
143 | .init_early = am35xx_init_early, | ||
144 | .init_irq = omap_intc_of_init, | ||
145 | .handle_irq = omap3_intc_handle_irq, | ||
146 | .init_machine = omap_generic_init, | ||
147 | .init_late = omap3_init_late, | ||
148 | .init_time = omap3_gptimer_timer_init, | ||
149 | .dt_compat = am3517_boards_compat, | ||
150 | .restart = omap3xxx_restart, | ||
151 | MACHINE_END | ||
134 | #endif | 152 | #endif |
135 | 153 | ||
136 | #ifdef CONFIG_SOC_AM33XX | 154 | #ifdef CONFIG_SOC_AM33XX |
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 4ec8d82b0492..44a59c3abfb0 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c | |||
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void) | |||
242 | 242 | ||
243 | static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) | 243 | static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) |
244 | { | 244 | { |
245 | int res; | ||
246 | |||
245 | /* LCD enable GPIO */ | 247 | /* LCD enable GPIO */ |
246 | ldp_lcd_pdata.enable_gpio = gpio + 7; | 248 | ldp_lcd_pdata.enable_gpio = gpio + 7; |
247 | 249 | ||
248 | /* Backlight enable GPIO */ | 250 | /* Backlight enable GPIO */ |
249 | ldp_lcd_pdata.backlight_gpio = gpio + 15; | 251 | ldp_lcd_pdata.backlight_gpio = gpio + 15; |
250 | 252 | ||
253 | res = platform_device_register(&ldp_lcd_device); | ||
254 | if (res) | ||
255 | pr_err("Unable to register LCD: %d\n", res); | ||
256 | |||
251 | return 0; | 257 | return 0; |
252 | } | 258 | } |
253 | 259 | ||
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { | |||
346 | 352 | ||
347 | static struct platform_device *ldp_devices[] __initdata = { | 353 | static struct platform_device *ldp_devices[] __initdata = { |
348 | &ldp_gpio_keys_device, | 354 | &ldp_gpio_keys_device, |
349 | &ldp_lcd_device, | ||
350 | }; | 355 | }; |
351 | 356 | ||
352 | #ifdef CONFIG_OMAP_MUX | 357 | #ifdef CONFIG_OMAP_MUX |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 58347bb874a0..4cf165502b35 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { | |||
101 | { "dss_hdmi", "omapdss_hdmi", -1 }, | 101 | { "dss_hdmi", "omapdss_hdmi", -1 }, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | ||
105 | { | ||
106 | u32 enable_mask, enable_shift; | ||
107 | u32 pipd_mask, pipd_shift; | ||
108 | u32 reg; | ||
109 | |||
110 | if (dsi_id == 0) { | ||
111 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; | ||
112 | enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; | ||
113 | pipd_mask = OMAP4_DSI1_PIPD_MASK; | ||
114 | pipd_shift = OMAP4_DSI1_PIPD_SHIFT; | ||
115 | } else if (dsi_id == 1) { | ||
116 | enable_mask = OMAP4_DSI2_LANEENABLE_MASK; | ||
117 | enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; | ||
118 | pipd_mask = OMAP4_DSI2_PIPD_MASK; | ||
119 | pipd_shift = OMAP4_DSI2_PIPD_SHIFT; | ||
120 | } else { | ||
121 | return -ENODEV; | ||
122 | } | ||
123 | |||
124 | reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); | ||
125 | |||
126 | reg &= ~enable_mask; | ||
127 | reg &= ~pipd_mask; | ||
128 | |||
129 | reg |= (lanes << enable_shift) & enable_mask; | ||
130 | reg |= (lanes << pipd_shift) & pipd_mask; | ||
131 | |||
132 | omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
104 | static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) | 137 | static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) |
105 | { | 138 | { |
139 | if (cpu_is_omap44xx()) | ||
140 | return omap4_dsi_mux_pads(dsi_id, lane_mask); | ||
141 | |||
106 | return 0; | 142 | return 0; |
107 | } | 143 | } |
108 | 144 | ||
109 | static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) | 145 | static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) |
110 | { | 146 | { |
147 | if (cpu_is_omap44xx()) | ||
148 | omap4_dsi_mux_pads(dsi_id, 0); | ||
111 | } | 149 | } |
112 | 150 | ||
113 | static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) | 151 | static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) |
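
omap4_dsi_mux_pads() above is a read-modify-write of the DSIPHY pad-control register: it selects the DSI1 or DSI2 field positions, clears the lane-enable and PIPD fields, then ORs the requested lane mask back in at both positions; disabling simply passes a zero mask. The generic field-update step it relies on, with placeholder mask/shift values rather than the OMAP4 ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Clear a bit-field, then set it from 'val'; bits of 'val' that fall
     * outside the field are dropped by the final mask. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                              uint32_t val)
    {
        reg &= ~mask;
        reg |= (val << shift) & mask;
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0xffffffffu;

        /* placeholder field: bits [7:4] */
        printf("%#x\n", (unsigned int)set_field(reg, 0x000000f0u, 4, 0x5));
        return 0;
    }
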
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 53f0735817bb..e0a398cf28d8 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c | |||
@@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev) | |||
183 | odbfd_exit1: | 183 | odbfd_exit1: |
184 | kfree(hwmods); | 184 | kfree(hwmods); |
185 | odbfd_exit: | 185 | odbfd_exit: |
186 | /* if data/we are at fault.. load up a fail handler */ | ||
187 | if (ret) | ||
188 | pdev->dev.pm_domain = &omap_device_fail_pm_domain; | ||
189 | |||
186 | return ret; | 190 | return ret; |
187 | } | 191 | } |
188 | 192 | ||
@@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev) | |||
604 | 608 | ||
605 | return pm_generic_runtime_resume(dev); | 609 | return pm_generic_runtime_resume(dev); |
606 | } | 610 | } |
611 | |||
612 | static int _od_fail_runtime_suspend(struct device *dev) | ||
613 | { | ||
614 | dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); | ||
615 | return -ENODEV; | ||
616 | } | ||
617 | |||
618 | static int _od_fail_runtime_resume(struct device *dev) | ||
619 | { | ||
620 | dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); | ||
621 | return -ENODEV; | ||
622 | } | ||
623 | |||
607 | #endif | 624 | #endif |
608 | 625 | ||
609 | #ifdef CONFIG_SUSPEND | 626 | #ifdef CONFIG_SUSPEND |
@@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev) | |||
657 | #define _od_resume_noirq NULL | 674 | #define _od_resume_noirq NULL |
658 | #endif | 675 | #endif |
659 | 676 | ||
677 | struct dev_pm_domain omap_device_fail_pm_domain = { | ||
678 | .ops = { | ||
679 | SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend, | ||
680 | _od_fail_runtime_resume, NULL) | ||
681 | } | ||
682 | }; | ||
683 | |||
660 | struct dev_pm_domain omap_device_pm_domain = { | 684 | struct dev_pm_domain omap_device_pm_domain = { |
661 | .ops = { | 685 | .ops = { |
662 | SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, | 686 | SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, |
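
When omap_device_build_from_dt() fails above, the device is now given omap_device_fail_pm_domain, whose runtime-PM callbacks warn and return -ENODEV, so the missing hwmod/omap_device data surfaces at the driver's first runtime-PM transition rather than as a later crash. A user-space model of the same fail-fast pattern (all names here are illustrative):

    #include <stdio.h>
    #include <errno.h>

    struct pm_ops { int (*runtime_suspend)(const char *dev); };

    static int od_fail_runtime_suspend(const char *dev)
    {
        fprintf(stderr, "%s: missing hwmod/omap_dev info\n", dev);
        return -ENODEV;
    }

    static const struct pm_ops fail_pm_ops = {
        .runtime_suspend = od_fail_runtime_suspend,
    };

    int main(void)
    {
        /* A device whose platform data could not be built gets the
         * fail-fast ops table instead of the normal one. */
        return fail_pm_ops.runtime_suspend("omap-dev") == -ENODEV ? 0 : 1;
    }
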
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h index 17ca1aec2710..78c02b355179 100644 --- a/arch/arm/mach-omap2/omap_device.h +++ b/arch/arm/mach-omap2/omap_device.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "omap_hwmod.h" | 29 | #include "omap_hwmod.h" |
30 | 30 | ||
31 | extern struct dev_pm_domain omap_device_pm_domain; | 31 | extern struct dev_pm_domain omap_device_pm_domain; |
32 | extern struct dev_pm_domain omap_device_fail_pm_domain; | ||
32 | 33 | ||
33 | /* omap_device._state values */ | 34 | /* omap_device._state values */ |
34 | #define OMAP_DEVICE_STATE_UNKNOWN 0 | 35 | #define OMAP_DEVICE_STATE_UNKNOWN 0 |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index e3f0ecaf87dd..8a1b5e0bad40 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v) | |||
399 | } | 399 | } |
400 | 400 | ||
401 | /** | 401 | /** |
402 | * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v | 402 | * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v |
403 | * @oh: struct omap_hwmod * | 403 | * @oh: struct omap_hwmod * |
404 | * @v: pointer to register contents to modify | 404 | * @v: pointer to register contents to modify |
405 | * | 405 | * |
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) | |||
427 | } | 427 | } |
428 | 428 | ||
429 | /** | 429 | /** |
430 | * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v | ||
431 | * @oh: struct omap_hwmod * | ||
432 | * @v: pointer to register contents to modify | ||
433 | * | ||
434 | * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon | ||
435 | * error or 0 upon success. | ||
436 | */ | ||
437 | static int _clear_softreset(struct omap_hwmod *oh, u32 *v) | ||
438 | { | ||
439 | u32 softrst_mask; | ||
440 | |||
441 | if (!oh->class->sysc || | ||
442 | !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) | ||
443 | return -EINVAL; | ||
444 | |||
445 | if (!oh->class->sysc->sysc_fields) { | ||
446 | WARN(1, | ||
447 | "omap_hwmod: %s: sysc_fields absent for sysconfig class\n", | ||
448 | oh->name); | ||
449 | return -EINVAL; | ||
450 | } | ||
451 | |||
452 | softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); | ||
453 | |||
454 | *v &= ~softrst_mask; | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | /** | ||
430 | * _wait_softreset_complete - wait for an OCP softreset to complete | 460 | * _wait_softreset_complete - wait for an OCP softreset to complete |
431 | * @oh: struct omap_hwmod * to wait on | 461 | * @oh: struct omap_hwmod * to wait on |
432 | * | 462 | * |
@@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh) | |||
785 | pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", | 815 | pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", |
786 | oh->name, os->clk); | 816 | oh->name, os->clk); |
787 | ret = -EINVAL; | 817 | ret = -EINVAL; |
818 | continue; | ||
788 | } | 819 | } |
789 | os->_clk = c; | 820 | os->_clk = c; |
790 | /* | 821 | /* |
@@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh) | |||
821 | pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", | 852 | pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", |
822 | oh->name, oc->clk); | 853 | oh->name, oc->clk); |
823 | ret = -EINVAL; | 854 | ret = -EINVAL; |
855 | continue; | ||
824 | } | 856 | } |
825 | oc->_clk = c; | 857 | oc->_clk = c; |
826 | /* | 858 | /* |
@@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh) | |||
1911 | ret = _set_softreset(oh, &v); | 1943 | ret = _set_softreset(oh, &v); |
1912 | if (ret) | 1944 | if (ret) |
1913 | goto dis_opt_clks; | 1945 | goto dis_opt_clks; |
1946 | |||
1947 | _write_sysconfig(v, oh); | ||
1948 | ret = _clear_softreset(oh, &v); | ||
1949 | if (ret) | ||
1950 | goto dis_opt_clks; | ||
1951 | |||
1914 | _write_sysconfig(v, oh); | 1952 | _write_sysconfig(v, oh); |
1915 | 1953 | ||
1916 | if (oh->class->sysc->srst_udelay) | 1954 | if (oh->class->sysc->srst_udelay) |
@@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh) | |||
2326 | return 0; | 2364 | return 0; |
2327 | } | 2365 | } |
2328 | 2366 | ||
2367 | static int of_dev_find_hwmod(struct device_node *np, | ||
2368 | struct omap_hwmod *oh) | ||
2369 | { | ||
2370 | int count, i, res; | ||
2371 | const char *p; | ||
2372 | |||
2373 | count = of_property_count_strings(np, "ti,hwmods"); | ||
2374 | if (count < 1) | ||
2375 | return -ENODEV; | ||
2376 | |||
2377 | for (i = 0; i < count; i++) { | ||
2378 | res = of_property_read_string_index(np, "ti,hwmods", | ||
2379 | i, &p); | ||
2380 | if (res) | ||
2381 | continue; | ||
2382 | if (!strcmp(p, oh->name)) { | ||
2383 | pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n", | ||
2384 | np->name, i, oh->name); | ||
2385 | return i; | ||
2386 | } | ||
2387 | } | ||
2388 | |||
2389 | return -ENODEV; | ||
2390 | } | ||
2391 | |||
2329 | /** | 2392 | /** |
2330 | * of_dev_hwmod_lookup - look up needed hwmod from dt blob | 2393 | * of_dev_hwmod_lookup - look up needed hwmod from dt blob |
2331 | * @np: struct device_node * | 2394 | * @np: struct device_node * |
2332 | * @oh: struct omap_hwmod * | 2395 | * @oh: struct omap_hwmod * |
2396 | * @index: index of the entry found | ||
2397 | * @found: struct device_node * found or NULL | ||
2333 | * | 2398 | * |
2334 | * Parse the dt blob and find out needed hwmod. Recursive function is | 2399 | * Parse the dt blob and find out needed hwmod. Recursive function is |
2335 | * implemented to take care hierarchical dt blob parsing. | 2400 | * implemented to take care hierarchical dt blob parsing. |
2336 | * Return: The device node on success or NULL on failure. | 2401 | * Return: Returns 0 on success, -ENODEV when not found. |
2337 | */ | 2402 | */ |
2338 | static struct device_node *of_dev_hwmod_lookup(struct device_node *np, | 2403 | static int of_dev_hwmod_lookup(struct device_node *np, |
2339 | struct omap_hwmod *oh) | 2404 | struct omap_hwmod *oh, |
2405 | int *index, | ||
2406 | struct device_node **found) | ||
2340 | { | 2407 | { |
2341 | struct device_node *np0 = NULL, *np1 = NULL; | 2408 | struct device_node *np0 = NULL; |
2342 | const char *p; | 2409 | int res; |
2410 | |||
2411 | res = of_dev_find_hwmod(np, oh); | ||
2412 | if (res >= 0) { | ||
2413 | *found = np; | ||
2414 | *index = res; | ||
2415 | return 0; | ||
2416 | } | ||
2343 | 2417 | ||
2344 | for_each_child_of_node(np, np0) { | 2418 | for_each_child_of_node(np, np0) { |
2345 | if (of_find_property(np0, "ti,hwmods", NULL)) { | 2419 | struct device_node *fc; |
2346 | p = of_get_property(np0, "ti,hwmods", NULL); | 2420 | int i; |
2347 | if (!strcmp(p, oh->name)) | 2421 | |
2348 | return np0; | 2422 | res = of_dev_hwmod_lookup(np0, oh, &i, &fc); |
2349 | np1 = of_dev_hwmod_lookup(np0, oh); | 2423 | if (res == 0) { |
2350 | if (np1) | 2424 | *found = fc; |
2351 | return np1; | 2425 | *index = i; |
2426 | return 0; | ||
2352 | } | 2427 | } |
2353 | } | 2428 | } |
2354 | return NULL; | 2429 | |
2430 | *found = NULL; | ||
2431 | *index = 0; | ||
2432 | |||
2433 | return -ENODEV; | ||
2355 | } | 2434 | } |
2356 | 2435 | ||
2357 | /** | 2436 | /** |
2358 | * _init_mpu_rt_base - populate the virtual address for a hwmod | 2437 | * _init_mpu_rt_base - populate the virtual address for a hwmod |
2359 | * @oh: struct omap_hwmod * to locate the virtual address | 2438 | * @oh: struct omap_hwmod * to locate the virtual address |
2360 | * @data: (unused, caller should pass NULL) | 2439 | * @data: (unused, caller should pass NULL) |
2440 | * @index: index of the reg entry iospace in device tree | ||
2361 | * @np: struct device_node * of the IP block's device node in the DT data | 2441 | * @np: struct device_node * of the IP block's device node in the DT data |
2362 | * | 2442 | * |
2363 | * Cache the virtual address used by the MPU to access this IP block's | 2443 | * Cache the virtual address used by the MPU to access this IP block's |
@@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np, | |||
2368 | * -ENXIO on absent or invalid register target address space. | 2448 | * -ENXIO on absent or invalid register target address space. |
2369 | */ | 2449 | */ |
2370 | static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, | 2450 | static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, |
2371 | struct device_node *np) | 2451 | int index, struct device_node *np) |
2372 | { | 2452 | { |
2373 | struct omap_hwmod_addr_space *mem; | 2453 | struct omap_hwmod_addr_space *mem; |
2374 | void __iomem *va_start = NULL; | 2454 | void __iomem *va_start = NULL; |
@@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, | |||
2390 | if (!np) | 2470 | if (!np) |
2391 | return -ENXIO; | 2471 | return -ENXIO; |
2392 | 2472 | ||
2393 | va_start = of_iomap(np, oh->mpu_rt_idx); | 2473 | va_start = of_iomap(np, index + oh->mpu_rt_idx); |
2394 | } else { | 2474 | } else { |
2395 | va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); | 2475 | va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); |
2396 | } | 2476 | } |
2397 | 2477 | ||
2398 | if (!va_start) { | 2478 | if (!va_start) { |
2399 | pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); | 2479 | if (mem) |
2480 | pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); | ||
2481 | else | ||
2482 | pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n", | ||
2483 | oh->name, index, np->full_name); | ||
2400 | return -ENXIO; | 2484 | return -ENXIO; |
2401 | } | 2485 | } |
2402 | 2486 | ||
@@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, | |||
2422 | */ | 2506 | */ |
2423 | static int __init _init(struct omap_hwmod *oh, void *data) | 2507 | static int __init _init(struct omap_hwmod *oh, void *data) |
2424 | { | 2508 | { |
2425 | int r; | 2509 | int r, index; |
2426 | struct device_node *np = NULL; | 2510 | struct device_node *np = NULL; |
2427 | 2511 | ||
2428 | if (oh->_state != _HWMOD_STATE_REGISTERED) | 2512 | if (oh->_state != _HWMOD_STATE_REGISTERED) |
2429 | return 0; | 2513 | return 0; |
2430 | 2514 | ||
2431 | if (of_have_populated_dt()) | 2515 | if (of_have_populated_dt()) { |
2432 | np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); | 2516 | struct device_node *bus; |
2517 | |||
2518 | bus = of_find_node_by_name(NULL, "ocp"); | ||
2519 | if (!bus) | ||
2520 | return -ENODEV; | ||
2521 | |||
2522 | r = of_dev_hwmod_lookup(bus, oh, &index, &np); | ||
2523 | if (r) | ||
2524 | pr_debug("omap_hwmod: %s missing dt data\n", oh->name); | ||
2525 | else if (np && index) | ||
2526 | pr_warn("omap_hwmod: %s using broken dt data from %s\n", | ||
2527 | oh->name, np->name); | ||
2528 | } | ||
2433 | 2529 | ||
2434 | if (oh->class->sysc) { | 2530 | if (oh->class->sysc) { |
2435 | r = _init_mpu_rt_base(oh, NULL, np); | 2531 | r = _init_mpu_rt_base(oh, NULL, index, np); |
2436 | if (r < 0) { | 2532 | if (r < 0) { |
2437 | WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", | 2533 | WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", |
2438 | oh->name); | 2534 | oh->name); |
@@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh) | |||
3169 | goto error; | 3265 | goto error; |
3170 | _write_sysconfig(v, oh); | 3266 | _write_sysconfig(v, oh); |
3171 | 3267 | ||
3268 | ret = _clear_softreset(oh, &v); | ||
3269 | if (ret) | ||
3270 | goto error; | ||
3271 | _write_sysconfig(v, oh); | ||
3272 | |||
3172 | error: | 3273 | error: |
3173 | return ret; | 3274 | return ret; |
3174 | } | 3275 | } |
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 56cebb05509e..d23c77fadb31 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | |||
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = { | |||
796 | 796 | ||
797 | /* gpmc */ | 797 | /* gpmc */ |
798 | static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { | 798 | static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { |
799 | { .irq = 20 }, | 799 | { .irq = 20 + OMAP_INTC_START, }, |
800 | { .irq = -1 } | 800 | { .irq = -1 } |
801 | }; | 801 | }; |
802 | 802 | ||
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = { | |||
841 | }; | 841 | }; |
842 | 842 | ||
843 | static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { | 843 | static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { |
844 | { .irq = 52 }, | 844 | { .irq = 52 + OMAP_INTC_START, }, |
845 | { .irq = -1 } | 845 | { .irq = -1 } |
846 | }; | 846 | }; |
847 | 847 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 9e56fabd7fa3..4c3b1e6df508 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { | |||
1943 | .syss_offs = 0x0014, | 1943 | .syss_offs = 0x0014, |
1944 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | | 1944 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | |
1945 | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | | 1945 | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | |
1946 | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), | 1946 | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | |
1947 | SYSS_HAS_RESET_STATUS), | ||
1947 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | | 1948 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | |
1948 | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), | 1949 | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), |
1949 | .sysc_fields = &omap_hwmod_sysc_type1, | 1950 | .sysc_fields = &omap_hwmod_sysc_type1, |
@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { | |||
2021 | * hence HWMOD_SWSUP_MSTANDBY | 2022 | * hence HWMOD_SWSUP_MSTANDBY |
2022 | */ | 2023 | */ |
2023 | 2024 | ||
2024 | /* | 2025 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, |
2025 | * During system boot; If the hwmod framework resets the module | ||
2026 | * the module will have smart idle settings; which can lead to deadlock | ||
2027 | * (above Errata Id:i660); so, dont reset the module during boot; | ||
2028 | * Use HWMOD_INIT_NO_RESET. | ||
2029 | */ | ||
2030 | |||
2031 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | | ||
2032 | HWMOD_INIT_NO_RESET, | ||
2033 | }; | 2026 | }; |
2034 | 2027 | ||
2035 | /* | 2028 | /* |
@@ -2172,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = { | |||
2172 | }; | 2165 | }; |
2173 | 2166 | ||
2174 | static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { | 2167 | static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { |
2175 | { .irq = 20 }, | 2168 | { .irq = 20 + OMAP_INTC_START, }, |
2176 | { .irq = -1 } | 2169 | { .irq = -1 } |
2177 | }; | 2170 | }; |
2178 | 2171 | ||
@@ -3006,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = { | |||
3006 | 2999 | ||
3007 | static struct omap_hwmod omap3xxx_mmu_isp_hwmod; | 3000 | static struct omap_hwmod omap3xxx_mmu_isp_hwmod; |
3008 | static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { | 3001 | static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { |
3009 | { .irq = 24 }, | 3002 | { .irq = 24 + OMAP_INTC_START, }, |
3010 | { .irq = -1 } | 3003 | { .irq = -1 } |
3011 | }; | 3004 | }; |
3012 | 3005 | ||
@@ -3048,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = { | |||
3048 | 3041 | ||
3049 | static struct omap_hwmod omap3xxx_mmu_iva_hwmod; | 3042 | static struct omap_hwmod omap3xxx_mmu_iva_hwmod; |
3050 | static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { | 3043 | static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { |
3051 | { .irq = 28 }, | 3044 | { .irq = 28 + OMAP_INTC_START, }, |
3052 | { .irq = -1 } | 3045 | { .irq = -1 } |
3053 | }; | 3046 | }; |
3054 | 3047 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 1e5b12cb8246..3318cae96e7d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = { | |||
2937 | .sysc_offs = 0x0010, | 2937 | .sysc_offs = 0x0010, |
2938 | .syss_offs = 0x0014, | 2938 | .syss_offs = 0x0014, |
2939 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | | 2939 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | |
2940 | SYSC_HAS_SOFTRESET), | 2940 | SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS), |
2941 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | | 2941 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | |
2942 | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | | 2942 | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | |
2943 | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), | 2943 | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), |
@@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = { | |||
3001 | * hence HWMOD_SWSUP_MSTANDBY | 3001 | * hence HWMOD_SWSUP_MSTANDBY |
3002 | */ | 3002 | */ |
3003 | 3003 | ||
3004 | /* | 3004 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, |
3005 | * During system boot; If the hwmod framework resets the module | ||
3006 | * the module will have smart idle settings; which can lead to deadlock | ||
3007 | * (above Errata Id:i660); so, dont reset the module during boot; | ||
3008 | * Use HWMOD_INIT_NO_RESET. | ||
3009 | */ | ||
3010 | |||
3011 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | | ||
3012 | HWMOD_INIT_NO_RESET, | ||
3013 | }; | 3005 | }; |
3014 | 3006 | ||
3015 | /* | 3007 | /* |
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index 9e08d6994a0b..e297d6231c3a 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c | |||
@@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = { | |||
1544 | .rev_offs = 0x0000, | 1544 | .rev_offs = 0x0000, |
1545 | .sysc_offs = 0x0010, | 1545 | .sysc_offs = 0x0010, |
1546 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | | 1546 | .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | |
1547 | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), | 1547 | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | |
1548 | SYSC_HAS_RESET_STATUS), | ||
1548 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | | 1549 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | |
1549 | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | | 1550 | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | |
1550 | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), | 1551 | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), |
@@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = { | |||
1598 | * hence HWMOD_SWSUP_MSTANDBY | 1599 | * hence HWMOD_SWSUP_MSTANDBY |
1599 | */ | 1600 | */ |
1600 | 1601 | ||
1601 | /* | 1602 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, |
1602 | * During system boot; If the hwmod framework resets the module | ||
1603 | * the module will have smart idle settings; which can lead to deadlock | ||
1604 | * (above Errata Id:i660); so, dont reset the module during boot; | ||
1605 | * Use HWMOD_INIT_NO_RESET. | ||
1606 | */ | ||
1607 | |||
1608 | .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | | ||
1609 | HWMOD_INIT_NO_RESET, | ||
1610 | .main_clk = "l3init_60m_fclk", | 1603 | .main_clk = "l3init_60m_fclk", |
1611 | .prcm = { | 1604 | .prcm = { |
1612 | .omap4 = { | 1605 | .omap4 = { |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index db32d5380b11..18f333c440db 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { | |||
1637 | .class = &dra7xx_uart_hwmod_class, | 1637 | .class = &dra7xx_uart_hwmod_class, |
1638 | .clkdm_name = "l4per_clkdm", | 1638 | .clkdm_name = "l4per_clkdm", |
1639 | .main_clk = "uart1_gfclk_mux", | 1639 | .main_clk = "uart1_gfclk_mux", |
1640 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 1640 | .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, |
1641 | .prcm = { | 1641 | .prcm = { |
1642 | .omap4 = { | 1642 | .omap4 = { |
1643 | .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, | 1643 | .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, |
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 2a086e8373eb..958cd6af9384 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h | |||
@@ -10,6 +10,8 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <mach/irqs.h> | ||
14 | |||
13 | #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS | 15 | #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS |
14 | 16 | ||
15 | #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS | 17 | #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS |
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 0d5dd646f61f..263b15249b5b 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <mach/regs-ost.h> | 14 | #include <mach/regs-ost.h> |
15 | #include <mach/reset.h> | 15 | #include <mach/reset.h> |
16 | #include <mach/smemc.h> | ||
16 | 17 | ||
17 | unsigned int reset_status; | 18 | unsigned int reset_status; |
18 | EXPORT_SYMBOL(reset_status); | 19 | EXPORT_SYMBOL(reset_status); |
@@ -81,6 +82,12 @@ static void do_hw_reset(void) | |||
81 | writel_relaxed(OSSR_M3, OSSR); | 82 | writel_relaxed(OSSR_M3, OSSR); |
82 | /* ... in 100 ms */ | 83 | /* ... in 100 ms */ |
83 | writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); | 84 | writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); |
85 | /* | ||
86 | * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71); | ||
87 | * we put SDRAM into self-refresh to prevent that. | ||
88 | */ | ||
89 | while (1) | ||
90 | writel_relaxed(MDREFR_SLFRSH, MDREFR); | ||
84 | } | 91 | } |
85 | 92 | ||
86 | void pxa_restart(enum reboot_mode mode, const char *cmd) | 93 | void pxa_restart(enum reboot_mode mode, const char *cmd) |
@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd) | |||
104 | break; | 111 | break; |
105 | } | 112 | } |
106 | } | 113 | } |
107 | |||
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 0206b915a6f6..ef5557b807ed 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = { | |||
425 | * Tosa Keyboard | 425 | * Tosa Keyboard |
426 | */ | 426 | */ |
427 | static const uint32_t tosakbd_keymap[] = { | 427 | static const uint32_t tosakbd_keymap[] = { |
428 | KEY(0, 2, KEY_W), | 428 | KEY(0, 1, KEY_W), |
429 | KEY(0, 6, KEY_K), | 429 | KEY(0, 5, KEY_K), |
430 | KEY(0, 7, KEY_BACKSPACE), | 430 | KEY(0, 6, KEY_BACKSPACE), |
431 | KEY(0, 8, KEY_P), | 431 | KEY(0, 7, KEY_P), |
432 | KEY(1, 1, KEY_Q), | 432 | KEY(1, 0, KEY_Q), |
433 | KEY(1, 2, KEY_E), | 433 | KEY(1, 1, KEY_E), |
434 | KEY(1, 3, KEY_T), | 434 | KEY(1, 2, KEY_T), |
435 | KEY(1, 4, KEY_Y), | 435 | KEY(1, 3, KEY_Y), |
436 | KEY(1, 6, KEY_O), | 436 | KEY(1, 5, KEY_O), |
437 | KEY(1, 7, KEY_I), | 437 | KEY(1, 6, KEY_I), |
438 | KEY(1, 8, KEY_COMMA), | 438 | KEY(1, 7, KEY_COMMA), |
439 | KEY(2, 1, KEY_A), | 439 | KEY(2, 0, KEY_A), |
440 | KEY(2, 2, KEY_D), | 440 | KEY(2, 1, KEY_D), |
441 | KEY(2, 3, KEY_G), | 441 | KEY(2, 2, KEY_G), |
442 | KEY(2, 4, KEY_U), | 442 | KEY(2, 3, KEY_U), |
443 | KEY(2, 6, KEY_L), | 443 | KEY(2, 5, KEY_L), |
444 | KEY(2, 7, KEY_ENTER), | 444 | KEY(2, 6, KEY_ENTER), |
445 | KEY(2, 8, KEY_DOT), | 445 | KEY(2, 7, KEY_DOT), |
446 | KEY(3, 1, KEY_Z), | 446 | KEY(3, 0, KEY_Z), |
447 | KEY(3, 2, KEY_C), | 447 | KEY(3, 1, KEY_C), |
448 | KEY(3, 3, KEY_V), | 448 | KEY(3, 2, KEY_V), |
449 | KEY(3, 4, KEY_J), | 449 | KEY(3, 3, KEY_J), |
450 | KEY(3, 5, TOSA_KEY_ADDRESSBOOK), | 450 | KEY(3, 4, TOSA_KEY_ADDRESSBOOK), |
451 | KEY(3, 6, TOSA_KEY_CANCEL), | 451 | KEY(3, 5, TOSA_KEY_CANCEL), |
452 | KEY(3, 7, TOSA_KEY_CENTER), | 452 | KEY(3, 6, TOSA_KEY_CENTER), |
453 | KEY(3, 8, TOSA_KEY_OK), | 453 | KEY(3, 7, TOSA_KEY_OK), |
454 | KEY(3, 9, KEY_LEFTSHIFT), | 454 | KEY(3, 8, KEY_LEFTSHIFT), |
455 | KEY(4, 1, KEY_S), | 455 | KEY(4, 0, KEY_S), |
456 | KEY(4, 2, KEY_R), | 456 | KEY(4, 1, KEY_R), |
457 | KEY(4, 3, KEY_B), | 457 | KEY(4, 2, KEY_B), |
458 | KEY(4, 4, KEY_N), | 458 | KEY(4, 3, KEY_N), |
459 | KEY(4, 5, TOSA_KEY_CALENDAR), | 459 | KEY(4, 4, TOSA_KEY_CALENDAR), |
460 | KEY(4, 6, TOSA_KEY_HOMEPAGE), | 460 | KEY(4, 5, TOSA_KEY_HOMEPAGE), |
461 | KEY(4, 7, KEY_LEFTCTRL), | 461 | KEY(4, 6, KEY_LEFTCTRL), |
462 | KEY(4, 8, TOSA_KEY_LIGHT), | 462 | KEY(4, 7, TOSA_KEY_LIGHT), |
463 | KEY(4, 10, KEY_RIGHTSHIFT), | 463 | KEY(4, 9, KEY_RIGHTSHIFT), |
464 | KEY(5, 1, KEY_TAB), | 464 | KEY(5, 0, KEY_TAB), |
465 | KEY(5, 2, KEY_SLASH), | 465 | KEY(5, 1, KEY_SLASH), |
466 | KEY(5, 3, KEY_H), | 466 | KEY(5, 2, KEY_H), |
467 | KEY(5, 4, KEY_M), | 467 | KEY(5, 3, KEY_M), |
468 | KEY(5, 5, TOSA_KEY_MENU), | 468 | KEY(5, 4, TOSA_KEY_MENU), |
469 | KEY(5, 7, KEY_UP), | 469 | KEY(5, 6, KEY_UP), |
470 | KEY(5, 11, TOSA_KEY_FN), | 470 | KEY(5, 10, TOSA_KEY_FN), |
471 | KEY(6, 1, KEY_X), | 471 | KEY(6, 0, KEY_X), |
472 | KEY(6, 2, KEY_F), | 472 | KEY(6, 1, KEY_F), |
473 | KEY(6, 3, KEY_SPACE), | 473 | KEY(6, 2, KEY_SPACE), |
474 | KEY(6, 4, KEY_APOSTROPHE), | 474 | KEY(6, 3, KEY_APOSTROPHE), |
475 | KEY(6, 5, TOSA_KEY_MAIL), | 475 | KEY(6, 4, TOSA_KEY_MAIL), |
476 | KEY(6, 6, KEY_LEFT), | 476 | KEY(6, 5, KEY_LEFT), |
477 | KEY(6, 7, KEY_DOWN), | 477 | KEY(6, 6, KEY_DOWN), |
478 | KEY(6, 8, KEY_RIGHT), | 478 | KEY(6, 7, KEY_RIGHT), |
479 | }; | 479 | }; |
480 | 480 | ||
481 | static struct matrix_keymap_data tosakbd_keymap_data = { | 481 | static struct matrix_keymap_data tosakbd_keymap_data = { |
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 7eb9a10fc1af..2fddf38192df 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | |||
@@ -8,8 +8,6 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/clk-provider.h> | ||
12 | #include <linux/irqchip.h> | ||
13 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
14 | 12 | ||
15 | #include <asm/mach/arch.h> | 13 | #include <asm/mach/arch.h> |
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void) | |||
48 | panic("SoC is not S3C64xx!"); | 46 | panic("SoC is not S3C64xx!"); |
49 | } | 47 | } |
50 | 48 | ||
51 | static void __init s3c64xx_dt_init_irq(void) | ||
52 | { | ||
53 | of_clk_init(NULL); | ||
54 | samsung_wdt_reset_of_init(); | ||
55 | irqchip_init(); | ||
56 | }; | ||
57 | |||
58 | static void __init s3c64xx_dt_init_machine(void) | 49 | static void __init s3c64xx_dt_init_machine(void) |
59 | { | 50 | { |
51 | samsung_wdt_reset_of_init(); | ||
60 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 52 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
61 | } | 53 | } |
62 | 54 | ||
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)") | |||
79 | /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ | 71 | /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ |
80 | .dt_compat = s3c64xx_dt_compat, | 72 | .dt_compat = s3c64xx_dt_compat, |
81 | .map_io = s3c64xx_dt_map_io, | 73 | .map_io = s3c64xx_dt_map_io, |
82 | .init_irq = s3c64xx_dt_init_irq, | ||
83 | .init_machine = s3c64xx_dt_init_machine, | 74 | .init_machine = s3c64xx_dt_init_machine, |
84 | .restart = s3c64xx_dt_restart, | 75 | .restart = s3c64xx_dt_restart, |
85 | MACHINE_END | 76 | MACHINE_END |
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 958e3cbf0ac2..8ea87bd45c33 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c | |||
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = { | |||
483 | .id = 0, | 483 | .id = 0, |
484 | .dev = { | 484 | .dev = { |
485 | .platform_data = &lcdc0_info, | 485 | .platform_data = &lcdc0_info, |
486 | .coherent_dma_mask = ~0, | 486 | .coherent_dma_mask = DMA_BIT_MASK(32), |
487 | }, | 487 | }, |
488 | }; | 488 | }; |
489 | 489 | ||
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = { | |||
580 | .id = 1, | 580 | .id = 1, |
581 | .dev = { | 581 | .dev = { |
582 | .platform_data = &hdmi_lcdc_info, | 582 | .platform_data = &hdmi_lcdc_info, |
583 | .coherent_dma_mask = ~0, | 583 | .coherent_dma_mask = DMA_BIT_MASK(32), |
584 | }, | 584 | }, |
585 | }; | 585 | }; |
586 | 586 | ||
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { | |||
614 | REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), | 614 | REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), |
615 | }; | 615 | }; |
616 | 616 | ||
617 | /* Fixed 5.0V regulator used by LCD backlight */ | ||
618 | static struct regulator_consumer_supply fixed5v0_power_consumers[] = { | ||
619 | REGULATOR_SUPPLY("power", "pwm-backlight.0"), | ||
620 | }; | ||
621 | |||
617 | /* Fixed 3.3V regulator to be used by SDHI0 */ | 622 | /* Fixed 3.3V regulator to be used by SDHI0 */ |
618 | static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { | 623 | static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { |
619 | REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), | 624 | REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), |
@@ -1196,6 +1201,8 @@ static void __init eva_init(void) | |||
1196 | 1201 | ||
1197 | regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, | 1202 | regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, |
1198 | ARRAY_SIZE(fixed3v3_power_consumers), 3300000); | 1203 | ARRAY_SIZE(fixed3v3_power_consumers), 3300000); |
1204 | regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, | ||
1205 | ARRAY_SIZE(fixed5v0_power_consumers), 5000000); | ||
1199 | 1206 | ||
1200 | pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); | 1207 | pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); |
1201 | pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); | 1208 | pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); |
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 38611526fe9a..3c4995aebd22 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c | |||
@@ -679,7 +679,7 @@ static void __init bockw_init(void) | |||
679 | .id = i, | 679 | .id = i, |
680 | .data = &rsnd_card_info[i], | 680 | .data = &rsnd_card_info[i], |
681 | .size_data = sizeof(struct asoc_simple_card_info), | 681 | .size_data = sizeof(struct asoc_simple_card_info), |
682 | .dma_mask = ~0, | 682 | .dma_mask = DMA_BIT_MASK(32), |
683 | }; | 683 | }; |
684 | 684 | ||
685 | platform_device_register_full(&cardinfo); | 685 | platform_device_register_full(&cardinfo); |
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index fe689b7fdc9e..bc40b853ffd3 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c | |||
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = { | |||
334 | .resource = lcdc_resources, | 334 | .resource = lcdc_resources, |
335 | .dev = { | 335 | .dev = { |
336 | .platform_data = &lcdc_info, | 336 | .platform_data = &lcdc_info, |
337 | .coherent_dma_mask = ~0, | 337 | .coherent_dma_mask = DMA_BIT_MASK(32), |
338 | }, | 338 | }, |
339 | }; | 339 | }; |
340 | 340 | ||
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index a8d3ce646fb9..e0406fd37390 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c | |||
@@ -245,7 +245,9 @@ static void __init lager_init(void) | |||
245 | { | 245 | { |
246 | lager_add_standard_devices(); | 246 | lager_add_standard_devices(); |
247 | 247 | ||
248 | phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); | 248 | if (IS_ENABLED(CONFIG_PHYLIB)) |
249 | phy_register_fixup_for_id("r8a7790-ether-ff:01", | ||
250 | lager_ksz8041_fixup); | ||
249 | } | 251 | } |
250 | 252 | ||
251 | static const char * const lager_boards_compat_dt[] __initconst = { | 253 | static const char * const lager_boards_compat_dt[] __initconst = { |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index af06753eb809..e721d2ccceae 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = { | |||
409 | .resource = lcdc_resources, | 409 | .resource = lcdc_resources, |
410 | .dev = { | 410 | .dev = { |
411 | .platform_data = &lcdc_info, | 411 | .platform_data = &lcdc_info, |
412 | .coherent_dma_mask = ~0, | 412 | .coherent_dma_mask = DMA_BIT_MASK(32), |
413 | }, | 413 | }, |
414 | }; | 414 | }; |
415 | 415 | ||
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = { | |||
499 | .id = 1, | 499 | .id = 1, |
500 | .dev = { | 500 | .dev = { |
501 | .platform_data = &hdmi_lcdc_info, | 501 | .platform_data = &hdmi_lcdc_info, |
502 | .coherent_dma_mask = ~0, | 502 | .coherent_dma_mask = DMA_BIT_MASK(32), |
503 | }, | 503 | }, |
504 | }; | 504 | }; |
505 | 505 | ||
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c index 9a4e910c3796..3a9c1f1c219d 100644 --- a/arch/arm/mach-tegra/fuse.c +++ b/arch/arm/mach-tegra/fuse.c | |||
@@ -198,10 +198,12 @@ void __init tegra_init_fuse(void) | |||
198 | switch (tegra_chip_id) { | 198 | switch (tegra_chip_id) { |
199 | case TEGRA20: | 199 | case TEGRA20: |
200 | tegra20_fuse_init_randomness(); | 200 | tegra20_fuse_init_randomness(); |
201 | break; | ||
201 | case TEGRA30: | 202 | case TEGRA30: |
202 | case TEGRA114: | 203 | case TEGRA114: |
203 | default: | 204 | default: |
204 | tegra30_fuse_init_randomness(); | 205 | tegra30_fuse_init_randomness(); |
206 | break; | ||
205 | } | 207 | } |
206 | 208 | ||
207 | pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", | 209 | pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f6b6bfa88ecf..f61a5707823a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = { | |||
158 | }; | 158 | }; |
159 | EXPORT_SYMBOL(arm_coherent_dma_ops); | 159 | EXPORT_SYMBOL(arm_coherent_dma_ops); |
160 | 160 | ||
161 | static int __dma_supported(struct device *dev, u64 mask, bool warn) | ||
162 | { | ||
163 | unsigned long max_dma_pfn; | ||
164 | |||
165 | /* | ||
166 | * If the mask allows for more memory than we can address, | ||
167 | * and we actually have that much memory, then we must | ||
168 | * indicate that DMA to this device is not supported. | ||
169 | */ | ||
170 | if (sizeof(mask) != sizeof(dma_addr_t) && | ||
171 | mask > (dma_addr_t)~0 && | ||
172 | dma_to_pfn(dev, ~0) < max_pfn) { | ||
173 | if (warn) { | ||
174 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", | ||
175 | mask); | ||
176 | dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); | ||
177 | } | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); | ||
182 | |||
183 | /* | ||
184 | * Translate the device's DMA mask to a PFN limit. This | ||
185 | * PFN number includes the page which we can DMA to. | ||
186 | */ | ||
187 | if (dma_to_pfn(dev, mask) < max_dma_pfn) { | ||
188 | if (warn) | ||
189 | dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", | ||
190 | mask, | ||
191 | dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, | ||
192 | max_dma_pfn + 1); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | return 1; | ||
197 | } | ||
198 | |||
161 | static u64 get_coherent_dma_mask(struct device *dev) | 199 | static u64 get_coherent_dma_mask(struct device *dev) |
162 | { | 200 | { |
163 | u64 mask = (u64)DMA_BIT_MASK(32); | 201 | u64 mask = (u64)DMA_BIT_MASK(32); |
164 | 202 | ||
165 | if (dev) { | 203 | if (dev) { |
166 | unsigned long max_dma_pfn; | ||
167 | |||
168 | mask = dev->coherent_dma_mask; | 204 | mask = dev->coherent_dma_mask; |
169 | 205 | ||
170 | /* | 206 | /* |
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
176 | return 0; | 212 | return 0; |
177 | } | 213 | } |
178 | 214 | ||
179 | max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); | 215 | if (!__dma_supported(dev, mask, true)) |
180 | |||
181 | /* | ||
182 | * If the mask allows for more memory than we can address, | ||
183 | * and we actually have that much memory, then fail the | ||
184 | * allocation. | ||
185 | */ | ||
186 | if (sizeof(mask) != sizeof(dma_addr_t) && | ||
187 | mask > (dma_addr_t)~0 && | ||
188 | dma_to_pfn(dev, ~0) > max_dma_pfn) { | ||
189 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", | ||
190 | mask); | ||
191 | dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Now check that the mask, when translated to a PFN, | ||
197 | * fits within the allowable addresses which we can | ||
198 | * allocate. | ||
199 | */ | ||
200 | if (dma_to_pfn(dev, mask) < max_dma_pfn) { | ||
201 | dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", | ||
202 | mask, | ||
203 | dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, | ||
204 | arm_dma_pfn_limit + 1); | ||
205 | return 0; | 216 | return 0; |
206 | } | ||
207 | } | 217 | } |
208 | 218 | ||
209 | return mask; | 219 | return mask; |
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
1032 | */ | 1042 | */ |
1033 | int dma_supported(struct device *dev, u64 mask) | 1043 | int dma_supported(struct device *dev, u64 mask) |
1034 | { | 1044 | { |
1035 | unsigned long limit; | 1045 | return __dma_supported(dev, mask, false); |
1036 | |||
1037 | /* | ||
1038 | * If the mask allows for more memory than we can address, | ||
1039 | * and we actually have that much memory, then we must | ||
1040 | * indicate that DMA to this device is not supported. | ||
1041 | */ | ||
1042 | if (sizeof(mask) != sizeof(dma_addr_t) && | ||
1043 | mask > (dma_addr_t)~0 && | ||
1044 | dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) | ||
1045 | return 0; | ||
1046 | |||
1047 | /* | ||
1048 | * Translate the device's DMA mask to a PFN limit. This | ||
1049 | * PFN number includes the page which we can DMA to. | ||
1050 | */ | ||
1051 | limit = dma_to_pfn(dev, mask); | ||
1052 | |||
1053 | if (limit < arm_dma_pfn_limit) | ||
1054 | return 0; | ||
1055 | |||
1056 | return 1; | ||
1057 | } | 1046 | } |
1058 | EXPORT_SYMBOL(dma_supported); | 1047 | EXPORT_SYMBOL(dma_supported); |
1059 | 1048 | ||
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6d5ba9afb16a..3387e60e4ea3 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
175 | unsigned long i; | 175 | unsigned long i; |
176 | if (cache_is_vipt_nonaliasing()) { | 176 | if (cache_is_vipt_nonaliasing()) { |
177 | for (i = 0; i < (1 << compound_order(page)); i++) { | 177 | for (i = 0; i < (1 << compound_order(page)); i++) { |
178 | void *addr = kmap_atomic(page); | 178 | void *addr = kmap_atomic(page + i); |
179 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 179 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
180 | kunmap_atomic(addr); | 180 | kunmap_atomic(addr); |
181 | } | 181 | } |
182 | } else { | 182 | } else { |
183 | for (i = 0; i < (1 << compound_order(page)); i++) { | 183 | for (i = 0; i < (1 << compound_order(page)); i++) { |
184 | void *addr = kmap_high_get(page); | 184 | void *addr = kmap_high_get(page + i); |
185 | if (addr) { | 185 | if (addr) { |
186 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 186 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
187 | kunmap_high(page); | 187 | kunmap_high(page + i); |
188 | } | 188 | } |
189 | } | 189 | } |
190 | } | 190 | } |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3e8f106ee5fe..1f7b19a47060 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) | |||
229 | #ifdef CONFIG_ZONE_DMA | 229 | #ifdef CONFIG_ZONE_DMA |
230 | if (mdesc->dma_zone_size) { | 230 | if (mdesc->dma_zone_size) { |
231 | arm_dma_zone_size = mdesc->dma_zone_size; | 231 | arm_dma_zone_size = mdesc->dma_zone_size; |
232 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; | 232 | arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; |
233 | } else | 233 | } else |
234 | arm_dma_limit = 0xffffffff; | 234 | arm_dma_limit = 0xffffffff; |
235 | arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; | 235 | arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 83e4f959ee47..85501238b425 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, | |||
96 | struct remap_data *info = data; | 96 | struct remap_data *info = data; |
97 | struct page *page = info->pages[info->index++]; | 97 | struct page *page = info->pages[info->index++]; |
98 | unsigned long pfn = page_to_pfn(page); | 98 | unsigned long pfn = page_to_pfn(page); |
99 | pte_t pte = pfn_pte(pfn, info->prot); | 99 | pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); |
100 | 100 | ||
101 | if (map_foreign_page(pfn, info->fgmfn, info->domid)) | 101 | if (map_foreign_page(pfn, info->fgmfn, info->domid)) |
102 | return -EFAULT; | 102 | return -EFAULT; |
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void) | |||
224 | } | 224 | } |
225 | if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) | 225 | if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) |
226 | return 0; | 226 | return 0; |
227 | xen_hvm_resume_frames = res.start >> PAGE_SHIFT; | 227 | xen_hvm_resume_frames = res.start; |
228 | xen_events_irq = irq_of_parse_and_map(node, 0); | 228 | xen_events_irq = irq_of_parse_and_map(node, 0); |
229 | pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", | 229 | pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", |
230 | version, xen_events_irq, xen_hvm_resume_frames); | 230 | version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); |
231 | xen_domain_type = XEN_HVM_DOMAIN; | 231 | xen_domain_type = XEN_HVM_DOMAIN; |
232 | 232 | ||
233 | xen_setup_features(); | 233 | xen_setup_features(); |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 88c8b6c1341a..6d4dd22ee4b7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -159,8 +159,7 @@ config NR_CPUS | |||
159 | range 2 32 | 159 | range 2 32 |
160 | depends on SMP | 160 | depends on SMP |
161 | # These have to remain sorted largest to smallest | 161 | # These have to remain sorted largest to smallest |
162 | default "8" if ARCH_XGENE | 162 | default "8" |
163 | default "4" | ||
164 | 163 | ||
165 | config HOTPLUG_CPU | 164 | config HOTPLUG_CPU |
166 | bool "Support for hot-pluggable CPUs" | 165 | bool "Support for hot-pluggable CPUs" |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4cc813eddacb..572769727227 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot | |||
229 | extern void __iounmap(volatile void __iomem *addr); | 229 | extern void __iounmap(volatile void __iomem *addr); |
230 | extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); | 230 | extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); |
231 | 231 | ||
232 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) | 232 | #define PROT_DEFAULT (pgprot_default | PTE_DIRTY) |
233 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 233 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
234 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) | 234 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) |
235 | #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) | 235 | #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 755f86143320..b1d2e26c3c88 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -43,7 +43,7 @@ | |||
43 | * Section | 43 | * Section |
44 | */ | 44 | */ |
45 | #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) | 45 | #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) |
46 | #define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) | 46 | #define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58) |
47 | #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ | 47 | #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ |
48 | #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ | 48 | #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ |
49 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 49 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index 2820f1a6eebe..dde3fc9c49f0 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h | |||
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
23 | unsigned long offset, size_t size, enum dma_data_direction dir, | 23 | unsigned long offset, size_t size, enum dma_data_direction dir, |
24 | struct dma_attrs *attrs) | 24 | struct dma_attrs *attrs) |
25 | { | 25 | { |
26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
27 | } | 26 | } |
28 | 27 | ||
29 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 28 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
30 | size_t size, enum dma_data_direction dir, | 29 | size_t size, enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | 30 | struct dma_attrs *attrs) |
32 | { | 31 | { |
33 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
34 | } | 32 | } |
35 | 33 | ||
36 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 34 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
37 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 35 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
38 | { | 36 | { |
39 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
40 | } | 37 | } |
41 | 38 | ||
42 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | 39 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, |
43 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 40 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
44 | { | 41 | { |
45 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
46 | } | 42 | } |
47 | #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ | 43 | #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7009387348b7..c68cca5c3523 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen) | |||
282 | * be used where CPUs are brought online dynamically by the kernel. | 282 | * be used where CPUs are brought online dynamically by the kernel. |
283 | */ | 283 | */ |
284 | ENTRY(secondary_entry) | 284 | ENTRY(secondary_entry) |
285 | bl __calc_phys_offset // x2=phys offset | ||
286 | bl el2_setup // Drop to EL1 | 285 | bl el2_setup // Drop to EL1 |
286 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | ||
287 | bl set_cpu_boot_mode_flag | ||
287 | b secondary_startup | 288 | b secondary_startup |
288 | ENDPROC(secondary_entry) | 289 | ENDPROC(secondary_entry) |
289 | 290 | ||
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6777a2192b83..6a8928bba03c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, | |||
214 | { | 214 | { |
215 | int err, len, type, disabled = !ctrl.enabled; | 215 | int err, len, type, disabled = !ctrl.enabled; |
216 | 216 | ||
217 | if (disabled) { | 217 | attr->disabled = disabled; |
218 | len = 0; | 218 | if (disabled) |
219 | type = HW_BREAKPOINT_EMPTY; | 219 | return 0; |
220 | } else { | 220 | |
221 | err = arch_bp_generic_fields(ctrl, &len, &type); | 221 | err = arch_bp_generic_fields(ctrl, &len, &type); |
222 | if (err) | 222 | if (err) |
223 | return err; | 223 | return err; |
224 | 224 | ||
225 | switch (note_type) { | 225 | switch (note_type) { |
226 | case NT_ARM_HW_BREAK: | 226 | case NT_ARM_HW_BREAK: |
227 | if ((type & HW_BREAKPOINT_X) != type) | 227 | if ((type & HW_BREAKPOINT_X) != type) |
228 | return -EINVAL; | ||
229 | break; | ||
230 | case NT_ARM_HW_WATCH: | ||
231 | if ((type & HW_BREAKPOINT_RW) != type) | ||
232 | return -EINVAL; | ||
233 | break; | ||
234 | default: | ||
235 | return -EINVAL; | 228 | return -EINVAL; |
236 | } | 229 | break; |
230 | case NT_ARM_HW_WATCH: | ||
231 | if ((type & HW_BREAKPOINT_RW) != type) | ||
232 | return -EINVAL; | ||
233 | break; | ||
234 | default: | ||
235 | return -EINVAL; | ||
237 | } | 236 | } |
238 | 237 | ||
239 | attr->bp_len = len; | 238 | attr->bp_len = len; |
240 | attr->bp_type = type; | 239 | attr->bp_type = type; |
241 | attr->disabled = disabled; | ||
242 | 240 | ||
243 | return 0; | 241 | return 0; |
244 | } | 242 | } |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 421b99fd635d..0f7fec52c7f8 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup) | |||
111 | bl __flush_dcache_all | 111 | bl __flush_dcache_all |
112 | mov lr, x28 | 112 | mov lr, x28 |
113 | ic iallu // I+BTB cache invalidate | 113 | ic iallu // I+BTB cache invalidate |
114 | tlbi vmalle1is // invalidate I + D TLBs | ||
114 | dsb sy | 115 | dsb sy |
115 | 116 | ||
116 | mov x0, #3 << 20 | 117 | mov x0, #3 << 20 |
117 | msr cpacr_el1, x0 // Enable FP/ASIMD | 118 | msr cpacr_el1, x0 // Enable FP/ASIMD |
118 | msr mdscr_el1, xzr // Reset mdscr_el1 | 119 | msr mdscr_el1, xzr // Reset mdscr_el1 |
119 | tlbi vmalle1is // invalidate I + D TLBs | ||
120 | /* | 120 | /* |
121 | * Memory region attributes for LPAE: | 121 | * Memory region attributes for LPAE: |
122 | * | 122 | * |
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c index 7b1f2cd85400..1f121497b517 100644 --- a/arch/avr32/boards/favr-32/setup.c +++ b/arch/avr32/boards/favr-32/setup.c | |||
@@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev) | |||
298 | */ | 298 | */ |
299 | retval = clk_round_rate(pll1, | 299 | retval = clk_round_rate(pll1, |
300 | CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16); | 300 | CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16); |
301 | if (retval < 0) | 301 | if (retval <= 0) { |
302 | retval = -EINVAL; | ||
302 | goto out_abdac; | 303 | goto out_abdac; |
304 | } | ||
303 | 305 | ||
304 | retval = clk_set_rate(pll1, retval); | 306 | retval = clk_set_rate(pll1, retval); |
305 | if (retval != 0) | 307 | if (retval != 0) |
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index d5aff36ade92..4733e38e7ae6 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig | |||
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
59 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 59 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
60 | # CONFIG_FW_LOADER is not set | 60 | # CONFIG_FW_LOADER is not set |
61 | CONFIG_MTD=y | 61 | CONFIG_MTD=y |
62 | CONFIG_MTD_PARTITIONS=y | ||
63 | CONFIG_MTD_CMDLINE_PARTS=y | 62 | CONFIG_MTD_CMDLINE_PARTS=y |
64 | CONFIG_MTD_CHAR=y | 63 | CONFIG_MTD_CHAR=y |
65 | CONFIG_MTD_BLOCK=y | 64 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index 4abcf435d599..1be0ee31bd91 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig | |||
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
61 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 61 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
62 | # CONFIG_FW_LOADER is not set | 62 | # CONFIG_FW_LOADER is not set |
63 | CONFIG_MTD=y | 63 | CONFIG_MTD=y |
64 | CONFIG_MTD_PARTITIONS=y | ||
65 | CONFIG_MTD_CMDLINE_PARTS=y | 64 | CONFIG_MTD_CMDLINE_PARTS=y |
66 | CONFIG_MTD_CHAR=y | 65 | CONFIG_MTD_CHAR=y |
67 | CONFIG_MTD_BLOCK=y | 66 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 18f3fa0470ff..796e536f7bc4 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig | |||
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
60 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 60 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
61 | # CONFIG_FW_LOADER is not set | 61 | # CONFIG_FW_LOADER is not set |
62 | CONFIG_MTD=y | 62 | CONFIG_MTD=y |
63 | CONFIG_MTD_PARTITIONS=y | ||
64 | CONFIG_MTD_CMDLINE_PARTS=y | 63 | CONFIG_MTD_CMDLINE_PARTS=y |
65 | CONFIG_MTD_CHAR=y | 64 | CONFIG_MTD_CHAR=y |
66 | CONFIG_MTD_BLOCK=y | 65 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig index 06e389cfcd12..9a57da44eb6f 100644 --- a/arch/avr32/configs/atngw100_mrmt_defconfig +++ b/arch/avr32/configs/atngw100_mrmt_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
48 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 48 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
49 | # CONFIG_FW_LOADER is not set | 49 | # CONFIG_FW_LOADER is not set |
50 | CONFIG_MTD=y | 50 | CONFIG_MTD=y |
51 | CONFIG_MTD_PARTITIONS=y | ||
52 | CONFIG_MTD_CMDLINE_PARTS=y | 51 | CONFIG_MTD_CMDLINE_PARTS=y |
53 | CONFIG_MTD_CHAR=y | 52 | CONFIG_MTD_CHAR=y |
54 | CONFIG_MTD_BLOCK=y | 53 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index 2518a1368d7c..97fe1b399b06 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig | |||
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
59 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 59 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
60 | # CONFIG_FW_LOADER is not set | 60 | # CONFIG_FW_LOADER is not set |
61 | CONFIG_MTD=y | 61 | CONFIG_MTD=y |
62 | CONFIG_MTD_PARTITIONS=y | ||
63 | CONFIG_MTD_CMDLINE_PARTS=y | 62 | CONFIG_MTD_CMDLINE_PARTS=y |
64 | CONFIG_MTD_CHAR=y | 63 | CONFIG_MTD_CHAR=y |
65 | CONFIG_MTD_BLOCK=y | 64 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig index 245ef6bd0fa6..a176d24467e9 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig | |||
@@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
62 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 62 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
63 | # CONFIG_FW_LOADER is not set | 63 | # CONFIG_FW_LOADER is not set |
64 | CONFIG_MTD=y | 64 | CONFIG_MTD=y |
65 | CONFIG_MTD_PARTITIONS=y | ||
66 | CONFIG_MTD_CMDLINE_PARTS=y | 65 | CONFIG_MTD_CMDLINE_PARTS=y |
67 | CONFIG_MTD_CHAR=y | 66 | CONFIG_MTD_CHAR=y |
68 | CONFIG_MTD_BLOCK=y | 67 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index fa6cbac6e418..d1bf6dcfc47d 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig | |||
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
61 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 61 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
62 | # CONFIG_FW_LOADER is not set | 62 | # CONFIG_FW_LOADER is not set |
63 | CONFIG_MTD=y | 63 | CONFIG_MTD=y |
64 | CONFIG_MTD_PARTITIONS=y | ||
65 | CONFIG_MTD_CMDLINE_PARTS=y | 64 | CONFIG_MTD_CMDLINE_PARTS=y |
66 | CONFIG_MTD_CHAR=y | 65 | CONFIG_MTD_CHAR=y |
67 | CONFIG_MTD_BLOCK=y | 66 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index bbd5131021a5..2813dd2b9138 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig | |||
@@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
53 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 53 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
54 | # CONFIG_FW_LOADER is not set | 54 | # CONFIG_FW_LOADER is not set |
55 | CONFIG_MTD=y | 55 | CONFIG_MTD=y |
56 | CONFIG_MTD_PARTITIONS=y | ||
57 | CONFIG_MTD_CMDLINE_PARTS=y | 56 | CONFIG_MTD_CMDLINE_PARTS=y |
58 | CONFIG_MTD_CHAR=y | 57 | CONFIG_MTD_CHAR=y |
59 | CONFIG_MTD_BLOCK=y | 58 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index c1cd726f9012..f8ff3a3baad4 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig | |||
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
42 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 42 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
43 | # CONFIG_FW_LOADER is not set | 43 | # CONFIG_FW_LOADER is not set |
44 | CONFIG_MTD=y | 44 | CONFIG_MTD=y |
45 | CONFIG_MTD_PARTITIONS=y | ||
46 | CONFIG_MTD_CMDLINE_PARTS=y | 45 | CONFIG_MTD_CMDLINE_PARTS=y |
47 | CONFIG_MTD_CHAR=y | 46 | CONFIG_MTD_CHAR=y |
48 | CONFIG_MTD_BLOCK=y | 47 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 754ae56b2767..992228e54e38 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig | |||
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
42 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 42 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
43 | # CONFIG_FW_LOADER is not set | 43 | # CONFIG_FW_LOADER is not set |
44 | CONFIG_MTD=y | 44 | CONFIG_MTD=y |
45 | CONFIG_MTD_PARTITIONS=y | ||
46 | CONFIG_MTD_CMDLINE_PARTS=y | 45 | CONFIG_MTD_CMDLINE_PARTS=y |
47 | CONFIG_MTD_CHAR=y | 46 | CONFIG_MTD_CHAR=y |
48 | CONFIG_MTD_BLOCK=y | 47 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 58589d8cc0ac..b8e698b0d1fa 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig | |||
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
54 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 54 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
55 | # CONFIG_FW_LOADER is not set | 55 | # CONFIG_FW_LOADER is not set |
56 | CONFIG_MTD=y | 56 | CONFIG_MTD=y |
57 | CONFIG_MTD_PARTITIONS=y | ||
58 | CONFIG_MTD_CMDLINE_PARTS=y | 57 | CONFIG_MTD_CMDLINE_PARTS=y |
59 | CONFIG_MTD_CHAR=y | 58 | CONFIG_MTD_CHAR=y |
60 | CONFIG_MTD_BLOCK=y | 59 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index c90fbf6d35bc..07bed3f7eb5e 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig | |||
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
58 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 58 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
59 | # CONFIG_FW_LOADER is not set | 59 | # CONFIG_FW_LOADER is not set |
60 | CONFIG_MTD=y | 60 | CONFIG_MTD=y |
61 | CONFIG_MTD_PARTITIONS=y | ||
62 | CONFIG_MTD_CMDLINE_PARTS=y | 61 | CONFIG_MTD_CMDLINE_PARTS=y |
63 | CONFIG_MTD_CHAR=y | 62 | CONFIG_MTD_CHAR=y |
64 | CONFIG_MTD_BLOCK=y | 63 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index ba7c31e269cb..18db853386c8 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig | |||
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
58 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 58 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
59 | # CONFIG_FW_LOADER is not set | 59 | # CONFIG_FW_LOADER is not set |
60 | CONFIG_MTD=y | 60 | CONFIG_MTD=y |
61 | CONFIG_MTD_PARTITIONS=y | ||
62 | CONFIG_MTD_CMDLINE_PARTS=y | 61 | CONFIG_MTD_CMDLINE_PARTS=y |
63 | CONFIG_MTD_CHAR=y | 62 | CONFIG_MTD_CHAR=y |
64 | CONFIG_MTD_BLOCK=y | 63 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig index 65de4431108c..91df6b2986be 100644 --- a/arch/avr32/configs/merisc_defconfig +++ b/arch/avr32/configs/merisc_defconfig | |||
@@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
46 | # CONFIG_FW_LOADER is not set | 46 | # CONFIG_FW_LOADER is not set |
47 | CONFIG_MTD=y | 47 | CONFIG_MTD=y |
48 | CONFIG_MTD_CONCAT=y | 48 | CONFIG_MTD_CONCAT=y |
49 | CONFIG_MTD_PARTITIONS=y | ||
50 | CONFIG_MTD_CHAR=y | 49 | CONFIG_MTD_CHAR=y |
51 | CONFIG_MTD_BLOCK=y | 50 | CONFIG_MTD_BLOCK=y |
52 | CONFIG_MTD_CFI=y | 51 | CONFIG_MTD_CFI=y |
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 0a8bfdc420e0..d630e089dd32 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig | |||
@@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
49 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 49 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
50 | # CONFIG_FW_LOADER is not set | 50 | # CONFIG_FW_LOADER is not set |
51 | CONFIG_MTD=y | 51 | CONFIG_MTD=y |
52 | CONFIG_MTD_PARTITIONS=y | ||
53 | CONFIG_MTD_CMDLINE_PARTS=y | 52 | CONFIG_MTD_CMDLINE_PARTS=y |
54 | CONFIG_MTD_CHAR=y | 53 | CONFIG_MTD_CHAR=y |
55 | CONFIG_MTD_BLOCK=y | 54 | CONFIG_MTD_BLOCK=y |
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 12f828ad5058..d0f771be9e96 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c | |||
@@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
59 | static struct irqaction timer_irqaction = { | 59 | static struct irqaction timer_irqaction = { |
60 | .handler = timer_interrupt, | 60 | .handler = timer_interrupt, |
61 | /* Oprofile uses the same irq as the timer, so allow it to be shared */ | 61 | /* Oprofile uses the same irq as the timer, so allow it to be shared */ |
62 | .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED, | 62 | .flags = IRQF_TIMER | IRQF_SHARED, |
63 | .name = "avr32_comparator", | 63 | .name = "avr32_comparator", |
64 | }; | 64 | }; |
65 | 65 | ||
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c index 32d680eb6f48..db190842b80c 100644 --- a/arch/avr32/mach-at32ap/pm.c +++ b/arch/avr32/mach-at32ap/pm.c | |||
@@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = { | |||
181 | .enter = avr32_pm_enter, | 181 | .enter = avr32_pm_enter, |
182 | }; | 182 | }; |
183 | 183 | ||
184 | static unsigned long avr32_pm_offset(void *symbol) | 184 | static unsigned long __init avr32_pm_offset(void *symbol) |
185 | { | 185 | { |
186 | extern u8 pm_exception[]; | 186 | extern u8 pm_exception[]; |
187 | 187 | ||
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index f0e2784e7cca..2f9b751878ba 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma | |||
125 | void mark_rodata_ro(void); | 125 | void mark_rodata_ro(void); |
126 | #endif | 126 | #endif |
127 | 127 | ||
128 | #ifdef CONFIG_PA8X00 | ||
129 | /* Only pa8800, pa8900 needs this */ | ||
130 | |||
131 | #include <asm/kmap_types.h> | 128 | #include <asm/kmap_types.h> |
132 | 129 | ||
133 | #define ARCH_HAS_KMAP | 130 | #define ARCH_HAS_KMAP |
134 | 131 | ||
135 | void kunmap_parisc(void *addr); | ||
136 | |||
137 | static inline void *kmap(struct page *page) | 132 | static inline void *kmap(struct page *page) |
138 | { | 133 | { |
139 | might_sleep(); | 134 | might_sleep(); |
135 | flush_dcache_page(page); | ||
140 | return page_address(page); | 136 | return page_address(page); |
141 | } | 137 | } |
142 | 138 | ||
143 | static inline void kunmap(struct page *page) | 139 | static inline void kunmap(struct page *page) |
144 | { | 140 | { |
145 | kunmap_parisc(page_address(page)); | 141 | flush_kernel_dcache_page_addr(page_address(page)); |
146 | } | 142 | } |
147 | 143 | ||
148 | static inline void *kmap_atomic(struct page *page) | 144 | static inline void *kmap_atomic(struct page *page) |
149 | { | 145 | { |
150 | pagefault_disable(); | 146 | pagefault_disable(); |
147 | flush_dcache_page(page); | ||
151 | return page_address(page); | 148 | return page_address(page); |
152 | } | 149 | } |
153 | 150 | ||
154 | static inline void __kunmap_atomic(void *addr) | 151 | static inline void __kunmap_atomic(void *addr) |
155 | { | 152 | { |
156 | kunmap_parisc(addr); | 153 | flush_kernel_dcache_page_addr(addr); |
157 | pagefault_enable(); | 154 | pagefault_enable(); |
158 | } | 155 | } |
159 | 156 | ||
160 | #define kmap_atomic_prot(page, prot) kmap_atomic(page) | 157 | #define kmap_atomic_prot(page, prot) kmap_atomic(page) |
161 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) | 158 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) |
162 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) | 159 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) |
163 | #endif | ||
164 | 160 | ||
165 | #endif /* _PARISC_CACHEFLUSH_H */ | 161 | #endif /* _PARISC_CACHEFLUSH_H */ |
166 | 162 | ||
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index b7adb2ac049c..c53fc63149e8 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h | |||
@@ -28,9 +28,8 @@ struct page; | |||
28 | 28 | ||
29 | void clear_page_asm(void *page); | 29 | void clear_page_asm(void *page); |
30 | void copy_page_asm(void *to, void *from); | 30 | void copy_page_asm(void *to, void *from); |
31 | void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); | 31 | #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) |
32 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | 32 | #define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom) |
33 | struct page *pg); | ||
34 | 33 | ||
35 | /* #define CONFIG_PARISC_TMPALIAS */ | 34 | /* #define CONFIG_PARISC_TMPALIAS */ |
36 | 35 | ||
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c035673209f7..a72545554a31 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr) | |||
388 | } | 388 | } |
389 | EXPORT_SYMBOL(flush_kernel_dcache_page_addr); | 389 | EXPORT_SYMBOL(flush_kernel_dcache_page_addr); |
390 | 390 | ||
391 | void clear_user_page(void *vto, unsigned long vaddr, struct page *page) | ||
392 | { | ||
393 | clear_page_asm(vto); | ||
394 | if (!parisc_requires_coherency()) | ||
395 | flush_kernel_dcache_page_asm(vto); | ||
396 | } | ||
397 | EXPORT_SYMBOL(clear_user_page); | ||
398 | |||
399 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | ||
400 | struct page *pg) | ||
401 | { | ||
402 | /* Copy using kernel mapping. No coherency is needed | ||
403 | (all in kmap/kunmap) on machines that don't support | ||
404 | non-equivalent aliasing. However, the `from' page | ||
405 | needs to be flushed before it can be accessed through | ||
406 | the kernel mapping. */ | ||
407 | preempt_disable(); | ||
408 | flush_dcache_page_asm(__pa(vfrom), vaddr); | ||
409 | preempt_enable(); | ||
410 | copy_page_asm(vto, vfrom); | ||
411 | if (!parisc_requires_coherency()) | ||
412 | flush_kernel_dcache_page_asm(vto); | ||
413 | } | ||
414 | EXPORT_SYMBOL(copy_user_page); | ||
415 | |||
416 | #ifdef CONFIG_PA8X00 | ||
417 | |||
418 | void kunmap_parisc(void *addr) | ||
419 | { | ||
420 | if (parisc_requires_coherency()) | ||
421 | flush_kernel_dcache_page_addr(addr); | ||
422 | } | ||
423 | EXPORT_SYMBOL(kunmap_parisc); | ||
424 | #endif | ||
425 | |||
426 | void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | 391 | void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) |
427 | { | 392 | { |
428 | unsigned long flags; | 393 | unsigned long flags; |
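
The net effect of the parisc cacheflush.h and cache.c hunks above is that the D-cache flushes formerly done in clear_user_page()/copy_user_page() and kunmap_parisc() now happen unconditionally inside kmap()/kunmap() and kmap_atomic()/__kunmap_atomic(). A minimal caller sketch, with a hypothetical helper name and assuming ordinary kernel-module context (illustrative only, not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example: copy a buffer into a page through its kernel
 * mapping.  With the change above, kmap() flushes the page's D-cache
 * before returning the kernel address and kunmap() flushes the kernel
 * alias afterwards, so the caller needs no explicit flush calls. */
static void copy_to_page_example(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);

	memcpy(dst, src, len);
	kunmap(page);
}
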
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi index bd14c00e5146..2d7cb04ac962 100644 --- a/arch/powerpc/boot/dts/mpc5121.dtsi +++ b/arch/powerpc/boot/dts/mpc5121.dtsi | |||
@@ -77,7 +77,6 @@ | |||
77 | compatible = "fsl,mpc5121-immr"; | 77 | compatible = "fsl,mpc5121-immr"; |
78 | #address-cells = <1>; | 78 | #address-cells = <1>; |
79 | #size-cells = <1>; | 79 | #size-cells = <1>; |
80 | #interrupt-cells = <2>; | ||
81 | ranges = <0x0 0x80000000 0x400000>; | 80 | ranges = <0x0 0x80000000 0x400000>; |
82 | reg = <0x80000000 0x400000>; | 81 | reg = <0x80000000 0x400000>; |
83 | bus-frequency = <66000000>; /* 66 MHz ips bus */ | 82 | bus-frequency = <66000000>; /* 66 MHz ips bus */ |
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts index 4177b62240c2..a618dfc13e4c 100644 --- a/arch/powerpc/boot/dts/mpc5125twr.dts +++ b/arch/powerpc/boot/dts/mpc5125twr.dts | |||
@@ -58,7 +58,6 @@ | |||
58 | compatible = "fsl,mpc5121-immr"; | 58 | compatible = "fsl,mpc5121-immr"; |
59 | #address-cells = <1>; | 59 | #address-cells = <1>; |
60 | #size-cells = <1>; | 60 | #size-cells = <1>; |
61 | #interrupt-cells = <2>; | ||
62 | ranges = <0x0 0x80000000 0x400000>; | 61 | ranges = <0x0 0x80000000 0x400000>; |
63 | reg = <0x80000000 0x400000>; | 62 | reg = <0x80000000 0x400000>; |
64 | bus-frequency = <66000000>; // 66 MHz ips bus | 63 | bus-frequency = <66000000>; // 66 MHz ips bus |
@@ -189,6 +188,10 @@ | |||
189 | reg = <0xA000 0x1000>; | 188 | reg = <0xA000 0x1000>; |
190 | }; | 189 | }; |
191 | 190 | ||
191 | // disable USB1 port | ||
192 | // TODO: | ||
193 | // correct pinmux config and fix USB3320 ulpi dependency | ||
194 | // before re-enabling it | ||
192 | usb@3000 { | 195 | usb@3000 { |
193 | compatible = "fsl,mpc5121-usb2-dr"; | 196 | compatible = "fsl,mpc5121-usb2-dr"; |
194 | reg = <0x3000 0x400>; | 197 | reg = <0x3000 0x400>; |
@@ -197,6 +200,7 @@ | |||
197 | interrupts = <43 0x8>; | 200 | interrupts = <43 0x8>; |
198 | dr_mode = "host"; | 201 | dr_mode = "host"; |
199 | phy_type = "ulpi"; | 202 | phy_type = "ulpi"; |
203 | status = "disabled"; | ||
200 | }; | 204 | }; |
201 | 205 | ||
202 | // 5125 PSCs are not 52xx or 5121 PSC compatible | 206 | // 5125 PSCs are not 52xx or 5121 PSC compatible |
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index 69b57daf402e..0b88c7b30bb9 100644 --- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig | |||
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y | |||
12 | CONFIG_PPC_MPC52xx=y | 12 | CONFIG_PPC_MPC52xx=y |
13 | CONFIG_PPC_MPC5200_SIMPLE=y | 13 | CONFIG_PPC_MPC5200_SIMPLE=y |
14 | # CONFIG_PPC_PMAC is not set | 14 | # CONFIG_PPC_PMAC is not set |
15 | CONFIG_PPC_BESTCOMM=y | ||
16 | CONFIG_SPARSE_IRQ=y | 15 | CONFIG_SPARSE_IRQ=y |
17 | CONFIG_PM=y | 16 | CONFIG_PM=y |
18 | # CONFIG_PCI is not set | 17 | # CONFIG_PCI is not set |
@@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y | |||
71 | CONFIG_USB_OHCI_HCD=y | 70 | CONFIG_USB_OHCI_HCD=y |
72 | CONFIG_USB_OHCI_HCD_PPC_OF_BE=y | 71 | CONFIG_USB_OHCI_HCD_PPC_OF_BE=y |
73 | CONFIG_USB_STORAGE=y | 72 | CONFIG_USB_STORAGE=y |
73 | CONFIG_DMADEVICES=y | ||
74 | CONFIG_PPC_BESTCOMM=y | ||
74 | CONFIG_EXT2_FS=y | 75 | CONFIG_EXT2_FS=y |
75 | CONFIG_EXT3_FS=y | 76 | CONFIG_EXT3_FS=y |
76 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 77 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index f3638ae0a627..104a332e79ab 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig | |||
@@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y | |||
15 | CONFIG_PPC_MPC5200_SIMPLE=y | 15 | CONFIG_PPC_MPC5200_SIMPLE=y |
16 | CONFIG_PPC_LITE5200=y | 16 | CONFIG_PPC_LITE5200=y |
17 | # CONFIG_PPC_PMAC is not set | 17 | # CONFIG_PPC_PMAC is not set |
18 | CONFIG_PPC_BESTCOMM=y | ||
19 | CONFIG_NO_HZ=y | 18 | CONFIG_NO_HZ=y |
20 | CONFIG_HIGH_RES_TIMERS=y | 19 | CONFIG_HIGH_RES_TIMERS=y |
21 | CONFIG_SPARSE_IRQ=y | 20 | CONFIG_SPARSE_IRQ=y |
@@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y | |||
59 | CONFIG_I2C_MPC=y | 58 | CONFIG_I2C_MPC=y |
60 | # CONFIG_HWMON is not set | 59 | # CONFIG_HWMON is not set |
61 | CONFIG_VIDEO_OUTPUT_CONTROL=m | 60 | CONFIG_VIDEO_OUTPUT_CONTROL=m |
61 | CONFIG_DMADEVICES=y | ||
62 | CONFIG_PPC_BESTCOMM=y | ||
62 | CONFIG_EXT2_FS=y | 63 | CONFIG_EXT2_FS=y |
63 | CONFIG_EXT3_FS=y | 64 | CONFIG_EXT3_FS=y |
64 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 65 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index 0c7de9620ea6..0d13ad7e4478 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig | |||
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y | |||
12 | CONFIG_PPC_MPC52xx=y | 12 | CONFIG_PPC_MPC52xx=y |
13 | CONFIG_PPC_MPC5200_SIMPLE=y | 13 | CONFIG_PPC_MPC5200_SIMPLE=y |
14 | # CONFIG_PPC_PMAC is not set | 14 | # CONFIG_PPC_PMAC is not set |
15 | CONFIG_PPC_BESTCOMM=y | ||
16 | CONFIG_SPARSE_IRQ=y | 15 | CONFIG_SPARSE_IRQ=y |
17 | CONFIG_PM=y | 16 | CONFIG_PM=y |
18 | # CONFIG_PCI is not set | 17 | # CONFIG_PCI is not set |
@@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y | |||
84 | CONFIG_LEDS_TRIGGER_TIMER=y | 83 | CONFIG_LEDS_TRIGGER_TIMER=y |
85 | CONFIG_RTC_CLASS=y | 84 | CONFIG_RTC_CLASS=y |
86 | CONFIG_RTC_DRV_DS1307=y | 85 | CONFIG_RTC_DRV_DS1307=y |
86 | CONFIG_DMADEVICES=y | ||
87 | CONFIG_PPC_BESTCOMM=y | ||
87 | CONFIG_EXT2_FS=y | 88 | CONFIG_EXT2_FS=y |
88 | CONFIG_EXT3_FS=y | 89 | CONFIG_EXT3_FS=y |
89 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 90 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 22e719575c60..430aa182fa1c 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig | |||
@@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y | |||
21 | CONFIG_PPC_MPC52xx=y | 21 | CONFIG_PPC_MPC52xx=y |
22 | CONFIG_PPC_MPC5200_SIMPLE=y | 22 | CONFIG_PPC_MPC5200_SIMPLE=y |
23 | # CONFIG_PPC_PMAC is not set | 23 | # CONFIG_PPC_PMAC is not set |
24 | CONFIG_PPC_BESTCOMM=y | ||
25 | CONFIG_NO_HZ=y | 24 | CONFIG_NO_HZ=y |
26 | CONFIG_HIGH_RES_TIMERS=y | 25 | CONFIG_HIGH_RES_TIMERS=y |
27 | CONFIG_HZ_100=y | 26 | CONFIG_HZ_100=y |
@@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y | |||
87 | CONFIG_USB_STORAGE=m | 86 | CONFIG_USB_STORAGE=m |
88 | CONFIG_RTC_CLASS=y | 87 | CONFIG_RTC_CLASS=y |
89 | CONFIG_RTC_DRV_PCF8563=m | 88 | CONFIG_RTC_DRV_PCF8563=m |
89 | CONFIG_DMADEVICES=y | ||
90 | CONFIG_PPC_BESTCOMM=y | ||
90 | CONFIG_EXT2_FS=m | 91 | CONFIG_EXT2_FS=m |
91 | CONFIG_EXT3_FS=m | 92 | CONFIG_EXT3_FS=m |
92 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 93 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index 716a37be16e3..7af4c5bb7c63 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig | |||
@@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y | |||
17 | CONFIG_PPC_MPC5200_SIMPLE=y | 17 | CONFIG_PPC_MPC5200_SIMPLE=y |
18 | CONFIG_PPC_MPC5200_BUGFIX=y | 18 | CONFIG_PPC_MPC5200_BUGFIX=y |
19 | # CONFIG_PPC_PMAC is not set | 19 | # CONFIG_PPC_PMAC is not set |
20 | CONFIG_PPC_BESTCOMM=y | ||
21 | CONFIG_PM=y | 20 | CONFIG_PM=y |
22 | # CONFIG_PCI is not set | 21 | # CONFIG_PCI is not set |
23 | CONFIG_NET=y | 22 | CONFIG_NET=y |
@@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y | |||
86 | CONFIG_RTC_CLASS=y | 85 | CONFIG_RTC_CLASS=y |
87 | CONFIG_RTC_DRV_DS1307=y | 86 | CONFIG_RTC_DRV_DS1307=y |
88 | CONFIG_RTC_DRV_DS1374=y | 87 | CONFIG_RTC_DRV_DS1374=y |
88 | CONFIG_DMADEVICES=y | ||
89 | CONFIG_PPC_BESTCOMM=y | ||
89 | CONFIG_EXT2_FS=y | 90 | CONFIG_EXT2_FS=y |
90 | CONFIG_EXT3_FS=y | 91 | CONFIG_EXT3_FS=y |
91 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 92 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 6640a35bebb7..8b682d1cf4d6 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig | |||
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y | |||
15 | CONFIG_PPC_MPC5200_BUGFIX=y | 15 | CONFIG_PPC_MPC5200_BUGFIX=y |
16 | CONFIG_PPC_MPC5200_LPBFIFO=m | 16 | CONFIG_PPC_MPC5200_LPBFIFO=m |
17 | # CONFIG_PPC_PMAC is not set | 17 | # CONFIG_PPC_PMAC is not set |
18 | CONFIG_PPC_BESTCOMM=y | ||
19 | CONFIG_SIMPLE_GPIO=y | 18 | CONFIG_SIMPLE_GPIO=y |
20 | CONFIG_NO_HZ=y | 19 | CONFIG_NO_HZ=y |
21 | CONFIG_HIGH_RES_TIMERS=y | 20 | CONFIG_HIGH_RES_TIMERS=y |
@@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y | |||
125 | CONFIG_RTC_DRV_DS1307=y | 124 | CONFIG_RTC_DRV_DS1307=y |
126 | CONFIG_RTC_DRV_DS1374=y | 125 | CONFIG_RTC_DRV_DS1374=y |
127 | CONFIG_RTC_DRV_PCF8563=m | 126 | CONFIG_RTC_DRV_PCF8563=m |
127 | CONFIG_DMADEVICES=y | ||
128 | CONFIG_PPC_BESTCOMM=y | ||
128 | CONFIG_EXT2_FS=y | 129 | CONFIG_EXT2_FS=y |
129 | CONFIG_EXT3_FS=y | 130 | CONFIG_EXT3_FS=y |
130 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 131 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index bd8a6f71944f..cec044a3ff69 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -2,7 +2,6 @@ CONFIG_PPC64=y | |||
2 | CONFIG_ALTIVEC=y | 2 | CONFIG_ALTIVEC=y |
3 | CONFIG_SMP=y | 3 | CONFIG_SMP=y |
4 | CONFIG_NR_CPUS=2 | 4 | CONFIG_NR_CPUS=2 |
5 | CONFIG_EXPERIMENTAL=y | ||
6 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
7 | CONFIG_NO_HZ=y | 6 | CONFIG_NO_HZ=y |
8 | CONFIG_HIGH_RES_TIMERS=y | 7 | CONFIG_HIGH_RES_TIMERS=y |
@@ -45,8 +44,9 @@ CONFIG_INET_AH=y | |||
45 | CONFIG_INET_ESP=y | 44 | CONFIG_INET_ESP=y |
46 | # CONFIG_IPV6 is not set | 45 | # CONFIG_IPV6 is not set |
47 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 46 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
47 | CONFIG_DEVTMPFS=y | ||
48 | CONFIG_DEVTMPFS_MOUNT=y | ||
48 | CONFIG_MTD=y | 49 | CONFIG_MTD=y |
49 | CONFIG_MTD_CHAR=y | ||
50 | CONFIG_MTD_BLOCK=y | 50 | CONFIG_MTD_BLOCK=y |
51 | CONFIG_MTD_SLRAM=y | 51 | CONFIG_MTD_SLRAM=y |
52 | CONFIG_MTD_PHRAM=y | 52 | CONFIG_MTD_PHRAM=y |
@@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y | |||
88 | CONFIG_DM_CRYPT=y | 88 | CONFIG_DM_CRYPT=y |
89 | CONFIG_NETDEVICES=y | 89 | CONFIG_NETDEVICES=y |
90 | CONFIG_DUMMY=y | 90 | CONFIG_DUMMY=y |
91 | CONFIG_MII=y | ||
92 | CONFIG_TIGON3=y | 91 | CONFIG_TIGON3=y |
93 | CONFIG_E1000=y | 92 | CONFIG_E1000=y |
94 | CONFIG_PASEMI_MAC=y | 93 | CONFIG_PASEMI_MAC=y |
@@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y | |||
174 | CONFIG_NLS_ISO8859_1=y | 173 | CONFIG_NLS_ISO8859_1=y |
175 | CONFIG_CRC_CCITT=y | 174 | CONFIG_CRC_CCITT=y |
176 | CONFIG_PRINTK_TIME=y | 175 | CONFIG_PRINTK_TIME=y |
177 | CONFIG_MAGIC_SYSRQ=y | ||
178 | CONFIG_DEBUG_FS=y | 176 | CONFIG_DEBUG_FS=y |
177 | CONFIG_MAGIC_SYSRQ=y | ||
179 | CONFIG_DEBUG_KERNEL=y | 178 | CONFIG_DEBUG_KERNEL=y |
180 | CONFIG_DETECT_HUNG_TASK=y | 179 | CONFIG_DETECT_HUNG_TASK=y |
181 | # CONFIG_SCHED_DEBUG is not set | 180 | # CONFIG_SCHED_DEBUG is not set |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a5d4d5..243ce69ad685 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -284,7 +284,7 @@ do_kvm_##n: \ | |||
284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | 284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ |
285 | beq- 1f; \ | 285 | beq- 1f; \ |
286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | 286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ |
287 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | 287 | 1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ |
288 | blt+ cr1,3f; /* abort if it is */ \ | 288 | blt+ cr1,3f; /* abort if it is */ \ |
289 | li r1,(n); /* will be reloaded later */ \ | 289 | li r1,(n); /* will be reloaded later */ \ |
290 | sth r1,PACA_TRAP_SAVE(r13); \ | 290 | sth r1,PACA_TRAP_SAVE(r13); \ |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..bc23b1ba7980 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); | |||
192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); | 192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); |
193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); | 193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); |
194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); | 194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); |
195 | extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | ||
196 | struct kvm_vcpu *vcpu); | ||
197 | extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||
198 | struct kvmppc_book3s_shadow_vcpu *svcpu); | ||
195 | 199 | ||
196 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | 200 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) |
197 | { | 201 | { |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..192917d2239c 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -79,6 +79,7 @@ struct kvmppc_host_state { | |||
79 | ulong vmhandler; | 79 | ulong vmhandler; |
80 | ulong scratch0; | 80 | ulong scratch0; |
81 | ulong scratch1; | 81 | ulong scratch1; |
82 | ulong scratch2; | ||
82 | u8 in_guest; | 83 | u8 in_guest; |
83 | u8 restore_hid5; | 84 | u8 restore_hid5; |
84 | u8 napping; | 85 | u8 napping; |
@@ -106,6 +107,7 @@ struct kvmppc_host_state { | |||
106 | }; | 107 | }; |
107 | 108 | ||
108 | struct kvmppc_book3s_shadow_vcpu { | 109 | struct kvmppc_book3s_shadow_vcpu { |
110 | bool in_use; | ||
109 | ulong gpr[14]; | 111 | ulong gpr[14]; |
110 | u32 cr; | 112 | u32 cr; |
111 | u32 xer; | 113 | u32 xer; |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06be1d84..7bdcf340016c 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, | |||
720 | int64_t opal_pci_poll(uint64_t phb_id); | 720 | int64_t opal_pci_poll(uint64_t phb_id); |
721 | int64_t opal_return_cpu(void); | 721 | int64_t opal_return_cpu(void); |
722 | 722 | ||
723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); | 723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); |
724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); | 724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); |
725 | 725 | ||
726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
727 | uint32_t addr, uint32_t data, uint32_t sz); | 727 | uint32_t addr, uint32_t data, uint32_t sz); |
728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
729 | uint32_t addr, uint32_t *data, uint32_t sz); | 729 | uint32_t addr, __be32 *data, uint32_t sz); |
730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); | 730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); |
731 | int64_t opal_manage_flash(uint8_t op); | 731 | int64_t opal_manage_flash(uint8_t op); |
732 | int64_t opal_update_flash(uint64_t blk_list); | 732 | int64_t opal_update_flash(uint64_t blk_list); |
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 27b2386f738a..842846c1b711 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h | |||
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, | |||
84 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | 84 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
85 | unsigned long address) | 85 | unsigned long address) |
86 | { | 86 | { |
87 | struct page *page = page_address(table); | ||
88 | |||
89 | tlb_flush_pgtable(tlb, address); | 87 | tlb_flush_pgtable(tlb, address); |
90 | pgtable_page_dtor(page); | 88 | pgtable_page_dtor(table); |
91 | pgtable_free_tlb(tlb, page, 0); | 89 | pgtable_free_tlb(tlb, page_address(table), 0); |
92 | } | 90 | } |
93 | #endif /* _ASM_POWERPC_PGALLOC_32_H */ | 91 | #endif /* _ASM_POWERPC_PGALLOC_32_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 694012877bf7..4b0be20fcbfd 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h | |||
@@ -148,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, | |||
148 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | 148 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
149 | unsigned long address) | 149 | unsigned long address) |
150 | { | 150 | { |
151 | struct page *page = page_address(table); | ||
152 | |||
153 | tlb_flush_pgtable(tlb, address); | 151 | tlb_flush_pgtable(tlb, address); |
154 | pgtable_page_dtor(page); | 152 | pgtable_page_dtor(table); |
155 | pgtable_free_tlb(tlb, page, 0); | 153 | pgtable_free_tlb(tlb, page_address(table), 0); |
156 | } | 154 | } |
157 | 155 | ||
158 | #else /* if CONFIG_PPC_64K_PAGES */ | 156 | #else /* if CONFIG_PPC_64K_PAGES */ |
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); | |||
35 | extern void enable_kernel_spe(void); | 35 | extern void enable_kernel_spe(void); |
36 | extern void giveup_spe(struct task_struct *); | 36 | extern void giveup_spe(struct task_struct *); |
37 | extern void load_up_spe(struct task_struct *); | 37 | extern void load_up_spe(struct task_struct *); |
38 | extern void switch_booke_debug_regs(struct thread_struct *new_thread); | 38 | extern void switch_booke_debug_regs(struct debug_reg *new_debug); |
39 | 39 | ||
40 | #ifndef CONFIG_SMP | 40 | #ifndef CONFIG_SMP |
41 | extern void discard_lazy_cpu_state(void); | 41 | extern void discard_lazy_cpu_state(void); |
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c2137..8296381ae432 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h | |||
@@ -4,13 +4,18 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * The PowerPC can do unaligned accesses itself in big endian mode. | 7 | * The PowerPC can do unaligned accesses itself based on its endian mode. |
8 | */ | 8 | */ |
9 | #include <linux/unaligned/access_ok.h> | 9 | #include <linux/unaligned/access_ok.h> |
10 | #include <linux/unaligned/generic.h> | 10 | #include <linux/unaligned/generic.h> |
11 | 11 | ||
12 | #ifdef __LITTLE_ENDIAN__ | ||
13 | #define get_unaligned __get_unaligned_le | ||
14 | #define put_unaligned __put_unaligned_le | ||
15 | #else | ||
12 | #define get_unaligned __get_unaligned_be | 16 | #define get_unaligned __get_unaligned_be |
13 | #define put_unaligned __put_unaligned_be | 17 | #define put_unaligned __put_unaligned_be |
18 | #endif | ||
14 | 19 | ||
15 | #endif /* __KERNEL__ */ | 20 | #endif /* __KERNEL__ */ |
16 | #endif /* _ASM_POWERPC_UNALIGNED_H */ | 21 | #endif /* _ASM_POWERPC_UNALIGNED_H */ |
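
The asm/unaligned.h change selects the little-endian accessors when the kernel is built little-endian, so get_unaligned()/put_unaligned() always operate in the CPU's native byte order. A rough userspace analogue, using a hypothetical helper name (a sketch, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit value from a possibly misaligned address in the CPU's
 * native byte order; memcpy keeps the access alignment-safe, which is
 * the guarantee get_unaligned() gives regardless of kernel endianness. */
static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	uint32_t x = 0xdeadbeef;

	memcpy(buf + 1, &x, sizeof(x));	/* deliberately misaligned */
	printf("0x%08x\n", (unsigned)get_unaligned_u32(buf + 1));
	return 0;
}
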
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc033ec8..d3de01066f7d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -576,6 +576,7 @@ int main(void) | |||
576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); | 576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); | 577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); | 578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
579 | HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); | ||
579 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); | 580 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
580 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); | 581 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); |
581 | HSTATE_FIELD(HSTATE_NAPPING, napping); | 582 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c26435..11c1d069d920 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) | 124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) |
125 | { | 125 | { |
126 | unsigned long addr; | 126 | unsigned long addr; |
127 | const u32 *basep, *sizep; | 127 | const __be32 *basep, *sizep; |
128 | unsigned int rtas_start = 0, rtas_end = 0; | 128 | unsigned int rtas_start = 0, rtas_end = 0; |
129 | 129 | ||
130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); | 130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); |
131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); | 131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); |
132 | 132 | ||
133 | if (basep && sizep) { | 133 | if (basep && sizep) { |
134 | rtas_start = *basep; | 134 | rtas_start = be32_to_cpup(basep); |
135 | rtas_end = *basep + *sizep; | 135 | rtas_end = rtas_start + be32_to_cpup(sizep); |
136 | } | 136 | } |
137 | 137 | ||
138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
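
The crash_dump.c change treats the RTAS properties as __be32 because flattened-device-tree cells are stored big-endian and must be converted before use on a little-endian host. A standalone sketch of that conversion, with a hypothetical helper name (the kernel uses be32_to_cpup()):

#include <stdint.h>
#include <stdio.h>

/* Device-tree cells are big-endian; byte-swap explicitly so the result
 * is correct on both big- and little-endian hosts. */
static uint32_t be32_to_cpu_example(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* e.g. a "linux,rtas-base" cell as stored in the device tree */
	const uint8_t prop[4] = { 0x01, 0x02, 0x03, 0x04 };

	printf("0x%08x\n", (unsigned)be32_to_cpu_example(prop));	/* 0x01020304 */
	return 0;
}
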
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 2ae41aba4053..4f0946de2d5c 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1) | |||
80 | * of the function that the cpu should jump to to continue | 80 | * of the function that the cpu should jump to to continue |
81 | * initialization. | 81 | * initialization. |
82 | */ | 82 | */ |
83 | .balign 8 | ||
83 | .globl __secondary_hold_spinloop | 84 | .globl __secondary_hold_spinloop |
84 | __secondary_hold_spinloop: | 85 | __secondary_hold_spinloop: |
85 | .llong 0x0 | 86 | .llong 0x0 |
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start) | |||
470 | mtctr r8 | 471 | mtctr r8 |
471 | bctr | 472 | bctr |
472 | 473 | ||
474 | .balign 8 | ||
473 | p_end: .llong _end - _stext | 475 | p_end: .llong _end - _stext |
474 | 476 | ||
475 | 4: /* Now copy the rest of the kernel up to _end */ | 477 | 4: /* Now copy the rest of the kernel up to _end */ |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 88a7fb458dfd..75d4f7340da8 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -148,7 +148,7 @@ void __init reserve_crashkernel(void) | |||
148 | * a small SLB (128MB) since the crash kernel needs to place | 148 | * a small SLB (128MB) since the crash kernel needs to place |
149 | * itself and some stacks to be in the first segment. | 149 | * itself and some stacks to be in the first segment. |
150 | */ | 150 | */ |
151 | crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); | 151 | crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); |
152 | #else | 152 | #else |
153 | crashk_res.start = KDUMP_KERNELBASE; | 153 | crashk_res.start = KDUMP_KERNELBASE; |
154 | #endif | 154 | #endif |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e59caf874d05..64bf8db12b15 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2) | |||
246 | or r3,r7,r9 | 246 | or r3,r7,r9 |
247 | blr | 247 | blr |
248 | 248 | ||
249 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | ||
250 | 249 | ||
250 | #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX | ||
251 | _GLOBAL(rmci_on) | 251 | _GLOBAL(rmci_on) |
252 | sync | 252 | sync |
253 | isync | 253 | isync |
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off) | |||
277 | isync | 277 | isync |
278 | sync | 278 | sync |
279 | blr | 279 | blr |
280 | #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ | ||
281 | |||
282 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | ||
280 | 283 | ||
281 | /* | 284 | /* |
282 | * Do an IO access in real mode | 285 | * Do an IO access in real mode |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8ab7eb0..4a96556fd2d4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
339 | #endif | 339 | #endif |
340 | } | 340 | } |
341 | 341 | ||
342 | static void prime_debug_regs(struct thread_struct *thread) | 342 | static void prime_debug_regs(struct debug_reg *debug) |
343 | { | 343 | { |
344 | /* | 344 | /* |
345 | * We could have inherited MSR_DE from userspace, since | 345 | * We could have inherited MSR_DE from userspace, since |
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
348 | */ | 348 | */ |
349 | mtmsr(mfmsr() & ~MSR_DE); | 349 | mtmsr(mfmsr() & ~MSR_DE); |
350 | 350 | ||
351 | mtspr(SPRN_IAC1, thread->debug.iac1); | 351 | mtspr(SPRN_IAC1, debug->iac1); |
352 | mtspr(SPRN_IAC2, thread->debug.iac2); | 352 | mtspr(SPRN_IAC2, debug->iac2); |
353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
354 | mtspr(SPRN_IAC3, thread->debug.iac3); | 354 | mtspr(SPRN_IAC3, debug->iac3); |
355 | mtspr(SPRN_IAC4, thread->debug.iac4); | 355 | mtspr(SPRN_IAC4, debug->iac4); |
356 | #endif | 356 | #endif |
357 | mtspr(SPRN_DAC1, thread->debug.dac1); | 357 | mtspr(SPRN_DAC1, debug->dac1); |
358 | mtspr(SPRN_DAC2, thread->debug.dac2); | 358 | mtspr(SPRN_DAC2, debug->dac2); |
359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
360 | mtspr(SPRN_DVC1, thread->debug.dvc1); | 360 | mtspr(SPRN_DVC1, debug->dvc1); |
361 | mtspr(SPRN_DVC2, thread->debug.dvc2); | 361 | mtspr(SPRN_DVC2, debug->dvc2); |
362 | #endif | 362 | #endif |
363 | mtspr(SPRN_DBCR0, thread->debug.dbcr0); | 363 | mtspr(SPRN_DBCR0, debug->dbcr0); |
364 | mtspr(SPRN_DBCR1, thread->debug.dbcr1); | 364 | mtspr(SPRN_DBCR1, debug->dbcr1); |
365 | #ifdef CONFIG_BOOKE | 365 | #ifdef CONFIG_BOOKE |
366 | mtspr(SPRN_DBCR2, thread->debug.dbcr2); | 366 | mtspr(SPRN_DBCR2, debug->dbcr2); |
367 | #endif | 367 | #endif |
368 | } | 368 | } |
369 | /* | 369 | /* |
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
371 | * debug registers, set the debug registers from the values | 371 | * debug registers, set the debug registers from the values |
372 | * stored in the new thread. | 372 | * stored in the new thread. |
373 | */ | 373 | */ |
374 | void switch_booke_debug_regs(struct thread_struct *new_thread) | 374 | void switch_booke_debug_regs(struct debug_reg *new_debug) |
375 | { | 375 | { |
376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) | 376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) |
377 | || (new_thread->debug.dbcr0 & DBCR0_IDM)) | 377 | || (new_debug->dbcr0 & DBCR0_IDM)) |
378 | prime_debug_regs(new_thread); | 378 | prime_debug_regs(new_debug); |
379 | } | 379 | } |
380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); | 380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
683 | #endif /* CONFIG_SMP */ | 683 | #endif /* CONFIG_SMP */ |
684 | 684 | ||
685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
686 | switch_booke_debug_regs(&new->thread); | 686 | switch_booke_debug_regs(&new->thread.debug); |
687 | #else | 687 | #else |
688 | /* | 688 | /* |
689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb64a6e1dc51..078145acf7fb 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void) | |||
1986 | /* Get the full OF pathname of the stdout device */ | 1986 | /* Get the full OF pathname of the stdout device */ |
1987 | memset(path, 0, 256); | 1987 | memset(path, 0, 256); |
1988 | call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); | 1988 | call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); |
1989 | stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); | ||
1990 | val = cpu_to_be32(stdout_node); | ||
1991 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", | ||
1992 | &val, sizeof(val)); | ||
1993 | prom_printf("OF stdout device is: %s\n", of_stdout_device); | 1989 | prom_printf("OF stdout device is: %s\n", of_stdout_device); |
1994 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", | 1990 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", |
1995 | path, strlen(path) + 1); | 1991 | path, strlen(path) + 1); |
1996 | 1992 | ||
1997 | /* If it's a display, note it */ | 1993 | /* instance-to-package fails on PA-Semi */ |
1998 | memset(type, 0, sizeof(type)); | 1994 | stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); |
1999 | prom_getprop(stdout_node, "device_type", type, sizeof(type)); | 1995 | if (stdout_node != PROM_ERROR) { |
2000 | if (strcmp(type, "display") == 0) | 1996 | val = cpu_to_be32(stdout_node); |
2001 | prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); | 1997 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", |
1998 | &val, sizeof(val)); | ||
1999 | |||
2000 | /* If it's a display, note it */ | ||
2001 | memset(type, 0, sizeof(type)); | ||
2002 | prom_getprop(stdout_node, "device_type", type, sizeof(type)); | ||
2003 | if (strcmp(type, "display") == 0) | ||
2004 | prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); | ||
2005 | } | ||
2002 | } | 2006 | } |
2003 | 2007 | ||
2004 | static int __init prom_find_machine_type(void) | 2008 | static int __init prom_find_machine_type(void) |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb40498b41..2e3d2bf536c5 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1555 | 1555 | ||
1556 | flush_fp_to_thread(child); | 1556 | flush_fp_to_thread(child); |
1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
1558 | memcpy(&tmp, &child->thread.fp_state.fpr, | 1558 | memcpy(&tmp, &child->thread.TS_FPR(fpidx), |
1559 | sizeof(long)); | 1559 | sizeof(long)); |
1560 | else | 1560 | else |
1561 | tmp = child->thread.fp_state.fpscr; | 1561 | tmp = child->thread.fp_state.fpscr; |
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1588 | 1588 | ||
1589 | flush_fp_to_thread(child); | 1589 | flush_fp_to_thread(child); |
1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
1591 | memcpy(&child->thread.fp_state.fpr, &data, | 1591 | memcpy(&child->thread.TS_FPR(fpidx), &data, |
1592 | sizeof(long)); | 1592 | sizeof(long)); |
1593 | else | 1593 | else |
1594 | child->thread.fp_state.fpscr = data; | 1594 | child->thread.fp_state.fpscr = data; |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc80445d25..bc76cc6b419c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) | |||
479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | 479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && |
480 | (dn = of_find_node_by_path("/rtas"))) { | 480 | (dn = of_find_node_by_path("/rtas"))) { |
481 | int num_addr_cell, num_size_cell, maxcpus; | 481 | int num_addr_cell, num_size_cell, maxcpus; |
482 | const unsigned int *ireg; | 482 | const __be32 *ireg; |
483 | 483 | ||
484 | num_addr_cell = of_n_addr_cells(dn); | 484 | num_addr_cell = of_n_addr_cells(dn); |
485 | num_size_cell = of_n_size_cells(dn); | 485 | num_size_cell = of_n_size_cells(dn); |
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) | |||
489 | if (!ireg) | 489 | if (!ireg) |
490 | goto out; | 490 | goto out; |
491 | 491 | ||
492 | maxcpus = ireg[num_addr_cell + num_size_cell]; | 492 | maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); |
493 | 493 | ||
494 | /* Double maxcpus for processors which have SMT capability */ | 494 | /* Double maxcpus for processors which have SMT capability */ |
495 | if (cpu_has_feature(CPU_FTR_SMT)) | 495 | if (cpu_has_feature(CPU_FTR_SMT)) |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3bf9a2..c1cf4a1522d9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
580 | int cpu_to_core_id(int cpu) | 580 | int cpu_to_core_id(int cpu) |
581 | { | 581 | { |
582 | struct device_node *np; | 582 | struct device_node *np; |
583 | const int *reg; | 583 | const __be32 *reg; |
584 | int id = -1; | 584 | int id = -1; |
585 | 585 | ||
586 | np = of_get_cpu_node(cpu, NULL); | 586 | np = of_get_cpu_node(cpu, NULL); |
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) | |||
591 | if (!reg) | 591 | if (!reg) |
592 | goto out; | 592 | goto out; |
593 | 593 | ||
594 | id = *reg; | 594 | id = be32_to_cpup(reg); |
595 | out: | 595 | out: |
596 | of_node_put(np); | 596 | of_node_put(np); |
597 | return id; | 597 | return id; |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587a8b7d..c5d148434c08 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
469 | slb_v = vcpu->kvm->arch.vrma_slb_v; | 469 | slb_v = vcpu->kvm->arch.vrma_slb_v; |
470 | } | 470 | } |
471 | 471 | ||
472 | preempt_disable(); | ||
472 | /* Find the HPTE in the hash table */ | 473 | /* Find the HPTE in the hash table */ |
473 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, | 474 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, |
474 | HPTE_V_VALID | HPTE_V_ABSENT); | 475 | HPTE_V_VALID | HPTE_V_ABSENT); |
475 | if (index < 0) | 476 | if (index < 0) { |
477 | preempt_enable(); | ||
476 | return -ENOENT; | 478 | return -ENOENT; |
479 | } | ||
477 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 480 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); |
478 | v = hptep[0] & ~HPTE_V_HVLOCK; | 481 | v = hptep[0] & ~HPTE_V_HVLOCK; |
479 | gr = kvm->arch.revmap[index].guest_rpte; | 482 | gr = kvm->arch.revmap[index].guest_rpte; |
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
481 | /* Unlock the HPTE */ | 484 | /* Unlock the HPTE */ |
482 | asm volatile("lwsync" : : : "memory"); | 485 | asm volatile("lwsync" : : : "memory"); |
483 | hptep[0] = v; | 486 | hptep[0] = v; |
487 | preempt_enable(); | ||
484 | 488 | ||
485 | gpte->eaddr = eaddr; | 489 | gpte->eaddr = eaddr; |
486 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); | 490 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); |
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
665 | return -EFAULT; | 669 | return -EFAULT; |
666 | } else { | 670 | } else { |
667 | page = pages[0]; | 671 | page = pages[0]; |
672 | pfn = page_to_pfn(page); | ||
668 | if (PageHuge(page)) { | 673 | if (PageHuge(page)) { |
669 | page = compound_head(page); | 674 | page = compound_head(page); |
670 | pte_size <<= compound_order(page); | 675 | pte_size <<= compound_order(page); |
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
689 | } | 694 | } |
690 | rcu_read_unlock_sched(); | 695 | rcu_read_unlock_sched(); |
691 | } | 696 | } |
692 | pfn = page_to_pfn(page); | ||
693 | } | 697 | } |
694 | 698 | ||
695 | ret = -EFAULT; | 699 | ret = -EFAULT; |
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
707 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; | 711 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; |
708 | } | 712 | } |
709 | 713 | ||
710 | /* Set the HPTE to point to pfn */ | 714 | /* |
711 | r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); | 715 | * Set the HPTE to point to pfn. |
716 | * Since the pfn is at PAGE_SIZE granularity, make sure we | ||
717 | * don't mask out lower-order bits if psize < PAGE_SIZE. | ||
718 | */ | ||
719 | if (psize < PAGE_SIZE) | ||
720 | psize = PAGE_SIZE; | ||
721 | r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); | ||
712 | if (hpte_is_writable(r) && !write_ok) | 722 | if (hpte_is_writable(r) && !write_ok) |
713 | r = hpte_make_readonly(r); | 723 | r = hpte_make_readonly(r); |
714 | ret = RESUME_GUEST; | 724 | ret = RESUME_GUEST; |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f1c3bc..b51d5db78068 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) | |||
131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | 131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) |
132 | { | 132 | { |
133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
134 | unsigned long flags; | ||
134 | 135 | ||
135 | spin_lock(&vcpu->arch.tbacct_lock); | 136 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
136 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && | 137 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && |
137 | vc->preempt_tb != TB_NIL) { | 138 | vc->preempt_tb != TB_NIL) { |
138 | vc->stolen_tb += mftb() - vc->preempt_tb; | 139 | vc->stolen_tb += mftb() - vc->preempt_tb; |
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | |||
143 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; | 144 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; |
144 | vcpu->arch.busy_preempt = TB_NIL; | 145 | vcpu->arch.busy_preempt = TB_NIL; |
145 | } | 146 | } |
146 | spin_unlock(&vcpu->arch.tbacct_lock); | 147 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
147 | } | 148 | } |
148 | 149 | ||
149 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) | 150 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) |
150 | { | 151 | { |
151 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 152 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
153 | unsigned long flags; | ||
152 | 154 | ||
153 | spin_lock(&vcpu->arch.tbacct_lock); | 155 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
154 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | 156 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) |
155 | vc->preempt_tb = mftb(); | 157 | vc->preempt_tb = mftb(); |
156 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) | 158 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) |
157 | vcpu->arch.busy_preempt = mftb(); | 159 | vcpu->arch.busy_preempt = mftb(); |
158 | spin_unlock(&vcpu->arch.tbacct_lock); | 160 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
159 | } | 161 | } |
160 | 162 | ||
161 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) | 163 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) |
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) | |||
486 | */ | 488 | */ |
487 | if (vc->vcore_state != VCORE_INACTIVE && | 489 | if (vc->vcore_state != VCORE_INACTIVE && |
488 | vc->runner->arch.run_task != current) { | 490 | vc->runner->arch.run_task != current) { |
489 | spin_lock(&vc->runner->arch.tbacct_lock); | 491 | spin_lock_irq(&vc->runner->arch.tbacct_lock); |
490 | p = vc->stolen_tb; | 492 | p = vc->stolen_tb; |
491 | if (vc->preempt_tb != TB_NIL) | 493 | if (vc->preempt_tb != TB_NIL) |
492 | p += now - vc->preempt_tb; | 494 | p += now - vc->preempt_tb; |
493 | spin_unlock(&vc->runner->arch.tbacct_lock); | 495 | spin_unlock_irq(&vc->runner->arch.tbacct_lock); |
494 | } else { | 496 | } else { |
495 | p = vc->stolen_tb; | 497 | p = vc->stolen_tb; |
496 | } | 498 | } |
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
512 | core_stolen = vcore_stolen_time(vc, now); | 514 | core_stolen = vcore_stolen_time(vc, now); |
513 | stolen = core_stolen - vcpu->arch.stolen_logged; | 515 | stolen = core_stolen - vcpu->arch.stolen_logged; |
514 | vcpu->arch.stolen_logged = core_stolen; | 516 | vcpu->arch.stolen_logged = core_stolen; |
515 | spin_lock(&vcpu->arch.tbacct_lock); | 517 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
516 | stolen += vcpu->arch.busy_stolen; | 518 | stolen += vcpu->arch.busy_stolen; |
517 | vcpu->arch.busy_stolen = 0; | 519 | vcpu->arch.busy_stolen = 0; |
518 | spin_unlock(&vcpu->arch.tbacct_lock); | 520 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
519 | if (!dt || !vpa) | 521 | if (!dt || !vpa) |
520 | return; | 522 | return; |
521 | memset(dt, 0, sizeof(struct dtl_entry)); | 523 | memset(dt, 0, sizeof(struct dtl_entry)); |
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
589 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | 591 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
590 | return RESUME_HOST; | 592 | return RESUME_HOST; |
591 | 593 | ||
594 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
592 | rc = kvmppc_rtas_hcall(vcpu); | 595 | rc = kvmppc_rtas_hcall(vcpu); |
596 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
593 | 597 | ||
594 | if (rc == -ENOENT) | 598 | if (rc == -ENOENT) |
595 | return RESUME_HOST; | 599 | return RESUME_HOST; |
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | |||
1115 | 1119 | ||
1116 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) | 1120 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
1117 | return; | 1121 | return; |
1118 | spin_lock(&vcpu->arch.tbacct_lock); | 1122 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
1119 | now = mftb(); | 1123 | now = mftb(); |
1120 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | 1124 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - |
1121 | vcpu->arch.stolen_logged; | 1125 | vcpu->arch.stolen_logged; |
1122 | vcpu->arch.busy_preempt = now; | 1126 | vcpu->arch.busy_preempt = now; |
1123 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 1127 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
1124 | spin_unlock(&vcpu->arch.tbacct_lock); | 1128 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
1125 | --vc->n_runnable; | 1129 | --vc->n_runnable; |
1126 | list_del(&vcpu->arch.run_list); | 1130 | list_del(&vcpu->arch.run_list); |
1127 | } | 1131 | } |
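
The tbacct_lock conversions above switch plain spin_lock() calls to the irqsave/irq variants because the lock can now be taken from contexts that run with interrupts disabled. A minimal sketch of the pattern, with made-up names (not the kernel's actual accounting code):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static u64 example_stolen_time;

/* Safe from any context: saves the current interrupt state, takes the
 * lock with interrupts off, and restores the previous state on unlock. */
static void account_stolen_example(u64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_stolen_time += delta;
	spin_unlock_irqrestore(&example_lock, flags);
}
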
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c515440ad1a..8689e2e30857 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
225 | is_io = pa & (HPTE_R_I | HPTE_R_W); | 225 | is_io = pa & (HPTE_R_I | HPTE_R_W); |
226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); | 226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); |
227 | pa &= PAGE_MASK; | 227 | pa &= PAGE_MASK; |
228 | pa |= gpa & ~PAGE_MASK; | ||
228 | } else { | 229 | } else { |
229 | /* Translate to host virtual address */ | 230 | /* Translate to host virtual address */ |
230 | hva = __gfn_to_hva_memslot(memslot, gfn); | 231 | hva = __gfn_to_hva_memslot(memslot, gfn); |
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
238 | ptel = hpte_make_readonly(ptel); | 239 | ptel = hpte_make_readonly(ptel); |
239 | is_io = hpte_cache_bits(pte_val(pte)); | 240 | is_io = hpte_cache_bits(pte_val(pte)); |
240 | pa = pte_pfn(pte) << PAGE_SHIFT; | 241 | pa = pte_pfn(pte) << PAGE_SHIFT; |
242 | pa |= hva & (pte_size - 1); | ||
243 | pa |= gpa & ~PAGE_MASK; | ||
241 | } | 244 | } |
242 | } | 245 | } |
243 | 246 | ||
244 | if (pte_size < psize) | 247 | if (pte_size < psize) |
245 | return H_PARAMETER; | 248 | return H_PARAMETER; |
246 | if (pa && pte_size > psize) | ||
247 | pa |= gpa & (pte_size - 1); | ||
248 | 249 | ||
249 | ptel &= ~(HPTE_R_PP0 - psize); | 250 | ptel &= ~(HPTE_R_PP0 - psize); |
250 | ptel |= pa; | 251 | ptel |= pa; |
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { | |||
749 | 20, /* 1M, unsupported */ | 750 | 20, /* 1M, unsupported */ |
750 | }; | 751 | }; |
751 | 752 | ||
753 | /* When called from virtmode, this func should be protected by | ||

754 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK | ||
755 * can trigger a deadlock. | ||
756 | */ | ||
752 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | 757 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, |
753 | unsigned long valid) | 758 | unsigned long valid) |
754 | { | 759 | { |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75b1925..be4fa04a37c9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
153 | 153 | ||
154 | 13: b machine_check_fwnmi | 154 | 13: b machine_check_fwnmi |
155 | 155 | ||
156 | |||
157 | /* | 156 | /* |
158 | * We come in here when wakened from nap mode on a secondary hw thread. | 157 | * We come in here when wakened from nap mode on a secondary hw thread. |
159 | * Relocation is off and most register values are lost. | 158 | * Relocation is off and most register values are lost. |
@@ -224,6 +223,11 @@ kvm_start_guest: | |||
224 | /* Clear our vcpu pointer so we don't come back in early */ | 223 | /* Clear our vcpu pointer so we don't come back in early */ |
225 | li r0, 0 | 224 | li r0, 0 |
226 | std r0, HSTATE_KVM_VCPU(r13) | 225 | std r0, HSTATE_KVM_VCPU(r13) |
226 | /* | ||
227 | * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing | ||
228 | * the nap_count, because once the increment to nap_count is | ||
229 | * visible we could be given another vcpu. | ||
230 | */ | ||
227 | lwsync | 231 | lwsync |
228 | /* Clear any pending IPI - we're an offline thread */ | 232 | /* Clear any pending IPI - we're an offline thread */ |
229 | ld r5, HSTATE_XICS_PHYS(r13) | 233 | ld r5, HSTATE_XICS_PHYS(r13) |
@@ -241,7 +245,6 @@ kvm_start_guest: | |||
241 | /* increment the nap count and then go to nap mode */ | 245 | /* increment the nap count and then go to nap mode */ |
242 | ld r4, HSTATE_KVM_VCORE(r13) | 246 | ld r4, HSTATE_KVM_VCORE(r13) |
243 | addi r4, r4, VCORE_NAP_COUNT | 247 | addi r4, r4, VCORE_NAP_COUNT |
244 | lwsync /* make previous updates visible */ | ||
245 | 51: lwarx r3, 0, r4 | 248 | 51: lwarx r3, 0, r4 |
246 | addi r3, r3, 1 | 249 | addi r3, r3, 1 |
247 | stwcx. r3, 0, r4 | 250 | stwcx. r3, 0, r4 |
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv: | |||
751 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | 754 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 |
752 | * guest R13 saved in SPRN_SCRATCH0 | 755 | * guest R13 saved in SPRN_SCRATCH0 |
753 | */ | 756 | */ |
754 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | 757 | std r9, HSTATE_SCRATCH2(r13) |
755 | std r9, HSTATE_HOST_R2(r13) | ||
756 | 758 | ||
757 | lbz r9, HSTATE_IN_GUEST(r13) | 759 | lbz r9, HSTATE_IN_GUEST(r13) |
758 | cmpwi r9, KVM_GUEST_MODE_HOST_HV | 760 | cmpwi r9, KVM_GUEST_MODE_HOST_HV |
759 | beq kvmppc_bad_host_intr | 761 | beq kvmppc_bad_host_intr |
760 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | 762 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
761 | cmpwi r9, KVM_GUEST_MODE_GUEST | 763 | cmpwi r9, KVM_GUEST_MODE_GUEST |
762 | ld r9, HSTATE_HOST_R2(r13) | 764 | ld r9, HSTATE_SCRATCH2(r13) |
763 | beq kvmppc_interrupt_pr | 765 | beq kvmppc_interrupt_pr |
764 | #endif | 766 | #endif |
765 | /* We're now back in the host but in guest MMU context */ | 767 | /* We're now back in the host but in guest MMU context */ |
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv: | |||
779 | std r6, VCPU_GPR(R6)(r9) | 781 | std r6, VCPU_GPR(R6)(r9) |
780 | std r7, VCPU_GPR(R7)(r9) | 782 | std r7, VCPU_GPR(R7)(r9) |
781 | std r8, VCPU_GPR(R8)(r9) | 783 | std r8, VCPU_GPR(R8)(r9) |
782 | ld r0, HSTATE_HOST_R2(r13) | 784 | ld r0, HSTATE_SCRATCH2(r13) |
783 | std r0, VCPU_GPR(R9)(r9) | 785 | std r0, VCPU_GPR(R9)(r9) |
784 | std r10, VCPU_GPR(R10)(r9) | 786 | std r10, VCPU_GPR(R10)(r9) |
785 | std r11, VCPU_GPR(R11)(r9) | 787 | std r11, VCPU_GPR(R11)(r9) |
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
990 | */ | 992 | */ |
991 | /* Increment the threads-exiting-guest count in the 0xff00 | 993 | /* Increment the threads-exiting-guest count in the 0xff00 |
992 | bits of vcore->entry_exit_count */ | 994 | bits of vcore->entry_exit_count */ |
993 | lwsync | ||
994 | ld r5,HSTATE_KVM_VCORE(r13) | 995 | ld r5,HSTATE_KVM_VCORE(r13) |
995 | addi r6,r5,VCORE_ENTRY_EXIT | 996 | addi r6,r5,VCORE_ENTRY_EXIT |
996 | 41: lwarx r3,0,r6 | 997 | 41: lwarx r3,0,r6 |
997 | addi r0,r3,0x100 | 998 | addi r0,r3,0x100 |
998 | stwcx. r0,0,r6 | 999 | stwcx. r0,0,r6 |
999 | bne 41b | 1000 | bne 41b |
1000 | lwsync | 1001 | isync /* order stwcx. vs. reading napping_threads */ |
1001 | 1002 | ||
1002 | /* | 1003 | /* |
1003 | * At this point we have an interrupt that we have to pass | 1004 | * At this point we have an interrupt that we have to pass |
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
1030 | sld r0,r0,r4 | 1031 | sld r0,r0,r4 |
1031 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ | 1032 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ |
1032 | beq 43f | 1033 | beq 43f |
1034 | /* Order entry/exit update vs. IPIs */ | ||
1035 | sync | ||
1033 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ | 1036 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ |
1034 | subf r6,r4,r13 | 1037 | subf r6,r4,r13 |
1035 | 42: andi. r0,r3,1 | 1038 | 42: andi. r0,r3,1 |
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1638 | bge kvm_cede_exit | 1641 | bge kvm_cede_exit |
1639 | stwcx. r4,0,r6 | 1642 | stwcx. r4,0,r6 |
1640 | bne 31b | 1643 | bne 31b |
1644 | /* order napping_threads update vs testing entry_exit_count */ | ||
1645 | isync | ||
1641 | li r0,1 | 1646 | li r0,1 |
1642 | stb r0,HSTATE_NAPPING(r13) | 1647 | stb r0,HSTATE_NAPPING(r13) |
1643 | /* order napping_threads update vs testing entry_exit_count */ | ||
1644 | lwsync | ||
1645 | mr r4,r3 | 1648 | mr r4,r3 |
1646 | lwz r7,VCORE_ENTRY_EXIT(r5) | 1649 | lwz r7,VCORE_ENTRY_EXIT(r5) |
1647 | cmpwi r7,0x100 | 1650 | cmpwi r7,0x100 |
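
The barrier changes in book3s_hv_rmhandlers.S replace lwsync with isync/sync where an atomic update must be ordered against later loads or against IPIs. An illustrative C analogue of the entry/exit-count pattern, with invented names (an assumption about the intent, not the patch's code):

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

static atomic_t entry_exit_count_example;
static unsigned long napping_threads_example;

/* After atomically bumping the exit count, a full barrier is needed
 * before reading napping_threads: a plain lwsync does not order the
 * store against the later load on PowerPC. */
static bool note_exit_and_check_nappers(void)
{
	atomic_add(0x100, &entry_exit_count_example);
	smp_mb();
	return ACCESS_ONCE(napping_threads_example) != 0;
}
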
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041c14ea..f779450cb07c 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -129,29 +129,32 @@ kvm_start_lightweight: | |||
129 | * R12 = exit handler id | 129 | * R12 = exit handler id |
130 | * R13 = PACA | 130 | * R13 = PACA |
131 | * SVCPU.* = guest * | 131 | * SVCPU.* = guest * |
132 | * MSR.EE = 1 | ||
132 | * | 133 | * |
133 | */ | 134 | */ |
134 | 135 | ||
136 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | ||
137 | |||
138 | /* | ||
139 | * kvmppc_copy_from_svcpu can clobber volatile registers, save | ||
140 | * the exit handler id to the vcpu and restore it from there later. | ||
141 | */ | ||
142 | stw r12, VCPU_TRAP(r3) | ||
143 | |||
135 | /* Transfer reg values from shadow vcpu back to vcpu struct */ | 144 | /* Transfer reg values from shadow vcpu back to vcpu struct */ |
136 | /* On 64-bit, interrupts are still off at this point */ | 145 | /* On 64-bit, interrupts are still off at this point */ |
137 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | 146 | |
138 | GET_SHADOW_VCPU(r4) | 147 | GET_SHADOW_VCPU(r4) |
139 | bl FUNC(kvmppc_copy_from_svcpu) | 148 | bl FUNC(kvmppc_copy_from_svcpu) |
140 | nop | 149 | nop |
141 | 150 | ||
142 | #ifdef CONFIG_PPC_BOOK3S_64 | 151 | #ifdef CONFIG_PPC_BOOK3S_64 |
143 | /* Re-enable interrupts */ | ||
144 | ld r3, HSTATE_HOST_MSR(r13) | ||
145 | ori r3, r3, MSR_EE | ||
146 | MTMSR_EERI(r3) | ||
147 | |||
148 | /* | 152 | /* |
149 | * Reload kernel SPRG3 value. | 153 | * Reload kernel SPRG3 value. |
150 | * No need to save guest value as usermode can't modify SPRG3. | 154 | * No need to save guest value as usermode can't modify SPRG3. |
151 | */ | 155 | */ |
152 | ld r3, PACA_SPRG3(r13) | 156 | ld r3, PACA_SPRG3(r13) |
153 | mtspr SPRN_SPRG3, r3 | 157 | mtspr SPRN_SPRG3, r3 |
154 | |||
155 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 158 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
156 | 159 | ||
157 | /* R7 = vcpu */ | 160 | /* R7 = vcpu */ |
@@ -177,7 +180,7 @@ kvm_start_lightweight: | |||
177 | PPC_STL r31, VCPU_GPR(R31)(r7) | 180 | PPC_STL r31, VCPU_GPR(R31)(r7) |
178 | 181 | ||
179 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 182 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
180 | mr r5, r12 | 183 | lwz r5, VCPU_TRAP(r7) |
181 | 184 | ||
182 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 185 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
183 | REST_2GPRS(3, r1) | 186 | REST_2GPRS(3, r1) |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..5b9e9063cfaf 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
69 | svcpu->in_use = 0; | ||
69 | svcpu_put(svcpu); | 70 | svcpu_put(svcpu); |
70 | #endif | 71 | #endif |
71 | vcpu->cpu = smp_processor_id(); | 72 | vcpu->cpu = smp_processor_id(); |
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
78 | { | 79 | { |
79 | #ifdef CONFIG_PPC_BOOK3S_64 | 80 | #ifdef CONFIG_PPC_BOOK3S_64 |
80 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 81 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
82 | if (svcpu->in_use) { | ||
83 | kvmppc_copy_from_svcpu(vcpu, svcpu); | ||
84 | } | ||
81 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 85 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 86 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
83 | svcpu_put(svcpu); | 87 | svcpu_put(svcpu); |
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
110 | svcpu->ctr = vcpu->arch.ctr; | 114 | svcpu->ctr = vcpu->arch.ctr; |
111 | svcpu->lr = vcpu->arch.lr; | 115 | svcpu->lr = vcpu->arch.lr; |
112 | svcpu->pc = vcpu->arch.pc; | 116 | svcpu->pc = vcpu->arch.pc; |
117 | svcpu->in_use = true; | ||
113 | } | 118 | } |
114 | 119 | ||
115 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 120 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
116 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | 121 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, |
117 | struct kvmppc_book3s_shadow_vcpu *svcpu) | 122 | struct kvmppc_book3s_shadow_vcpu *svcpu) |
118 | { | 123 | { |
124 | /* | ||
125 | * vcpu_put would just call us again because in_use hasn't | ||
126 | * been updated yet. | ||
127 | */ | ||
128 | preempt_disable(); | ||
129 | |||
130 | /* | ||
131 | * Maybe we were already preempted and synced the svcpu from | ||
132 | * our preempt notifiers. Don't bother touching this svcpu then. | ||
133 | */ | ||
134 | if (!svcpu->in_use) | ||
135 | goto out; | ||
136 | |||
119 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 137 | vcpu->arch.gpr[0] = svcpu->gpr[0]; |
120 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 138 | vcpu->arch.gpr[1] = svcpu->gpr[1]; |
121 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 139 | vcpu->arch.gpr[2] = svcpu->gpr[2]; |
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
139 | vcpu->arch.fault_dar = svcpu->fault_dar; | 157 | vcpu->arch.fault_dar = svcpu->fault_dar; |
140 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 158 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
141 | vcpu->arch.last_inst = svcpu->last_inst; | 159 | vcpu->arch.last_inst = svcpu->last_inst; |
160 | svcpu->in_use = false; | ||
161 | |||
162 | out: | ||
163 | preempt_enable(); | ||
142 | } | 164 | } |
143 | 165 | ||
144 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 166 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
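The in_use flag and preempt_disable() added above form a copy-back-exactly-once guard: whichever path runs first (the exit path or the preempt notifier) syncs the shadow state and clears the flag, and the other path sees the flag already cleared and skips the copy. A minimal C sketch of that guard pattern — the names here are made up, this is not the KVM code itself:

#include <linux/preempt.h>
#include <linux/types.h>

/* Illustrative stand-in for the shadow vcpu state. */
struct shadow_state {
	bool in_use;
	/* ... register state mirrored for real-mode code ... */
};

static void sync_from_shadow(struct shadow_state *s)
{
	/* Keep a preempt notifier from running the same sync concurrently. */
	preempt_disable();

	/* Already synced elsewhere: nothing left to copy back. */
	if (!s->in_use)
		goto out;

	/* ... copy the mirrored fields back to the main structure ... */
	s->in_use = false;
out:
	preempt_enable();
}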
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c3c5231adade 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) | |||
153 | 153 | ||
154 | li r6, MSR_IR | MSR_DR | 154 | li r6, MSR_IR | MSR_DR |
155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ | 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ |
156 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
157 | /* | 156 | /* |
158 | * Set EE in HOST_MSR so that it's enabled when we get into our | 157 | * Set EE in HOST_MSR so that it's enabled when we get into our |
159 | * C exit handler function. On 64-bit we delay enabling | 158 | * C exit handler function. |
160 | * interrupts until we have finished transferring stuff | ||
161 | * to or from the PACA. | ||
162 | */ | 159 | */ |
163 | ori r5, r5, MSR_EE | 160 | ori r5, r5, MSR_EE |
164 | #endif | ||
165 | mtsrr0 r7 | 161 | mtsrr0 r7 |
166 | mtsrr1 r6 | 162 | mtsrr1 r6 |
167 | RFI | 163 | RFI |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0591e05db74b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
682 | { | 682 | { |
683 | int ret, s; | 683 | int ret, s; |
684 | struct thread_struct thread; | 684 | struct debug_reg debug; |
685 | #ifdef CONFIG_PPC_FPU | 685 | #ifdef CONFIG_PPC_FPU |
686 | struct thread_fp_state fp; | 686 | struct thread_fp_state fp; |
687 | int fpexc_mode; | 687 | int fpexc_mode; |
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
723 | #endif | 723 | #endif |
724 | 724 | ||
725 | /* Switch to guest debug context */ | 725 | /* Switch to guest debug context */ |
726 | thread.debug = vcpu->arch.shadow_dbg_reg; | 726 | debug = vcpu->arch.shadow_dbg_reg; |
727 | switch_booke_debug_regs(&thread); | 727 | switch_booke_debug_regs(&debug); |
728 | thread.debug = current->thread.debug; | 728 | debug = current->thread.debug; |
729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; | 729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; |
730 | 730 | ||
731 | kvmppc_fix_ee_before_entry(); | 731 | kvmppc_fix_ee_before_entry(); |
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
736 | We also get here with interrupts enabled. */ | 736 | We also get here with interrupts enabled. */ |
737 | 737 | ||
738 | /* Switch back to user space debug context */ | 738 | /* Switch back to user space debug context */ |
739 | switch_booke_debug_regs(&thread); | 739 | switch_booke_debug_regs(&debug); |
740 | current->thread.debug = thread.debug; | 740 | current->thread.debug = debug; |
741 | 741 | ||
742 | #ifdef CONFIG_PPC_FPU | 742 | #ifdef CONFIG_PPC_FPU |
743 | kvmppc_save_guest_fp(vcpu); | 743 | kvmppc_save_guest_fp(vcpu); |
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a59014900..596a285c0755 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -9,6 +9,14 @@ | |||
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/ppc_asm.h> | 10 | #include <asm/ppc_asm.h> |
11 | 11 | ||
12 | #ifdef __BIG_ENDIAN__ | ||
13 | #define sLd sld /* Shift towards low-numbered address. */ | ||
14 | #define sHd srd /* Shift towards high-numbered address. */ | ||
15 | #else | ||
16 | #define sLd srd /* Shift towards low-numbered address. */ | ||
17 | #define sHd sld /* Shift towards high-numbered address. */ | ||
18 | #endif | ||
19 | |||
12 | .align 7 | 20 | .align 7 |
13 | _GLOBAL(__copy_tofrom_user) | 21 | _GLOBAL(__copy_tofrom_user) |
14 | BEGIN_FTR_SECTION | 22 | BEGIN_FTR_SECTION |
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
118 | 126 | ||
119 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | 127 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ |
120 | 25: ld r0,8(r4) | 128 | 25: ld r0,8(r4) |
121 | sld r6,r9,r10 | 129 | sLd r6,r9,r10 |
122 | 26: ldu r9,16(r4) | 130 | 26: ldu r9,16(r4) |
123 | srd r7,r0,r11 | 131 | sHd r7,r0,r11 |
124 | sld r8,r0,r10 | 132 | sLd r8,r0,r10 |
125 | or r7,r7,r6 | 133 | or r7,r7,r6 |
126 | blt cr6,79f | 134 | blt cr6,79f |
127 | 27: ld r0,8(r4) | 135 | 27: ld r0,8(r4) |
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
129 | 137 | ||
130 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | 138 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ |
131 | 29: ldu r9,8(r4) | 139 | 29: ldu r9,8(r4) |
132 | sld r8,r0,r10 | 140 | sLd r8,r0,r10 |
133 | addi r3,r3,-8 | 141 | addi r3,r3,-8 |
134 | blt cr6,5f | 142 | blt cr6,5f |
135 | 30: ld r0,8(r4) | 143 | 30: ld r0,8(r4) |
136 | srd r12,r9,r11 | 144 | sHd r12,r9,r11 |
137 | sld r6,r9,r10 | 145 | sLd r6,r9,r10 |
138 | 31: ldu r9,16(r4) | 146 | 31: ldu r9,16(r4) |
139 | or r12,r8,r12 | 147 | or r12,r8,r12 |
140 | srd r7,r0,r11 | 148 | sHd r7,r0,r11 |
141 | sld r8,r0,r10 | 149 | sLd r8,r0,r10 |
142 | addi r3,r3,16 | 150 | addi r3,r3,16 |
143 | beq cr6,78f | 151 | beq cr6,78f |
144 | 152 | ||
145 | 1: or r7,r7,r6 | 153 | 1: or r7,r7,r6 |
146 | 32: ld r0,8(r4) | 154 | 32: ld r0,8(r4) |
147 | 76: std r12,8(r3) | 155 | 76: std r12,8(r3) |
148 | 2: srd r12,r9,r11 | 156 | 2: sHd r12,r9,r11 |
149 | sld r6,r9,r10 | 157 | sLd r6,r9,r10 |
150 | 33: ldu r9,16(r4) | 158 | 33: ldu r9,16(r4) |
151 | or r12,r8,r12 | 159 | or r12,r8,r12 |
152 | 77: stdu r7,16(r3) | 160 | 77: stdu r7,16(r3) |
153 | srd r7,r0,r11 | 161 | sHd r7,r0,r11 |
154 | sld r8,r0,r10 | 162 | sLd r8,r0,r10 |
155 | bdnz 1b | 163 | bdnz 1b |
156 | 164 | ||
157 | 78: std r12,8(r3) | 165 | 78: std r12,8(r3) |
158 | or r7,r7,r6 | 166 | or r7,r7,r6 |
159 | 79: std r7,16(r3) | 167 | 79: std r7,16(r3) |
160 | 5: srd r12,r9,r11 | 168 | 5: sHd r12,r9,r11 |
161 | or r12,r8,r12 | 169 | or r12,r8,r12 |
162 | 80: std r12,24(r3) | 170 | 80: std r12,24(r3) |
163 | bne 6f | 171 | bne 6f |
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
165 | blr | 173 | blr |
166 | 6: cmpwi cr1,r5,8 | 174 | 6: cmpwi cr1,r5,8 |
167 | addi r3,r3,32 | 175 | addi r3,r3,32 |
168 | sld r9,r9,r10 | 176 | sLd r9,r9,r10 |
169 | ble cr1,7f | 177 | ble cr1,7f |
170 | 34: ld r0,8(r4) | 178 | 34: ld r0,8(r4) |
171 | srd r7,r0,r11 | 179 | sHd r7,r0,r11 |
172 | or r9,r7,r9 | 180 | or r9,r7,r9 |
173 | 7: | 181 | 7: |
174 | bf cr7*4+1,1f | 182 | bf cr7*4+1,1f |
183 | #ifdef __BIG_ENDIAN__ | ||
175 | rotldi r9,r9,32 | 184 | rotldi r9,r9,32 |
185 | #endif | ||
176 | 94: stw r9,0(r3) | 186 | 94: stw r9,0(r3) |
187 | #ifdef __LITTLE_ENDIAN__ | ||
188 | rotrdi r9,r9,32 | ||
189 | #endif | ||
177 | addi r3,r3,4 | 190 | addi r3,r3,4 |
178 | 1: bf cr7*4+2,2f | 191 | 1: bf cr7*4+2,2f |
192 | #ifdef __BIG_ENDIAN__ | ||
179 | rotldi r9,r9,16 | 193 | rotldi r9,r9,16 |
194 | #endif | ||
180 | 95: sth r9,0(r3) | 195 | 95: sth r9,0(r3) |
196 | #ifdef __LITTLE_ENDIAN__ | ||
197 | rotrdi r9,r9,16 | ||
198 | #endif | ||
181 | addi r3,r3,2 | 199 | addi r3,r3,2 |
182 | 2: bf cr7*4+3,3f | 200 | 2: bf cr7*4+3,3f |
201 | #ifdef __BIG_ENDIAN__ | ||
183 | rotldi r9,r9,8 | 202 | rotldi r9,r9,8 |
203 | #endif | ||
184 | 96: stb r9,0(r3) | 204 | 96: stb r9,0(r3) |
205 | #ifdef __LITTLE_ENDIAN__ | ||
206 | rotrdi r9,r9,8 | ||
207 | #endif | ||
185 | 3: li r3,0 | 208 | 3: li r3,0 |
186 | blr | 209 | blr |
187 | 210 | ||
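The sLd/sHd macros defined above exist because the "earlier" bytes of a doubleword sit at the high end of a register on big-endian and at the low end on little-endian, so the shift used to splice two adjacent doublewords into one aligned destination word must change direction. A rough C equivalent of that splice, using the same __BIG_ENDIAN__ test the assembly uses (merge_unaligned is an illustrative name, not a kernel function):

#include <linux/types.h>

/* Splice two adjacent 64-bit words, skipping 'misalign_bytes' of the first. */
static u64 merge_unaligned(u64 lo_word, u64 hi_word, unsigned int misalign_bytes)
{
	unsigned int s = misalign_bytes * 8;

	if (!s)
		return lo_word;
#ifdef __BIG_ENDIAN__
	/* Earlier bytes live in the high end of the register. */
	return (lo_word << s) | (hi_word >> (64 - s));
#else
	/* Little-endian: earlier bytes live in the low end. */
	return (lo_word >> s) | (hi_word << (64 - s));
#endif
}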
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245cee7818..d7ddcee7feb8 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include "powernv.h" | 36 | #include "powernv.h" |
37 | #include "pci.h" | 37 | #include "pci.h" |
38 | 38 | ||
39 | static char *hub_diag = NULL; | ||
40 | static int ioda_eeh_nb_init = 0; | 39 | static int ioda_eeh_nb_init = 0; |
41 | 40 | ||
42 | static int ioda_eeh_event(struct notifier_block *nb, | 41 | static int ioda_eeh_event(struct notifier_block *nb, |
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) | |||
140 | ioda_eeh_nb_init = 1; | 139 | ioda_eeh_nb_init = 1; |
141 | } | 140 | } |
142 | 141 | ||
143 | /* We needn't HUB diag-data on PHB3 */ | ||
144 | if (phb->type == PNV_PHB_IODA1 && !hub_diag) { | ||
145 | hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
146 | if (!hub_diag) { | ||
147 | pr_err("%s: Out of memory !\n", __func__); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | #ifdef CONFIG_DEBUG_FS | 142 | #ifdef CONFIG_DEBUG_FS |
153 | if (phb->dbgfs) { | 143 | if (phb->dbgfs) { |
154 | debugfs_create_file("err_injct_outbound", 0600, | 144 | debugfs_create_file("err_injct_outbound", 0600, |
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | |||
633 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | 623 | static void ioda_eeh_hub_diag(struct pci_controller *hose) |
634 | { | 624 | { |
635 | struct pnv_phb *phb = hose->private_data; | 625 | struct pnv_phb *phb = hose->private_data; |
636 | struct OpalIoP7IOCErrorData *data; | 626 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; |
637 | long rc; | 627 | long rc; |
638 | 628 | ||
639 | data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; | 629 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); |
640 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); | ||
641 | if (rc != OPAL_SUCCESS) { | 630 | if (rc != OPAL_SUCCESS) { |
642 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", | 631 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", |
643 | __func__, phb->hub_id, rc); | 632 | __func__, phb->hub_id, rc); |
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) | |||
820 | struct OpalIoPhbErrorCommon *common; | 809 | struct OpalIoPhbErrorCommon *common; |
821 | long rc; | 810 | long rc; |
822 | 811 | ||
823 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | 812 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, |
824 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); | 813 | PNV_PCI_DIAG_BUF_SIZE); |
825 | if (rc != OPAL_SUCCESS) { | 814 | if (rc != OPAL_SUCCESS) { |
826 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | 815 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", |
827 | __func__, hose->global_number, rc); | 816 | __func__, hose->global_number, rc); |
828 | return; | 817 | return; |
829 | } | 818 | } |
830 | 819 | ||
820 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | ||
831 | switch (common->ioType) { | 821 | switch (common->ioType) { |
832 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: | 822 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: |
833 | ioda_eeh_p7ioc_phb_diag(hose, common); | 823 | ioda_eeh_p7ioc_phb_diag(hose, common); |
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4f9892..79d83cad3d67 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c | |||
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; | |||
24 | static u8 opal_lpc_inb(unsigned long port) | 24 | static u8 opal_lpc_inb(unsigned long port) |
25 | { | 25 | { |
26 | int64_t rc; | 26 | int64_t rc; |
27 | uint32_t data; | 27 | __be32 data; |
28 | 28 | ||
29 | if (opal_lpc_chip_id < 0 || port > 0xffff) | 29 | if (opal_lpc_chip_id < 0 || port > 0xffff) |
30 | return 0xff; | 30 | return 0xff; |
31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | 31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); |
32 | return rc ? 0xff : data; | 32 | return rc ? 0xff : be32_to_cpu(data); |
33 | } | 33 | } |
34 | 34 | ||
35 | static __le16 __opal_lpc_inw(unsigned long port) | 35 | static __le16 __opal_lpc_inw(unsigned long port) |
36 | { | 36 | { |
37 | int64_t rc; | 37 | int64_t rc; |
38 | uint32_t data; | 38 | __be32 data; |
39 | 39 | ||
40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) | 40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) |
41 | return 0xffff; | 41 | return 0xffff; |
42 | if (port & 1) | 42 | if (port & 1) |
43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | 43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); |
44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | 44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); |
45 | return rc ? 0xffff : data; | 45 | return rc ? 0xffff : be32_to_cpu(data); |
46 | } | 46 | } |
47 | static u16 opal_lpc_inw(unsigned long port) | 47 | static u16 opal_lpc_inw(unsigned long port) |
48 | { | 48 | { |
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) | |||
52 | static __le32 __opal_lpc_inl(unsigned long port) | 52 | static __le32 __opal_lpc_inl(unsigned long port) |
53 | { | 53 | { |
54 | int64_t rc; | 54 | int64_t rc; |
55 | uint32_t data; | 55 | __be32 data; |
56 | 56 | ||
57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) | 57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) |
58 | return 0xffffffff; | 58 | return 0xffffffff; |
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) | |||
62 | (__le32)opal_lpc_inb(port + 2) << 8 | | 62 | (__le32)opal_lpc_inb(port + 2) << 8 | |
63 | opal_lpc_inb(port + 3); | 63 | opal_lpc_inb(port + 3); |
64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | 64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); |
65 | return rc ? 0xffffffff : data; | 65 | return rc ? 0xffffffff : be32_to_cpu(data); |
66 | } | 66 | } |
67 | 67 | ||
68 | static u32 opal_lpc_inl(unsigned long port) | 68 | static u32 opal_lpc_inl(unsigned long port) |
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8fd55ac..4fbf276ac99e 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
96 | { | 96 | { |
97 | struct opal_scom_map *m = map; | 97 | struct opal_scom_map *m = map; |
98 | int64_t rc; | 98 | int64_t rc; |
99 | __be64 v; | ||
99 | 100 | ||
100 | reg = opal_scom_unmangle(reg); | 101 | reg = opal_scom_unmangle(reg); |
101 | rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); | 102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); |
103 | *value = be64_to_cpu(v); | ||
102 | return opal_xscom_err_xlate(rc); | 104 | return opal_xscom_err_xlate(rc); |
103 | } | 105 | } |
104 | 106 | ||
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 084cdfa40682..2c6d173842b2 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | |||
720 | tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; | 720 | tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; |
721 | } | 721 | } |
722 | iommu_init_table(tbl, phb->hose->node); | 722 | iommu_init_table(tbl, phb->hose->node); |
723 | iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); | ||
723 | 724 | ||
724 | if (pe->pdev) | 725 | if (pe->pdev) |
725 | set_iommu_table_base(&pe->pdev->dev, tbl); | 726 | set_iommu_table_base(&pe->pdev->dev, tbl); |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e..1ed8d5f40f5a 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -172,11 +172,13 @@ struct pnv_phb { | |||
172 | } ioda; | 172 | } ioda; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | /* PHB status structure */ | 175 | /* PHB and hub status structure */ |
176 | union { | 176 | union { |
177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; | 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; |
178 | struct OpalIoP7IOCPhbErrorData p7ioc; | 178 | struct OpalIoP7IOCPhbErrorData p7ioc; |
179 | struct OpalIoP7IOCErrorData hub_diag; | ||
179 | } diag; | 180 | } diag; |
181 | |||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern struct pci_ops pnv_pci_ops; | 184 | extern struct pci_ops pnv_pci_ops; |
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007eae64..c9fecf09b8fa 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c | |||
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
157 | { | 157 | { |
158 | struct hvcall_ppp_data ppp_data; | 158 | struct hvcall_ppp_data ppp_data; |
159 | struct device_node *root; | 159 | struct device_node *root; |
160 | const int *perf_level; | 160 | const __be32 *perf_level; |
161 | int rc; | 161 | int rc; |
162 | 162 | ||
163 | rc = h_get_ppp(&ppp_data); | 163 | rc = h_get_ppp(&ppp_data); |
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
201 | perf_level = of_get_property(root, | 201 | perf_level = of_get_property(root, |
202 | "ibm,partition-performance-parameters-level", | 202 | "ibm,partition-performance-parameters-level", |
203 | NULL); | 203 | NULL); |
204 | if (perf_level && (*perf_level >= 1)) { | 204 | if (perf_level && (be32_to_cpup(perf_level) >= 1)) { |
205 | seq_printf(m, | 205 | seq_printf(m, |
206 | "physical_procs_allocated_to_virtualization=%d\n", | 206 | "physical_procs_allocated_to_virtualization=%d\n", |
207 | ppp_data.phys_platform_procs); | 207 | ppp_data.phys_platform_procs); |
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
435 | int partition_potential_processors; | 435 | int partition_potential_processors; |
436 | int partition_active_processors; | 436 | int partition_active_processors; |
437 | struct device_node *rtas_node; | 437 | struct device_node *rtas_node; |
438 | const int *lrdrp = NULL; | 438 | const __be32 *lrdrp = NULL; |
439 | 439 | ||
440 | rtas_node = of_find_node_by_path("/rtas"); | 440 | rtas_node = of_find_node_by_path("/rtas"); |
441 | if (rtas_node) | 441 | if (rtas_node) |
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
444 | if (lrdrp == NULL) { | 444 | if (lrdrp == NULL) { |
445 | partition_potential_processors = vdso_data->processorCount; | 445 | partition_potential_processors = vdso_data->processorCount; |
446 | } else { | 446 | } else { |
447 | partition_potential_processors = *(lrdrp + 4); | 447 | partition_potential_processors = be32_to_cpup(lrdrp + 4); |
448 | } | 448 | } |
449 | of_node_put(rtas_node); | 449 | of_node_put(rtas_node); |
450 | 450 | ||
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
654 | const char *model = ""; | 654 | const char *model = ""; |
655 | const char *system_id = ""; | 655 | const char *system_id = ""; |
656 | const char *tmp; | 656 | const char *tmp; |
657 | const unsigned int *lp_index_ptr; | 657 | const __be32 *lp_index_ptr; |
658 | unsigned int lp_index = 0; | 658 | unsigned int lp_index = 0; |
659 | 659 | ||
660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | 660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | 670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", |
671 | NULL); | 671 | NULL); |
672 | if (lp_index_ptr) | 672 | if (lp_index_ptr) |
673 | lp_index = *lp_index_ptr; | 673 | lp_index = be32_to_cpup(lp_index_ptr); |
674 | of_node_put(rootdn); | 674 | of_node_put(rootdn); |
675 | } | 675 | } |
676 | seq_printf(m, "serial_number=%s\n", system_id); | 676 | seq_printf(m, "serial_number=%s\n", system_id); |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..0c882e83c4ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
130 | { | 130 | { |
131 | struct device_node *dn; | 131 | struct device_node *dn; |
132 | struct pci_dn *pdn; | 132 | struct pci_dn *pdn; |
133 | const u32 *req_msi; | 133 | const __be32 *p; |
134 | u32 req_msi; | ||
134 | 135 | ||
135 | pdn = pci_get_pdn(pdev); | 136 | pdn = pci_get_pdn(pdev); |
136 | if (!pdn) | 137 | if (!pdn) |
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
138 | 139 | ||
139 | dn = pdn->node; | 140 | dn = pdn->node; |
140 | 141 | ||
141 | req_msi = of_get_property(dn, prop_name, NULL); | 142 | p = of_get_property(dn, prop_name, NULL); |
142 | if (!req_msi) { | 143 | if (!p) { |
143 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | 144 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); |
144 | return -ENOENT; | 145 | return -ENOENT; |
145 | } | 146 | } |
146 | 147 | ||
147 | if (*req_msi < nvec) { | 148 | req_msi = be32_to_cpup(p); |
149 | if (req_msi < nvec) { | ||
148 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | 150 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); |
149 | 151 | ||
150 | if (*req_msi == 0) /* Be paranoid */ | 152 | if (req_msi == 0) /* Be paranoid */ |
151 | return -ENOSPC; | 153 | return -ENOSPC; |
152 | 154 | ||
153 | return *req_msi; | 155 | return req_msi; |
154 | } | 156 | } |
155 | 157 | ||
156 | return 0; | 158 | return 0; |
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) | |||
171 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | 173 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) |
172 | { | 174 | { |
173 | struct device_node *dn; | 175 | struct device_node *dn; |
174 | const u32 *p; | 176 | const __be32 *p; |
175 | 177 | ||
176 | dn = of_node_get(pci_device_to_OF_node(dev)); | 178 | dn = of_node_get(pci_device_to_OF_node(dev)); |
177 | while (dn) { | 179 | while (dn) { |
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
179 | if (p) { | 181 | if (p) { |
180 | pr_debug("rtas_msi: found prop on dn %s\n", | 182 | pr_debug("rtas_msi: found prop on dn %s\n", |
181 | dn->full_name); | 183 | dn->full_name); |
182 | *total = *p; | 184 | *total = be32_to_cpup(p); |
183 | return dn; | 185 | return dn; |
184 | } | 186 | } |
185 | 187 | ||
@@ -232,13 +234,13 @@ struct msi_counts { | |||
232 | static void *count_non_bridge_devices(struct device_node *dn, void *data) | 234 | static void *count_non_bridge_devices(struct device_node *dn, void *data) |
233 | { | 235 | { |
234 | struct msi_counts *counts = data; | 236 | struct msi_counts *counts = data; |
235 | const u32 *p; | 237 | const __be32 *p; |
236 | u32 class; | 238 | u32 class; |
237 | 239 | ||
238 | pr_debug("rtas_msi: counting %s\n", dn->full_name); | 240 | pr_debug("rtas_msi: counting %s\n", dn->full_name); |
239 | 241 | ||
240 | p = of_get_property(dn, "class-code", NULL); | 242 | p = of_get_property(dn, "class-code", NULL); |
241 | class = p ? *p : 0; | 243 | class = p ? be32_to_cpup(p) : 0; |
242 | 244 | ||
243 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | 245 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) |
244 | counts->num_devices++; | 246 | counts->num_devices++; |
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) | |||
249 | static void *count_spare_msis(struct device_node *dn, void *data) | 251 | static void *count_spare_msis(struct device_node *dn, void *data) |
250 | { | 252 | { |
251 | struct msi_counts *counts = data; | 253 | struct msi_counts *counts = data; |
252 | const u32 *p; | 254 | const __be32 *p; |
253 | int req; | 255 | int req; |
254 | 256 | ||
255 | if (dn == counts->requestor) | 257 | if (dn == counts->requestor) |
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) | |||
260 | req = 0; | 262 | req = 0; |
261 | p = of_get_property(dn, "ibm,req#msi", NULL); | 263 | p = of_get_property(dn, "ibm,req#msi", NULL); |
262 | if (p) | 264 | if (p) |
263 | req = *p; | 265 | req = be32_to_cpup(p); |
264 | 266 | ||
265 | p = of_get_property(dn, "ibm,req#msi-x", NULL); | 267 | p = of_get_property(dn, "ibm,req#msi-x", NULL); |
266 | if (p) | 268 | if (p) |
267 | req = max(req, (int)*p); | 269 | req = max(req, (int)be32_to_cpup(p)); |
268 | } | 270 | } |
269 | 271 | ||
270 | if (req < counts->quota) | 272 | if (req < counts->quota) |
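The hunks above all follow the same rule: flattened device-tree cells are stored big-endian, so they are read through const __be32 pointers and converted with be32_to_cpup() before use as integers. A minimal sketch of that access pattern (the helper and its default-value argument are illustrative, not kernel API):

#include <linux/of.h>
#include <asm/byteorder.h>

/* Read one 32-bit device-tree cell, converting from big-endian. */
static u32 read_dt_cell(struct device_node *dn, const char *prop, u32 def)
{
	const __be32 *p = of_get_property(dn, prop, NULL);

	return p ? be32_to_cpup(p) : def;
}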
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58d4664..d7096f2f7751 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | |||
43 | static DEFINE_SPINLOCK(nvram_lock); | 43 | static DEFINE_SPINLOCK(nvram_lock); |
44 | 44 | ||
45 | struct err_log_info { | 45 | struct err_log_info { |
46 | int error_type; | 46 | __be32 error_type; |
47 | unsigned int seq_num; | 47 | __be32 seq_num; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | struct nvram_os_partition { | 50 | struct nvram_os_partition { |
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { | |||
79 | }; | 79 | }; |
80 | 80 | ||
81 | struct oops_log_info { | 81 | struct oops_log_info { |
82 | u16 version; | 82 | __be16 version; |
83 | u16 report_length; | 83 | __be16 report_length; |
84 | u64 timestamp; | 84 | __be64 timestamp; |
85 | } __attribute__((packed)); | 85 | } __attribute__((packed)); |
86 | 86 | ||
87 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 87 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | |||
291 | length = part->size; | 291 | length = part->size; |
292 | } | 292 | } |
293 | 293 | ||
294 | info.error_type = err_type; | 294 | info.error_type = cpu_to_be32(err_type); |
295 | info.seq_num = error_log_cnt; | 295 | info.seq_num = cpu_to_be32(error_log_cnt); |
296 | 296 | ||
297 | tmp_index = part->index; | 297 | tmp_index = part->index; |
298 | 298 | ||
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, | |||
364 | } | 364 | } |
365 | 365 | ||
366 | if (part->os_partition) { | 366 | if (part->os_partition) { |
367 | *error_log_cnt = info.seq_num; | 367 | *error_log_cnt = be32_to_cpu(info.seq_num); |
368 | *err_type = info.error_type; | 368 | *err_type = be32_to_cpu(info.error_type); |
369 | } | 369 | } |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) | |||
529 | pr_err("nvram: logging uncompressed oops/panic report\n"); | 529 | pr_err("nvram: logging uncompressed oops/panic report\n"); |
530 | return -1; | 530 | return -1; |
531 | } | 531 | } |
532 | oops_hdr->version = OOPS_HDR_VERSION; | 532 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
533 | oops_hdr->report_length = (u16) zipped_len; | 533 | oops_hdr->report_length = cpu_to_be16(zipped_len); |
534 | oops_hdr->timestamp = get_seconds(); | 534 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
574 | clobbering_unread_rtas_event()) | 574 | clobbering_unread_rtas_event()) |
575 | return -1; | 575 | return -1; |
576 | 576 | ||
577 | oops_hdr->version = OOPS_HDR_VERSION; | 577 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
578 | oops_hdr->report_length = (u16) size; | 578 | oops_hdr->report_length = cpu_to_be16(size); |
579 | oops_hdr->timestamp = get_seconds(); | 579 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
580 | 580 | ||
581 | if (compressed) | 581 | if (compressed) |
582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; | 582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; |
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | |||
670 | size_t length, hdr_size; | 670 | size_t length, hdr_size; |
671 | 671 | ||
672 | oops_hdr = (struct oops_log_info *)buff; | 672 | oops_hdr = (struct oops_log_info *)buff; |
673 | if (oops_hdr->version < OOPS_HDR_VERSION) { | 673 | if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { |
674 | /* Old format oops header had 2-byte record size */ | 674 | /* Old format oops header had 2-byte record size */ |
675 | hdr_size = sizeof(u16); | 675 | hdr_size = sizeof(u16); |
676 | length = oops_hdr->version; | 676 | length = be16_to_cpu(oops_hdr->version); |
677 | time->tv_sec = 0; | 677 | time->tv_sec = 0; |
678 | time->tv_nsec = 0; | 678 | time->tv_nsec = 0; |
679 | } else { | 679 | } else { |
680 | hdr_size = sizeof(*oops_hdr); | 680 | hdr_size = sizeof(*oops_hdr); |
681 | length = oops_hdr->report_length; | 681 | length = be16_to_cpu(oops_hdr->report_length); |
682 | time->tv_sec = oops_hdr->timestamp; | 682 | time->tv_sec = be64_to_cpu(oops_hdr->timestamp); |
683 | time->tv_nsec = 0; | 683 | time->tv_nsec = 0; |
684 | } | 684 | } |
685 | *buf = kmalloc(length, GFP_KERNEL); | 685 | *buf = kmalloc(length, GFP_KERNEL); |
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
889 | kmsg_dump_get_buffer(dumper, false, | 889 | kmsg_dump_get_buffer(dumper, false, |
890 | oops_data, oops_data_sz, &text_len); | 890 | oops_data, oops_data_sz, &text_len); |
891 | err_type = ERR_TYPE_KERNEL_PANIC; | 891 | err_type = ERR_TYPE_KERNEL_PANIC; |
892 | oops_hdr->version = OOPS_HDR_VERSION; | 892 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
893 | oops_hdr->report_length = (u16) text_len; | 893 | oops_hdr->report_length = cpu_to_be16(text_len); |
894 | oops_hdr->timestamp = get_seconds(); | 894 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
895 | } | 895 | } |
896 | 896 | ||
897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | 897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, |
898 | (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 898 | (int) (sizeof(*oops_hdr) + text_len), err_type, |
899 | ++oops_count); | 899 | ++oops_count); |
900 | 900 | ||
901 | spin_unlock_irqrestore(&lock, flags); | 901 | spin_unlock_irqrestore(&lock, flags); |
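The oops_log_info fields above are now declared __be16/__be64 and filled with cpu_to_be*(), so the bytes laid down in NVRAM are identical whether the kernel runs big- or little-endian. A small sketch of that fixed-endian-header idiom (struct and field names are illustrative only):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative record header with a fixed big-endian on-media layout. */
struct log_hdr {
	__be16 version;
	__be16 length;
	__be64 timestamp;
} __attribute__((packed));

static void fill_log_hdr(struct log_hdr *hdr, u16 len, u64 now)
{
	/* cpu_to_be*() is a no-op on BE and a byte swap on LE, so the
	 * stored bytes are the same either way. */
	hdr->version   = cpu_to_be16(1);
	hdr->length    = cpu_to_be16(len);
	hdr->timestamp = cpu_to_be64(now);
}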
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856cdf47..70670a2d9cf2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
113 | { | 113 | { |
114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
116 | const uint32_t *pcie_link_speed_stats; | 116 | const __be32 *pcie_link_speed_stats; |
117 | 117 | ||
118 | bus = bridge->bus; | 118 | bus = bridge->bus; |
119 | 119 | ||
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
125 | pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, | 125 | pcie_link_speed_stats = of_get_property(pdn, |
126 | "ibm,pcie-link-speed-stats", NULL); | 126 | "ibm,pcie-link-speed-stats", NULL); |
127 | if (pcie_link_speed_stats) | 127 | if (pcie_link_speed_stats) |
128 | break; | 128 | break; |
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | switch (pcie_link_speed_stats[0]) { | 138 | switch (be32_to_cpup(pcie_link_speed_stats)) { |
139 | case 0x01: | 139 | case 0x01: |
140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
141 | break; | 141 | break; |
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | switch (pcie_link_speed_stats[1]) { | 150 | switch (be32_to_cpup(pcie_link_speed_stats + 1)) {
151 | case 0x01: | 151 | case 0x01: |
152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
153 | break; | 153 | break; |
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c index b7c43453236d..85d9e37f5ccb 100644 --- a/arch/powerpc/sysdev/ppc4xx_ocm.c +++ b/arch/powerpc/sysdev/ppc4xx_ocm.c | |||
@@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, | |||
339 | if (IS_ERR_VALUE(offset)) | 339 | if (IS_ERR_VALUE(offset)) |
340 | continue; | 340 | continue; |
341 | 341 | ||
342 | ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL); | 342 | ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL); |
343 | if (!ocm_blk) { | 343 | if (!ocm_blk) { |
344 | printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); | 344 | printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); |
345 | rh_free(ocm_reg->rh, offset); | 345 | rh_free(ocm_reg->rh, offset); |
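The one-line fix above closes a classic sizeof-on-the-pointer bug: sizeof(struct ocm_block *) is the size of a pointer, not of the structure. Writing the allocation against the variable with sizeof(*ptr) rules that mistake out; a minimal sketch (the structure here is a stand-in, not the real ocm_block):

#include <linux/slab.h>

struct example_block {		/* stand-in for struct ocm_block */
	void *addr;
	unsigned long size;
};

static struct example_block *alloc_block(void)
{
	/* sizeof(*blk) always tracks the pointed-to type, so it cannot
	 * silently allocate only sizeof(a pointer) bytes. */
	struct example_block *blk = kzalloc(sizeof(*blk), GFP_KERNEL);

	return blk;	/* NULL on allocation failure */
}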
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5877e71901b3..e9f312532526 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -135,7 +135,6 @@ config S390 | |||
135 | select HAVE_SYSCALL_TRACEPOINTS | 135 | select HAVE_SYSCALL_TRACEPOINTS |
136 | select HAVE_UID16 if 32BIT | 136 | select HAVE_UID16 if 32BIT |
137 | select HAVE_VIRT_CPU_ACCOUNTING | 137 | select HAVE_VIRT_CPU_ACCOUNTING |
138 | select INIT_ALL_POSSIBLE | ||
139 | select KTIME_SCALAR if 32BIT | 138 | select KTIME_SCALAR if 32BIT |
140 | select MODULES_USE_ELF_RELA | 139 | select MODULES_USE_ELF_RELA |
141 | select OLD_SIGACTION | 140 | select OLD_SIGACTION |
@@ -347,14 +346,14 @@ config SMP | |||
347 | Even if you don't know what to do here, say Y. | 346 | Even if you don't know what to do here, say Y. |
348 | 347 | ||
349 | config NR_CPUS | 348 | config NR_CPUS |
350 | int "Maximum number of CPUs (2-64)" | 349 | int "Maximum number of CPUs (2-256)" |
351 | range 2 64 | 350 | range 2 256 |
352 | depends on SMP | 351 | depends on SMP |
353 | default "32" if !64BIT | 352 | default "32" if !64BIT |
354 | default "64" if 64BIT | 353 | default "64" if 64BIT |
355 | help | 354 | help |
356 | This allows you to specify the maximum number of CPUs which this | 355 | This allows you to specify the maximum number of CPUs which this |
357 | kernel will support. The maximum supported value is 64 and the | 356 | kernel will support. The maximum supported value is 256 and the |
358 | minimum value which makes sense is 2. | 357 | minimum value which makes sense is 2. |
359 | 358 | ||
360 | This is purely to save memory - each supported CPU adds | 359 | This is purely to save memory - each supported CPU adds |
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 30ef748bc161..2f390956c7c1 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <asm/chpid.h> | 10 | #include <asm/chpid.h> |
11 | #include <asm/cpu.h> | ||
11 | 12 | ||
12 | #define SCLP_CHP_INFO_MASK_SIZE 32 | 13 | #define SCLP_CHP_INFO_MASK_SIZE 32 |
13 | 14 | ||
@@ -37,7 +38,7 @@ struct sclp_cpu_info { | |||
37 | unsigned int standby; | 38 | unsigned int standby; |
38 | unsigned int combined; | 39 | unsigned int combined; |
39 | int has_cpu_type; | 40 | int has_cpu_type; |
40 | struct sclp_cpu_entry cpu[255]; | 41 | struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1]; |
41 | }; | 42 | }; |
42 | 43 | ||
43 | int sclp_get_cpu_info(struct sclp_cpu_info *info); | 44 | int sclp_get_cpu_info(struct sclp_cpu_info *info); |
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index ac9bed8e103f..160779394096 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -31,6 +31,7 @@ extern void smp_yield(void); | |||
31 | extern void smp_stop_cpu(void); | 31 | extern void smp_stop_cpu(void); |
32 | extern void smp_cpu_set_polarization(int cpu, int val); | 32 | extern void smp_cpu_set_polarization(int cpu, int val); |
33 | extern int smp_cpu_get_polarization(int cpu); | 33 | extern int smp_cpu_get_polarization(int cpu); |
34 | extern void smp_fill_possible_mask(void); | ||
34 | 35 | ||
35 | #else /* CONFIG_SMP */ | 36 | #else /* CONFIG_SMP */ |
36 | 37 | ||
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; } | |||
50 | static inline void smp_yield_cpu(int cpu) { } | 51 | static inline void smp_yield_cpu(int cpu) { } |
51 | static inline void smp_yield(void) { } | 52 | static inline void smp_yield(void) { } |
52 | static inline void smp_stop_cpu(void) { } | 53 | static inline void smp_stop_cpu(void) { } |
54 | static inline void smp_fill_possible_mask(void) { } | ||
53 | 55 | ||
54 | #endif /* CONFIG_SMP */ | 56 | #endif /* CONFIG_SMP */ |
55 | 57 | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 496116cd65ec..e4c99a183651 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -72,6 +72,7 @@ int main(void) | |||
72 | /* constants used by the vdso */ | 72 | /* constants used by the vdso */ |
73 | DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); | 73 | DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); |
74 | DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); | 74 | DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); |
75 | DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); | ||
75 | DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | 76 | DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); |
76 | BLANK(); | 77 | BLANK(); |
77 | /* idle data offsets */ | 78 | /* idle data offsets */ |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4444875266ee..0f3d44ecbfc6 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p) | |||
1023 | setup_vmcoreinfo(); | 1023 | setup_vmcoreinfo(); |
1024 | setup_lowcore(); | 1024 | setup_lowcore(); |
1025 | 1025 | ||
1026 | smp_fill_possible_mask(); | ||
1026 | cpu_init(); | 1027 | cpu_init(); |
1027 | s390_init_cpu_topology(); | 1028 | s390_init_cpu_topology(); |
1028 | 1029 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index dc4a53465060..958704798f4a 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
724 | static int __init setup_possible_cpus(char *s) | 724 | static unsigned int setup_possible_cpus __initdata; |
725 | { | ||
726 | int max, cpu; | ||
727 | 725 | ||
728 | if (kstrtoint(s, 0, &max) < 0) | 726 | static int __init _setup_possible_cpus(char *s) |
729 | return 0; | 727 | { |
730 | init_cpu_possible(cpumask_of(0)); | 728 | get_option(&s, &setup_possible_cpus); |
731 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) | ||
732 | set_cpu_possible(cpu, true); | ||
733 | return 0; | 729 | return 0; |
734 | } | 730 | } |
735 | early_param("possible_cpus", setup_possible_cpus); | 731 | early_param("possible_cpus", _setup_possible_cpus); |
736 | 732 | ||
737 | #ifdef CONFIG_HOTPLUG_CPU | 733 | #ifdef CONFIG_HOTPLUG_CPU |
738 | 734 | ||
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void) | |||
775 | 771 | ||
776 | #endif /* CONFIG_HOTPLUG_CPU */ | 772 | #endif /* CONFIG_HOTPLUG_CPU */ |
777 | 773 | ||
774 | void __init smp_fill_possible_mask(void) | ||
775 | { | ||
776 | unsigned int possible, cpu; | ||
777 | |||
778 | possible = setup_possible_cpus; | ||
779 | if (!possible) | ||
780 | possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; | ||
781 | for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) | ||
782 | set_cpu_possible(cpu, true); | ||
783 | } | ||
784 | |||
778 | void __init smp_prepare_cpus(unsigned int max_cpus) | 785 | void __init smp_prepare_cpus(unsigned int max_cpus) |
779 | { | 786 | { |
780 | /* request the 0x1201 emergency signal external interrupt */ | 787 | /* request the 0x1201 emergency signal external interrupt */ |
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index a84476f2a9bb..613649096783 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore) | |||
125 | psal[i] = 0x80000000; | 125 | psal[i] = 0x80000000; |
126 | 126 | ||
127 | lowcore->paste[4] = (u32)(addr_t) psal; | 127 | lowcore->paste[4] = (u32)(addr_t) psal; |
128 | psal[0] = 0x20000000; | 128 | psal[0] = 0x02000000; |
129 | psal[2] = (u32)(addr_t) aste; | 129 | psal[2] = (u32)(addr_t) aste; |
130 | *(unsigned long *) (aste + 2) = segment_table + | 130 | *(unsigned long *) (aste + 2) = segment_table + |
131 | _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; | 131 | _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; |
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 5be8e472f57d..65fc3979c2f1 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -46,18 +46,13 @@ __kernel_clock_gettime: | |||
46 | jnm 3f | 46 | jnm 3f |
47 | a %r0,__VDSO_TK_MULT(%r5) | 47 | a %r0,__VDSO_TK_MULT(%r5) |
48 | 3: alr %r0,%r2 | 48 | 3: alr %r0,%r2 |
49 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ | 49 | al %r0,__VDSO_WTOM_NSEC(%r5) |
50 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
51 | brc 12,4f | ||
52 | ahi %r0,1 | ||
53 | 4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ | ||
54 | al %r1,__VDSO_WTOM_NSEC+4(%r5) | 50 | al %r1,__VDSO_WTOM_NSEC+4(%r5) |
55 | brc 12,5f | 51 | brc 12,5f |
56 | ahi %r0,1 | 52 | ahi %r0,1 |
57 | 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ | 53 | 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ |
58 | srdl %r0,0(%r2) /* >> tk->shift */ | 54 | srdl %r0,0(%r2) /* >> tk->shift */ |
59 | l %r2,__VDSO_XTIME_SEC+4(%r5) | 55 | l %r2,__VDSO_WTOM_SEC+4(%r5) |
60 | al %r2,__VDSO_WTOM_SEC+4(%r5) | ||
61 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | 56 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ |
62 | jne 1b | 57 | jne 1b |
63 | basr %r5,0 | 58 | basr %r5,0 |
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 176e1f75f9aa..34deba7c7ed1 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S | |||
@@ -23,7 +23,9 @@ __kernel_clock_getres: | |||
23 | je 0f | 23 | je 0f |
24 | cghi %r2,__CLOCK_MONOTONIC | 24 | cghi %r2,__CLOCK_MONOTONIC |
25 | je 0f | 25 | je 0f |
26 | cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ | 26 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID |
27 | je 0f | ||
28 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ | ||
27 | jne 2f | 29 | jne 2f |
28 | larl %r5,_vdso_data | 30 | larl %r5,_vdso_data |
29 | icm %r0,15,__LC_ECTG_OK(%r5) | 31 | icm %r0,15,__LC_ECTG_OK(%r5) |
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 0add1072ba30..91940ed33a4a 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -22,7 +22,9 @@ __kernel_clock_gettime: | |||
22 | larl %r5,_vdso_data | 22 | larl %r5,_vdso_data |
23 | cghi %r2,__CLOCK_REALTIME | 23 | cghi %r2,__CLOCK_REALTIME |
24 | je 4f | 24 | je 4f |
25 | cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ | 25 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID |
26 | je 9f | ||
27 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ | ||
26 | je 9f | 28 | je 9f |
27 | cghi %r2,__CLOCK_MONOTONIC | 29 | cghi %r2,__CLOCK_MONOTONIC |
28 | jne 12f | 30 | jne 12f |
@@ -35,13 +37,11 @@ __kernel_clock_gettime: | |||
35 | jnz 0b | 37 | jnz 0b |
36 | stck 48(%r15) /* Store TOD clock */ | 38 | stck 48(%r15) /* Store TOD clock */ |
37 | lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ | 39 | lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ |
38 | lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ | 40 | lg %r0,__VDSO_WTOM_SEC(%r5) |
39 | alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */ | ||
40 | lg %r1,48(%r15) | 41 | lg %r1,48(%r15) |
41 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | 42 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ |
42 | msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ | 43 | msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ |
43 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ | 44 | alg %r1,__VDSO_WTOM_NSEC(%r5) |
44 | alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ | ||
45 | srlg %r1,%r1,0(%r2) /* >> tk->shift */ | 45 | srlg %r1,%r1,0(%r2) /* >> tk->shift */ |
46 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | 46 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ |
47 | jne 0b | 47 | jne 0b |
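With the precomputed wtom_sec/wtom_nsec fields, the monotonic fast path above reduces to a single multiply-and-shift extrapolation from the TOD delta. A simplified C rendering of that computation (field names are invented for illustration; any normalisation of nanoseconds into seconds is omitted here):

#include <linux/types.h>

/* Snapshot of the vDSO data the fast path reads (simplified). */
struct vdso_snapshot {
	u64 cycle_last;		/* TOD value at the last timekeeper update */
	u32 mult;		/* tk->mult */
	u32 shift;		/* tk->shift */
	u64 wtom_sec;		/* monotonic seconds base */
	u64 wtom_nsec;		/* monotonic nanoseconds base, pre-shifted */
};

static void monotonic_now(const struct vdso_snapshot *s, u64 tod,
			  u64 *sec, u64 *nsec)
{
	/* Extrapolate from the delta since the last timekeeper update. */
	u64 ns = (tod - s->cycle_last) * s->mult + s->wtom_nsec;

	*sec  = s->wtom_sec;
	*nsec = ns >> s->shift;
}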
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 800f064b0da7..069607209a30 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data) | |||
75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) | 75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) |
76 | break; | 76 | break; |
77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | 77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; |
78 | zdev->fh = ccdf->fh; | ||
78 | ret = zpci_enable_device(zdev); | 79 | ret = zpci_enable_device(zdev); |
79 | if (ret) | 80 | if (ret) |
80 | break; | 81 | break; |
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data) | |||
101 | if (pdev) | 102 | if (pdev) |
102 | pci_stop_and_remove_bus_device(pdev); | 103 | pci_stop_and_remove_bus_device(pdev); |
103 | 104 | ||
105 | zdev->fh = ccdf->fh; | ||
104 | zpci_disable_device(zdev); | 106 | zpci_disable_device(zdev); |
105 | zdev->state = ZPCI_FN_STATE_STANDBY; | 107 | zdev->state = ZPCI_FN_STATE_STANDBY; |
106 | break; | 108 | break; |
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 2a0a596ebf67..d77f2f6c7ff0 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic); | |||
20 | EXPORT_SYMBOL(copy_page); | 20 | EXPORT_SYMBOL(copy_page); |
21 | EXPORT_SYMBOL(__clear_user); | 21 | EXPORT_SYMBOL(__clear_user); |
22 | EXPORT_SYMBOL(empty_zero_page); | 22 | EXPORT_SYMBOL(empty_zero_page); |
23 | #ifdef CONFIG_FLATMEM | ||
24 | /* need in pfn_valid macro */ | ||
25 | EXPORT_SYMBOL(min_low_pfn); | ||
26 | EXPORT_SYMBOL(max_low_pfn); | ||
27 | #endif | ||
23 | 28 | ||
24 | #define DECLARE_EXPORT(name) \ | 29 | #define DECLARE_EXPORT(name) \ |
25 | extern void name(void);EXPORT_SYMBOL(name) | 30 | extern void name(void);EXPORT_SYMBOL(name) |
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29e3174..3baff31e58cf 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile | |||
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ | |||
6 | checksum.o strlen.o div64.o div64-generic.o | 6 | checksum.o strlen.o div64.o div64-generic.o |
7 | 7 | ||
8 | # Extracted from libgcc | 8 | # Extracted from libgcc |
9 | lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | 9 | obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ |
10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | 10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ |
11 | udiv_qrnnd.o | 11 | udiv_qrnnd.o |
12 | 12 | ||
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc144959..0f9e94537eee 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | #define pte_accessible pte_accessible | 621 | #define pte_accessible pte_accessible |
622 | static inline unsigned long pte_accessible(pte_t a) | 622 | static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) |
623 | { | 623 | { |
624 | return pte_val(a) & _PAGE_VALID; | 624 | return pte_val(a) & _PAGE_VALID; |
625 | } | 625 | } |
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | 847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U |
848 | * and SUN4V pte layout, so this inline test is fine. | 848 | * and SUN4V pte layout, so this inline test is fine. |
849 | */ | 849 | */ |
850 | if (likely(mm != &init_mm) && pte_accessible(orig)) | 850 | if (likely(mm != &init_mm) && pte_accessible(mm, orig)) |
851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); | 851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); |
852 | } | 852 | } |
853 | 853 | ||
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3caee57..ad7e178337f1 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); | |||
262 | extern __must_check long strlen_user(const char __user *str); | 262 | extern __must_check long strlen_user(const char __user *str); |
263 | extern __must_check long strnlen_user(const char __user *str, long n); | 263 | extern __must_check long strnlen_user(const char __user *str, long n); |
264 | 264 | ||
265 | #define __copy_to_user_inatomic ___copy_to_user | 265 | #define __copy_to_user_inatomic __copy_to_user |
266 | #define __copy_from_user_inatomic ___copy_from_user | 266 | #define __copy_from_user_inatomic __copy_from_user |
267 | 267 | ||
268 | struct pt_regs; | 268 | struct pt_regs; |
269 | extern unsigned long compute_effective_address(struct pt_regs *, | 269 | extern unsigned long compute_effective_address(struct pt_regs *, |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed141aac7..76663b019eb5 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
854 | return 1; | 854 | return 1; |
855 | 855 | ||
856 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
857 | if (dev->bus == &pci_bus_type) | 857 | if (dev_is_pci(dev)) |
858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
859 | #endif | 859 | #endif |
860 | 860 | ||
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468de9b2..e7e215dfa866 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops); | |||
666 | */ | 666 | */ |
667 | int dma_supported(struct device *dev, u64 mask) | 667 | int dma_supported(struct device *dev, u64 mask) |
668 | { | 668 | { |
669 | #ifdef CONFIG_PCI | 669 | if (dev_is_pci(dev)) |
670 | if (dev->bus == &pci_bus_type) | ||
671 | return 1; | 670 | return 1; |
672 | #endif | 671 | |
673 | return 0; | 672 | return 0; |
674 | } | 673 | } |
675 | EXPORT_SYMBOL(dma_supported); | 674 | EXPORT_SYMBOL(dma_supported); |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 60b19f50c80a..b45fe3fb4d2c 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kgdb.h> | 6 | #include <linux/kgdb.h> |
7 | #include <linux/kdebug.h> | 7 | #include <linux/kdebug.h> |
8 | #include <linux/ftrace.h> | 8 | #include <linux/ftrace.h> |
9 | #include <linux/context_tracking.h> | ||
9 | 10 | ||
10 | #include <asm/cacheflush.h> | 11 | #include <asm/cacheflush.h> |
11 | #include <asm/kdebug.h> | 12 | #include <asm/kdebug.h> |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b66a5338231e..b085311dcd0e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -123,11 +123,12 @@ void smp_callin(void) | |||
123 | rmb(); | 123 | rmb(); |
124 | 124 | ||
125 | set_cpu_online(cpuid, true); | 125 | set_cpu_online(cpuid, true); |
126 | local_irq_enable(); | ||
127 | 126 | ||
128 | /* idle thread is expected to have preempt disabled */ | 127 | /* idle thread is expected to have preempt disabled */ |
129 | preempt_disable(); | 128 | preempt_disable(); |
130 | 129 | ||
130 | local_irq_enable(); | ||
131 | |||
131 | cpu_startup_entry(CPUHP_ONLINE); | 132 | cpu_startup_entry(CPUHP_ONLINE); |
132 | } | 133 | } |
133 | 134 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71f7e69..0952ecd60eca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -26,6 +26,7 @@ config X86 | |||
26 | select HAVE_AOUT if X86_32 | 26 | select HAVE_AOUT if X86_32 |
27 | select HAVE_UNSTABLE_SCHED_CLOCK | 27 | select HAVE_UNSTABLE_SCHED_CLOCK |
28 | select ARCH_SUPPORTS_NUMA_BALANCING | 28 | select ARCH_SUPPORTS_NUMA_BALANCING |
29 | select ARCH_SUPPORTS_INT128 if X86_64 | ||
29 | select ARCH_WANTS_PROT_NUMA_PROT_NONE | 30 | select ARCH_WANTS_PROT_NUMA_PROT_NONE |
30 | select HAVE_IDE | 31 | select HAVE_IDE |
31 | select HAVE_OPROFILE | 32 | select HAVE_OPROFILE |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index eda00f9be0cf..57d021507120 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y) | |||
31 | 31 | ||
32 | KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return | 32 | KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return |
33 | 33 | ||
34 | # Don't autogenerate SSE instructions | 34 | # Don't autogenerate MMX or SSE instructions |
35 | KBUILD_CFLAGS += -mno-sse | 35 | KBUILD_CFLAGS += -mno-mmx -mno-sse |
36 | 36 | ||
37 | # Never want PIC in a 32-bit kernel, prevent breakage with GCC built | 37 | # Never want PIC in a 32-bit kernel, prevent breakage with GCC built |
38 | # with nonstandard options | 38 | # with nonstandard options |
@@ -60,8 +60,8 @@ else | |||
60 | KBUILD_AFLAGS += -m64 | 60 | KBUILD_AFLAGS += -m64 |
61 | KBUILD_CFLAGS += -m64 | 61 | KBUILD_CFLAGS += -m64 |
62 | 62 | ||
63 | # Don't autogenerate SSE instructions | 63 | # Don't autogenerate MMX or SSE instructions |
64 | KBUILD_CFLAGS += -mno-sse | 64 | KBUILD_CFLAGS += -mno-mmx -mno-sse |
65 | 65 | ||
66 | # Use -mpreferred-stack-boundary=3 if supported. | 66 | # Use -mpreferred-stack-boundary=3 if supported. |
67 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) | 67 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) |
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index dce69a256896..d9c11956fce0 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE | |||
53 | 53 | ||
54 | # How to compile the 16-bit code. Note we always compile for -march=i386, | 54 | # How to compile the 16-bit code. Note we always compile for -march=i386, |
55 | # that way we can complain to the user if the CPU is insufficient. | 55 | # that way we can complain to the user if the CPU is insufficient. |
56 | KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ | 56 | KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \ |
57 | -DDISABLE_BRANCH_PROFILING \ | 57 | -DDISABLE_BRANCH_PROFILING \ |
58 | -Wall -Wstrict-prototypes \ | 58 | -Wall -Wstrict-prototypes \ |
59 | -march=i386 -mregparm=3 \ | 59 | -march=i386 -mregparm=3 \ |
60 | -include $(srctree)/$(src)/code16gcc.h \ | 60 | -include $(srctree)/$(src)/code16gcc.h \ |
61 | -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ | 61 | -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ |
62 | -mno-mmx -mno-sse \ | ||
62 | $(call cc-option, -ffreestanding) \ | 63 | $(call cc-option, -ffreestanding) \ |
63 | $(call cc-option, -fno-toplevel-reorder,\ | 64 | $(call cc-option, -fno-toplevel-reorder,\ |
64 | $(call cc-option, -fno-unit-at-a-time)) \ | 65 | $(call cc-option, -fno-unit-at-a-time)) \ |
65 | $(call cc-option, -fno-stack-protector) \ | 66 | $(call cc-option, -fno-stack-protector) \ |
66 | $(call cc-option, -mpreferred-stack-boundary=2) | 67 | $(call cc-option, -mpreferred-stack-boundary=2) |
67 | KBUILD_CFLAGS += $(call cc-option, -m32) | ||
68 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | 68 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ |
69 | GCOV_PROFILE := n | 69 | GCOV_PROFILE := n |
70 | 70 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index dcd90df10ab4..c8a6792e7842 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | |||
13 | cflags-$(CONFIG_X86_32) := -march=i386 | 13 | cflags-$(CONFIG_X86_32) := -march=i386 |
14 | cflags-$(CONFIG_X86_64) := -mcmodel=small | 14 | cflags-$(CONFIG_X86_64) := -mcmodel=small |
15 | KBUILD_CFLAGS += $(cflags-y) | 15 | KBUILD_CFLAGS += $(cflags-y) |
16 | KBUILD_CFLAGS += -mno-mmx -mno-sse | ||
16 | KBUILD_CFLAGS += $(call cc-option,-ffreestanding) | 17 | KBUILD_CFLAGS += $(call cc-option,-ffreestanding) |
17 | KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) | 18 | KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) |
18 | 19 | ||
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c49a613c6452..cea1c76d49bf 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h | |||
@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk) | |||
293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
294 | is pending. Clear the x87 state here by setting it to fixed | 294 | is pending. Clear the x87 state here by setting it to fixed |
295 | values. "m" is a random variable that should be in L1 */ | 295 | values. "m" is a random variable that should be in L1 */ |
296 | alternative_input( | 296 | if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { |
297 | ASM_NOP8 ASM_NOP2, | 297 | asm volatile( |
298 | "emms\n\t" /* clear stack tags */ | 298 | "fnclex\n\t" |
299 | "fildl %P[addr]", /* set F?P to defined value */ | 299 | "emms\n\t" |
300 | X86_FEATURE_FXSAVE_LEAK, | 300 | "fildl %P[addr]" /* set F?P to defined value */ |
301 | [addr] "m" (tsk->thread.fpu.has_fpu)); | 301 | : : [addr] "m" (tsk->thread.fpu.has_fpu)); |
302 | } | ||
302 | 303 | ||
303 | return fpu_restore_checking(&tsk->thread.fpu); | 304 | return fpu_restore_checking(&tsk->thread.fpu); |
304 | } | 305 | } |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d1999458709..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) | |||
452 | } | 452 | } |
453 | 453 | ||
454 | #define pte_accessible pte_accessible | 454 | #define pte_accessible pte_accessible |
455 | static inline int pte_accessible(pte_t a) | 455 | static inline bool pte_accessible(struct mm_struct *mm, pte_t a) |
456 | { | 456 | { |
457 | return pte_flags(a) & _PAGE_PRESENT; | 457 | if (pte_flags(a) & _PAGE_PRESENT) |
458 | return true; | ||
459 | |||
460 | if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && | ||
461 | mm_tlb_flush_pending(mm)) | ||
462 | return true; | ||
463 | |||
464 | return false; | ||
458 | } | 465 | } |
459 | 466 | ||
460 | static inline int pte_hidden(pte_t pte) | 467 | static inline int pte_hidden(pte_t pte) |
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723636fd..c8b051933b1b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h | |||
@@ -8,6 +8,12 @@ | |||
8 | DECLARE_PER_CPU(int, __preempt_count); | 8 | DECLARE_PER_CPU(int, __preempt_count); |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such | ||
12 | * that a decrement hitting 0 means we can and should reschedule. | ||
13 | */ | ||
14 | #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) | ||
15 | |||
16 | /* | ||
11 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | 17 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users |
12 | * that think a non-zero value indicates we cannot preempt. | 18 | * that think a non-zero value indicates we cannot preempt. |
13 | */ | 19 | */ |
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) | |||
74 | __this_cpu_add_4(__preempt_count, -val); | 80 | __this_cpu_add_4(__preempt_count, -val); |
75 | } | 81 | } |
76 | 82 | ||
83 | /* | ||
84 | * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule | ||
85 | * a decrement which hits zero means we have no preempt_count and should | ||
86 | * reschedule. | ||
87 | */ | ||
77 | static __always_inline bool __preempt_count_dec_and_test(void) | 88 | static __always_inline bool __preempt_count_dec_and_test(void) |
78 | { | 89 | { |
79 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); | 90 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939..ea04b342c026 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
387 | set_cpu_cap(c, X86_FEATURE_PEBS); | 387 | set_cpu_cap(c, X86_FEATURE_PEBS); |
388 | } | 388 | } |
389 | 389 | ||
390 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | 390 | if (c->x86 == 6 && cpu_has_clflush && |
391 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | ||
391 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 392 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); |
392 | 393 | ||
393 | #ifdef CONFIG_X86_64 | 394 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb29425d..c1a861829d81 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -262,11 +262,20 @@ struct cpu_hw_events { | |||
262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ |
263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | 263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
264 | 264 | ||
265 | #define EVENT_CONSTRAINT_END \ | 265 | /* |
266 | EVENT_CONSTRAINT(0, 0, 0) | 266 | * We define the end marker as having a weight of -1 |
267 | * to enable blacklisting of events using a counter bitmask | ||
268 | * of zero and thus a weight of zero. | ||
269 | * The end marker has a weight that cannot possibly be | ||
270 | * obtained from counting the bits in the bitmask. | ||
271 | */ | ||
272 | #define EVENT_CONSTRAINT_END { .weight = -1 } | ||
267 | 273 | ||
274 | /* | ||
275 | * Check for end marker with weight == -1 | ||
276 | */ | ||
268 | #define for_each_event_constraint(e, c) \ | 277 | #define for_each_event_constraint(e, c) \ |
269 | for ((e) = (c); (e)->weight; (e)++) | 278 | for ((e) = (c); (e)->weight != -1; (e)++) |
270 | 279 | ||
271 | /* | 280 | /* |
272 | * Extra registers for specific events. | 281 | * Extra registers for specific events. |
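A small stand-alone illustration of why the end marker above moves to a weight of -1: once zero-weight entries become legal (a counter bitmask of zero used to blacklist an event), the old "stop at weight == 0" loop would terminate early. The struct and values below are invented for the demo, not the kernel's types.

#include <stdio.h>

struct constraint {
	unsigned long mask;	/* counter bitmask */
	int weight;		/* popcount of mask; 0 = blacklisted event */
};

#define CONSTRAINT_END { .mask = 0, .weight = -1 }

static const struct constraint table[] = {
	{ .mask = 0x3, .weight = 2 },
	{ .mask = 0x0, .weight = 0 },	/* blacklisted: would have ended the old loop */
	{ .mask = 0x1, .weight = 1 },
	CONSTRAINT_END,
};

int main(void)
{
	/* Old style "for (c = table; c->weight; c++)" stops at the zero-weight entry;
	 * the -1 sentinel can never be produced by counting bits, so it is unambiguous. */
	for (const struct constraint *c = table; c->weight != -1; c++)
		printf("mask=%#lx weight=%d\n", c->mask, c->weight);
	return 0;
}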
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 51e2988c5728..a2a4f4697889 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller) | |||
1082 | pushl $0 /* Pass NULL as regs pointer */ | 1082 | pushl $0 /* Pass NULL as regs pointer */ |
1083 | movl 4*4(%esp), %eax | 1083 | movl 4*4(%esp), %eax |
1084 | movl 0x4(%ebp), %edx | 1084 | movl 0x4(%ebp), %edx |
1085 | leal function_trace_op, %ecx | 1085 | movl function_trace_op, %ecx |
1086 | subl $MCOUNT_INSN_SIZE, %eax | 1086 | subl $MCOUNT_INSN_SIZE, %eax |
1087 | 1087 | ||
1088 | .globl ftrace_call | 1088 | .globl ftrace_call |
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller) | |||
1140 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ | 1140 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
1141 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ | 1141 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
1142 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ | 1142 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
1143 | leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ | 1143 | movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
1144 | pushl %esp /* Save pt_regs as 4th parameter */ | 1144 | pushl %esp /* Save pt_regs as 4th parameter */ |
1145 | 1145 | ||
1146 | GLOBAL(ftrace_regs_call) | 1146 | GLOBAL(ftrace_regs_call) |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e21b0785a85b..1e96c3628bf2 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -88,7 +88,7 @@ END(function_hook) | |||
88 | MCOUNT_SAVE_FRAME \skip | 88 | MCOUNT_SAVE_FRAME \skip |
89 | 89 | ||
90 | /* Load the ftrace_ops into the 3rd parameter */ | 90 | /* Load the ftrace_ops into the 3rd parameter */ |
91 | leaq function_trace_op, %rdx | 91 | movq function_trace_op(%rip), %rdx |
92 | 92 | ||
93 | /* Load ip into the first parameter */ | 93 | /* Load ip into the first parameter */ |
94 | movq RIP(%rsp), %rdi | 94 | movq RIP(%rsp), %rdi |
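A hedged C analogue of the leal/leaq to movl/movq change in the two ftrace entry files above: the callback's third parameter should receive the pointer stored in the variable, not the address of the variable itself. The names here are illustrative, not the kernel's.

#include <stdio.h>

struct ops { const char *name; };

static struct ops my_ops = { .name = "demo" };
static struct ops *trace_op = &my_ops;	/* plays the role of function_trace_op */

static void callback(struct ops *op)
{
	printf("op->name = %s\n", op->name);
}

int main(void)
{
	/* leaq-equivalent bug: passing &trace_op hands the callee a struct ops **,
	 * which it would then dereference as if it were the ops structure. */
	/* callback((struct ops *)&trace_op); */

	/* movq-equivalent fix: pass the value held in the variable. */
	callback(trace_op);
	return 0;
}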
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5439117d5c4c..1673940cf9c3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic) | |||
143 | return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; | 143 | return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; |
144 | } | 144 | } |
145 | 145 | ||
146 | #define KVM_X2APIC_CID_BITS 0 | ||
147 | |||
146 | static void recalculate_apic_map(struct kvm *kvm) | 148 | static void recalculate_apic_map(struct kvm *kvm) |
147 | { | 149 | { |
148 | struct kvm_apic_map *new, *old = NULL; | 150 | struct kvm_apic_map *new, *old = NULL; |
@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm) | |||
180 | if (apic_x2apic_mode(apic)) { | 182 | if (apic_x2apic_mode(apic)) { |
181 | new->ldr_bits = 32; | 183 | new->ldr_bits = 32; |
182 | new->cid_shift = 16; | 184 | new->cid_shift = 16; |
183 | new->cid_mask = new->lid_mask = 0xffff; | 185 | new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1; |
186 | new->lid_mask = 0xffff; | ||
184 | } else if (kvm_apic_sw_enabled(apic) && | 187 | } else if (kvm_apic_sw_enabled(apic) && |
185 | !new->cid_mask /* flat mode */ && | 188 | !new->cid_mask /* flat mode */ && |
186 | kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { | 189 | kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { |
@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) | |||
841 | ASSERT(apic != NULL); | 844 | ASSERT(apic != NULL); |
842 | 845 | ||
843 | /* if initial count is 0, current count should also be 0 */ | 846 | /* if initial count is 0, current count should also be 0 */ |
844 | if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) | 847 | if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || |
848 | apic->lapic_timer.period == 0) | ||
845 | return 0; | 849 | return 0; |
846 | 850 | ||
847 | remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); | 851 | remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); |
@@ -1346,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1346 | return; | 1350 | return; |
1347 | } | 1351 | } |
1348 | 1352 | ||
1353 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
1354 | value &= ~MSR_IA32_APICBASE_BSP; | ||
1355 | vcpu->arch.apic_base = value; | ||
1356 | |||
1349 | /* update jump label if enable bit changes */ | 1357 | /* update jump label if enable bit changes */ |
1350 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { | 1358 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { |
1351 | if (value & MSR_IA32_APICBASE_ENABLE) | 1359 | if (value & MSR_IA32_APICBASE_ENABLE) |
@@ -1355,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1355 | recalculate_apic_map(vcpu->kvm); | 1363 | recalculate_apic_map(vcpu->kvm); |
1356 | } | 1364 | } |
1357 | 1365 | ||
1358 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
1359 | value &= ~MSR_IA32_APICBASE_BSP; | ||
1360 | |||
1361 | vcpu->arch.apic_base = value; | ||
1362 | if ((old_value ^ value) & X2APIC_ENABLE) { | 1366 | if ((old_value ^ value) & X2APIC_ENABLE) { |
1363 | if (value & X2APIC_ENABLE) { | 1367 | if (value & X2APIC_ENABLE) { |
1364 | u32 id = kvm_apic_id(apic); | 1368 | u32 id = kvm_apic_id(apic); |
@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, | |||
1691 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) | 1695 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) |
1692 | { | 1696 | { |
1693 | u32 data; | 1697 | u32 data; |
1694 | void *vapic; | ||
1695 | 1698 | ||
1696 | if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) | 1699 | if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) |
1697 | apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); | 1700 | apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); |
@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) | |||
1699 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) | 1702 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) |
1700 | return; | 1703 | return; |
1701 | 1704 | ||
1702 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page); | 1705 | kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, |
1703 | data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); | 1706 | sizeof(u32)); |
1704 | kunmap_atomic(vapic); | ||
1705 | 1707 | ||
1706 | apic_set_tpr(vcpu->arch.apic, data & 0xff); | 1708 | apic_set_tpr(vcpu->arch.apic, data & 0xff); |
1707 | } | 1709 | } |
@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) | |||
1737 | u32 data, tpr; | 1739 | u32 data, tpr; |
1738 | int max_irr, max_isr; | 1740 | int max_irr, max_isr; |
1739 | struct kvm_lapic *apic = vcpu->arch.apic; | 1741 | struct kvm_lapic *apic = vcpu->arch.apic; |
1740 | void *vapic; | ||
1741 | 1742 | ||
1742 | apic_sync_pv_eoi_to_guest(vcpu, apic); | 1743 | apic_sync_pv_eoi_to_guest(vcpu, apic); |
1743 | 1744 | ||
@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) | |||
1753 | max_isr = 0; | 1754 | max_isr = 0; |
1754 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); | 1755 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); |
1755 | 1756 | ||
1756 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page); | 1757 | kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, |
1757 | *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; | 1758 | sizeof(u32)); |
1758 | kunmap_atomic(vapic); | ||
1759 | } | 1759 | } |
1760 | 1760 | ||
1761 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) | 1761 | int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) |
1762 | { | 1762 | { |
1763 | vcpu->arch.apic->vapic_addr = vapic_addr; | 1763 | if (vapic_addr) { |
1764 | if (vapic_addr) | 1764 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, |
1765 | &vcpu->arch.apic->vapic_cache, | ||
1766 | vapic_addr, sizeof(u32))) | ||
1767 | return -EINVAL; | ||
1765 | __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); | 1768 | __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
1766 | else | 1769 | } else { |
1767 | __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); | 1770 | __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
1771 | } | ||
1772 | |||
1773 | vcpu->arch.apic->vapic_addr = vapic_addr; | ||
1774 | return 0; | ||
1768 | } | 1775 | } |
1769 | 1776 | ||
1770 | int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 1777 | int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index c730ac9fe801..c8b0d0d2da5c 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h | |||
@@ -34,7 +34,7 @@ struct kvm_lapic { | |||
34 | */ | 34 | */ |
35 | void *regs; | 35 | void *regs; |
36 | gpa_t vapic_addr; | 36 | gpa_t vapic_addr; |
37 | struct page *vapic_page; | 37 | struct gfn_to_hva_cache vapic_cache; |
38 | unsigned long pending_events; | 38 | unsigned long pending_events; |
39 | unsigned int sipi_vector; | 39 | unsigned int sipi_vector; |
40 | }; | 40 | }; |
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); | |||
76 | void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); | 76 | void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); |
77 | void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); | 77 | void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); |
78 | 78 | ||
79 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); | 79 | int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); |
80 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); | 80 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); |
81 | void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); | 81 | void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); |
82 | 82 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c252f35..da7837e1349d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |||
8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | 8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); | 8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); |
8285 | 8285 | ||
8286 | if (nested_cpu_has_ept(vmcs12)) | 8286 | nested_ept_uninit_mmu_context(vcpu); |
8287 | nested_ept_uninit_mmu_context(vcpu); | ||
8288 | 8287 | ||
8289 | kvm_set_cr3(vcpu, vmcs12->host_cr3); | 8288 | kvm_set_cr3(vcpu, vmcs12->host_cr3); |
8290 | kvm_mmu_reset_context(vcpu); | 8289 | kvm_mmu_reset_context(vcpu); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 21ef1ba184ae..5d004da1e35d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
3214 | r = -EFAULT; | 3214 | r = -EFAULT; |
3215 | if (copy_from_user(&va, argp, sizeof va)) | 3215 | if (copy_from_user(&va, argp, sizeof va)) |
3216 | goto out; | 3216 | goto out; |
3217 | r = 0; | 3217 | r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); |
3218 | kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); | ||
3219 | break; | 3218 | break; |
3220 | } | 3219 | } |
3221 | case KVM_X86_SETUP_MCE: { | 3220 | case KVM_X86_SETUP_MCE: { |
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) | |||
5739 | !kvm_event_needs_reinjection(vcpu); | 5738 | !kvm_event_needs_reinjection(vcpu); |
5740 | } | 5739 | } |
5741 | 5740 | ||
5742 | static int vapic_enter(struct kvm_vcpu *vcpu) | ||
5743 | { | ||
5744 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
5745 | struct page *page; | ||
5746 | |||
5747 | if (!apic || !apic->vapic_addr) | ||
5748 | return 0; | ||
5749 | |||
5750 | page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); | ||
5751 | if (is_error_page(page)) | ||
5752 | return -EFAULT; | ||
5753 | |||
5754 | vcpu->arch.apic->vapic_page = page; | ||
5755 | return 0; | ||
5756 | } | ||
5757 | |||
5758 | static void vapic_exit(struct kvm_vcpu *vcpu) | ||
5759 | { | ||
5760 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
5761 | int idx; | ||
5762 | |||
5763 | if (!apic || !apic->vapic_addr) | ||
5764 | return; | ||
5765 | |||
5766 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
5767 | kvm_release_page_dirty(apic->vapic_page); | ||
5768 | mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); | ||
5769 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
5770 | } | ||
5771 | |||
5772 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) | 5741 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) |
5773 | { | 5742 | { |
5774 | int max_irr, tpr; | 5743 | int max_irr, tpr; |
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6069 | struct kvm *kvm = vcpu->kvm; | 6038 | struct kvm *kvm = vcpu->kvm; |
6070 | 6039 | ||
6071 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | 6040 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); |
6072 | r = vapic_enter(vcpu); | ||
6073 | if (r) { | ||
6074 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | ||
6075 | return r; | ||
6076 | } | ||
6077 | 6041 | ||
6078 | r = 1; | 6042 | r = 1; |
6079 | while (r > 0) { | 6043 | while (r > 0) { |
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6132 | 6096 | ||
6133 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | 6097 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); |
6134 | 6098 | ||
6135 | vapic_exit(vcpu); | ||
6136 | |||
6137 | return r; | 6099 | return r; |
6138 | } | 6100 | } |
6139 | 6101 | ||
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46828c0..0596e8e0cc19 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
83 | pte_t pte = gup_get_pte(ptep); | 83 | pte_t pte = gup_get_pte(ptep); |
84 | struct page *page; | 84 | struct page *page; |
85 | 85 | ||
86 | /* Similar to the PMD case, NUMA hinting must take slow path */ | ||
87 | if (pte_numa(pte)) { | ||
88 | pte_unmap(ptep); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
86 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | 92 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { |
87 | pte_unmap(ptep); | 93 | pte_unmap(ptep); |
88 | return 0; | 94 | return 0; |
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
167 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | 173 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
168 | return 0; | 174 | return 0; |
169 | if (unlikely(pmd_large(pmd))) { | 175 | if (unlikely(pmd_large(pmd))) { |
176 | /* | ||
177 | * NUMA hinting faults need to be handled in the GUP | ||
178 | * slowpath for accounting purposes and so that they | ||
179 | * can be serialised against THP migration. | ||
180 | */ | ||
181 | if (pmd_numa(pmd)) | ||
182 | return 0; | ||
170 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | 183 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) |
171 | return 0; | 184 | return 0; |
172 | } else { | 185 | } else { |
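A generic sketch of the fallback pattern the gup.c comments above rely on: the lockless fast walker refuses entries carrying a "needs special handling" marker and returns 0, which makes the caller retry through the slower path that can take locks and do accounting. All names and flag values below are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_PRESENT 0x1
#define FLAG_HINT    0x2	/* stands in for the NUMA-hinting marker */

static int lookup_fast(unsigned int flags, int *out)
{
	if (!(flags & FLAG_PRESENT) || (flags & FLAG_HINT))
		return 0;		/* bail out: let the slow path handle it */
	*out = 42;
	return 1;
}

static int lookup_slow(unsigned int flags, int *out)
{
	(void)flags;
	/* pretend we took locks, accounted the access and resolved the hint here */
	*out = 42;
	return 1;
}

static bool lookup(unsigned int flags, int *out)
{
	if (lookup_fast(flags, out))
		return true;
	return lookup_slow(flags, out);
}

int main(void)
{
	int v;
	printf("hinted entry resolved via slow path: %d\n",
	       lookup(FLAG_PRESENT | FLAG_HINT, &v) ? v : -1);
	return 0;
}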
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 92c02344a060..cceb813044ef 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -690,13 +690,6 @@ void __init efi_init(void) | |||
690 | 690 | ||
691 | set_bit(EFI_MEMMAP, &x86_efi_facility); | 691 | set_bit(EFI_MEMMAP, &x86_efi_facility); |
692 | 692 | ||
693 | #ifdef CONFIG_X86_32 | ||
694 | if (efi_is_native()) { | ||
695 | x86_platform.get_wallclock = efi_get_time; | ||
696 | x86_platform.set_wallclock = efi_set_rtc_mmss; | ||
697 | } | ||
698 | #endif | ||
699 | |||
700 | #if EFI_DEBUG | 693 | #if EFI_DEBUG |
701 | print_efi_memmap(); | 694 | print_efi_memmap(); |
702 | #endif | 695 | #endif |
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 0f92173a12b6..efe4d7220397 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
1070 | unsigned long status; | 1070 | unsigned long status; |
1071 | 1071 | ||
1072 | bcp = &per_cpu(bau_control, cpu); | 1072 | bcp = &per_cpu(bau_control, cpu); |
1073 | stat = bcp->statp; | ||
1074 | stat->s_enters++; | ||
1075 | 1073 | ||
1076 | if (bcp->nobau) | 1074 | if (bcp->nobau) |
1077 | return cpumask; | 1075 | return cpumask; |
1078 | 1076 | ||
1077 | stat = bcp->statp; | ||
1078 | stat->s_enters++; | ||
1079 | |||
1079 | if (bcp->busy) { | 1080 | if (bcp->busy) { |
1080 | descriptor_status = | 1081 | descriptor_status = |
1081 | read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); | 1082 | read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); |
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 88692871823f..9cac82588cbc 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile | |||
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ | |||
73 | -march=i386 -mregparm=3 \ | 73 | -march=i386 -mregparm=3 \ |
74 | -include $(srctree)/$(src)/../../boot/code16gcc.h \ | 74 | -include $(srctree)/$(src)/../../boot/code16gcc.h \ |
75 | -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ | 75 | -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ |
76 | -mno-mmx -mno-sse \ | ||
76 | $(call cc-option, -ffreestanding) \ | 77 | $(call cc-option, -ffreestanding) \ |
77 | $(call cc-option, -fno-toplevel-reorder,\ | 78 | $(call cc-option, -fno-toplevel-reorder,\ |
78 | $(call cc-option, -fno-unit-at-a-time)) \ | 79 | $(call cc-option, -fno-unit-at-a-time)) \ |
79 | $(call cc-option, -fno-stack-protector) \ | 80 | $(call cc-option, -fno-stack-protector) \ |
80 | $(call cc-option, -mpreferred-stack-boundary=2) | 81 | $(call cc-option, -mpreferred-stack-boundary=2) |
81 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | 82 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ |
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index ba6cf8e9aa0a..b91ce75bd35d 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c | |||
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = { | |||
335 | void blk_mq_unregister_disk(struct gendisk *disk) | 335 | void blk_mq_unregister_disk(struct gendisk *disk) |
336 | { | 336 | { |
337 | struct request_queue *q = disk->queue; | 337 | struct request_queue *q = disk->queue; |
338 | struct blk_mq_hw_ctx *hctx; | ||
339 | struct blk_mq_ctx *ctx; | ||
340 | int i, j; | ||
341 | |||
342 | queue_for_each_hw_ctx(q, hctx, i) { | ||
343 | hctx_for_each_ctx(hctx, ctx, j) { | ||
344 | kobject_del(&ctx->kobj); | ||
345 | kobject_put(&ctx->kobj); | ||
346 | } | ||
347 | kobject_del(&hctx->kobj); | ||
348 | kobject_put(&hctx->kobj); | ||
349 | } | ||
338 | 350 | ||
339 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); | 351 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); |
340 | kobject_del(&q->mq_kobj); | 352 | kobject_del(&q->mq_kobj); |
353 | kobject_put(&q->mq_kobj); | ||
341 | 354 | ||
342 | kobject_put(&disk_to_dev(disk)->kobj); | 355 | kobject_put(&disk_to_dev(disk)->kobj); |
343 | } | 356 | } |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5d9248526d78..4770de5707b9 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig" | |||
348 | config ACPI_EXTLOG | 348 | config ACPI_EXTLOG |
349 | tristate "Extended Error Log support" | 349 | tristate "Extended Error Log support" |
350 | depends on X86_MCE && X86_LOCAL_APIC | 350 | depends on X86_MCE && X86_LOCAL_APIC |
351 | select EFI | ||
352 | select UEFI_CPER | 351 | select UEFI_CPER |
353 | default n | 352 | default n |
354 | help | 353 | help |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 8711e3797165..3c2e4aa529c4 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev) | |||
207 | goto end; | 207 | goto end; |
208 | 208 | ||
209 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), | 209 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), |
210 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); | 210 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); |
211 | if (result) { | 211 | if (result) { |
212 | power_supply_unregister(&ac->charger); | 212 | power_supply_unregister(&ac->charger); |
213 | goto end; | 213 | goto end; |
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev) | |||
255 | return -EINVAL; | 255 | return -EINVAL; |
256 | 256 | ||
257 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), | 257 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), |
258 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); | 258 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler); |
259 | 259 | ||
260 | ac = platform_get_drvdata(pdev); | 260 | ac = platform_get_drvdata(pdev); |
261 | if (ac->charger.dev) | 261 | if (ac->charger.dev) |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 6745fe137b9e..e60390597372 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { | |||
162 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, | 162 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, |
163 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, | 163 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, |
164 | { "INT33B2", }, | 164 | { "INT33B2", }, |
165 | { "INT33FC", }, | ||
165 | 166 | ||
166 | { "INT3430", (unsigned long)&lpt_dev_desc }, | 167 | { "INT3430", (unsigned long)&lpt_dev_desc }, |
167 | { "INT3431", (unsigned long)&lpt_dev_desc }, | 168 | { "INT3431", (unsigned long)&lpt_dev_desc }, |
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 786294bb682c..3650b2183227 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig | |||
@@ -2,7 +2,6 @@ config ACPI_APEI | |||
2 | bool "ACPI Platform Error Interface (APEI)" | 2 | bool "ACPI Platform Error Interface (APEI)" |
3 | select MISC_FILESYSTEMS | 3 | select MISC_FILESYSTEMS |
4 | select PSTORE | 4 | select PSTORE |
5 | select EFI | ||
6 | select UEFI_CPER | 5 | select UEFI_CPER |
7 | depends on X86 | 6 | depends on X86 |
8 | help | 7 | help |
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 26311f23c824..cb1d557fc22c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count, | |||
942 | static struct pstore_info erst_info = { | 942 | static struct pstore_info erst_info = { |
943 | .owner = THIS_MODULE, | 943 | .owner = THIS_MODULE, |
944 | .name = "erst", | 944 | .name = "erst", |
945 | .flags = PSTORE_FLAGS_FRAGILE, | ||
945 | .open = erst_open_pstore, | 946 | .open = erst_open_pstore, |
946 | .close = erst_close_pstore, | 947 | .close = erst_close_pstore, |
947 | .read = erst_reader, | 948 | .read = erst_reader, |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index fbf1aceda8b8..5876a49dfd38 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); | |||
62 | MODULE_DESCRIPTION("ACPI Battery Driver"); | 62 | MODULE_DESCRIPTION("ACPI Battery Driver"); |
63 | MODULE_LICENSE("GPL"); | 63 | MODULE_LICENSE("GPL"); |
64 | 64 | ||
65 | static int battery_bix_broken_package; | ||
65 | static unsigned int cache_time = 1000; | 66 | static unsigned int cache_time = 1000; |
66 | module_param(cache_time, uint, 0644); | 67 | module_param(cache_time, uint, 0644); |
67 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 68 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery) | |||
416 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); | 417 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); |
417 | return -ENODEV; | 418 | return -ENODEV; |
418 | } | 419 | } |
419 | if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) | 420 | |
421 | if (battery_bix_broken_package) | ||
422 | result = extract_package(battery, buffer.pointer, | ||
423 | extended_info_offsets + 1, | ||
424 | ARRAY_SIZE(extended_info_offsets) - 1); | ||
425 | else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) | ||
420 | result = extract_package(battery, buffer.pointer, | 426 | result = extract_package(battery, buffer.pointer, |
421 | extended_info_offsets, | 427 | extended_info_offsets, |
422 | ARRAY_SIZE(extended_info_offsets)); | 428 | ARRAY_SIZE(extended_info_offsets)); |
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb, | |||
754 | return 0; | 760 | return 0; |
755 | } | 761 | } |
756 | 762 | ||
763 | static struct dmi_system_id bat_dmi_table[] = { | ||
764 | { | ||
765 | .ident = "NEC LZ750/LS", | ||
766 | .matches = { | ||
767 | DMI_MATCH(DMI_SYS_VENDOR, "NEC"), | ||
768 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"), | ||
769 | }, | ||
770 | }, | ||
771 | {}, | ||
772 | }; | ||
773 | |||
757 | static int acpi_battery_add(struct acpi_device *device) | 774 | static int acpi_battery_add(struct acpi_device *device) |
758 | { | 775 | { |
759 | int result = 0; | 776 | int result = 0; |
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) | |||
846 | { | 863 | { |
847 | if (acpi_disabled) | 864 | if (acpi_disabled) |
848 | return; | 865 | return; |
866 | |||
867 | if (dmi_check_system(bat_dmi_table)) | ||
868 | battery_bix_broken_package = 1; | ||
849 | acpi_bus_register_driver(&acpi_battery_driver); | 869 | acpi_bus_register_driver(&acpi_battery_driver); |
850 | } | 870 | } |
851 | 871 | ||
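A stand-alone sketch of the quirk-table pattern the battery change above uses: a static table of machine identifiers is scanned once at startup and, on a match, a global workaround flag is set that later code consults. The lookup source and table contents below are assumptions for the demo, not the driver's DMI plumbing.

#include <stdio.h>
#include <string.h>

struct quirk { const char *vendor; const char *product; };

static const struct quirk broken_bix[] = {
	{ "NEC", "PC-LZ750LS" },
	{ NULL, NULL },		/* terminator */
};

static int bix_broken_package;

static void apply_quirks(const char *vendor, const char *product)
{
	for (const struct quirk *q = broken_bix; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			bix_broken_package = 1;
}

int main(void)
{
	apply_quirks("NEC", "PC-LZ750LS");	/* values would normally come from firmware */
	printf("bix_broken_package = %d\n", bix_broken_package);
	return 0;
}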
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index bba9b72e25f8..0710004055c8 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) | |||
156 | } | 156 | } |
157 | EXPORT_SYMBOL(acpi_bus_get_private_data); | 157 | EXPORT_SYMBOL(acpi_bus_get_private_data); |
158 | 158 | ||
159 | void acpi_bus_no_hotplug(acpi_handle handle) | ||
160 | { | ||
161 | struct acpi_device *adev = NULL; | ||
162 | |||
163 | acpi_bus_get_device(handle, &adev); | ||
164 | if (adev) | ||
165 | adev->flags.no_hotplug = true; | ||
166 | } | ||
167 | EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug); | ||
168 | |||
159 | static void acpi_print_osc_error(acpi_handle handle, | 169 | static void acpi_print_osc_error(acpi_handle handle, |
160 | struct acpi_osc_context *context, char *error) | 170 | struct acpi_osc_context *context, char *error) |
161 | { | 171 | { |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 14f1e9506338..e3a92a6da39a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
427 | .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ | 427 | .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ |
428 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), | 428 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), |
429 | .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ | 429 | .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ |
430 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178, | ||
431 | PCI_VENDOR_ID_MARVELL_EXT, 0x9170), | ||
432 | .driver_data = board_ahci_yes_fbs }, /* 88se9170 */ | ||
430 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), | 433 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), |
431 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ | 434 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ |
432 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), | 435 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), |
@@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1238 | if (rc) | 1241 | if (rc) |
1239 | return rc; | 1242 | return rc; |
1240 | 1243 | ||
1241 | /* AHCI controllers often implement SFF compatible interface. | ||
1242 | * Grab all PCI BARs just in case. | ||
1243 | */ | ||
1244 | rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); | ||
1245 | if (rc == -EBUSY) | ||
1246 | pcim_pin_device(pdev); | ||
1247 | if (rc) | ||
1248 | return rc; | ||
1249 | |||
1250 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 1244 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
1251 | (pdev->device == 0x2652 || pdev->device == 0x2653)) { | 1245 | (pdev->device == 0x2652 || pdev->device == 0x2653)) { |
1252 | u8 map; | 1246 | u8 map; |
@@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1263 | } | 1257 | } |
1264 | } | 1258 | } |
1265 | 1259 | ||
1260 | /* AHCI controllers often implement SFF compatible interface. | ||
1261 | * Grab all PCI BARs just in case. | ||
1262 | */ | ||
1263 | rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); | ||
1264 | if (rc == -EBUSY) | ||
1265 | pcim_pin_device(pdev); | ||
1266 | if (rc) | ||
1267 | return rc; | ||
1268 | |||
1266 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); | 1269 | hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); |
1267 | if (!hpriv) | 1270 | if (!hpriv) |
1268 | return -ENOMEM; | 1271 | return -ENOMEM; |
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index ae2d73fe321e..3e23e9941dad 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c | |||
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | |||
113 | /* | 113 | /* |
114 | * set PHY Paremeters, two steps to configure the GPR13, | 114 | * set PHY Paremeters, two steps to configure the GPR13, |
115 | * one write for rest of parameters, mask of first write | 115 | * one write for rest of parameters, mask of first write |
116 | * is 0x07fffffd, and the other one write for setting | 116 | * is 0x07ffffff, and the other one write for setting |
117 | * the mpll_clk_en. | 117 | * the mpll_clk_en. |
118 | */ | 118 | */ |
119 | regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK | 119 | regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | |||
124 | | IMX6Q_GPR13_SATA_TX_ATTEN_MASK | 124 | | IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
125 | | IMX6Q_GPR13_SATA_TX_BOOST_MASK | 125 | | IMX6Q_GPR13_SATA_TX_BOOST_MASK |
126 | | IMX6Q_GPR13_SATA_TX_LVL_MASK | 126 | | IMX6Q_GPR13_SATA_TX_LVL_MASK |
127 | | IMX6Q_GPR13_SATA_MPLL_CLK_EN | ||
127 | | IMX6Q_GPR13_SATA_TX_EDGE_RATE | 128 | | IMX6Q_GPR13_SATA_TX_EDGE_RATE |
128 | , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | 129 | , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
129 | | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M | 130 | | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 75b93678bbcd..1393a5890ed5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev, | |||
2149 | "failed to get NCQ Send/Recv Log Emask 0x%x\n", | 2149 | "failed to get NCQ Send/Recv Log Emask 0x%x\n", |
2150 | err_mask); | 2150 | err_mask); |
2151 | } else { | 2151 | } else { |
2152 | u8 *cmds = dev->ncq_send_recv_cmds; | ||
2153 | |||
2152 | dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; | 2154 | dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; |
2153 | memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, | 2155 | memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); |
2154 | ATA_LOG_NCQ_SEND_RECV_SIZE); | 2156 | |
2157 | if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { | ||
2158 | ata_dev_dbg(dev, "disabling queued TRIM support\n"); | ||
2159 | cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= | ||
2160 | ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; | ||
2161 | } | ||
2155 | } | 2162 | } |
2156 | } | 2163 | } |
2157 | 2164 | ||
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4156 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | | 4163 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | |
4157 | ATA_HORKAGE_FIRMWARE_WARN }, | 4164 | ATA_HORKAGE_FIRMWARE_WARN }, |
4158 | 4165 | ||
4166 | /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ | ||
4167 | { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, | ||
4168 | |||
4159 | /* Blacklist entries taken from Silicon Image 3124/3132 | 4169 | /* Blacklist entries taken from Silicon Image 3124/3132 |
4160 | Windows driver .inf file - also several Linux problem reports */ | 4170 | Windows driver .inf file - also several Linux problem reports */ |
4161 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, | 4171 | { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, |
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4202 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, | 4212 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, |
4203 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4213 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
4204 | 4214 | ||
4215 | /* devices that don't properly handle queued TRIM commands */ | ||
4216 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
4217 | { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
4218 | |||
4205 | /* End Marker */ | 4219 | /* End Marker */ |
4206 | { } | 4220 | { } |
4207 | }; | 4221 | }; |
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur, | |||
6519 | { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, | 6533 | { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, |
6520 | { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, | 6534 | { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, |
6521 | { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, | 6535 | { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, |
6536 | { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, | ||
6522 | }; | 6537 | }; |
6523 | char *start = *cur, *p = *cur; | 6538 | char *start = *cur, *p = *cur; |
6524 | char *id, *val, *endp; | 6539 | char *id, *val, *endp; |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ab58556d347c..377eb889f555 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work) | |||
3872 | return; | 3872 | return; |
3873 | } | 3873 | } |
3874 | 3874 | ||
3875 | /* | ||
3876 | * XXX - UGLY HACK | ||
3877 | * | ||
3878 | * The block layer suspend/resume path is fundamentally broken due | ||
3879 | * to freezable kthreads and workqueue and may deadlock if a block | ||
3880 | * device gets removed while resume is in progress. I don't know | ||
3881 | * what the solution is short of removing freezable kthreads and | ||
3882 | * workqueues altogether. | ||
3883 | * | ||
3884 | * The following is an ugly hack to avoid kicking off device | ||
3885 | * removal while freezer is active. This is a joke but does avoid | ||
3886 | * this particular deadlock scenario. | ||
3887 | * | ||
3888 | * https://bugzilla.kernel.org/show_bug.cgi?id=62801 | ||
3889 | * http://marc.info/?l=linux-kernel&m=138695698516487 | ||
3890 | */ | ||
3891 | #ifdef CONFIG_FREEZER | ||
3892 | while (pm_freezing) | ||
3893 | msleep(10); | ||
3894 | #endif | ||
3895 | |||
3875 | DPRINTK("ENTER\n"); | 3896 | DPRINTK("ENTER\n"); |
3876 | mutex_lock(&ap->scsi_scan_mutex); | 3897 | mutex_lock(&ap->scsi_scan_mutex); |
3877 | 3898 | ||
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index fe3ca0989b14..1ad2f62d34b9 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = { | |||
83 | .id_table = sis_pci_tbl, | 83 | .id_table = sis_pci_tbl, |
84 | .probe = sis_init_one, | 84 | .probe = sis_init_one, |
85 | .remove = ata_pci_remove_one, | 85 | .remove = ata_pci_remove_one, |
86 | #ifdef CONFIG_PM | ||
87 | .suspend = ata_pci_device_suspend, | ||
88 | .resume = ata_pci_device_resume, | ||
89 | #endif | ||
86 | }; | 90 | }; |
87 | 91 | ||
88 | static struct scsi_host_template sis_sht = { | 92 | static struct scsi_host_template sis_sht = { |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e3219dfd736c..1b41fca3d65a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/async.h> | 29 | #include <linux/async.h> |
30 | #include <linux/suspend.h> | 30 | #include <linux/suspend.h> |
31 | #include <trace/events/power.h> | 31 | #include <trace/events/power.h> |
32 | #include <linux/cpufreq.h> | ||
33 | #include <linux/cpuidle.h> | 32 | #include <linux/cpuidle.h> |
34 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
35 | 34 | ||
@@ -541,7 +540,6 @@ static void dpm_resume_noirq(pm_message_t state) | |||
541 | dpm_show_time(starttime, state, "noirq"); | 540 | dpm_show_time(starttime, state, "noirq"); |
542 | resume_device_irqs(); | 541 | resume_device_irqs(); |
543 | cpuidle_resume(); | 542 | cpuidle_resume(); |
544 | cpufreq_resume(); | ||
545 | } | 543 | } |
546 | 544 | ||
547 | /** | 545 | /** |
@@ -957,7 +955,6 @@ static int dpm_suspend_noirq(pm_message_t state) | |||
957 | ktime_t starttime = ktime_get(); | 955 | ktime_t starttime = ktime_get(); |
958 | int error = 0; | 956 | int error = 0; |
959 | 957 | ||
960 | cpufreq_suspend(); | ||
961 | cpuidle_pause(); | 958 | cpuidle_pause(); |
962 | suspend_device_irqs(); | 959 | suspend_device_irqs(); |
963 | mutex_lock(&dpm_list_mtx); | 960 | mutex_lock(&dpm_list_mtx); |
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 98745dd77e8c..81f977510775 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c | |||
@@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context, | |||
40 | 40 | ||
41 | BUG_ON(reg_size != 4); | 41 | BUG_ON(reg_size != 4); |
42 | 42 | ||
43 | if (ctx->clk) { | 43 | if (!IS_ERR(ctx->clk)) { |
44 | ret = clk_enable(ctx->clk); | 44 | ret = clk_enable(ctx->clk); |
45 | if (ret < 0) | 45 | if (ret < 0) |
46 | return ret; | 46 | return ret; |
@@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context, | |||
73 | offset += ctx->val_bytes; | 73 | offset += ctx->val_bytes; |
74 | } | 74 | } |
75 | 75 | ||
76 | if (ctx->clk) | 76 | if (!IS_ERR(ctx->clk)) |
77 | clk_disable(ctx->clk); | 77 | clk_disable(ctx->clk); |
78 | 78 | ||
79 | return 0; | 79 | return 0; |
@@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context, | |||
96 | 96 | ||
97 | BUG_ON(reg_size != 4); | 97 | BUG_ON(reg_size != 4); |
98 | 98 | ||
99 | if (ctx->clk) { | 99 | if (!IS_ERR(ctx->clk)) { |
100 | ret = clk_enable(ctx->clk); | 100 | ret = clk_enable(ctx->clk); |
101 | if (ret < 0) | 101 | if (ret < 0) |
102 | return ret; | 102 | return ret; |
@@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context, | |||
129 | offset += ctx->val_bytes; | 129 | offset += ctx->val_bytes; |
130 | } | 130 | } |
131 | 131 | ||
132 | if (ctx->clk) | 132 | if (!IS_ERR(ctx->clk)) |
133 | clk_disable(ctx->clk); | 133 | clk_disable(ctx->clk); |
134 | 134 | ||
135 | return 0; | 135 | return 0; |
@@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context) | |||
139 | { | 139 | { |
140 | struct regmap_mmio_context *ctx = context; | 140 | struct regmap_mmio_context *ctx = context; |
141 | 141 | ||
142 | if (ctx->clk) { | 142 | if (!IS_ERR(ctx->clk)) { |
143 | clk_unprepare(ctx->clk); | 143 | clk_unprepare(ctx->clk); |
144 | clk_put(ctx->clk); | 144 | clk_put(ctx->clk); |
145 | } | 145 | } |
@@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, | |||
209 | 209 | ||
210 | ctx->regs = regs; | 210 | ctx->regs = regs; |
211 | ctx->val_bytes = config->val_bits / 8; | 211 | ctx->val_bytes = config->val_bits / 8; |
212 | ctx->clk = ERR_PTR(-ENODEV); | ||
212 | 213 | ||
213 | if (clk_id == NULL) | 214 | if (clk_id == NULL) |
214 | return ctx; | 215 | return ctx; |
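A user-space re-creation of the ERR_PTR()/IS_ERR() idiom that the regmap-mmio change above switches to: the optional clock slot starts life holding an encoded error, and every user checks IS_ERR() rather than NULL, so "no clock requested" can never be confused with a valid handle. This is a sketch of the idiom, not the kernel headers.

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct context {
	void *clk;	/* optional clock handle */
};

int main(void)
{
	struct context ctx = { .clk = ERR_PTR(-19) };	/* -ENODEV: no clock was requested */

	if (!IS_ERR(ctx.clk))
		printf("would enable the clock here\n");
	else
		printf("no clock (err=%ld), skipping enable\n", PTR_ERR(ctx.clk));
	return 0;
}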
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 9c021d9cace0..c2e002100949 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, | |||
1549 | val + (i * val_bytes), | 1549 | val + (i * val_bytes), |
1550 | val_bytes); | 1550 | val_bytes); |
1551 | if (ret != 0) | 1551 | if (ret != 0) |
1552 | return ret; | 1552 | goto out; |
1553 | } | 1553 | } |
1554 | } else { | 1554 | } else { |
1555 | ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); | 1555 | ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); |
@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, | |||
1743 | /** | 1743 | /** |
1744 | * regmap_read(): Read a value from a single register | 1744 | * regmap_read(): Read a value from a single register |
1745 | * | 1745 | * |
1746 | * @map: Register map to write to | 1746 | * @map: Register map to read from |
1747 | * @reg: Register to be read from | 1747 | * @reg: Register to be read from |
1748 | * @val: Pointer to store read value | 1748 | * @val: Pointer to store read value |
1749 | * | 1749 | * |
@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read); | |||
1770 | /** | 1770 | /** |
1771 | * regmap_raw_read(): Read raw data from the device | 1771 | * regmap_raw_read(): Read raw data from the device |
1772 | * | 1772 | * |
1773 | * @map: Register map to write to | 1773 | * @map: Register map to read from |
1774 | * @reg: First register to be read from | 1774 | * @reg: First register to be read from |
1775 | * @val: Pointer to store read value | 1775 | * @val: Pointer to store read value |
1776 | * @val_len: Size of data to read | 1776 | * @val_len: Size of data to read |
@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read); | |||
1882 | /** | 1882 | /** |
1883 | * regmap_bulk_read(): Read multiple registers from the device | 1883 | * regmap_bulk_read(): Read multiple registers from the device |
1884 | * | 1884 | * |
1885 | * @map: Register map to write to | 1885 | * @map: Register map to read from |
1886 | * @reg: First register to be read from | 1886 | * @reg: First register to be read from |
1887 | * @val: Pointer to store read value, in native register size for device | 1887 | * @val: Pointer to store read value, in native register size for device |
1888 | * @val_count: Number of registers to read | 1888 | * @val_count: Number of registers to read |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ea192ec029c4..83a598ebb65a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | |||
2 | #include <linux/moduleparam.h> | 3 | #include <linux/moduleparam.h> |
3 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
4 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
@@ -65,7 +66,7 @@ enum { | |||
65 | NULL_Q_MQ = 2, | 66 | NULL_Q_MQ = 2, |
66 | }; | 67 | }; |
67 | 68 | ||
68 | static int submit_queues = 1; | 69 | static int submit_queues; |
69 | module_param(submit_queues, int, S_IRUGO); | 70 | module_param(submit_queues, int, S_IRUGO); |
70 | MODULE_PARM_DESC(submit_queues, "Number of submission queues"); | 71 | MODULE_PARM_DESC(submit_queues, "Number of submission queues"); |
71 | 72 | ||
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64; | |||
101 | module_param(hw_queue_depth, int, S_IRUGO); | 102 | module_param(hw_queue_depth, int, S_IRUGO); |
102 | MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); | 103 | MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); |
103 | 104 | ||
104 | static bool use_per_node_hctx = true; | 105 | static bool use_per_node_hctx = false; |
105 | module_param(use_per_node_hctx, bool, S_IRUGO); | 106 | module_param(use_per_node_hctx, bool, S_IRUGO); |
106 | MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); | 107 | MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); |
107 | 108 | ||
108 | static void put_tag(struct nullb_queue *nq, unsigned int tag) | 109 | static void put_tag(struct nullb_queue *nq, unsigned int tag) |
109 | { | 110 | { |
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
346 | 347 | ||
347 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) | 348 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) |
348 | { | 349 | { |
349 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, | 350 | int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); |
350 | hctx_index); | 351 | int tip = (reg->nr_hw_queues % nr_online_nodes); |
352 | int node = 0, i, n; | ||
353 | |||
354 | /* | ||
355 | * Split submit queues evenly wrt to the number of nodes. If uneven, | ||
356 | * fill the first buckets with one extra, until the rest is filled with | ||
357 | * no extra. | ||
358 | */ | ||
359 | for (i = 0, n = 1; i < hctx_index; i++, n++) { | ||
360 | if (n % b_size == 0) { | ||
361 | n = 0; | ||
362 | node++; | ||
363 | |||
364 | tip--; | ||
365 | if (!tip) | ||
366 | b_size = reg->nr_hw_queues / nr_online_nodes; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * A node might not be online, therefore map the relative node id to the | ||
372 | * real node id. | ||
373 | */ | ||
374 | for_each_online_node(n) { | ||
375 | if (!node) | ||
376 | break; | ||
377 | node--; | ||
378 | } | ||
379 | |||
380 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); | ||
351 | } | 381 | } |
352 | 382 | ||
353 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | 383 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) |
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | |||
355 | kfree(hctx); | 385 | kfree(hctx); |
356 | } | 386 | } |
357 | 387 | ||
388 | static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) | ||
389 | { | ||
390 | BUG_ON(!nullb); | ||
391 | BUG_ON(!nq); | ||
392 | |||
393 | init_waitqueue_head(&nq->wait); | ||
394 | nq->queue_depth = nullb->queue_depth; | ||
395 | } | ||
396 | |||
358 | static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | 397 | static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
359 | unsigned int index) | 398 | unsigned int index) |
360 | { | 399 | { |
361 | struct nullb *nullb = data; | 400 | struct nullb *nullb = data; |
362 | struct nullb_queue *nq = &nullb->queues[index]; | 401 | struct nullb_queue *nq = &nullb->queues[index]; |
363 | 402 | ||
364 | init_waitqueue_head(&nq->wait); | ||
365 | nq->queue_depth = nullb->queue_depth; | ||
366 | nullb->nr_queues++; | ||
367 | hctx->driver_data = nq; | 403 | hctx->driver_data = nq; |
404 | null_init_queue(nullb, nq); | ||
405 | nullb->nr_queues++; | ||
368 | 406 | ||
369 | return 0; | 407 | return 0; |
370 | } | 408 | } |
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb) | |||
387 | list_del_init(&nullb->list); | 425 | list_del_init(&nullb->list); |
388 | 426 | ||
389 | del_gendisk(nullb->disk); | 427 | del_gendisk(nullb->disk); |
390 | if (queue_mode == NULL_Q_MQ) | 428 | blk_cleanup_queue(nullb->q); |
391 | blk_mq_free_queue(nullb->q); | ||
392 | else | ||
393 | blk_cleanup_queue(nullb->q); | ||
394 | put_disk(nullb->disk); | 429 | put_disk(nullb->disk); |
395 | kfree(nullb); | 430 | kfree(nullb); |
396 | } | 431 | } |
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq) | |||
417 | 452 | ||
418 | nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); | 453 | nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); |
419 | if (!nq->cmds) | 454 | if (!nq->cmds) |
420 | return 1; | 455 | return -ENOMEM; |
421 | 456 | ||
422 | tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; | 457 | tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; |
423 | nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); | 458 | nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); |
424 | if (!nq->tag_map) { | 459 | if (!nq->tag_map) { |
425 | kfree(nq->cmds); | 460 | kfree(nq->cmds); |
426 | return 1; | 461 | return -ENOMEM; |
427 | } | 462 | } |
428 | 463 | ||
429 | for (i = 0; i < nq->queue_depth; i++) { | 464 | for (i = 0; i < nq->queue_depth; i++) { |
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb) | |||
454 | 489 | ||
455 | static int setup_queues(struct nullb *nullb) | 490 | static int setup_queues(struct nullb *nullb) |
456 | { | 491 | { |
457 | struct nullb_queue *nq; | 492 | nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), |
458 | int i; | 493 | GFP_KERNEL); |
459 | |||
460 | nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL); | ||
461 | if (!nullb->queues) | 494 | if (!nullb->queues) |
462 | return 1; | 495 | return -ENOMEM; |
463 | 496 | ||
464 | nullb->nr_queues = 0; | 497 | nullb->nr_queues = 0; |
465 | nullb->queue_depth = hw_queue_depth; | 498 | nullb->queue_depth = hw_queue_depth; |
466 | 499 | ||
467 | if (queue_mode == NULL_Q_MQ) | 500 | return 0; |
468 | return 0; | 501 | } |
502 | |||
503 | static int init_driver_queues(struct nullb *nullb) | ||
504 | { | ||
505 | struct nullb_queue *nq; | ||
506 | int i, ret = 0; | ||
469 | 507 | ||
470 | for (i = 0; i < submit_queues; i++) { | 508 | for (i = 0; i < submit_queues; i++) { |
471 | nq = &nullb->queues[i]; | 509 | nq = &nullb->queues[i]; |
472 | init_waitqueue_head(&nq->wait); | 510 | |
473 | nq->queue_depth = hw_queue_depth; | 511 | null_init_queue(nullb, nq); |
474 | if (setup_commands(nq)) | 512 | |
475 | break; | 513 | ret = setup_commands(nq); |
514 | if (ret) | ||
515 | goto err_queue; | ||
476 | nullb->nr_queues++; | 516 | nullb->nr_queues++; |
477 | } | 517 | } |
478 | 518 | ||
479 | if (i == submit_queues) | 519 | return 0; |
480 | return 0; | 520 | err_queue: |
481 | |||
482 | cleanup_queues(nullb); | 521 | cleanup_queues(nullb); |
483 | return 1; | 522 | return ret; |
484 | } | 523 | } |
485 | 524 | ||
486 | static int null_add_dev(void) | 525 | static int null_add_dev(void) |
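The hunks above also switch the setup helpers from returning 1 on failure to returning a negative errno (-ENOMEM) and unwinding through an err_queue label, so callers can propagate the value unchanged. Below is a small standalone sketch of that convention, using plain calloc/free in place of the driver's allocators; the struct layout and sizes are made up for illustration.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	void *cmds;
	unsigned long *tag_map;
};

/* Return 0 on success or a negative errno, as the patch does. */
static int setup_one_queue(struct queue *q, size_t depth)
{
	q->cmds = calloc(depth, 64);
	if (!q->cmds)
		return -ENOMEM;

	q->tag_map = calloc((depth + 63) / 64, sizeof(unsigned long));
	if (!q->tag_map) {
		free(q->cmds);		/* undo the earlier allocation */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct queue q;
	int ret = setup_one_queue(&q, 64);

	if (ret) {
		fprintf(stderr, "setup failed: %d\n", ret);
		return 1;
	}
	printf("setup ok\n");
	free(q.tag_map);
	free(q.cmds);
	return 0;
}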
@@ -495,34 +534,36 @@ static int null_add_dev(void) | |||
495 | 534 | ||
496 | spin_lock_init(&nullb->lock); | 535 | spin_lock_init(&nullb->lock); |
497 | 536 | ||
537 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) | ||
538 | submit_queues = nr_online_nodes; | ||
539 | |||
498 | if (setup_queues(nullb)) | 540 | if (setup_queues(nullb)) |
499 | goto err; | 541 | goto err; |
500 | 542 | ||
501 | if (queue_mode == NULL_Q_MQ) { | 543 | if (queue_mode == NULL_Q_MQ) { |
502 | null_mq_reg.numa_node = home_node; | 544 | null_mq_reg.numa_node = home_node; |
503 | null_mq_reg.queue_depth = hw_queue_depth; | 545 | null_mq_reg.queue_depth = hw_queue_depth; |
546 | null_mq_reg.nr_hw_queues = submit_queues; | ||
504 | 547 | ||
505 | if (use_per_node_hctx) { | 548 | if (use_per_node_hctx) { |
506 | null_mq_reg.ops->alloc_hctx = null_alloc_hctx; | 549 | null_mq_reg.ops->alloc_hctx = null_alloc_hctx; |
507 | null_mq_reg.ops->free_hctx = null_free_hctx; | 550 | null_mq_reg.ops->free_hctx = null_free_hctx; |
508 | |||
509 | null_mq_reg.nr_hw_queues = nr_online_nodes; | ||
510 | } else { | 551 | } else { |
511 | null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; | 552 | null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; |
512 | null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; | 553 | null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; |
513 | |||
514 | null_mq_reg.nr_hw_queues = submit_queues; | ||
515 | } | 554 | } |
516 | 555 | ||
517 | nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); | 556 | nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); |
518 | } else if (queue_mode == NULL_Q_BIO) { | 557 | } else if (queue_mode == NULL_Q_BIO) { |
519 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); | 558 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); |
520 | blk_queue_make_request(nullb->q, null_queue_bio); | 559 | blk_queue_make_request(nullb->q, null_queue_bio); |
560 | init_driver_queues(nullb); | ||
521 | } else { | 561 | } else { |
522 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); | 562 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); |
523 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); | 563 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); |
524 | if (nullb->q) | 564 | if (nullb->q) |
525 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); | 565 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); |
566 | init_driver_queues(nullb); | ||
526 | } | 567 | } |
527 | 568 | ||
528 | if (!nullb->q) | 569 | if (!nullb->q) |
@@ -534,10 +575,7 @@ static int null_add_dev(void) | |||
534 | disk = nullb->disk = alloc_disk_node(1, home_node); | 575 | disk = nullb->disk = alloc_disk_node(1, home_node); |
535 | if (!disk) { | 576 | if (!disk) { |
536 | queue_fail: | 577 | queue_fail: |
537 | if (queue_mode == NULL_Q_MQ) | 578 | blk_cleanup_queue(nullb->q); |
538 | blk_mq_free_queue(nullb->q); | ||
539 | else | ||
540 | blk_cleanup_queue(nullb->q); | ||
541 | cleanup_queues(nullb); | 579 | cleanup_queues(nullb); |
542 | err: | 580 | err: |
543 | kfree(nullb); | 581 | kfree(nullb); |
@@ -579,7 +617,13 @@ static int __init null_init(void) | |||
579 | } | 617 | } |
580 | #endif | 618 | #endif |
581 | 619 | ||
582 | if (submit_queues > nr_cpu_ids) | 620 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
621 | if (submit_queues < nr_online_nodes) { | ||
622 | pr_warn("null_blk: submit_queues param is set to %u.", | ||
623 | nr_online_nodes); | ||
624 | submit_queues = nr_online_nodes; | ||
625 | } | ||
626 | } else if (submit_queues > nr_cpu_ids) | ||
583 | submit_queues = nr_cpu_ids; | 627 | submit_queues = nr_cpu_ids; |
584 | else if (!submit_queues) | 628 | else if (!submit_queues) |
585 | submit_queues = 1; | 629 | submit_queues = 1; |
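The init-time check above keeps the submit_queues parameter consistent with the selected mode: in per-node MQ mode it is raised to nr_online_nodes, otherwise it is clamped to nr_cpu_ids and defaulted to 1. A tiny pure-function sketch of that policy follows; the function name and the values in main() are illustrative only.

#include <stdio.h>

static int effective_submit_queues(int requested, int per_node_mq,
				   int nr_nodes, int nr_cpus)
{
	if (per_node_mq && requested < nr_nodes)
		return nr_nodes;	/* at least one queue per node */
	if (!per_node_mq && requested > nr_cpus)
		return nr_cpus;		/* never more queues than CPUs */
	if (!requested)
		return 1;		/* sane default */
	return requested;
}

int main(void)
{
	printf("%d\n", effective_submit_queues(0, 0, 2, 8));	/* 1 */
	printf("%d\n", effective_submit_queues(1, 1, 4, 8));	/* 4 */
	printf("%d\n", effective_submit_queues(64, 0, 2, 8));	/* 8 */
	return 0;
}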
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 9199c93be926..eb6e1e0e8db2 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) | |||
5269 | } | 5269 | } |
5270 | } | 5270 | } |
5271 | 5271 | ||
5272 | const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | 5272 | static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) |
5273 | { | 5273 | { |
5274 | switch (state) { | 5274 | switch (state) { |
5275 | case SKD_MSG_STATE_IDLE: | 5275 | case SKD_MSG_STATE_IDLE: |
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | |||
5281 | } | 5281 | } |
5282 | } | 5282 | } |
5283 | 5283 | ||
5284 | const char *skd_skreq_state_to_str(enum skd_req_state state) | 5284 | static const char *skd_skreq_state_to_str(enum skd_req_state state) |
5285 | { | 5285 | { |
5286 | switch (state) { | 5286 | switch (state) { |
5287 | case SKD_REQ_STATE_IDLE: | 5287 | case SKD_REQ_STATE_IDLE: |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 6bfc1bb318f6..dceb85f8d9a8 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = { | |||
87 | { USB_DEVICE(0x0CF3, 0xE004) }, | 87 | { USB_DEVICE(0x0CF3, 0xE004) }, |
88 | { USB_DEVICE(0x0CF3, 0xE005) }, | 88 | { USB_DEVICE(0x0CF3, 0xE005) }, |
89 | { USB_DEVICE(0x0930, 0x0219) }, | 89 | { USB_DEVICE(0x0930, 0x0219) }, |
90 | { USB_DEVICE(0x0930, 0x0220) }, | ||
90 | { USB_DEVICE(0x0489, 0xe057) }, | 91 | { USB_DEVICE(0x0489, 0xe057) }, |
91 | { USB_DEVICE(0x13d3, 0x3393) }, | 92 | { USB_DEVICE(0x13d3, 0x3393) }, |
92 | { USB_DEVICE(0x0489, 0xe04e) }, | 93 | { USB_DEVICE(0x0489, 0xe04e) }, |
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { | |||
129 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 130 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
130 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | 131 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
131 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 132 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
133 | { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, | ||
132 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 134 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
133 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 135 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
134 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 136 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c0ff34f2d2df..3980fd18f6ea 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | 155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, |
156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
157 | { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, | ||
157 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 158 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
158 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 159 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
159 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 160 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 40cc0cf2ded6..e6939e13e338 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = { | |||
664 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), | 664 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), |
665 | }, | 665 | }, |
666 | }, | 666 | }, |
667 | { | ||
668 | .ident = "Dell XPS421", | ||
669 | .matches = { | ||
670 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
671 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), | ||
672 | }, | ||
673 | }, | ||
667 | { } | 674 | { } |
668 | }; | 675 | }; |
669 | 676 | ||
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 8e562dc65601..e1f3337a0cf9 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c | |||
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM"; | |||
27 | static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, | 27 | static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, |
28 | void **return_value) | 28 | void **return_value) |
29 | { | 29 | { |
30 | acpi_status status; | 30 | acpi_status status = AE_OK; |
31 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 31 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
32 | status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 32 | |
33 | if (strstr(buffer.pointer, context) != NULL) { | 33 | if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) { |
34 | *return_value = handle; | 34 | if (strstr(buffer.pointer, context) != NULL) { |
35 | *return_value = handle; | ||
36 | status = AE_CTRL_TERMINATE; | ||
37 | } | ||
35 | kfree(buffer.pointer); | 38 | kfree(buffer.pointer); |
36 | return AE_CTRL_TERMINATE; | ||
37 | } | 39 | } |
38 | return AE_OK; | 40 | |
41 | return status; | ||
39 | } | 42 | } |
40 | 43 | ||
41 | static inline void ppi_assign_params(union acpi_object params[4], | 44 | static inline void ppi_assign_params(union acpi_object params[4], |
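The reworked callback above only touches buffer.pointer when acpi_get_name() reported success, and returns AE_CTRL_TERMINATE to stop the namespace walk once the requested device name is found. A generic userspace analogue of that "walk until the callback says stop" shape is sketched below; the enum values, walk_names() helper and the name list are all made up for illustration.

#include <stdio.h>
#include <string.h>

enum walk_status { WALK_CONTINUE, WALK_TERMINATE };

typedef enum walk_status (*walk_cb)(const char *name, void *ctx,
				    const char **found);

static void walk_names(const char *const *names, int n, walk_cb cb,
		       void *ctx, const char **found)
{
	int i;

	for (i = 0; i < n; i++)
		if (cb(names[i], ctx, found) == WALK_TERMINATE)
			break;
}

/* Only inspect the name if we actually got one; stop when it matches. */
static enum walk_status match_cb(const char *name, void *ctx, const char **found)
{
	if (!name)			/* analogue of acpi_get_name() failing */
		return WALK_CONTINUE;
	if (strstr(name, (const char *)ctx)) {
		*found = name;
		return WALK_TERMINATE;
	}
	return WALK_CONTINUE;
}

int main(void)
{
	const char *names[] = { "\\_SB.PCI0", "\\_SB.PCI0.TPM", "\\_SB.LID0" };
	const char *found = NULL;

	walk_names(names, 3, match_cb, "TPM", &found);
	printf("found: %s\n", found ? found : "(none)");
	return 0;
}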
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 8d3009e44fba..5543b7df8e16 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table, | |||
87 | return 0; | 87 | return 0; |
88 | } | 88 | } |
89 | 89 | ||
90 | static unsigned int _get_val(struct clk_divider *divider, u8 div) | 90 | static unsigned int _get_val(struct clk_divider *divider, unsigned int div) |
91 | { | 91 | { |
92 | if (divider->flags & CLK_DIVIDER_ONE_BASED) | 92 | if (divider->flags & CLK_DIVIDER_ONE_BASED) |
93 | return div; | 93 | return div; |
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 7be41e676a64..00a3abe103a5 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c | |||
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw) | |||
60 | struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); | 60 | struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); |
61 | int ret; | 61 | int ret; |
62 | 62 | ||
63 | ret = regmap_update_bits(s2mps11->iodev->regmap, | 63 | ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, |
64 | S2MPS11_REG_RTC_CTRL, | 64 | S2MPS11_REG_RTC_CTRL, |
65 | s2mps11->mask, s2mps11->mask); | 65 | s2mps11->mask, s2mps11->mask); |
66 | if (!ret) | 66 | if (!ret) |
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw) | |||
74 | struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); | 74 | struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); |
75 | int ret; | 75 | int ret; |
76 | 76 | ||
77 | ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL, | 77 | ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, |
78 | s2mps11->mask, ~s2mps11->mask); | 78 | s2mps11->mask, ~s2mps11->mask); |
79 | 79 | ||
80 | if (!ret) | 80 | if (!ret) |
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) | |||
174 | s2mps11_clk->hw.init = &s2mps11_clks_init[i]; | 174 | s2mps11_clk->hw.init = &s2mps11_clks_init[i]; |
175 | s2mps11_clk->mask = 1 << i; | 175 | s2mps11_clk->mask = 1 << i; |
176 | 176 | ||
177 | ret = regmap_read(s2mps11_clk->iodev->regmap, | 177 | ret = regmap_read(s2mps11_clk->iodev->regmap_pmic, |
178 | S2MPS11_REG_RTC_CTRL, &val); | 178 | S2MPS11_REG_RTC_CTRL, &val); |
179 | if (ret < 0) | 179 | if (ret < 0) |
180 | goto err_reg; | 180 | goto err_reg; |
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 39b40aaede2b..68e515d093d8 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c | |||
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data; | |||
26 | #define ASS_CLK_DIV 0x4 | 26 | #define ASS_CLK_DIV 0x4 |
27 | #define ASS_CLK_GATE 0x8 | 27 | #define ASS_CLK_GATE 0x8 |
28 | 28 | ||
29 | /* list of all parent clock list */ | ||
30 | static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; | ||
31 | static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; | ||
32 | |||
33 | #ifdef CONFIG_PM_SLEEP | ||
29 | static unsigned long reg_save[][2] = { | 34 | static unsigned long reg_save[][2] = { |
30 | {ASS_CLK_SRC, 0}, | 35 | {ASS_CLK_SRC, 0}, |
31 | {ASS_CLK_DIV, 0}, | 36 | {ASS_CLK_DIV, 0}, |
32 | {ASS_CLK_GATE, 0}, | 37 | {ASS_CLK_GATE, 0}, |
33 | }; | 38 | }; |
34 | 39 | ||
35 | /* list of all parent clock list */ | ||
36 | static const char *mout_audss_p[] = { "fin_pll", "fout_epll" }; | ||
37 | static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" }; | ||
38 | |||
39 | #ifdef CONFIG_PM_SLEEP | ||
40 | static int exynos_audss_clk_suspend(void) | 40 | static int exynos_audss_clk_suspend(void) |
41 | { | 41 | { |
42 | int i; | 42 | int i; |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index ad5ff50c5f28..1a7c1b929c69 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #define SRC_TOP1 0xc214 | 39 | #define SRC_TOP1 0xc214 |
40 | #define SRC_CAM 0xc220 | 40 | #define SRC_CAM 0xc220 |
41 | #define SRC_TV 0xc224 | 41 | #define SRC_TV 0xc224 |
42 | #define SRC_MFC 0xcc28 | 42 | #define SRC_MFC 0xc228 |
43 | #define SRC_G3D 0xc22c | 43 | #define SRC_G3D 0xc22c |
44 | #define E4210_SRC_IMAGE 0xc230 | 44 | #define E4210_SRC_IMAGE 0xc230 |
45 | #define SRC_LCD0 0xc234 | 45 | #define SRC_LCD0 0xc234 |
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index adf32343c9f9..e52359cf9b6f 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #define MPLL_LOCK 0x4000 | 25 | #define MPLL_LOCK 0x4000 |
26 | #define MPLL_CON0 0x4100 | 26 | #define MPLL_CON0 0x4100 |
27 | #define SRC_CORE1 0x4204 | 27 | #define SRC_CORE1 0x4204 |
28 | #define GATE_IP_ACP 0x8800 | ||
28 | #define CPLL_LOCK 0x10020 | 29 | #define CPLL_LOCK 0x10020 |
29 | #define EPLL_LOCK 0x10030 | 30 | #define EPLL_LOCK 0x10030 |
30 | #define VPLL_LOCK 0x10040 | 31 | #define VPLL_LOCK 0x10040 |
@@ -75,7 +76,6 @@ | |||
75 | #define SRC_CDREX 0x20200 | 76 | #define SRC_CDREX 0x20200 |
76 | #define PLL_DIV2_SEL 0x20a24 | 77 | #define PLL_DIV2_SEL 0x20a24 |
77 | #define GATE_IP_DISP1 0x10928 | 78 | #define GATE_IP_DISP1 0x10928 |
78 | #define GATE_IP_ACP 0x10000 | ||
79 | 79 | ||
80 | /* list of PLLs to be registered */ | 80 | /* list of PLLs to be registered */ |
81 | enum exynos5250_plls { | 81 | enum exynos5250_plls { |
@@ -120,7 +120,8 @@ enum exynos5250_clks { | |||
120 | spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, | 120 | spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, |
121 | hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, | 121 | hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, |
122 | tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, | 122 | tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, |
123 | wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, | 123 | wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0, |
124 | smmu_mdma0, | ||
124 | 125 | ||
125 | /* mux clocks */ | 126 | /* mux clocks */ |
126 | mout_hdmi = 1024, | 127 | mout_hdmi = 1024, |
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
354 | GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), | 355 | GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), |
355 | GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), | 356 | GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), |
356 | GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), | 357 | GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), |
357 | GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), | 358 | GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0), |
358 | GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), | 359 | GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0), |
359 | GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), | 360 | GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), |
360 | GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), | 361 | GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), |
361 | GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), | 362 | GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), |
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
406 | GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), | 407 | GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), |
407 | GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), | 408 | GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), |
408 | GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), | 409 | GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), |
409 | GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), | 410 | GATE(sysreg, "sysreg", "aclk66", |
411 | GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0), | ||
410 | GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), | 412 | GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), |
411 | GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), | 413 | GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), |
412 | GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), | 414 | GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), |
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { | |||
492 | GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), | 494 | GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), |
493 | GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), | 495 | GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), |
494 | GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), | 496 | GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), |
497 | GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0), | ||
498 | GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0), | ||
495 | }; | 499 | }; |
496 | 500 | ||
497 | static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { | 501 | static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5c07a56962db..634c4d6dd45a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK | |||
75 | config CLKSRC_EFM32 | 75 | config CLKSRC_EFM32 |
76 | bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 | 76 | bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 |
77 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) | 77 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) |
78 | select CLKSRC_MMIO | ||
78 | default ARCH_EFM32 | 79 | default ARCH_EFM32 |
79 | help | 80 | help |
80 | Support to use the timers of EFM32 SoCs as clock source and clock | 81 | Support to use the timers of EFM32 SoCs as clock source and clock |
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 35639cf4e5a2..b9ddd9e3a2f5 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c | |||
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void) | |||
35 | 35 | ||
36 | init_func = match->data; | 36 | init_func = match->data; |
37 | init_func(np); | 37 | init_func(np); |
38 | of_node_put(np); | ||
39 | } | 38 | } |
40 | } | 39 | } |
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 45ba8aecc729..2a2ea2717f3a 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c | |||
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer) | |||
108 | 108 | ||
109 | static u64 read_sched_clock(void) | 109 | static u64 read_sched_clock(void) |
110 | { | 110 | { |
111 | return __raw_readl(sched_io_base); | 111 | return ~__raw_readl(sched_io_base); |
112 | } | 112 | } |
113 | 113 | ||
114 | static const struct of_device_id sptimer_ids[] __initconst = { | 114 | static const struct of_device_id sptimer_ids[] __initconst = { |
115 | { .compatible = "picochip,pc3x2-rtc" }, | 115 | { .compatible = "picochip,pc3x2-rtc" }, |
116 | { .compatible = "snps,dw-apb-timer-sp" }, | ||
117 | { /* Sentinel */ }, | 116 | { /* Sentinel */ }, |
118 | }; | 117 | }; |
119 | 118 | ||
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer) | |||
151 | num_called++; | 150 | num_called++; |
152 | } | 151 | } |
153 | CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); | 152 | CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); |
154 | CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); | 153 | CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); |
154 | CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); | ||
155 | CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); | ||
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 2fb4695a28d8..a4f6119aafd8 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c | |||
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node) | |||
179 | writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), | 179 | writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), |
180 | timer_base + TIMER_CTL_REG(0)); | 180 | timer_base + TIMER_CTL_REG(0)); |
181 | 181 | ||
182 | /* Make sure timer is stopped before playing with interrupts */ | ||
183 | sun4i_clkevt_time_stop(0); | ||
184 | |||
182 | ret = setup_irq(irq, &sun4i_timer_irq); | 185 | ret = setup_irq(irq, &sun4i_timer_irq); |
183 | if (ret) | 186 | if (ret) |
184 | pr_warn("failed to setup irq %d\n", irq); | 187 | pr_warn("failed to setup irq %d\n", irq); |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index d8e47e502785..4e7f6802e840 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
256 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; | 256 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; |
257 | 257 | ||
258 | /* | 258 | /* |
259 | * Set scale and timer for sched_clock. | ||
260 | */ | ||
261 | sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); | ||
262 | |||
263 | /* | ||
264 | * Setup free-running clocksource timer (interrupts | 259 | * Setup free-running clocksource timer (interrupts |
265 | * disabled). | 260 | * disabled). |
266 | */ | 261 | */ |
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) | |||
270 | timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | | 265 | timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | |
271 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); | 266 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); |
272 | 267 | ||
268 | /* | ||
269 | * Set scale and timer for sched_clock. | ||
270 | */ | ||
271 | sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); | ||
272 | |||
273 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, | 273 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, |
274 | "armada_370_xp_clocksource", | 274 | "armada_370_xp_clocksource", |
275 | timer_clk, 300, 32, clocksource_mmio_readl_down); | 275 | timer_clk, 300, 32, clocksource_mmio_readl_down); |
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index 856ad80418ae..7c03dd84f66a 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c | |||
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) | |||
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
61 | static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) | 61 | static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) |
62 | { | 62 | { |
63 | unsigned int frequency, rate, min_freq; | 63 | unsigned int frequency, rate, min_freq; |
64 | int retval, steps, i; | 64 | int retval, steps, i; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 81e9d4412db8..8d19f7c06010 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/suspend.h> | ||
30 | #include <linux/syscore_ops.h> | 29 | #include <linux/syscore_ops.h> |
31 | #include <linux/tick.h> | 30 | #include <linux/tick.h> |
32 | #include <trace/events/power.h> | 31 | #include <trace/events/power.h> |
@@ -48,9 +47,6 @@ static LIST_HEAD(cpufreq_policy_list); | |||
48 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); | 47 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); |
49 | #endif | 48 | #endif |
50 | 49 | ||
51 | /* Flag to suspend/resume CPUFreq governors */ | ||
52 | static bool cpufreq_suspended; | ||
53 | |||
54 | static inline bool has_target(void) | 50 | static inline bool has_target(void) |
55 | { | 51 | { |
56 | return cpufreq_driver->target_index || cpufreq_driver->target; | 52 | return cpufreq_driver->target_index || cpufreq_driver->target; |
@@ -832,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
832 | int ret = 0; | 828 | int ret = 0; |
833 | 829 | ||
834 | memcpy(&new_policy, policy, sizeof(*policy)); | 830 | memcpy(&new_policy, policy, sizeof(*policy)); |
831 | |||
832 | /* Use the default policy if it's valid. */ ||
833 | if (cpufreq_driver->setpolicy) | ||
834 | cpufreq_parse_governor(policy->governor->name, | ||
835 | &new_policy.policy, NULL); | ||
836 | |||
835 | /* assure that the starting sequence is run in cpufreq_set_policy */ | 837 | /* assure that the starting sequence is run in cpufreq_set_policy */ |
836 | policy->governor = NULL; | 838 | policy->governor = NULL; |
837 | 839 | ||
838 | /* set default policy */ | 840 | /* set default policy */ |
839 | ret = cpufreq_set_policy(policy, &new_policy); | 841 | ret = cpufreq_set_policy(policy, &new_policy); |
840 | policy->user_policy.policy = policy->policy; | ||
841 | policy->user_policy.governor = policy->governor; | ||
842 | |||
843 | if (ret) { | 842 | if (ret) { |
844 | pr_debug("setting policy failed\n"); | 843 | pr_debug("setting policy failed\n"); |
845 | if (cpufreq_driver->exit) | 844 | if (cpufreq_driver->exit) |
@@ -849,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
849 | 848 | ||
850 | #ifdef CONFIG_HOTPLUG_CPU | 849 | #ifdef CONFIG_HOTPLUG_CPU |
851 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | 850 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, |
852 | unsigned int cpu, struct device *dev, | 851 | unsigned int cpu, struct device *dev) |
853 | bool frozen) | ||
854 | { | 852 | { |
855 | int ret = 0; | 853 | int ret = 0; |
856 | unsigned long flags; | 854 | unsigned long flags; |
@@ -881,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
881 | } | 879 | } |
882 | } | 880 | } |
883 | 881 | ||
884 | /* Don't touch sysfs links during light-weight init */ | 882 | return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); |
885 | if (!frozen) | ||
886 | ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); | ||
887 | |||
888 | return ret; | ||
889 | } | 883 | } |
890 | #endif | 884 | #endif |
891 | 885 | ||
@@ -930,6 +924,27 @@ err_free_policy: | |||
930 | return NULL; | 924 | return NULL; |
931 | } | 925 | } |
932 | 926 | ||
927 | static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) | ||
928 | { | ||
929 | struct kobject *kobj; | ||
930 | struct completion *cmp; | ||
931 | |||
932 | down_read(&policy->rwsem); | ||
933 | kobj = &policy->kobj; | ||
934 | cmp = &policy->kobj_unregister; | ||
935 | up_read(&policy->rwsem); | ||
936 | kobject_put(kobj); | ||
937 | |||
938 | /* | ||
939 | * We need to make sure that the underlying kobj is | ||
940 | * actually not referenced anymore by anybody before we | ||
941 | * proceed with unloading. | ||
942 | */ | ||
943 | pr_debug("waiting for dropping of refcount\n"); | ||
944 | wait_for_completion(cmp); | ||
945 | pr_debug("wait complete\n"); | ||
946 | } | ||
947 | |||
933 | static void cpufreq_policy_free(struct cpufreq_policy *policy) | 948 | static void cpufreq_policy_free(struct cpufreq_policy *policy) |
934 | { | 949 | { |
935 | free_cpumask_var(policy->related_cpus); | 950 | free_cpumask_var(policy->related_cpus); |
@@ -990,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
990 | list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { | 1005 | list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { |
991 | if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { | 1006 | if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { |
992 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1007 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
993 | ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); | 1008 | ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); |
994 | up_read(&cpufreq_rwsem); | 1009 | up_read(&cpufreq_rwsem); |
995 | return ret; | 1010 | return ret; |
996 | } | 1011 | } |
@@ -998,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
998 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1013 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
999 | #endif | 1014 | #endif |
1000 | 1015 | ||
1001 | if (frozen) | 1016 | /* |
1002 | /* Restore the saved policy when doing light-weight init */ | 1017 | * Restore the saved policy when doing light-weight init and fall back |
1003 | policy = cpufreq_policy_restore(cpu); | 1018 | * to the full init if that fails. |
1004 | else | 1019 | */ |
1020 | policy = frozen ? cpufreq_policy_restore(cpu) : NULL; | ||
1021 | if (!policy) { | ||
1022 | frozen = false; | ||
1005 | policy = cpufreq_policy_alloc(); | 1023 | policy = cpufreq_policy_alloc(); |
1006 | 1024 | if (!policy) | |
1007 | if (!policy) | 1025 | goto nomem_out; |
1008 | goto nomem_out; | 1026 | } |
1009 | |||
1010 | 1027 | ||
1011 | /* | 1028 | /* |
1012 | * In the resume path, since we restore a saved policy, the assignment | 1029 | * In the resume path, since we restore a saved policy, the assignment |
@@ -1051,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1051 | */ | 1068 | */ |
1052 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | 1069 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); |
1053 | 1070 | ||
1054 | policy->user_policy.min = policy->min; | 1071 | if (!frozen) { |
1055 | policy->user_policy.max = policy->max; | 1072 | policy->user_policy.min = policy->min; |
1073 | policy->user_policy.max = policy->max; | ||
1074 | } | ||
1056 | 1075 | ||
1057 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1076 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
1058 | CPUFREQ_START, policy); | 1077 | CPUFREQ_START, policy); |
@@ -1083,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1083 | 1102 | ||
1084 | cpufreq_init_policy(policy); | 1103 | cpufreq_init_policy(policy); |
1085 | 1104 | ||
1105 | if (!frozen) { | ||
1106 | policy->user_policy.policy = policy->policy; | ||
1107 | policy->user_policy.governor = policy->governor; | ||
1108 | } | ||
1109 | |||
1086 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 1110 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
1087 | up_read(&cpufreq_rwsem); | 1111 | up_read(&cpufreq_rwsem); |
1088 | 1112 | ||
@@ -1100,7 +1124,13 @@ err_get_freq: | |||
1100 | if (cpufreq_driver->exit) | 1124 | if (cpufreq_driver->exit) |
1101 | cpufreq_driver->exit(policy); | 1125 | cpufreq_driver->exit(policy); |
1102 | err_set_policy_cpu: | 1126 | err_set_policy_cpu: |
1127 | if (frozen) { | ||
1128 | /* Do not leave stale fallback data behind. */ | ||
1129 | per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; | ||
1130 | cpufreq_policy_put_kobj(policy); | ||
1131 | } | ||
1103 | cpufreq_policy_free(policy); | 1132 | cpufreq_policy_free(policy); |
1133 | |||
1104 | nomem_out: | 1134 | nomem_out: |
1105 | up_read(&cpufreq_rwsem); | 1135 | up_read(&cpufreq_rwsem); |
1106 | 1136 | ||
@@ -1122,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
1122 | } | 1152 | } |
1123 | 1153 | ||
1124 | static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | 1154 | static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, |
1125 | unsigned int old_cpu, bool frozen) | 1155 | unsigned int old_cpu) |
1126 | { | 1156 | { |
1127 | struct device *cpu_dev; | 1157 | struct device *cpu_dev; |
1128 | int ret; | 1158 | int ret; |
@@ -1130,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | |||
1130 | /* first sibling now owns the new sysfs dir */ | 1160 | /* first sibling now owns the new sysfs dir */ |
1131 | cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); | 1161 | cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); |
1132 | 1162 | ||
1133 | /* Don't touch sysfs files during light-weight tear-down */ | ||
1134 | if (frozen) | ||
1135 | return cpu_dev->id; | ||
1136 | |||
1137 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); | 1163 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); |
1138 | ret = kobject_move(&policy->kobj, &cpu_dev->kobj); | 1164 | ret = kobject_move(&policy->kobj, &cpu_dev->kobj); |
1139 | if (ret) { | 1165 | if (ret) { |
@@ -1200,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1200 | if (!frozen) | 1226 | if (!frozen) |
1201 | sysfs_remove_link(&dev->kobj, "cpufreq"); | 1227 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
1202 | } else if (cpus > 1) { | 1228 | } else if (cpus > 1) { |
1203 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); | 1229 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); |
1204 | if (new_cpu >= 0) { | 1230 | if (new_cpu >= 0) { |
1205 | update_policy_cpu(policy, new_cpu); | 1231 | update_policy_cpu(policy, new_cpu); |
1206 | 1232 | ||
@@ -1222,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1222 | int ret; | 1248 | int ret; |
1223 | unsigned long flags; | 1249 | unsigned long flags; |
1224 | struct cpufreq_policy *policy; | 1250 | struct cpufreq_policy *policy; |
1225 | struct kobject *kobj; | ||
1226 | struct completion *cmp; | ||
1227 | 1251 | ||
1228 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 1252 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
1229 | policy = per_cpu(cpufreq_cpu_data, cpu); | 1253 | policy = per_cpu(cpufreq_cpu_data, cpu); |
@@ -1253,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1253 | } | 1277 | } |
1254 | } | 1278 | } |
1255 | 1279 | ||
1256 | if (!frozen) { | 1280 | if (!frozen) |
1257 | down_read(&policy->rwsem); | 1281 | cpufreq_policy_put_kobj(policy); |
1258 | kobj = &policy->kobj; | ||
1259 | cmp = &policy->kobj_unregister; | ||
1260 | up_read(&policy->rwsem); | ||
1261 | kobject_put(kobj); | ||
1262 | |||
1263 | /* | ||
1264 | * We need to make sure that the underlying kobj is | ||
1265 | * actually not referenced anymore by anybody before we | ||
1266 | * proceed with unloading. | ||
1267 | */ | ||
1268 | pr_debug("waiting for dropping of refcount\n"); | ||
1269 | wait_for_completion(cmp); | ||
1270 | pr_debug("wait complete\n"); | ||
1271 | } | ||
1272 | 1282 | ||
1273 | /* | 1283 | /* |
1274 | * Perform the ->exit() even during light-weight tear-down, | 1284 | * Perform the ->exit() even during light-weight tear-down, |
@@ -1466,41 +1476,6 @@ static struct subsys_interface cpufreq_interface = { | |||
1466 | .remove_dev = cpufreq_remove_dev, | 1476 | .remove_dev = cpufreq_remove_dev, |
1467 | }; | 1477 | }; |
1468 | 1478 | ||
1469 | void cpufreq_suspend(void) | ||
1470 | { | ||
1471 | struct cpufreq_policy *policy; | ||
1472 | |||
1473 | if (!has_target()) | ||
1474 | return; | ||
1475 | |||
1476 | pr_debug("%s: Suspending Governors\n", __func__); | ||
1477 | |||
1478 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) | ||
1479 | if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) | ||
1480 | pr_err("%s: Failed to stop governor for policy: %p\n", | ||
1481 | __func__, policy); | ||
1482 | |||
1483 | cpufreq_suspended = true; | ||
1484 | } | ||
1485 | |||
1486 | void cpufreq_resume(void) | ||
1487 | { | ||
1488 | struct cpufreq_policy *policy; | ||
1489 | |||
1490 | if (!has_target()) | ||
1491 | return; | ||
1492 | |||
1493 | pr_debug("%s: Resuming Governors\n", __func__); | ||
1494 | |||
1495 | cpufreq_suspended = false; | ||
1496 | |||
1497 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) | ||
1498 | if (__cpufreq_governor(policy, CPUFREQ_GOV_START) | ||
1499 | || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) | ||
1500 | pr_err("%s: Failed to start governor for policy: %p\n", | ||
1501 | __func__, policy); | ||
1502 | } | ||
1503 | |||
1504 | /** | 1479 | /** |
1505 | * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. | 1480 | * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. |
1506 | * | 1481 | * |
@@ -1803,10 +1778,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, | |||
1803 | struct cpufreq_governor *gov = NULL; | 1778 | struct cpufreq_governor *gov = NULL; |
1804 | #endif | 1779 | #endif |
1805 | 1780 | ||
1806 | /* Don't start any governor operations if we are entering suspend */ | ||
1807 | if (cpufreq_suspended) | ||
1808 | return 0; | ||
1809 | |||
1810 | if (policy->governor->max_transition_latency && | 1781 | if (policy->governor->max_transition_latency && |
1811 | policy->cpuinfo.transition_latency > | 1782 | policy->cpuinfo.transition_latency > |
1812 | policy->governor->max_transition_latency) { | 1783 | policy->governor->max_transition_latency) { |
@@ -2119,6 +2090,9 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
2119 | dev = get_cpu_device(cpu); | 2090 | dev = get_cpu_device(cpu); |
2120 | if (dev) { | 2091 | if (dev) { |
2121 | 2092 | ||
2093 | if (action & CPU_TASKS_FROZEN) | ||
2094 | frozen = true; | ||
2095 | |||
2122 | switch (action & ~CPU_TASKS_FROZEN) { | 2096 | switch (action & ~CPU_TASKS_FROZEN) { |
2123 | case CPU_ONLINE: | 2097 | case CPU_ONLINE: |
2124 | __cpufreq_add_dev(dev, NULL, frozen); | 2098 | __cpufreq_add_dev(dev, NULL, frozen); |
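The hotplug callback above now derives the light-weight ("frozen") path from the notifier action itself: the CPU_TASKS_FROZEN bit is tested first, then masked off before the switch so the same CPU_ONLINE/CPU_DOWN_* cases handle both normal and suspend/resume transitions. A tiny sketch of that bit-masking idiom, using stand-in constant values rather than the kernel's notifier definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; the kernel defines its own notifier constants. */
#define CPU_ONLINE		0x0002
#define CPU_TASKS_FROZEN	0x0010

static void handle_action(unsigned long action)
{
	bool frozen = false;

	if (action & CPU_TASKS_FROZEN)
		frozen = true;			/* suspend/resume path */

	switch (action & ~CPU_TASKS_FROZEN) {	/* same cases either way */
	case CPU_ONLINE:
		printf("online, frozen=%d\n", frozen);
		break;
	default:
		printf("other action 0x%lx\n", action);
		break;
	}
}

int main(void)
{
	handle_action(CPU_ONLINE);
	handle_action(CPU_ONLINE | CPU_TASKS_FROZEN);
	return 0;
}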
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5f1cbae36961..d51f17ed691e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
581 | } | 581 | } |
582 | 582 | ||
583 | #define ICPU(model, policy) \ | 583 | #define ICPU(model, policy) \ |
584 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } | 584 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ |
585 | (unsigned long)&policy } | ||
585 | 586 | ||
586 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 587 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
587 | ICPU(0x2a, core_params), | 588 | ICPU(0x2a, core_params), |
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
614 | cpu = all_cpu_data[cpunum]; | 615 | cpu = all_cpu_data[cpunum]; |
615 | 616 | ||
616 | intel_pstate_get_cpu_pstates(cpu); | 617 | intel_pstate_get_cpu_pstates(cpu); |
618 | if (!cpu->pstate.current_pstate) { | ||
619 | all_cpu_data[cpunum] = NULL; | ||
620 | kfree(cpu); | ||
621 | return -ENODATA; | ||
622 | } | ||
617 | 623 | ||
618 | cpu->cpu = cpunum; | 624 | cpu->cpu = cpunum; |
619 | 625 | ||
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index 36795639df0d..6e51114057d0 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c | |||
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = { | |||
65 | .state_count = 2, | 65 | .state_count = 2, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static int __init calxeda_cpuidle_probe(struct platform_device *pdev) | 68 | static int calxeda_cpuidle_probe(struct platform_device *pdev) |
69 | { | 69 | { |
70 | return cpuidle_register(&calxeda_idle_driver, NULL); | 70 | return cpuidle_register(&calxeda_idle_driver, NULL); |
71 | } | 71 | } |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 9dd6e01eac33..f757a0f428bd 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = { | |||
1410 | static int __init ixp_module_init(void) | 1410 | static int __init ixp_module_init(void) |
1411 | { | 1411 | { |
1412 | int num = ARRAY_SIZE(ixp4xx_algos); | 1412 | int num = ARRAY_SIZE(ixp4xx_algos); |
1413 | int i, err ; | 1413 | int i, err; |
1414 | 1414 | ||
1415 | pdev = platform_device_register_full(&ixp_dev_info); | 1415 | pdev = platform_device_register_full(&ixp_dev_info); |
1416 | if (IS_ERR(pdev)) | 1416 | if (IS_ERR(pdev)) |
1417 | return PTR_ERR(pdev); | 1417 | return PTR_ERR(pdev); |
1418 | 1418 | ||
1419 | dev = &pdev->dev; | ||
1420 | |||
1421 | spin_lock_init(&desc_lock); | 1419 | spin_lock_init(&desc_lock); |
1422 | spin_lock_init(&emerg_lock); | 1420 | spin_lock_init(&emerg_lock); |
1423 | 1421 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687cc2334..c823daaf9043 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -62,6 +62,7 @@ config INTEL_IOATDMA | |||
62 | tristate "Intel I/OAT DMA support" | 62 | tristate "Intel I/OAT DMA support" |
63 | depends on PCI && X86 | 63 | depends on PCI && X86 |
64 | select DMA_ENGINE | 64 | select DMA_ENGINE |
65 | select DMA_ENGINE_RAID | ||
65 | select DCA | 66 | select DCA |
66 | help | 67 | help |
67 | Enable support for the Intel(R) I/OAT DMA engine present | 68 | Enable support for the Intel(R) I/OAT DMA engine present |
@@ -112,6 +113,7 @@ config MV_XOR | |||
112 | bool "Marvell XOR engine support" | 113 | bool "Marvell XOR engine support" |
113 | depends on PLAT_ORION | 114 | depends on PLAT_ORION |
114 | select DMA_ENGINE | 115 | select DMA_ENGINE |
116 | select DMA_ENGINE_RAID | ||
115 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 117 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
116 | ---help--- | 118 | ---help--- |
117 | Enable support for the Marvell XOR engine. | 119 | Enable support for the Marvell XOR engine. |
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA | |||
187 | tristate "AMCC PPC440SPe ADMA support" | 189 | tristate "AMCC PPC440SPe ADMA support" |
188 | depends on 440SPe || 440SP | 190 | depends on 440SPe || 440SP |
189 | select DMA_ENGINE | 191 | select DMA_ENGINE |
192 | select DMA_ENGINE_RAID | ||
190 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 193 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
191 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 194 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
192 | help | 195 | help |
@@ -352,6 +355,7 @@ config NET_DMA | |||
352 | bool "Network: TCP receive copy offload" | 355 | bool "Network: TCP receive copy offload" |
353 | depends on DMA_ENGINE && NET | 356 | depends on DMA_ENGINE && NET |
354 | default (INTEL_IOATDMA || FSL_DMA) | 357 | default (INTEL_IOATDMA || FSL_DMA) |
358 | depends on BROKEN | ||
355 | help | 359 | help |
356 | This enables the use of DMA engines in the network stack to | 360 | This enables the use of DMA engines in the network stack to |
357 | offload receive copy-to-user operations, freeing CPU cycles. | 361 | offload receive copy-to-user operations, freeing CPU cycles. |
@@ -377,4 +381,7 @@ config DMATEST | |||
377 | Simple DMA test client. Say N unless you're debugging a | 381 | Simple DMA test client. Say N unless you're debugging a |
378 | DMA Device driver. | 382 | DMA Device driver. |
379 | 383 | ||
384 | config DMA_ENGINE_RAID | ||
385 | bool | ||
386 | |||
380 | endif | 387 | endif |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 16a2aa28f856..ec4ee5c1fe9d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd) | |||
1169 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | 1169 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); |
1170 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); | 1170 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); |
1171 | 1171 | ||
1172 | dma_descriptor_unmap(txd); | 1172 | dma_descriptor_unmap(&vd->tx); |
1173 | if (!txd->done) | 1173 | if (!txd->done) |
1174 | pl08x_release_mux(plchan); | 1174 | pl08x_release_mux(plchan); |
1175 | 1175 | ||
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index f31d647acdfa..2787aba60c6b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan) | |||
347 | { | 347 | { |
348 | return &chan->dev->device; | 348 | return &chan->dev->device; |
349 | } | 349 | } |
350 | static struct device *chan2parent(struct dma_chan *chan) | ||
351 | { | ||
352 | return chan->dev->device.parent; | ||
353 | } | ||
354 | 350 | ||
355 | #if defined(VERBOSE_DEBUG) | 351 | #if defined(VERBOSE_DEBUG) |
356 | static void vdbg_dump_regs(struct at_dma_chan *atchan) | 352 | static void vdbg_dump_regs(struct at_dma_chan *atchan) |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ea806bdc12ef..ef63b9058f3c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool { | |||
912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } | 912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
913 | static struct dmaengine_unmap_pool unmap_pool[] = { | 913 | static struct dmaengine_unmap_pool unmap_pool[] = { |
914 | __UNMAP_POOL(2), | 914 | __UNMAP_POOL(2), |
915 | #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) | 915 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
916 | __UNMAP_POOL(16), | 916 | __UNMAP_POOL(16), |
917 | __UNMAP_POOL(128), | 917 | __UNMAP_POOL(128), |
918 | __UNMAP_POOL(256), | 918 | __UNMAP_POOL(256), |
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
1054 | dma_cookie_t cookie; | 1054 | dma_cookie_t cookie; |
1055 | unsigned long flags; | 1055 | unsigned long flags; |
1056 | 1056 | ||
1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); | 1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); |
1058 | if (!unmap) | 1058 | if (!unmap) |
1059 | return -ENOMEM; | 1059 | return -ENOMEM; |
1060 | 1060 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3aaf926..9dfcaf5c1288 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -539,9 +539,9 @@ static int dmatest_func(void *data) | |||
539 | 539 | ||
540 | um->len = params->buf_size; | 540 | um->len = params->buf_size; |
541 | for (i = 0; i < src_cnt; i++) { | 541 | for (i = 0; i < src_cnt; i++) { |
542 | unsigned long buf = (unsigned long) thread->srcs[i]; | 542 | void *buf = thread->srcs[i]; |
543 | struct page *pg = virt_to_page(buf); | 543 | struct page *pg = virt_to_page(buf); |
544 | unsigned pg_off = buf & ~PAGE_MASK; | 544 | unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; |
545 | 545 | ||
546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, | 546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
547 | um->len, DMA_TO_DEVICE); | 547 | um->len, DMA_TO_DEVICE); |
@@ -559,9 +559,9 @@ static int dmatest_func(void *data) | |||
559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
560 | dsts = &um->addr[src_cnt]; | 560 | dsts = &um->addr[src_cnt]; |
561 | for (i = 0; i < dst_cnt; i++) { | 561 | for (i = 0; i < dst_cnt; i++) { |
562 | unsigned long buf = (unsigned long) thread->dsts[i]; | 562 | void *buf = thread->dsts[i]; |
563 | struct page *pg = virt_to_page(buf); | 563 | struct page *pg = virt_to_page(buf); |
564 | unsigned pg_off = buf & ~PAGE_MASK; | 564 | unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; |
565 | 565 | ||
566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, | 566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, |
567 | DMA_BIDIRECTIONAL); | 567 | DMA_BIDIRECTIONAL); |
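Both dmatest hunks keep the buffers as void * and only cast to unsigned long for the offset mask, since the in-page offset of an address is just its low PAGE_SHIFT bits. A userspace check of that arithmetic is sketched below; PAGE_SIZE is assumed to be 4096, and the kernel of course uses virt_to_page() rather than the explicit base computed here.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	char buffer[2 * PAGE_SIZE];
	void *buf = buffer + 100;	/* arbitrary address inside the array */
	uintptr_t addr = (uintptr_t)buf;

	uintptr_t page_base = addr & PAGE_MASK;	   /* what virt_to_page() keys on */
	unsigned long pg_off = addr & ~PAGE_MASK;  /* offset passed to dma_map_page() */

	printf("addr=%#lx base=%#lx off=%lu\n",
	       (unsigned long)addr, (unsigned long)page_base, pg_off);
	return 0;
}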
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7086a16a55f2..f157c6f76b32 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan, | |||
86 | hw->count = CPU_TO_DMA(chan, count, 32); | 86 | hw->count = CPU_TO_DMA(chan, count, 32); |
87 | } | 87 | } |
88 | 88 | ||
89 | static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | ||
90 | { | ||
91 | return DMA_TO_CPU(chan, desc->hw.count, 32); | ||
92 | } | ||
93 | |||
94 | static void set_desc_src(struct fsldma_chan *chan, | 89 | static void set_desc_src(struct fsldma_chan *chan, |
95 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 90 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
96 | { | 91 | { |
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan, | |||
101 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 96 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
102 | } | 97 | } |
103 | 98 | ||
104 | static dma_addr_t get_desc_src(struct fsldma_chan *chan, | ||
105 | struct fsl_desc_sw *desc) | ||
106 | { | ||
107 | u64 snoop_bits; | ||
108 | |||
109 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
110 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
111 | return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; | ||
112 | } | ||
113 | |||
114 | static void set_desc_dst(struct fsldma_chan *chan, | 99 | static void set_desc_dst(struct fsldma_chan *chan, |
115 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 100 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
116 | { | 101 | { |
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan, | |||
121 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 106 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
122 | } | 107 | } |
123 | 108 | ||
124 | static dma_addr_t get_desc_dst(struct fsldma_chan *chan, | ||
125 | struct fsl_desc_sw *desc) | ||
126 | { | ||
127 | u64 snoop_bits; | ||
128 | |||
129 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
130 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
131 | return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; | ||
132 | } | ||
133 | |||
134 | static void set_desc_next(struct fsldma_chan *chan, | 109 | static void set_desc_next(struct fsldma_chan *chan, |
135 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 110 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
136 | { | 111 | { |
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
408 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | 383 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
409 | struct fsl_desc_sw *child; | 384 | struct fsl_desc_sw *child; |
410 | unsigned long flags; | 385 | unsigned long flags; |
411 | dma_cookie_t cookie; | 386 | dma_cookie_t cookie = -EINVAL; |
412 | 387 | ||
413 | spin_lock_irqsave(&chan->desc_lock, flags); | 388 | spin_lock_irqsave(&chan->desc_lock, flags); |
414 | 389 | ||
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | |||
854 | struct fsl_desc_sw *desc) | 829 | struct fsl_desc_sw *desc) |
855 | { | 830 | { |
856 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | 831 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
857 | struct device *dev = chan->common.device->dev; | ||
858 | dma_addr_t src = get_desc_src(chan, desc); | ||
859 | dma_addr_t dst = get_desc_dst(chan, desc); | ||
860 | u32 len = get_desc_cnt(chan, desc); | ||
861 | 832 | ||
862 | /* Run the link descriptor callback function */ | 833 | /* Run the link descriptor callback function */ |
863 | if (txd->callback) { | 834 | if (txd->callback) { |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a49c777607c..87529181efcc 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
817 | } | 817 | } |
818 | 818 | ||
819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
820 | if (dma_mapping_error(dev, dma_src)) { | ||
821 | dev_err(dev, "mapping src buffer failed\n"); | ||
822 | goto free_resources; | ||
823 | } | ||
820 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 824 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
825 | if (dma_mapping_error(dev, dma_dest)) { | ||
826 | dev_err(dev, "mapping dest buffer failed\n"); | ||
827 | goto unmap_src; | ||
828 | } | ||
821 | flags = DMA_PREP_INTERRUPT; | 829 | flags = DMA_PREP_INTERRUPT; |
822 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 830 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, |
823 | IOAT_TEST_SIZE, flags); | 831 | IOAT_TEST_SIZE, flags); |
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
855 | } | 863 | } |
856 | 864 | ||
857 | unmap_dma: | 865 | unmap_dma: |
858 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
859 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 866 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
867 | unmap_src: | ||
868 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
860 | free_resources: | 869 | free_resources: |
861 | dma->device_free_chan_resources(dma_chan); | 870 | dma->device_free_chan_resources(dma_chan); |
862 | out: | 871 | out: |
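The ioat self-test change above adds dma_mapping_error() checks after each dma_map_single() and unwinds through the unmap_src/unmap_dma labels in reverse order. A hedged sketch of the same check-and-unwind pattern; the function and buffer names below are placeholders, not ioat symbols:

#include <linux/dma-mapping.h>

static int example_map_copy_buffers(struct device *dev, void *src, void *dst,
				    size_t len, dma_addr_t *dma_src,
				    dma_addr_t *dma_dst)
{
	*dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_src))
		return -ENOMEM;

	*dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_dst)) {
		/* unwind in the reverse order of the mappings */
		dma_unmap_single(dev, *dma_src, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}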
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index dcb1e05149a7..8869500ab92b 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
1017 | } | 1017 | } |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | platform_set_drvdata(op, pdev); | ||
1020 | dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); | 1021 | dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); |
1021 | return 0; | 1022 | return 0; |
1022 | } | 1023 | } |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7807f0ef4e20..53fb0c8365b0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | |||
54 | hw_desc->desc_command = (1 << 31); | 54 | hw_desc->desc_command = (1 << 31); |
55 | } | 55 | } |
56 | 56 | ||
57 | static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | ||
58 | { | ||
59 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
60 | return hw_desc->phy_dest_addr; | ||
61 | } | ||
62 | |||
63 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 57 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, |
64 | u32 byte_count) | 58 | u32 byte_count) |
65 | { | 59 | { |
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) | |||
787 | /* | 781 | /* |
788 | * Perform a transaction to verify the HW works. | 782 | * Perform a transaction to verify the HW works. |
789 | */ | 783 | */ |
790 | #define MV_XOR_TEST_SIZE 2000 | ||
791 | 784 | ||
792 | static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | 785 | static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) |
793 | { | 786 | { |
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
797 | struct dma_chan *dma_chan; | 790 | struct dma_chan *dma_chan; |
798 | dma_cookie_t cookie; | 791 | dma_cookie_t cookie; |
799 | struct dma_async_tx_descriptor *tx; | 792 | struct dma_async_tx_descriptor *tx; |
793 | struct dmaengine_unmap_data *unmap; | ||
800 | int err = 0; | 794 | int err = 0; |
801 | 795 | ||
802 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | 796 | src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); |
803 | if (!src) | 797 | if (!src) |
804 | return -ENOMEM; | 798 | return -ENOMEM; |
805 | 799 | ||
806 | dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | 800 | dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); |
807 | if (!dest) { | 801 | if (!dest) { |
808 | kfree(src); | 802 | kfree(src); |
809 | return -ENOMEM; | 803 | return -ENOMEM; |
810 | } | 804 | } |
811 | 805 | ||
812 | /* Fill in src buffer */ | 806 | /* Fill in src buffer */ |
813 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) | 807 | for (i = 0; i < PAGE_SIZE; i++) |
814 | ((u8 *) src)[i] = (u8)i; | 808 | ((u8 *) src)[i] = (u8)i; |
815 | 809 | ||
816 | dma_chan = &mv_chan->dmachan; | 810 | dma_chan = &mv_chan->dmachan; |
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
819 | goto out; | 813 | goto out; |
820 | } | 814 | } |
821 | 815 | ||
822 | dest_dma = dma_map_single(dma_chan->device->dev, dest, | 816 | unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); |
823 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | 817 | if (!unmap) { |
818 | err = -ENOMEM; | ||
819 | goto free_resources; | ||
820 | } | ||
821 | |||
822 | src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, | ||
823 | PAGE_SIZE, DMA_TO_DEVICE); | ||
824 | unmap->to_cnt = 1; | ||
825 | unmap->addr[0] = src_dma; | ||
824 | 826 | ||
825 | src_dma = dma_map_single(dma_chan->device->dev, src, | 827 | dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, |
826 | MV_XOR_TEST_SIZE, DMA_TO_DEVICE); | 828 | PAGE_SIZE, DMA_FROM_DEVICE); |
829 | unmap->from_cnt = 1; | ||
830 | unmap->addr[1] = dest_dma; | ||
831 | |||
832 | unmap->len = PAGE_SIZE; | ||
827 | 833 | ||
828 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, | 834 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, |
829 | MV_XOR_TEST_SIZE, 0); | 835 | PAGE_SIZE, 0); |
830 | cookie = mv_xor_tx_submit(tx); | 836 | cookie = mv_xor_tx_submit(tx); |
831 | mv_xor_issue_pending(dma_chan); | 837 | mv_xor_issue_pending(dma_chan); |
832 | async_tx_ack(tx); | 838 | async_tx_ack(tx); |
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
841 | } | 847 | } |
842 | 848 | ||
843 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, | 849 | dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, |
844 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | 850 | PAGE_SIZE, DMA_FROM_DEVICE); |
845 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { | 851 | if (memcmp(src, dest, PAGE_SIZE)) { |
846 | dev_err(dma_chan->device->dev, | 852 | dev_err(dma_chan->device->dev, |
847 | "Self-test copy failed compare, disabling\n"); | 853 | "Self-test copy failed compare, disabling\n"); |
848 | err = -ENODEV; | 854 | err = -ENODEV; |
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
850 | } | 856 | } |
851 | 857 | ||
852 | free_resources: | 858 | free_resources: |
859 | dmaengine_unmap_put(unmap); | ||
853 | mv_xor_free_chan_resources(dma_chan); | 860 | mv_xor_free_chan_resources(dma_chan); |
854 | out: | 861 | out: |
855 | kfree(src); | 862 | kfree(src); |
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
867 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; | 874 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; |
868 | dma_addr_t dest_dma; | 875 | dma_addr_t dest_dma; |
869 | struct dma_async_tx_descriptor *tx; | 876 | struct dma_async_tx_descriptor *tx; |
877 | struct dmaengine_unmap_data *unmap; | ||
870 | struct dma_chan *dma_chan; | 878 | struct dma_chan *dma_chan; |
871 | dma_cookie_t cookie; | 879 | dma_cookie_t cookie; |
872 | u8 cmp_byte = 0; | 880 | u8 cmp_byte = 0; |
873 | u32 cmp_word; | 881 | u32 cmp_word; |
874 | int err = 0; | 882 | int err = 0; |
883 | int src_count = MV_XOR_NUM_SRC_TEST; | ||
875 | 884 | ||
876 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | 885 | for (src_idx = 0; src_idx < src_count; src_idx++) { |
877 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | 886 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); |
878 | if (!xor_srcs[src_idx]) { | 887 | if (!xor_srcs[src_idx]) { |
879 | while (src_idx--) | 888 | while (src_idx--) |
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
890 | } | 899 | } |
891 | 900 | ||
892 | /* Fill in src buffers */ | 901 | /* Fill in src buffers */ |
893 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | 902 | for (src_idx = 0; src_idx < src_count; src_idx++) { |
894 | u8 *ptr = page_address(xor_srcs[src_idx]); | 903 | u8 *ptr = page_address(xor_srcs[src_idx]); |
895 | for (i = 0; i < PAGE_SIZE; i++) | 904 | for (i = 0; i < PAGE_SIZE; i++) |
896 | ptr[i] = (1 << src_idx); | 905 | ptr[i] = (1 << src_idx); |
897 | } | 906 | } |
898 | 907 | ||
899 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) | 908 | for (src_idx = 0; src_idx < src_count; src_idx++) |
900 | cmp_byte ^= (u8) (1 << src_idx); | 909 | cmp_byte ^= (u8) (1 << src_idx); |
901 | 910 | ||
902 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | 911 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
910 | goto out; | 919 | goto out; |
911 | } | 920 | } |
912 | 921 | ||
922 | unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, | ||
923 | GFP_KERNEL); | ||
924 | if (!unmap) { | ||
925 | err = -ENOMEM; | ||
926 | goto free_resources; | ||
927 | } | ||
928 | |||
913 | /* test xor */ | 929 | /* test xor */ |
914 | dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, | 930 | for (i = 0; i < src_count; i++) { |
915 | DMA_FROM_DEVICE); | 931 | unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], |
932 | 0, PAGE_SIZE, DMA_TO_DEVICE); | ||
933 | dma_srcs[i] = unmap->addr[i]; | ||
934 | unmap->to_cnt++; | ||
935 | } | ||
916 | 936 | ||
917 | for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) | 937 | unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, |
918 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], | 938 | DMA_FROM_DEVICE); |
919 | 0, PAGE_SIZE, DMA_TO_DEVICE); | 939 | dest_dma = unmap->addr[src_count]; |
940 | unmap->from_cnt = 1; | ||
941 | unmap->len = PAGE_SIZE; | ||
920 | 942 | ||
921 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 943 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
922 | MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); | 944 | src_count, PAGE_SIZE, 0); |
923 | 945 | ||
924 | cookie = mv_xor_tx_submit(tx); | 946 | cookie = mv_xor_tx_submit(tx); |
925 | mv_xor_issue_pending(dma_chan); | 947 | mv_xor_issue_pending(dma_chan); |
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
948 | } | 970 | } |
949 | 971 | ||
950 | free_resources: | 972 | free_resources: |
973 | dmaengine_unmap_put(unmap); | ||
951 | mv_xor_free_chan_resources(dma_chan); | 974 | mv_xor_free_chan_resources(dma_chan); |
952 | out: | 975 | out: |
953 | src_idx = MV_XOR_NUM_SRC_TEST; | 976 | src_idx = src_count; |
954 | while (src_idx--) | 977 | while (src_idx--) |
955 | __free_page(xor_srcs[src_idx]); | 978 | __free_page(xor_srcs[src_idx]); |
956 | __free_page(dest); | 979 | __free_page(dest); |
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1176 | int i = 0; | 1199 | int i = 0; |
1177 | 1200 | ||
1178 | for_each_child_of_node(pdev->dev.of_node, np) { | 1201 | for_each_child_of_node(pdev->dev.of_node, np) { |
1202 | struct mv_xor_chan *chan; | ||
1179 | dma_cap_mask_t cap_mask; | 1203 | dma_cap_mask_t cap_mask; |
1180 | int irq; | 1204 | int irq; |
1181 | 1205 | ||
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1193 | goto err_channel_add; | 1217 | goto err_channel_add; |
1194 | } | 1218 | } |
1195 | 1219 | ||
1196 | xordev->channels[i] = | 1220 | chan = mv_xor_channel_add(xordev, pdev, i, |
1197 | mv_xor_channel_add(xordev, pdev, i, | 1221 | cap_mask, irq); |
1198 | cap_mask, irq); | 1222 | if (IS_ERR(chan)) { |
1199 | if (IS_ERR(xordev->channels[i])) { | 1223 | ret = PTR_ERR(chan); |
1200 | ret = PTR_ERR(xordev->channels[i]); | ||
1201 | xordev->channels[i] = NULL; | ||
1202 | irq_dispose_mapping(irq); | 1224 | irq_dispose_mapping(irq); |
1203 | goto err_channel_add; | 1225 | goto err_channel_add; |
1204 | } | 1226 | } |
1205 | 1227 | ||
1228 | xordev->channels[i] = chan; | ||
1206 | i++; | 1229 | i++; |
1207 | } | 1230 | } |
1208 | } else if (pdata && pdata->channels) { | 1231 | } else if (pdata && pdata->channels) { |
1209 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | 1232 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { |
1210 | struct mv_xor_channel_data *cd; | 1233 | struct mv_xor_channel_data *cd; |
1234 | struct mv_xor_chan *chan; | ||
1211 | int irq; | 1235 | int irq; |
1212 | 1236 | ||
1213 | cd = &pdata->channels[i]; | 1237 | cd = &pdata->channels[i]; |
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1222 | goto err_channel_add; | 1246 | goto err_channel_add; |
1223 | } | 1247 | } |
1224 | 1248 | ||
1225 | xordev->channels[i] = | 1249 | chan = mv_xor_channel_add(xordev, pdev, i, |
1226 | mv_xor_channel_add(xordev, pdev, i, | 1250 | cd->cap_mask, irq); |
1227 | cd->cap_mask, irq); | 1251 | if (IS_ERR(chan)) { |
1228 | if (IS_ERR(xordev->channels[i])) { | 1252 | ret = PTR_ERR(chan); |
1229 | ret = PTR_ERR(xordev->channels[i]); | ||
1230 | goto err_channel_add; | 1253 | goto err_channel_add; |
1231 | } | 1254 | } |
1255 | |||
1256 | xordev->channels[i] = chan; | ||
1232 | } | 1257 | } |
1233 | } | 1258 | } |
1234 | 1259 | ||
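Both mv_xor self-tests above are converted to record their mappings in a struct dmaengine_unmap_data so that a single dmaengine_unmap_put() releases them on every exit path. A minimal sketch of that bookkeeping for a one-source/one-destination copy, assuming page-backed buffers; chan, src and dst are placeholders:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_test_mapping(struct dma_chan *chan, struct page *src,
				struct page *dst)
{
	struct device *dev = chan->device->dev;
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
	if (!unmap)
		return -ENOMEM;

	/* to-device mappings are recorded first, then from-device ones */
	unmap->addr[0] = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... prepare, submit and wait for the descriptor here ... */

	/* dropping the last reference unmaps all recorded addresses */
	dmaengine_unmap_put(unmap);
	return 0;
}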
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483b8f2d..536632f6479c 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2492 | 2492 | ||
2493 | static inline void _init_desc(struct dma_pl330_desc *desc) | 2493 | static inline void _init_desc(struct dma_pl330_desc *desc) |
2494 | { | 2494 | { |
2495 | desc->pchan = NULL; | ||
2496 | desc->req.x = &desc->px; | 2495 | desc->req.x = &desc->px; |
2497 | desc->req.token = desc; | 2496 | desc->req.token = desc; |
2498 | desc->rqcfg.swap = SWAP_NO; | 2497 | desc->rqcfg.swap = SWAP_NO; |
2499 | desc->rqcfg.privileged = 0; | ||
2500 | desc->rqcfg.insnaccess = 0; | ||
2501 | desc->rqcfg.scctl = SCCTRL0; | 2498 | desc->rqcfg.scctl = SCCTRL0; |
2502 | desc->rqcfg.dcctl = DCCTRL0; | 2499 | desc->rqcfg.dcctl = DCCTRL0; |
2503 | desc->req.cfg = &desc->rqcfg; | 2500 | desc->req.cfg = &desc->rqcfg; |
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | |||
2517 | if (!pdmac) | 2514 | if (!pdmac) |
2518 | return 0; | 2515 | return 0; |
2519 | 2516 | ||
2520 | desc = kmalloc(count * sizeof(*desc), flg); | 2517 | desc = kcalloc(count, sizeof(*desc), flg); |
2521 | if (!desc) | 2518 | if (!desc) |
2522 | return 0; | 2519 | return 0; |
2523 | 2520 | ||
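The pl330 change above swaps an open-coded kmalloc(count * sizeof(*desc), ...) for kcalloc(), which zeroes the array and checks the count * size multiplication for overflow. A small illustrative sketch; struct example_desc is a placeholder type, not a pl330 structure:

#include <linux/slab.h>
#include <linux/types.h>

struct example_desc {
	u32 cfg;
};

static struct example_desc *example_alloc_descs(int count)
{
	/* zeroed allocation with an overflow-checked element count */
	return kcalloc(count, sizeof(struct example_desc), GFP_KERNEL);
}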
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6b2a38..8bba298535b0 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, | |||
533 | } | 533 | } |
534 | 534 | ||
535 | /** | 535 | /** |
536 | * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation | ||
537 | */ | ||
538 | static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, | ||
539 | int value, unsigned long flags) | ||
540 | { | ||
541 | struct dma_cdb *hw_desc = desc->hw_desc; | ||
542 | |||
543 | memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); | ||
544 | desc->hw_next = NULL; | ||
545 | desc->src_cnt = 1; | ||
546 | desc->dst_cnt = 1; | ||
547 | |||
548 | if (flags & DMA_PREP_INTERRUPT) | ||
549 | set_bit(PPC440SPE_DESC_INT, &desc->flags); | ||
550 | else | ||
551 | clear_bit(PPC440SPE_DESC_INT, &desc->flags); | ||
552 | |||
553 | hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); | ||
554 | hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); | ||
555 | hw_desc->opc = DMA_CDB_OPC_DFILL128; | ||
556 | } | ||
557 | |||
558 | /** | ||
559 | * ppc440spe_desc_set_src_addr - set source address into the descriptor | 536 | * ppc440spe_desc_set_src_addr - set source address into the descriptor |
560 | */ | 537 | */ |
561 | static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, | 538 | static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, |
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( | |||
1504 | struct ppc440spe_adma_chan *chan, | 1481 | struct ppc440spe_adma_chan *chan, |
1505 | dma_cookie_t cookie) | 1482 | dma_cookie_t cookie) |
1506 | { | 1483 | { |
1507 | int i; | ||
1508 | |||
1509 | BUG_ON(desc->async_tx.cookie < 0); | 1484 | BUG_ON(desc->async_tx.cookie < 0); |
1510 | if (desc->async_tx.cookie > 0) { | 1485 | if (desc->async_tx.cookie > 0) { |
1511 | cookie = desc->async_tx.cookie; | 1486 | cookie = desc->async_tx.cookie; |
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |||
3898 | ppc440spe_adma_prep_dma_interrupt; | 3873 | ppc440spe_adma_prep_dma_interrupt; |
3899 | } | 3874 | } |
3900 | pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " | 3875 | pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " |
3901 | "( %s%s%s%s%s%s%s)\n", | 3876 | "( %s%s%s%s%s%s)\n", |
3902 | dev_name(adev->dev), | 3877 | dev_name(adev->dev), |
3903 | dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", | 3878 | dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", |
3904 | dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", | 3879 | dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", |
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 4cb127978636..4eddedb6eb7d 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -628,42 +628,13 @@ retry: | |||
628 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | 628 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; |
629 | } | 629 | } |
630 | 630 | ||
631 | static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd) | ||
632 | { | ||
633 | struct device *dev = txd->vd.tx.chan->device->dev; | ||
634 | struct s3c24xx_sg *dsg; | ||
635 | |||
636 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
637 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
638 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
639 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | ||
640 | DMA_TO_DEVICE); | ||
641 | else { | ||
642 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
643 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
644 | DMA_TO_DEVICE); | ||
645 | } | ||
646 | } | ||
647 | |||
648 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
649 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
650 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
651 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
652 | DMA_FROM_DEVICE); | ||
653 | else | ||
654 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
655 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
656 | DMA_FROM_DEVICE); | ||
657 | } | ||
658 | } | ||
659 | |||
660 | static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) | 631 | static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) |
661 | { | 632 | { |
662 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); | 633 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); |
663 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); | 634 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); |
664 | 635 | ||
665 | if (!s3cchan->slave) | 636 | if (!s3cchan->slave) |
666 | s3c24xx_dma_unmap_buffers(txd); | 637 | dma_descriptor_unmap(&vd->tx); |
667 | 638 | ||
668 | s3c24xx_dma_free_txd(txd); | 639 | s3c24xx_dma_free_txd(txd); |
669 | } | 640 | } |
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, | |||
795 | 766 | ||
796 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | 767 | spin_lock_irqsave(&s3cchan->vc.lock, flags); |
797 | ret = dma_cookie_status(chan, cookie, txstate); | 768 | ret = dma_cookie_status(chan, cookie, txstate); |
798 | if (ret == DMA_SUCCESS) { | 769 | if (ret == DMA_COMPLETE) { |
799 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | 770 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); |
800 | return ret; | 771 | return ret; |
801 | } | 772 | } |
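The s3c24xx change above replaces the driver-local unmap helper with dma_descriptor_unmap() and renames DMA_SUCCESS to DMA_COMPLETE in the tx_status fast path. A hedged sketch of that fast path; everything except dma_cookie_status() and DMA_COMPLETE is a placeholder:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_status(), private to drivers/dma/ */

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
		return ret;

	/* otherwise take the channel lock and fill in the residue ... */
	return ret;
}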
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index ebad84591a6e..3083d901a414 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #define HPB_DMAE_DSTPR_DMSTP BIT(0) | 60 | #define HPB_DMAE_DSTPR_DMSTP BIT(0) |
61 | 61 | ||
62 | /* DMA status register (DSTSR) bits */ | 62 | /* DMA status register (DSTSR) bits */ |
63 | #define HPB_DMAE_DSTSR_DQSTS BIT(2) | ||
63 | #define HPB_DMAE_DSTSR_DMSTS BIT(0) | 64 | #define HPB_DMAE_DSTSR_DMSTS BIT(0) |
64 | 65 | ||
65 | /* DMA common registers */ | 66 | /* DMA common registers */ |
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan) | |||
286 | 287 | ||
287 | ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); | 288 | ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); |
288 | ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); | 289 | ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); |
290 | |||
291 | chan->plane_idx = 0; | ||
292 | chan->first_desc = true; | ||
289 | } | 293 | } |
290 | 294 | ||
291 | static const struct hpb_dmae_slave_config * | 295 | static const struct hpb_dmae_slave_config * |
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan) | |||
385 | struct hpb_dmae_chan *chan = to_chan(schan); | 389 | struct hpb_dmae_chan *chan = to_chan(schan); |
386 | u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); | 390 | u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); |
387 | 391 | ||
388 | return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; | 392 | if (chan->xfer_mode == XFER_DOUBLE) |
393 | return dstsr & HPB_DMAE_DSTSR_DQSTS; | ||
394 | else | ||
395 | return dstsr & HPB_DMAE_DSTSR_DMSTS; | ||
389 | } | 396 | } |
390 | 397 | ||
391 | static int | 398 | static int |
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) | |||
510 | } | 517 | } |
511 | 518 | ||
512 | schan = &new_hpb_chan->shdma_chan; | 519 | schan = &new_hpb_chan->shdma_chan; |
520 | schan->max_xfer_len = HPB_DMA_TCR_MAX; | ||
521 | |||
513 | shdma_chan_probe(sdev, schan, id); | 522 | shdma_chan_probe(sdev, schan, id); |
514 | 523 | ||
515 | if (pdev->id >= 0) | 524 | if (pdev->id >= 0) |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index bae6c29f5502..17686caf64d5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
406 | dma_async_tx_callback callback; | 406 | dma_async_tx_callback callback; |
407 | void *param; | 407 | void *param; |
408 | struct dma_async_tx_descriptor *txd = &desc->txd; | 408 | struct dma_async_tx_descriptor *txd = &desc->txd; |
409 | struct txx9dmac_slave *ds = dc->chan.private; | ||
410 | 409 | ||
411 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", | 410 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", |
412 | txd->cookie, desc); | 411 | txd->cookie, desc); |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 8472405c5586..d7f1b57bd3be 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
945 | u32 tad_offset; | 945 | u32 tad_offset; |
946 | u32 rir_way; | 946 | u32 rir_way; |
947 | u32 mb, kb; | 947 | u32 mb, kb; |
948 | u64 ch_addr, offset, limit, prv = 0; | 948 | u64 ch_addr, offset, limit = 0, prv = 0; |
949 | 949 | ||
950 | 950 | ||
951 | /* | 951 | /* |
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index 3c55ec856e39..a287cece0593 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c | |||
@@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index, | |||
1082 | static int arizona_extcon_probe(struct platform_device *pdev) | 1082 | static int arizona_extcon_probe(struct platform_device *pdev) |
1083 | { | 1083 | { |
1084 | struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); | 1084 | struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); |
1085 | struct arizona_pdata *pdata; | 1085 | struct arizona_pdata *pdata = &arizona->pdata; |
1086 | struct arizona_extcon_info *info; | 1086 | struct arizona_extcon_info *info; |
1087 | unsigned int val; | 1087 | unsigned int val; |
1088 | int jack_irq_fall, jack_irq_rise; | 1088 | int jack_irq_fall, jack_irq_rise; |
@@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev) | |||
1091 | if (!arizona->dapm || !arizona->dapm->card) | 1091 | if (!arizona->dapm || !arizona->dapm->card) |
1092 | return -EPROBE_DEFER; | 1092 | return -EPROBE_DEFER; |
1093 | 1093 | ||
1094 | pdata = dev_get_platdata(arizona->dev); | ||
1095 | |||
1096 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | 1094 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
1097 | if (!info) { | 1095 | if (!info) { |
1098 | dev_err(&pdev->dev, "Failed to allocate memory\n"); | 1096 | dev_err(&pdev->dev, "Failed to allocate memory\n"); |
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 15443d3b6be1..76322330cbd7 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c | |||
@@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev) | |||
792 | return; | 792 | return; |
793 | } | 793 | } |
794 | 794 | ||
795 | device_unregister(&edev->dev); | ||
796 | |||
795 | if (edev->mutually_exclusive && edev->max_supported) { | 797 | if (edev->mutually_exclusive && edev->max_supported) { |
796 | for (index = 0; edev->mutually_exclusive[index]; | 798 | for (index = 0; edev->mutually_exclusive[index]; |
797 | index++) | 799 | index++) |
@@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev) | |||
812 | if (switch_class) | 814 | if (switch_class) |
813 | class_compat_remove_link(switch_class, &edev->dev, NULL); | 815 | class_compat_remove_link(switch_class, &edev->dev, NULL); |
814 | #endif | 816 | #endif |
815 | device_unregister(&edev->dev); | ||
816 | put_device(&edev->dev); | 817 | put_device(&edev->dev); |
817 | } | 818 | } |
818 | EXPORT_SYMBOL_GPL(extcon_dev_unregister); | 819 | EXPORT_SYMBOL_GPL(extcon_dev_unregister); |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index b0bb056458a3..281029daf98c 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = { | |||
1623 | .cmd_per_lun = 1, | 1623 | .cmd_per_lun = 1, |
1624 | .can_queue = 1, | 1624 | .can_queue = 1, |
1625 | .sdev_attrs = sbp2_scsi_sysfs_attrs, | 1625 | .sdev_attrs = sbp2_scsi_sysfs_attrs, |
1626 | .no_write_same = 1, | ||
1627 | }; | 1626 | }; |
1628 | 1627 | ||
1629 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | 1628 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 299fad6b5867..5373dc5b6011 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | |||
14 | 14 | ||
15 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ | 15 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ |
16 | obj-$(CONFIG_EFI) += efi/ | 16 | obj-$(CONFIG_EFI) += efi/ |
17 | obj-$(CONFIG_UEFI_CPER) += efi/ | ||
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 3150aa4874e8..6aecbc86ec94 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig | |||
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE | |||
36 | backend for pstore by default. This setting can be overridden | 36 | backend for pstore by default. This setting can be overridden |
37 | using the efivars module's pstore_disable parameter. | 37 | using the efivars module's pstore_disable parameter. |
38 | 38 | ||
39 | config UEFI_CPER | ||
40 | def_bool n | ||
41 | |||
42 | endmenu | 39 | endmenu |
40 | |||
41 | config UEFI_CPER | ||
42 | bool | ||
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 9ba156d3c775..6c2a41ec21ba 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for linux kernel | 2 | # Makefile for linux kernel |
3 | # | 3 | # |
4 | obj-y += efi.o vars.o | 4 | obj-$(CONFIG_EFI) += efi.o vars.o |
5 | obj-$(CONFIG_EFI_VARS) += efivars.o | 5 | obj-$(CONFIG_EFI_VARS) += efivars.o |
6 | obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o | 6 | obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o |
7 | obj-$(CONFIG_UEFI_CPER) += cper.o | 7 | obj-$(CONFIG_UEFI_CPER) += cper.o |
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 743fd426f21b..4b9dc836dcf9 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c | |||
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, | |||
356 | static struct pstore_info efi_pstore_info = { | 356 | static struct pstore_info efi_pstore_info = { |
357 | .owner = THIS_MODULE, | 357 | .owner = THIS_MODULE, |
358 | .name = "efi", | 358 | .name = "efi", |
359 | .flags = PSTORE_FLAGS_FRAGILE, | ||
359 | .open = efi_pstore_open, | 360 | .open = efi_pstore_open, |
360 | .close = efi_pstore_close, | 361 | .close = efi_pstore_close, |
361 | .read = efi_pstore_read, | 362 | .read = efi_pstore_read, |
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 8847adf392b7..84be70157ad6 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c | |||
@@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) | |||
327 | * NOTE: we assume for now that only irqs in the first gpio_chip | 327 | * NOTE: we assume for now that only irqs in the first gpio_chip |
328 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). | 328 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). |
329 | */ | 329 | */ |
330 | if (offset < d->irq_base) | 330 | if (offset < d->gpio_unbanked) |
331 | return d->gpio_irq + offset; | 331 | return d->gpio_irq + offset; |
332 | else | 332 | else |
333 | return -ENODEV; | 333 | return -ENODEV; |
@@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) | |||
419 | 419 | ||
420 | /* pass "bank 0" GPIO IRQs to AINTC */ | 420 | /* pass "bank 0" GPIO IRQs to AINTC */ |
421 | chips[0].chip.to_irq = gpio_to_irq_unbanked; | 421 | chips[0].chip.to_irq = gpio_to_irq_unbanked; |
422 | chips[0].gpio_irq = bank_irq; | ||
423 | chips[0].gpio_unbanked = pdata->gpio_unbanked; | ||
422 | binten = BIT(0); | 424 | binten = BIT(0); |
423 | 425 | ||
424 | /* AINTC handles mask/unmask; GPIO handles triggering */ | 426 | /* AINTC handles mask/unmask; GPIO handles triggering */ |
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index 7b37300973db..2baf0ddf7e02 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c | |||
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) | |||
252 | 252 | ||
253 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 253 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
254 | writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); | 254 | writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); |
255 | clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | 255 | clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); |
256 | __clear_bit(gpio, msm_gpio.enabled_irqs); | 256 | __clear_bit(gpio, msm_gpio.enabled_irqs); |
257 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 257 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
258 | } | 258 | } |
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) | |||
264 | 264 | ||
265 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 265 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
266 | __set_bit(gpio, msm_gpio.enabled_irqs); | 266 | __set_bit(gpio, msm_gpio.enabled_irqs); |
267 | set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | 267 | set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); |
268 | writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); | 268 | writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); |
269 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 269 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
270 | } | 270 | } |
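The gpio-msm-v2 fix above wraps INTR_RAW_STATUS_EN and INTR_ENABLE in BIT() because those macros are bit positions rather than masks. A tiny sketch of the distinction; the EXAMPLE_* names are placeholders, not msm_gpio register definitions:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_INTR_ENABLE	4	/* a bit position, not a mask */

static u32 example_set_enable(u32 reg)
{
	/* BIT() turns the position into the single-bit mask the register expects */
	return reg | BIT(EXAMPLE_INTR_ENABLE);
}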
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index fe088a30567a..8b7e719a68c3 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) | |||
169 | u32 pending; | 169 | u32 pending; |
170 | unsigned int offset, irqs_handled = 0; | 170 | unsigned int offset, irqs_handled = 0; |
171 | 171 | ||
172 | while ((pending = gpio_rcar_read(p, INTDT))) { | 172 | while ((pending = gpio_rcar_read(p, INTDT) & |
173 | gpio_rcar_read(p, INTMSK))) { | ||
173 | offset = __ffs(pending); | 174 | offset = __ffs(pending); |
174 | gpio_rcar_write(p, INTCLR, BIT(offset)); | 175 | gpio_rcar_write(p, INTCLR, BIT(offset)); |
175 | generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); | 176 | generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); |
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index b97d6a6577b9..f9996899c1f2 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c | |||
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset) | |||
300 | if (offset < TWL4030_GPIO_MAX) | 300 | if (offset < TWL4030_GPIO_MAX) |
301 | ret = twl4030_set_gpio_direction(offset, 1); | 301 | ret = twl4030_set_gpio_direction(offset, 1); |
302 | else | 302 | else |
303 | ret = -EINVAL; | 303 | ret = -EINVAL; /* LED outputs can't be set as input */ |
304 | 304 | ||
305 | if (!ret) | 305 | if (!ret) |
306 | priv->direction &= ~BIT(offset); | 306 | priv->direction &= ~BIT(offset); |
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) | |||
354 | static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) | 354 | static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) |
355 | { | 355 | { |
356 | struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); | 356 | struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); |
357 | int ret = -EINVAL; | 357 | int ret = 0; |
358 | 358 | ||
359 | mutex_lock(&priv->mutex); | 359 | mutex_lock(&priv->mutex); |
360 | if (offset < TWL4030_GPIO_MAX) | 360 | if (offset < TWL4030_GPIO_MAX) { |
361 | ret = twl4030_set_gpio_direction(offset, 0); | 361 | ret = twl4030_set_gpio_direction(offset, 0); |
362 | if (ret) { | ||
363 | mutex_unlock(&priv->mutex); | ||
364 | return ret; | ||
365 | } | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output | ||
370 | */ | ||
362 | 371 | ||
363 | priv->direction |= BIT(offset); | 372 | priv->direction |= BIT(offset); |
364 | mutex_unlock(&priv->mutex); | 373 | mutex_unlock(&priv->mutex); |
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index eef09ec9a5ff..a72cae03b99b 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h | |||
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *, | |||
103 | extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; | 103 | extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; |
104 | 104 | ||
105 | int armada_fbdev_init(struct drm_device *); | 105 | int armada_fbdev_init(struct drm_device *); |
106 | void armada_fbdev_lastclose(struct drm_device *); | ||
106 | void armada_fbdev_fini(struct drm_device *); | 107 | void armada_fbdev_fini(struct drm_device *); |
107 | 108 | ||
108 | int armada_overlay_plane_create(struct drm_device *, unsigned long); | 109 | int armada_overlay_plane_create(struct drm_device *, unsigned long); |
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 4f2b28354915..62d0ff3efddf 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = { | |||
321 | DRM_UNLOCKED), | 321 | DRM_UNLOCKED), |
322 | }; | 322 | }; |
323 | 323 | ||
324 | static void armada_drm_lastclose(struct drm_device *dev) | ||
325 | { | ||
326 | armada_fbdev_lastclose(dev); | ||
327 | } | ||
328 | |||
324 | static const struct file_operations armada_drm_fops = { | 329 | static const struct file_operations armada_drm_fops = { |
325 | .owner = THIS_MODULE, | 330 | .owner = THIS_MODULE, |
326 | .llseek = no_llseek, | 331 | .llseek = no_llseek, |
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = { | |||
337 | .open = NULL, | 342 | .open = NULL, |
338 | .preclose = NULL, | 343 | .preclose = NULL, |
339 | .postclose = NULL, | 344 | .postclose = NULL, |
340 | .lastclose = NULL, | 345 | .lastclose = armada_drm_lastclose, |
341 | .unload = armada_drm_unload, | 346 | .unload = armada_drm_unload, |
342 | .get_vblank_counter = drm_vblank_count, | 347 | .get_vblank_counter = drm_vblank_count, |
343 | .enable_vblank = armada_drm_enable_vblank, | 348 | .enable_vblank = armada_drm_enable_vblank, |
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index dd5ea77dac96..948cb14c561e 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c | |||
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh, | |||
105 | drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); | 105 | drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); |
106 | drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); | 106 | drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); |
107 | 107 | ||
108 | DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", | 108 | DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n", |
109 | dfb->fb.width, dfb->fb.height, | 109 | dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel, |
110 | dfb->fb.bits_per_pixel, obj->phys_addr); | 110 | (unsigned long long)obj->phys_addr); |
111 | 111 | ||
112 | return 0; | 112 | return 0; |
113 | 113 | ||
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev) | |||
177 | return ret; | 177 | return ret; |
178 | } | 178 | } |
179 | 179 | ||
180 | void armada_fbdev_lastclose(struct drm_device *dev) | ||
181 | { | ||
182 | struct armada_private *priv = dev->dev_private; | ||
183 | |||
184 | drm_modeset_lock_all(dev); | ||
185 | if (priv->fbdev) | ||
186 | drm_fb_helper_restore_fbdev_mode(priv->fbdev); | ||
187 | drm_modeset_unlock_all(dev); | ||
188 | } | ||
189 | |||
180 | void armada_fbdev_fini(struct drm_device *dev) | 190 | void armada_fbdev_fini(struct drm_device *dev) |
181 | { | 191 | { |
182 | struct armada_private *priv = dev->dev_private; | 192 | struct armada_private *priv = dev->dev_private; |
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev) | |||
192 | framebuffer_release(info); | 202 | framebuffer_release(info); |
193 | } | 203 | } |
194 | 204 | ||
205 | drm_fb_helper_fini(fbh); | ||
206 | |||
195 | if (fbh->fb) | 207 | if (fbh->fb) |
196 | fbh->fb->funcs->destroy(fbh->fb); | 208 | fbh->fb->funcs->destroy(fbh->fb); |
197 | 209 | ||
198 | drm_fb_helper_fini(fbh); | ||
199 | |||
200 | priv->fbdev = NULL; | 210 | priv->fbdev = NULL; |
201 | } | 211 | } |
202 | } | 212 | } |
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 9f2356bae7fd..887816f43476 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c | |||
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) | |||
172 | obj->dev_addr = obj->linear->start; | 172 | obj->dev_addr = obj->linear->start; |
173 | } | 173 | } |
174 | 174 | ||
175 | DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", | 175 | DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, |
176 | obj, obj->phys_addr, obj->dev_addr); | 176 | (unsigned long long)obj->phys_addr, |
177 | (unsigned long long)obj->dev_addr); | ||
177 | 178 | ||
178 | return 0; | 179 | return 0; |
179 | } | 180 | } |
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | |||
557 | * refcount on the gem object itself. | 558 | * refcount on the gem object itself. |
558 | */ | 559 | */ |
559 | drm_gem_object_reference(obj); | 560 | drm_gem_object_reference(obj); |
560 | dma_buf_put(buf); | ||
561 | return obj; | 561 | return obj; |
562 | } | 562 | } |
563 | } | 563 | } |
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | |||
573 | } | 573 | } |
574 | 574 | ||
575 | dobj->obj.import_attach = attach; | 575 | dobj->obj.import_attach = attach; |
576 | get_dma_buf(buf); | ||
576 | 577 | ||
577 | /* | 578 | /* |
578 | * Don't call dma_buf_map_attachment() here - it maps the | 579 | * Don't call dma_buf_map_attachment() here - it maps the |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fb7cf0e796f6..8835dcddfac3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -68,6 +68,8 @@ | |||
68 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) | 68 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) |
69 | /* Force reduced-blanking timings for detailed modes */ | 69 | /* Force reduced-blanking timings for detailed modes */ |
70 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) | 70 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) |
71 | /* Force 8bpc */ | ||
72 | #define EDID_QUIRK_FORCE_8BPC (1 << 8) | ||
71 | 73 | ||
72 | struct detailed_mode_closure { | 74 | struct detailed_mode_closure { |
73 | struct drm_connector *connector; | 75 | struct drm_connector *connector; |
@@ -128,6 +130,9 @@ static struct edid_quirk { | |||
128 | 130 | ||
129 | /* Medion MD 30217 PG */ | 131 | /* Medion MD 30217 PG */ |
130 | { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, | 132 | { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, |
133 | |||
134 | /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ | ||
135 | { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, | ||
131 | }; | 136 | }; |
132 | 137 | ||
133 | /* | 138 | /* |
@@ -2674,7 +2679,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, | |||
2674 | int modes = 0; | 2679 | int modes = 0; |
2675 | u8 cea_mode; | 2680 | u8 cea_mode; |
2676 | 2681 | ||
2677 | if (video_db == NULL || video_index > video_len) | 2682 | if (video_db == NULL || video_index >= video_len) |
2678 | return 0; | 2683 | return 0; |
2679 | 2684 | ||
2680 | /* CEA modes are numbered 1..127 */ | 2685 | /* CEA modes are numbered 1..127 */ |
@@ -2701,7 +2706,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, | |||
2701 | if (structure & (1 << 8)) { | 2706 | if (structure & (1 << 8)) { |
2702 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); | 2707 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); |
2703 | if (newmode) { | 2708 | if (newmode) { |
2704 | newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; | 2709 | newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; |
2705 | drm_mode_probed_add(connector, newmode); | 2710 | drm_mode_probed_add(connector, newmode); |
2706 | modes++; | 2711 | modes++; |
2707 | } | 2712 | } |
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) | |||
3435 | 3440 | ||
3436 | drm_add_display_info(edid, &connector->display_info); | 3441 | drm_add_display_info(edid, &connector->display_info); |
3437 | 3442 | ||
3443 | if (quirks & EDID_QUIRK_FORCE_8BPC) | ||
3444 | connector->display_info.bpc = 8; | ||
3445 | |||
3438 | return num_modes; | 3446 | return num_modes; |
3439 | } | 3447 | } |
3440 | EXPORT_SYMBOL(drm_add_edid_modes); | 3448 | EXPORT_SYMBOL(drm_add_edid_modes); |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 85071a1c4547..b0733153dfd2 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector) | |||
1041 | /* if equal delete the probed mode */ | 1041 | /* if equal delete the probed mode */ |
1042 | mode->status = pmode->status; | 1042 | mode->status = pmode->status; |
1043 | /* Merge type bits together */ | 1043 | /* Merge type bits together */ |
1044 | mode->type = pmode->type; | 1044 | mode->type |= pmode->type; |
1045 | list_del(&pmode->head); | 1045 | list_del(&pmode->head); |
1046 | drm_mode_destroy(connector->dev, pmode); | 1046 | drm_mode_destroy(connector->dev, pmode); |
1047 | break; | 1047 | break; |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index f53d5246979c..66dd3a001cf1 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -566,11 +566,11 @@ err_unload: | |||
566 | if (dev->driver->unload) | 566 | if (dev->driver->unload) |
567 | dev->driver->unload(dev); | 567 | dev->driver->unload(dev); |
568 | err_primary_node: | 568 | err_primary_node: |
569 | drm_put_minor(dev->primary); | 569 | drm_unplug_minor(dev->primary); |
570 | err_render_node: | 570 | err_render_node: |
571 | drm_put_minor(dev->render); | 571 | drm_unplug_minor(dev->render); |
572 | err_control_node: | 572 | err_control_node: |
573 | drm_put_minor(dev->control); | 573 | drm_unplug_minor(dev->control); |
574 | err_agp: | 574 | err_agp: |
575 | if (dev->driver->bus->agp_destroy) | 575 | if (dev->driver->bus->agp_destroy) |
576 | dev->driver->bus->agp_destroy(dev); | 576 | dev->driver->bus->agp_destroy(dev); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b676006a95a0..22b8f5eced80 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
173 | static void exynos_drm_preclose(struct drm_device *dev, | 173 | static void exynos_drm_preclose(struct drm_device *dev, |
174 | struct drm_file *file) | 174 | struct drm_file *file) |
175 | { | 175 | { |
176 | exynos_drm_subdrv_close(dev, file); | ||
177 | } | ||
178 | |||
179 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | ||
180 | { | ||
176 | struct exynos_drm_private *private = dev->dev_private; | 181 | struct exynos_drm_private *private = dev->dev_private; |
177 | struct drm_pending_vblank_event *e, *t; | 182 | struct drm_pending_vblank_event *v, *vt; |
183 | struct drm_pending_event *e, *et; | ||
178 | unsigned long flags; | 184 | unsigned long flags; |
179 | 185 | ||
180 | /* release events of current file */ | 186 | if (!file->driver_priv) |
187 | return; | ||
188 | |||
189 | /* Release all events not unhandled by page flip handler. */ | ||
181 | spin_lock_irqsave(&dev->event_lock, flags); | 190 | spin_lock_irqsave(&dev->event_lock, flags); |
182 | list_for_each_entry_safe(e, t, &private->pageflip_event_list, | 191 | list_for_each_entry_safe(v, vt, &private->pageflip_event_list, |
183 | base.link) { | 192 | base.link) { |
184 | if (e->base.file_priv == file) { | 193 | if (v->base.file_priv == file) { |
185 | list_del(&e->base.link); | 194 | list_del(&v->base.link); |
186 | e->base.destroy(&e->base); | 195 | drm_vblank_put(dev, v->pipe); |
196 | v->base.destroy(&v->base); | ||
187 | } | 197 | } |
188 | } | 198 | } |
189 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
190 | 199 | ||
191 | exynos_drm_subdrv_close(dev, file); | 200 | /* Release all events handled by page flip handler but not freed. */ |
192 | } | 201 | list_for_each_entry_safe(e, et, &file->event_list, link) { |
202 | list_del(&e->link); | ||
203 | e->destroy(e); | ||
204 | } | ||
205 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
193 | 206 | ||
194 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | ||
195 | { | ||
196 | if (!file->driver_priv) | ||
197 | return; | ||
198 | 207 | ||
199 | kfree(file->driver_priv); | 208 | kfree(file->driver_priv); |
200 | file->driver_priv = NULL; | 209 | file->driver_priv = NULL; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 23da72b5eae9..a61878bf5dcd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "exynos_drm_iommu.h" | 31 | #include "exynos_drm_iommu.h" |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * FIMD is stand for Fully Interactive Mobile Display and | 34 | * FIMD stands for Fully Interactive Mobile Display and |
35 | * as a display controller, it transfers contents drawn on memory | 35 | * as a display controller, it transfers contents drawn on memory |
36 | * to a LCD Panel through Display Interfaces such as RGB or | 36 | * to a LCD Panel through Display Interfaces such as RGB or |
37 | * CPU Interface. | 37 | * CPU Interface. |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1a25f9eaca59..35542eaabe89 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -85,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev) | |||
85 | drm_i915_private_t *dev_priv = dev->dev_private; | 85 | drm_i915_private_t *dev_priv = dev->dev_private; |
86 | struct drm_i915_master_private *master_priv; | 86 | struct drm_i915_master_private *master_priv; |
87 | 87 | ||
88 | /* | ||
89 | * The dri breadcrumb update races against the drm master disappearing. | ||
90 | * Instead of trying to fix this (this is by far not the only ums issue) | ||
91 | * just don't do the update in kms mode. | ||
92 | */ | ||
93 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
94 | return; | ||
95 | |||
88 | if (dev->primary->master) { | 96 | if (dev->primary->master) { |
89 | master_priv = dev->primary->master->driver_priv; | 97 | master_priv = dev->primary->master->driver_priv; |
90 | if (master_priv->sarea_priv) | 98 | if (master_priv->sarea_priv) |
@@ -1492,16 +1500,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1492 | spin_lock_init(&dev_priv->uncore.lock); | 1500 | spin_lock_init(&dev_priv->uncore.lock); |
1493 | spin_lock_init(&dev_priv->mm.object_stat_lock); | 1501 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
1494 | mutex_init(&dev_priv->dpio_lock); | 1502 | mutex_init(&dev_priv->dpio_lock); |
1495 | mutex_init(&dev_priv->rps.hw_lock); | ||
1496 | mutex_init(&dev_priv->modeset_restore_lock); | 1503 | mutex_init(&dev_priv->modeset_restore_lock); |
1497 | 1504 | ||
1498 | mutex_init(&dev_priv->pc8.lock); | 1505 | intel_pm_setup(dev); |
1499 | dev_priv->pc8.requirements_met = false; | ||
1500 | dev_priv->pc8.gpu_idle = false; | ||
1501 | dev_priv->pc8.irqs_disabled = false; | ||
1502 | dev_priv->pc8.enabled = false; | ||
1503 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ | ||
1504 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); | ||
1505 | 1506 | ||
1506 | intel_display_crc_init(dev); | 1507 | intel_display_crc_init(dev); |
1507 | 1508 | ||
@@ -1605,7 +1606,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1605 | } | 1606 | } |
1606 | 1607 | ||
1607 | intel_irq_init(dev); | 1608 | intel_irq_init(dev); |
1608 | intel_pm_init(dev); | ||
1609 | intel_uncore_sanitize(dev); | 1609 | intel_uncore_sanitize(dev); |
1610 | 1610 | ||
1611 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1611 | /* Try to make sure MCHBAR is enabled before poking at it */ |
@@ -1851,8 +1851,10 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
1851 | 1851 | ||
1852 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | 1852 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1853 | { | 1853 | { |
1854 | mutex_lock(&dev->struct_mutex); | ||
1854 | i915_gem_context_close(dev, file_priv); | 1855 | i915_gem_context_close(dev, file_priv); |
1855 | i915_gem_release(dev, file_priv); | 1856 | i915_gem_release(dev, file_priv); |
1857 | mutex_unlock(&dev->struct_mutex); | ||
1856 | } | 1858 | } |
1857 | 1859 | ||
1858 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) | 1860 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 31ffe39d2b79..bb27f0dde03d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -540,8 +540,10 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
540 | * Disable CRTCs directly since we want to preserve sw state | 540 | * Disable CRTCs directly since we want to preserve sw state |
541 | * for _thaw. | 541 | * for _thaw. |
542 | */ | 542 | */ |
543 | mutex_lock(&dev->mode_config.mutex); | ||
543 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 544 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
544 | dev_priv->display.crtc_disable(crtc); | 545 | dev_priv->display.crtc_disable(crtc); |
546 | mutex_unlock(&dev->mode_config.mutex); | ||
545 | 547 | ||
546 | intel_modeset_suspend_hw(dev); | 548 | intel_modeset_suspend_hw(dev); |
547 | } | 549 | } |
@@ -655,6 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
655 | intel_modeset_init_hw(dev); | 657 | intel_modeset_init_hw(dev); |
656 | 658 | ||
657 | drm_modeset_lock_all(dev); | 659 | drm_modeset_lock_all(dev); |
660 | drm_mode_config_reset(dev); | ||
658 | intel_modeset_setup_hw_state(dev, true); | 661 | intel_modeset_setup_hw_state(dev, true); |
659 | drm_modeset_unlock_all(dev); | 662 | drm_modeset_unlock_all(dev); |
660 | 663 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cf7922bdf87c..ff6f870d6621 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1937,9 +1937,7 @@ void i915_queue_hangcheck(struct drm_device *dev); | |||
1937 | void i915_handle_error(struct drm_device *dev, bool wedged); | 1937 | void i915_handle_error(struct drm_device *dev, bool wedged); |
1938 | 1938 | ||
1939 | extern void intel_irq_init(struct drm_device *dev); | 1939 | extern void intel_irq_init(struct drm_device *dev); |
1940 | extern void intel_pm_init(struct drm_device *dev); | ||
1941 | extern void intel_hpd_init(struct drm_device *dev); | 1940 | extern void intel_hpd_init(struct drm_device *dev); |
1942 | extern void intel_pm_init(struct drm_device *dev); | ||
1943 | 1941 | ||
1944 | extern void intel_uncore_sanitize(struct drm_device *dev); | 1942 | extern void intel_uncore_sanitize(struct drm_device *dev); |
1945 | extern void intel_uncore_early_sanitize(struct drm_device *dev); | 1943 | extern void intel_uncore_early_sanitize(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c5a99c46ca9c..32636a470367 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2370,15 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) | |||
2370 | kfree(request); | 2370 | kfree(request); |
2371 | } | 2371 | } |
2372 | 2372 | ||
2373 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | 2373 | static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, |
2374 | struct intel_ring_buffer *ring) | 2374 | struct intel_ring_buffer *ring) |
2375 | { | 2375 | { |
2376 | u32 completed_seqno; | 2376 | u32 completed_seqno = ring->get_seqno(ring, false); |
2377 | u32 acthd; | 2377 | u32 acthd = intel_ring_get_active_head(ring); |
2378 | struct drm_i915_gem_request *request; | ||
2379 | |||
2380 | list_for_each_entry(request, &ring->request_list, list) { | ||
2381 | if (i915_seqno_passed(completed_seqno, request->seqno)) | ||
2382 | continue; | ||
2378 | 2383 | ||
2379 | acthd = intel_ring_get_active_head(ring); | 2384 | i915_set_reset_status(ring, request, acthd); |
2380 | completed_seqno = ring->get_seqno(ring, false); | 2385 | } |
2386 | } | ||
2381 | 2387 | ||
2388 | static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | ||
2389 | struct intel_ring_buffer *ring) | ||
2390 | { | ||
2382 | while (!list_empty(&ring->request_list)) { | 2391 | while (!list_empty(&ring->request_list)) { |
2383 | struct drm_i915_gem_request *request; | 2392 | struct drm_i915_gem_request *request; |
2384 | 2393 | ||
@@ -2386,9 +2395,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
2386 | struct drm_i915_gem_request, | 2395 | struct drm_i915_gem_request, |
2387 | list); | 2396 | list); |
2388 | 2397 | ||
2389 | if (request->seqno > completed_seqno) | ||
2390 | i915_set_reset_status(ring, request, acthd); | ||
2391 | |||
2392 | i915_gem_free_request(request); | 2398 | i915_gem_free_request(request); |
2393 | } | 2399 | } |
2394 | 2400 | ||
@@ -2430,8 +2436,16 @@ void i915_gem_reset(struct drm_device *dev) | |||
2430 | struct intel_ring_buffer *ring; | 2436 | struct intel_ring_buffer *ring; |
2431 | int i; | 2437 | int i; |
2432 | 2438 | ||
2439 | /* | ||
2440 | * Before we free the objects from the requests, we need to inspect | ||
2441 | * them for finding the guilty party. As the requests only borrow | ||
2442 | * their reference to the objects, the inspection must be done first. | ||
2443 | */ | ||
2444 | for_each_ring(ring, dev_priv, i) | ||
2445 | i915_gem_reset_ring_status(dev_priv, ring); | ||
2446 | |||
2433 | for_each_ring(ring, dev_priv, i) | 2447 | for_each_ring(ring, dev_priv, i) |
2434 | i915_gem_reset_ring_lists(dev_priv, ring); | 2448 | i915_gem_reset_ring_cleanup(dev_priv, ring); |
2435 | 2449 | ||
2436 | i915_gem_cleanup_ringbuffer(dev); | 2450 | i915_gem_cleanup_ringbuffer(dev); |
2437 | 2451 | ||
@@ -4477,10 +4491,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4477 | if (dev_priv->ellc_size) | 4491 | if (dev_priv->ellc_size) |
4478 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4492 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4479 | 4493 | ||
4480 | if (IS_HSW_GT3(dev)) | 4494 | if (IS_HASWELL(dev)) |
4481 | I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); | 4495 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ? |
4482 | else | 4496 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
4483 | I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED); | ||
4484 | 4497 | ||
4485 | if (HAS_PCH_NOP(dev)) { | 4498 | if (HAS_PCH_NOP(dev)) { |
4486 | u32 temp = I915_READ(GEN7_MSG_CTL); | 4499 | u32 temp = I915_READ(GEN7_MSG_CTL); |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 41877045a1a0..e08acaba5402 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -345,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) | |||
345 | { | 345 | { |
346 | struct drm_i915_file_private *file_priv = file->driver_priv; | 346 | struct drm_i915_file_private *file_priv = file->driver_priv; |
347 | 347 | ||
348 | mutex_lock(&dev->struct_mutex); | ||
349 | idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); | 348 | idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); |
350 | idr_destroy(&file_priv->context_idr); | 349 | idr_destroy(&file_priv->context_idr); |
351 | mutex_unlock(&dev->struct_mutex); | ||
352 | } | 350 | } |
353 | 351 | ||
354 | static struct i915_hw_context * | 352 | static struct i915_hw_context * |
@@ -421,11 +419,21 @@ static int do_switch(struct i915_hw_context *to) | |||
421 | if (ret) | 419 | if (ret) |
422 | return ret; | 420 | return ret; |
423 | 421 | ||
424 | /* Clear this page out of any CPU caches for coherent swap-in/out. Note | 422 | /* |
423 | * Pin can switch back to the default context if we end up calling into | ||
424 | * evict_everything - as a last ditch gtt defrag effort that also | ||
425 | * switches to the default context. Hence we need to reload from here. | ||
426 | */ | ||
427 | from = ring->last_context; | ||
428 | |||
429 | /* | ||
430 | * Clear this page out of any CPU caches for coherent swap-in/out. Note | ||
425 | * that thanks to write = false in this call and us not setting any gpu | 431 | * that thanks to write = false in this call and us not setting any gpu |
426 | * write domains when putting a context object onto the active list | 432 | * write domains when putting a context object onto the active list |
427 | * (when switching away from it), this won't block. | 433 | * (when switching away from it), this won't block. |
428 | * XXX: We need a real interface to do this instead of trickery. */ | 434 | * |
435 | * XXX: We need a real interface to do this instead of trickery. | ||
436 | */ | ||
429 | ret = i915_gem_object_set_to_gtt_domain(to->obj, false); | 437 | ret = i915_gem_object_set_to_gtt_domain(to->obj, false); |
430 | if (ret) { | 438 | if (ret) { |
431 | i915_gem_object_unpin(to->obj); | 439 | i915_gem_object_unpin(to->obj); |
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 7d5752fda5f1..9bb533e0d762 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) | |||
125 | 125 | ||
126 | ret = i915_gem_object_get_pages(obj); | 126 | ret = i915_gem_object_get_pages(obj); |
127 | if (ret) | 127 | if (ret) |
128 | goto error; | 128 | goto err; |
129 | |||
130 | i915_gem_object_pin_pages(obj); | ||
129 | 131 | ||
130 | ret = -ENOMEM; | 132 | ret = -ENOMEM; |
131 | 133 | ||
132 | pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); | 134 | pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); |
133 | if (pages == NULL) | 135 | if (pages == NULL) |
134 | goto error; | 136 | goto err_unpin; |
135 | 137 | ||
136 | i = 0; | 138 | i = 0; |
137 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) | 139 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) |
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) | |||
141 | drm_free_large(pages); | 143 | drm_free_large(pages); |
142 | 144 | ||
143 | if (!obj->dma_buf_vmapping) | 145 | if (!obj->dma_buf_vmapping) |
144 | goto error; | 146 | goto err_unpin; |
145 | 147 | ||
146 | obj->vmapping_count = 1; | 148 | obj->vmapping_count = 1; |
147 | i915_gem_object_pin_pages(obj); | ||
148 | out_unlock: | 149 | out_unlock: |
149 | mutex_unlock(&dev->struct_mutex); | 150 | mutex_unlock(&dev->struct_mutex); |
150 | return obj->dma_buf_vmapping; | 151 | return obj->dma_buf_vmapping; |
151 | 152 | ||
152 | error: | 153 | err_unpin: |
154 | i915_gem_object_unpin_pages(obj); | ||
155 | err: | ||
153 | mutex_unlock(&dev->struct_mutex); | 156 | mutex_unlock(&dev->struct_mutex); |
154 | return ERR_PTR(ret); | 157 | return ERR_PTR(ret); |
155 | } | 158 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index b7376533633d..8f3adc7d0dc8 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
88 | } else | 88 | } else |
89 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); | 89 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
90 | 90 | ||
91 | search_again: | ||
91 | /* First see if there is a large enough contiguous idle region... */ | 92 | /* First see if there is a large enough contiguous idle region... */ |
92 | list_for_each_entry(vma, &vm->inactive_list, mm_list) { | 93 | list_for_each_entry(vma, &vm->inactive_list, mm_list) { |
93 | if (mark_free(vma, &unwind_list)) | 94 | if (mark_free(vma, &unwind_list)) |
@@ -115,10 +116,17 @@ none: | |||
115 | list_del_init(&vma->exec_list); | 116 | list_del_init(&vma->exec_list); |
116 | } | 117 | } |
117 | 118 | ||
118 | /* We expect the caller to unpin, evict all and try again, or give up. | 119 | /* Can we unpin some objects such as idle hw contents, |
119 | * So calling i915_gem_evict_vm() is unnecessary. | 120 | * or pending flips? |
120 | */ | 121 | */ |
121 | return -ENOSPC; | 122 | ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev); |
123 | if (ret) | ||
124 | return ret; | ||
125 | |||
126 | /* Only idle the GPU and repeat the search once */ | ||
127 | i915_gem_retire_requests(dev); | ||
128 | nonblocking = true; | ||
129 | goto search_again; | ||
122 | 130 | ||
123 | found: | 131 | found: |
124 | /* drm_mm doesn't allow any other other operations while | 132 | /* drm_mm doesn't allow any other other operations while |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bceddf5a04bc..8d795626a25e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -33,6 +33,9 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include <linux/dma_remapping.h> | 34 | #include <linux/dma_remapping.h> |
35 | 35 | ||
36 | #define __EXEC_OBJECT_HAS_PIN (1<<31) | ||
37 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) | ||
38 | |||
36 | struct eb_vmas { | 39 | struct eb_vmas { |
37 | struct list_head vmas; | 40 | struct list_head vmas; |
38 | int and; | 41 | int and; |
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
90 | { | 93 | { |
91 | struct drm_i915_gem_object *obj; | 94 | struct drm_i915_gem_object *obj; |
92 | struct list_head objects; | 95 | struct list_head objects; |
93 | int i, ret = 0; | 96 | int i, ret; |
94 | 97 | ||
95 | INIT_LIST_HEAD(&objects); | 98 | INIT_LIST_HEAD(&objects); |
96 | spin_lock(&file->table_lock); | 99 | spin_lock(&file->table_lock); |
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
103 | DRM_DEBUG("Invalid object handle %d at index %d\n", | 106 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
104 | exec[i].handle, i); | 107 | exec[i].handle, i); |
105 | ret = -ENOENT; | 108 | ret = -ENOENT; |
106 | goto out; | 109 | goto err; |
107 | } | 110 | } |
108 | 111 | ||
109 | if (!list_empty(&obj->obj_exec_link)) { | 112 | if (!list_empty(&obj->obj_exec_link)) { |
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
111 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", | 114 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
112 | obj, exec[i].handle, i); | 115 | obj, exec[i].handle, i); |
113 | ret = -EINVAL; | 116 | ret = -EINVAL; |
114 | goto out; | 117 | goto err; |
115 | } | 118 | } |
116 | 119 | ||
117 | drm_gem_object_reference(&obj->base); | 120 | drm_gem_object_reference(&obj->base); |
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
120 | spin_unlock(&file->table_lock); | 123 | spin_unlock(&file->table_lock); |
121 | 124 | ||
122 | i = 0; | 125 | i = 0; |
123 | list_for_each_entry(obj, &objects, obj_exec_link) { | 126 | while (!list_empty(&objects)) { |
124 | struct i915_vma *vma; | 127 | struct i915_vma *vma; |
125 | 128 | ||
129 | obj = list_first_entry(&objects, | ||
130 | struct drm_i915_gem_object, | ||
131 | obj_exec_link); | ||
132 | |||
126 | /* | 133 | /* |
127 | * NOTE: We can leak any vmas created here when something fails | 134 | * NOTE: We can leak any vmas created here when something fails |
128 | * later on. But that's no issue since vma_unbind can deal with | 135 | * later on. But that's no issue since vma_unbind can deal with |
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
135 | if (IS_ERR(vma)) { | 142 | if (IS_ERR(vma)) { |
136 | DRM_DEBUG("Failed to lookup VMA\n"); | 143 | DRM_DEBUG("Failed to lookup VMA\n"); |
137 | ret = PTR_ERR(vma); | 144 | ret = PTR_ERR(vma); |
138 | goto out; | 145 | goto err; |
139 | } | 146 | } |
140 | 147 | ||
148 | /* Transfer ownership from the objects list to the vmas list. */ | ||
141 | list_add_tail(&vma->exec_list, &eb->vmas); | 149 | list_add_tail(&vma->exec_list, &eb->vmas); |
150 | list_del_init(&obj->obj_exec_link); | ||
142 | 151 | ||
143 | vma->exec_entry = &exec[i]; | 152 | vma->exec_entry = &exec[i]; |
144 | if (eb->and < 0) { | 153 | if (eb->and < 0) { |
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
152 | ++i; | 161 | ++i; |
153 | } | 162 | } |
154 | 163 | ||
164 | return 0; | ||
155 | 165 | ||
156 | out: | 166 | |
167 | err: | ||
157 | while (!list_empty(&objects)) { | 168 | while (!list_empty(&objects)) { |
158 | obj = list_first_entry(&objects, | 169 | obj = list_first_entry(&objects, |
159 | struct drm_i915_gem_object, | 170 | struct drm_i915_gem_object, |
160 | obj_exec_link); | 171 | obj_exec_link); |
161 | list_del_init(&obj->obj_exec_link); | 172 | list_del_init(&obj->obj_exec_link); |
162 | if (ret) | 173 | drm_gem_object_unreference(&obj->base); |
163 | drm_gem_object_unreference(&obj->base); | ||
164 | } | 174 | } |
175 | /* | ||
176 | * Objects already transfered to the vmas list will be unreferenced by | ||
177 | * eb_destroy. | ||
178 | */ | ||
179 | |||
165 | return ret; | 180 | return ret; |
166 | } | 181 | } |
167 | 182 | ||
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) | |||
187 | } | 202 | } |
188 | } | 203 | } |
189 | 204 | ||
190 | static void eb_destroy(struct eb_vmas *eb) { | 205 | static void |
206 | i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) | ||
207 | { | ||
208 | struct drm_i915_gem_exec_object2 *entry; | ||
209 | struct drm_i915_gem_object *obj = vma->obj; | ||
210 | |||
211 | if (!drm_mm_node_allocated(&vma->node)) | ||
212 | return; | ||
213 | |||
214 | entry = vma->exec_entry; | ||
215 | |||
216 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) | ||
217 | i915_gem_object_unpin_fence(obj); | ||
218 | |||
219 | if (entry->flags & __EXEC_OBJECT_HAS_PIN) | ||
220 | i915_gem_object_unpin(obj); | ||
221 | |||
222 | entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); | ||
223 | } | ||
224 | |||
225 | static void eb_destroy(struct eb_vmas *eb) | ||
226 | { | ||
191 | while (!list_empty(&eb->vmas)) { | 227 | while (!list_empty(&eb->vmas)) { |
192 | struct i915_vma *vma; | 228 | struct i915_vma *vma; |
193 | 229 | ||
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) { | |||
195 | struct i915_vma, | 231 | struct i915_vma, |
196 | exec_list); | 232 | exec_list); |
197 | list_del_init(&vma->exec_list); | 233 | list_del_init(&vma->exec_list); |
234 | i915_gem_execbuffer_unreserve_vma(vma); | ||
198 | drm_gem_object_unreference(&vma->obj->base); | 235 | drm_gem_object_unreference(&vma->obj->base); |
199 | } | 236 | } |
200 | kfree(eb); | 237 | kfree(eb); |
@@ -477,9 +514,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb) | |||
477 | return ret; | 514 | return ret; |
478 | } | 515 | } |
479 | 516 | ||
480 | #define __EXEC_OBJECT_HAS_PIN (1<<31) | ||
481 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) | ||
482 | |||
483 | static int | 517 | static int |
484 | need_reloc_mappable(struct i915_vma *vma) | 518 | need_reloc_mappable(struct i915_vma *vma) |
485 | { | 519 | { |
@@ -551,26 +585,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, | |||
551 | return 0; | 585 | return 0; |
552 | } | 586 | } |
553 | 587 | ||
554 | static void | ||
555 | i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) | ||
556 | { | ||
557 | struct drm_i915_gem_exec_object2 *entry; | ||
558 | struct drm_i915_gem_object *obj = vma->obj; | ||
559 | |||
560 | if (!drm_mm_node_allocated(&vma->node)) | ||
561 | return; | ||
562 | |||
563 | entry = vma->exec_entry; | ||
564 | |||
565 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) | ||
566 | i915_gem_object_unpin_fence(obj); | ||
567 | |||
568 | if (entry->flags & __EXEC_OBJECT_HAS_PIN) | ||
569 | i915_gem_object_unpin(obj); | ||
570 | |||
571 | entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); | ||
572 | } | ||
573 | |||
574 | static int | 588 | static int |
575 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 589 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
576 | struct list_head *vmas, | 590 | struct list_head *vmas, |
@@ -669,13 +683,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
669 | goto err; | 683 | goto err; |
670 | } | 684 | } |
671 | 685 | ||
672 | err: /* Decrement pin count for bound objects */ | 686 | err: |
673 | list_for_each_entry(vma, vmas, exec_list) | ||
674 | i915_gem_execbuffer_unreserve_vma(vma); | ||
675 | |||
676 | if (ret != -ENOSPC || retry++) | 687 | if (ret != -ENOSPC || retry++) |
677 | return ret; | 688 | return ret; |
678 | 689 | ||
690 | /* Decrement pin count for bound objects */ | ||
691 | list_for_each_entry(vma, vmas, exec_list) | ||
692 | i915_gem_execbuffer_unreserve_vma(vma); | ||
693 | |||
679 | ret = i915_gem_evict_vm(vm, true); | 694 | ret = i915_gem_evict_vm(vm, true); |
680 | if (ret) | 695 | if (ret) |
681 | return ret; | 696 | return ret; |
@@ -707,6 +722,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
707 | while (!list_empty(&eb->vmas)) { | 722 | while (!list_empty(&eb->vmas)) { |
708 | vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); | 723 | vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); |
709 | list_del_init(&vma->exec_list); | 724 | list_del_init(&vma->exec_list); |
725 | i915_gem_execbuffer_unreserve_vma(vma); | ||
710 | drm_gem_object_unreference(&vma->obj->base); | 726 | drm_gem_object_unreference(&vma->obj->base); |
711 | } | 727 | } |
712 | 728 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8c7ebfa3bd56..6c3a6e60aeac 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; | |||
57 | #define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) | 57 | #define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) |
58 | #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) | 58 | #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) |
59 | #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) | 59 | #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) |
60 | #define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) | ||
60 | #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) | 61 | #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) |
62 | #define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) | ||
61 | 63 | ||
62 | #define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) | 64 | #define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) |
63 | #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) | 65 | #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) |
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, | |||
185 | case I915_CACHE_NONE: | 187 | case I915_CACHE_NONE: |
186 | break; | 188 | break; |
187 | case I915_CACHE_WT: | 189 | case I915_CACHE_WT: |
188 | pte |= HSW_WT_ELLC_LLC_AGE0; | 190 | pte |= HSW_WT_ELLC_LLC_AGE3; |
189 | break; | 191 | break; |
190 | default: | 192 | default: |
191 | pte |= HSW_WB_ELLC_LLC_AGE0; | 193 | pte |= HSW_WB_ELLC_LLC_AGE3; |
192 | break; | 194 | break; |
193 | } | 195 | } |
194 | 196 | ||
@@ -918,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
918 | WARN_ON(readq(&gtt_entries[i-1]) | 920 | WARN_ON(readq(&gtt_entries[i-1])
919 | != gen8_pte_encode(addr, level, true)); | 921 | != gen8_pte_encode(addr, level, true)); |
920 | 922 | ||
921 | #if 0 /* TODO: Still needed on GEN8? */ | ||
922 | /* This next bit makes the above posting read even more important. We | 923 | /* This next bit makes the above posting read even more important. We |
923 | * want to flush the TLBs only after we're certain all the PTE updates | 924 | * want to flush the TLBs only after we're certain all the PTE updates |
924 | * have finished. | 925 | * have finished. |
925 | */ | 926 | */ |
926 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 927 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
927 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 928 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
928 | #endif | ||
929 | } | 929 | } |
930 | 930 | ||
931 | /* | 931 | /* |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d64da4fe36e5..6d11e253218a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -2715,6 +2715,8 @@ static void gen8_irq_preinstall(struct drm_device *dev) | |||
2715 | #undef GEN8_IRQ_INIT_NDX | 2715 | #undef GEN8_IRQ_INIT_NDX |
2716 | 2716 | ||
2717 | POSTING_READ(GEN8_PCU_IIR); | 2717 | POSTING_READ(GEN8_PCU_IIR); |
2718 | |||
2719 | ibx_irq_preinstall(dev); | ||
2718 | } | 2720 | } |
2719 | 2721 | ||
2720 | static void ibx_hpd_irq_setup(struct drm_device *dev) | 2722 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index cec06a5453cc..74749c6f897e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
173 | ddi_translations = ddi_translations_dp; | 173 | ddi_translations = ddi_translations_dp; |
174 | break; | 174 | break; |
175 | case PORT_D: | 175 | case PORT_D: |
176 | if (intel_dpd_is_edp(dev)) | 176 | if (intel_dp_is_edp(dev, PORT_D)) |
177 | ddi_translations = ddi_translations_edp; | 177 | ddi_translations = ddi_translations_edp; |
178 | else | 178 | else |
179 | ddi_translations = ddi_translations_dp; | 179 | ddi_translations = ddi_translations_dp; |
@@ -1136,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev) | |||
1136 | enum pipe pipe; | 1136 | enum pipe pipe; |
1137 | struct intel_crtc *intel_crtc; | 1137 | struct intel_crtc *intel_crtc; |
1138 | 1138 | ||
1139 | dev_priv->ddi_plls.spll_refcount = 0; | ||
1140 | dev_priv->ddi_plls.wrpll1_refcount = 0; | ||
1141 | dev_priv->ddi_plls.wrpll2_refcount = 0; | ||
1142 | |||
1139 | for_each_pipe(pipe) { | 1143 | for_each_pipe(pipe) { |
1140 | intel_crtc = | 1144 | intel_crtc = |
1141 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 1145 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
1142 | 1146 | ||
1143 | if (!intel_crtc->active) | 1147 | if (!intel_crtc->active) { |
1148 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; | ||
1144 | continue; | 1149 | continue; |
1150 | } | ||
1145 | 1151 | ||
1146 | intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, | 1152 | intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, |
1147 | pipe); | 1153 | pipe); |
@@ -1235,9 +1241,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
1235 | if (wait) | 1241 | if (wait) |
1236 | intel_wait_ddi_buf_idle(dev_priv, port); | 1242 | intel_wait_ddi_buf_idle(dev_priv, port); |
1237 | 1243 | ||
1238 | if (type == INTEL_OUTPUT_EDP) { | 1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
1239 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1240 | 1246 | ||
1247 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | ||
1241 | ironlake_edp_panel_off(intel_dp); | 1248 | ironlake_edp_panel_off(intel_dp); |
1242 | } | 1249 | } |
1243 | 1250 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9db009c55c88..e77d4b8856a7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -6027,7 +6027,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc) | |||
6027 | uint16_t postoff = 0; | 6027 | uint16_t postoff = 0; |
6028 | 6028 | ||
6029 | if (intel_crtc->config.limited_color_range) | 6029 | if (intel_crtc->config.limited_color_range) |
6030 | postoff = (16 * (1 << 13) / 255) & 0x1fff; | 6030 | postoff = (16 * (1 << 12) / 255) & 0x1fff; |
6031 | 6031 | ||
6032 | I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); | 6032 | I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); |
6033 | I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); | 6033 | I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); |
@@ -6614,7 +6614,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
6614 | 6614 | ||
6615 | /* Make sure we're not on PC8 state before disabling PC8, otherwise | 6615 | /* Make sure we're not on PC8 state before disabling PC8, otherwise |
6616 | * we'll hang the machine! */ | 6616 | * we'll hang the machine! */ |
6617 | dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); | 6617 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
6618 | 6618 | ||
6619 | if (val & LCPLL_POWER_DOWN_ALLOW) { | 6619 | if (val & LCPLL_POWER_DOWN_ALLOW) { |
6620 | val &= ~LCPLL_POWER_DOWN_ALLOW; | 6620 | val &= ~LCPLL_POWER_DOWN_ALLOW; |
@@ -6648,7 +6648,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
6648 | DRM_ERROR("Switching back to LCPLL failed\n"); | 6648 | DRM_ERROR("Switching back to LCPLL failed\n"); |
6649 | } | 6649 | } |
6650 | 6650 | ||
6651 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); | 6651 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
6652 | } | 6652 | } |
6653 | 6653 | ||
6654 | void hsw_enable_pc8_work(struct work_struct *__work) | 6654 | void hsw_enable_pc8_work(struct work_struct *__work) |
@@ -8581,7 +8581,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
8581 | intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | | 8581 | intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | |
8582 | DERRMR_PIPEB_PRI_FLIP_DONE | | 8582 | DERRMR_PIPEB_PRI_FLIP_DONE | |
8583 | DERRMR_PIPEC_PRI_FLIP_DONE)); | 8583 | DERRMR_PIPEC_PRI_FLIP_DONE)); |
8584 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); | 8584 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | |
8585 | MI_SRM_LRM_GLOBAL_GTT); | ||
8585 | intel_ring_emit(ring, DERRMR); | 8586 | intel_ring_emit(ring, DERRMR); |
8586 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); | 8587 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); |
8587 | } | 8588 | } |
@@ -9363,7 +9364,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
9363 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) | 9364 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) |
9364 | PIPE_CONF_CHECK_I(pipe_bpp); | 9365 | PIPE_CONF_CHECK_I(pipe_bpp); |
9365 | 9366 | ||
9366 | if (!IS_HASWELL(dev)) { | 9367 | if (!HAS_DDI(dev)) { |
9367 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); | 9368 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); |
9368 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); | 9369 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); |
9369 | } | 9370 | } |
@@ -10330,7 +10331,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
10330 | intel_ddi_init(dev, PORT_D); | 10331 | intel_ddi_init(dev, PORT_D); |
10331 | } else if (HAS_PCH_SPLIT(dev)) { | 10332 | } else if (HAS_PCH_SPLIT(dev)) { |
10332 | int found; | 10333 | int found; |
10333 | dpd_is_edp = intel_dpd_is_edp(dev); | 10334 | dpd_is_edp = intel_dp_is_edp(dev, PORT_D); |
10334 | 10335 | ||
10335 | if (has_edp_a(dev)) | 10336 | if (has_edp_a(dev)) |
10336 | intel_dp_init(dev, DP_A, PORT_A); | 10337 | intel_dp_init(dev, DP_A, PORT_A); |
@@ -10367,8 +10368,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
10367 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, | 10368 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, |
10368 | PORT_C); | 10369 | PORT_C); |
10369 | if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) | 10370 | if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) |
10370 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, | 10371 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); |
10371 | PORT_C); | ||
10372 | } | 10372 | } |
10373 | 10373 | ||
10374 | intel_dsi_init(dev); | 10374 | intel_dsi_init(dev); |
@@ -10816,11 +10816,20 @@ static struct intel_quirk intel_quirks[] = { | |||
10816 | /* Sony Vaio Y cannot use SSC on LVDS */ | 10816 | /* Sony Vaio Y cannot use SSC on LVDS */ |
10817 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | 10817 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
10818 | 10818 | ||
10819 | /* | 10819 | /* Acer Aspire 5734Z must invert backlight brightness */ |
10820 | * All GM45 Acer (and its brands eMachines and Packard Bell) laptops | 10820 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
10821 | * seem to use inverted backlight PWM. | 10821 | |
10822 | */ | 10822 | /* Acer/eMachines G725 */ |
10823 | { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, | 10823 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, |
10824 | |||
10825 | /* Acer/eMachines e725 */ | ||
10826 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, | ||
10827 | |||
10828 | /* Acer/Packard Bell NCL20 */ | ||
10829 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, | ||
10830 | |||
10831 | /* Acer Aspire 4736Z */ | ||
10832 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | ||
10824 | }; | 10833 | }; |
10825 | 10834 | ||
10826 | static void intel_init_quirks(struct drm_device *dev) | 10835 | static void intel_init_quirks(struct drm_device *dev) |
@@ -11302,8 +11311,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
11302 | } | 11311 | } |
11303 | 11312 | ||
11304 | intel_modeset_check_state(dev); | 11313 | intel_modeset_check_state(dev); |
11305 | |||
11306 | drm_mode_config_reset(dev); | ||
11307 | } | 11314 | } |
11308 | 11315 | ||
11309 | void intel_modeset_gem_init(struct drm_device *dev) | 11316 | void intel_modeset_gem_init(struct drm_device *dev) |
@@ -11312,7 +11319,10 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
11312 | 11319 | ||
11313 | intel_setup_overlay(dev); | 11320 | intel_setup_overlay(dev); |
11314 | 11321 | ||
11322 | mutex_lock(&dev->mode_config.mutex); | ||
11323 | drm_mode_config_reset(dev); | ||
11315 | intel_modeset_setup_hw_state(dev, false); | 11324 | intel_modeset_setup_hw_state(dev, false); |
11325 | mutex_unlock(&dev->mode_config.mutex); | ||
11316 | } | 11326 | } |
11317 | 11327 | ||
11318 | void intel_modeset_cleanup(struct drm_device *dev) | 11328 | void intel_modeset_cleanup(struct drm_device *dev) |
@@ -11390,14 +11400,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector, | |||
11390 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | 11400 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) |
11391 | { | 11401 | { |
11392 | struct drm_i915_private *dev_priv = dev->dev_private; | 11402 | struct drm_i915_private *dev_priv = dev->dev_private; |
11403 | unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; | ||
11393 | u16 gmch_ctrl; | 11404 | u16 gmch_ctrl; |
11394 | 11405 | ||
11395 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); | 11406 | pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); |
11396 | if (state) | 11407 | if (state) |
11397 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; | 11408 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; |
11398 | else | 11409 | else |
11399 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; | 11410 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; |
11400 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 11411 | pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); |
11401 | return 0; | 11412 | return 0; |
11402 | } | 11413 | } |
11403 | 11414 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8f17f8fbd0b1..9b40113f4fa1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -3324,11 +3324,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) | |||
3324 | } | 3324 | } |
3325 | 3325 | ||
3326 | /* check the VBT to see whether the eDP is on DP-D port */ | 3326 | /* check the VBT to see whether the eDP is on DP-D port */ |
3327 | bool intel_dpd_is_edp(struct drm_device *dev) | 3327 | bool intel_dp_is_edp(struct drm_device *dev, enum port port) |
3328 | { | 3328 | { |
3329 | struct drm_i915_private *dev_priv = dev->dev_private; | 3329 | struct drm_i915_private *dev_priv = dev->dev_private; |
3330 | union child_device_config *p_child; | 3330 | union child_device_config *p_child; |
3331 | int i; | 3331 | int i; |
3332 | static const short port_mapping[] = { | ||
3333 | [PORT_B] = PORT_IDPB, | ||
3334 | [PORT_C] = PORT_IDPC, | ||
3335 | [PORT_D] = PORT_IDPD, | ||
3336 | }; | ||
3337 | |||
3338 | if (port == PORT_A) | ||
3339 | return true; | ||
3332 | 3340 | ||
3333 | if (!dev_priv->vbt.child_dev_num) | 3341 | if (!dev_priv->vbt.child_dev_num) |
3334 | return false; | 3342 | return false; |
@@ -3336,7 +3344,7 @@ bool intel_dpd_is_edp(struct drm_device *dev) | |||
3336 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 3344 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
3337 | p_child = dev_priv->vbt.child_dev + i; | 3345 | p_child = dev_priv->vbt.child_dev + i; |
3338 | 3346 | ||
3339 | if (p_child->common.dvo_port == PORT_IDPD && | 3347 | if (p_child->common.dvo_port == port_mapping[port] && |
3340 | (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == | 3348 | (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == |
3341 | (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) | 3349 | (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) |
3342 | return true; | 3350 | return true; |
@@ -3614,26 +3622,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3614 | intel_dp->DP = I915_READ(intel_dp->output_reg); | 3622 | intel_dp->DP = I915_READ(intel_dp->output_reg); |
3615 | intel_dp->attached_connector = intel_connector; | 3623 | intel_dp->attached_connector = intel_connector; |
3616 | 3624 | ||
3617 | type = DRM_MODE_CONNECTOR_DisplayPort; | 3625 | if (intel_dp_is_edp(dev, port)) |
3618 | /* | ||
3619 | * FIXME : We need to initialize built-in panels before external panels. | ||
3620 | * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup | ||
3621 | */ | ||
3622 | switch (port) { | ||
3623 | case PORT_A: | ||
3624 | type = DRM_MODE_CONNECTOR_eDP; | 3626 | type = DRM_MODE_CONNECTOR_eDP; |
3625 | break; | 3627 | else |
3626 | case PORT_C: | 3628 | type = DRM_MODE_CONNECTOR_DisplayPort; |
3627 | if (IS_VALLEYVIEW(dev)) | ||
3628 | type = DRM_MODE_CONNECTOR_eDP; | ||
3629 | break; | ||
3630 | case PORT_D: | ||
3631 | if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev)) | ||
3632 | type = DRM_MODE_CONNECTOR_eDP; | ||
3633 | break; | ||
3634 | default: /* silence GCC warning */ | ||
3635 | break; | ||
3636 | } | ||
3637 | 3629 | ||
3638 | /* | 3630 | /* |
3639 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but | 3631 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4cbf49051b9c..8754db9e3d52 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -722,7 +722,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder); | |||
722 | void intel_dp_check_link_status(struct intel_dp *intel_dp); | 722 | void intel_dp_check_link_status(struct intel_dp *intel_dp); |
723 | bool intel_dp_compute_config(struct intel_encoder *encoder, | 723 | bool intel_dp_compute_config(struct intel_encoder *encoder, |
724 | struct intel_crtc_config *pipe_config); | 724 | struct intel_crtc_config *pipe_config); |
725 | bool intel_dpd_is_edp(struct drm_device *dev); | 725 | bool intel_dp_is_edp(struct drm_device *dev, enum port port); |
726 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp); | 726 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp); |
727 | void ironlake_edp_backlight_off(struct intel_dp *intel_dp); | 727 | void ironlake_edp_backlight_off(struct intel_dp *intel_dp); |
728 | void ironlake_edp_panel_on(struct intel_dp *intel_dp); | 728 | void ironlake_edp_panel_on(struct intel_dp *intel_dp); |
@@ -839,6 +839,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, | |||
839 | uint32_t sprite_width, int pixel_size, | 839 | uint32_t sprite_width, int pixel_size, |
840 | bool enabled, bool scaled); | 840 | bool enabled, bool scaled); |
841 | void intel_init_pm(struct drm_device *dev); | 841 | void intel_init_pm(struct drm_device *dev); |
842 | void intel_pm_setup(struct drm_device *dev); | ||
842 | bool intel_fbc_enabled(struct drm_device *dev); | 843 | bool intel_fbc_enabled(struct drm_device *dev); |
843 | void intel_update_fbc(struct drm_device *dev); | 844 | void intel_update_fbc(struct drm_device *dev); |
844 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 845 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9998185fdb22..d77cc81900f9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -1113,7 +1113,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, | |||
1113 | 1113 | ||
1114 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; | 1114 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; |
1115 | clock = adjusted_mode->crtc_clock; | 1115 | clock = adjusted_mode->crtc_clock; |
1116 | htotal = adjusted_mode->htotal; | 1116 | htotal = adjusted_mode->crtc_htotal; |
1117 | hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; | 1117 | hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; |
1118 | pixel_size = crtc->fb->bits_per_pixel / 8; | 1118 | pixel_size = crtc->fb->bits_per_pixel / 8; |
1119 | 1119 | ||
@@ -1200,7 +1200,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, | |||
1200 | crtc = intel_get_crtc_for_plane(dev, plane); | 1200 | crtc = intel_get_crtc_for_plane(dev, plane); |
1201 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; | 1201 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; |
1202 | clock = adjusted_mode->crtc_clock; | 1202 | clock = adjusted_mode->crtc_clock; |
1203 | htotal = adjusted_mode->htotal; | 1203 | htotal = adjusted_mode->crtc_htotal; |
1204 | hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; | 1204 | hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; |
1205 | pixel_size = crtc->fb->bits_per_pixel / 8; | 1205 | pixel_size = crtc->fb->bits_per_pixel / 8; |
1206 | 1206 | ||
@@ -1431,7 +1431,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) | |||
1431 | const struct drm_display_mode *adjusted_mode = | 1431 | const struct drm_display_mode *adjusted_mode = |
1432 | &to_intel_crtc(crtc)->config.adjusted_mode; | 1432 | &to_intel_crtc(crtc)->config.adjusted_mode; |
1433 | int clock = adjusted_mode->crtc_clock; | 1433 | int clock = adjusted_mode->crtc_clock; |
1434 | int htotal = adjusted_mode->htotal; | 1434 | int htotal = adjusted_mode->crtc_htotal; |
1435 | int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; | 1435 | int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; |
1436 | int pixel_size = crtc->fb->bits_per_pixel / 8; | 1436 | int pixel_size = crtc->fb->bits_per_pixel / 8; |
1437 | unsigned long line_time_us; | 1437 | unsigned long line_time_us; |
@@ -1557,7 +1557,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
1557 | const struct drm_display_mode *adjusted_mode = | 1557 | const struct drm_display_mode *adjusted_mode = |
1558 | &to_intel_crtc(enabled)->config.adjusted_mode; | 1558 | &to_intel_crtc(enabled)->config.adjusted_mode; |
1559 | int clock = adjusted_mode->crtc_clock; | 1559 | int clock = adjusted_mode->crtc_clock; |
1560 | int htotal = adjusted_mode->htotal; | 1560 | int htotal = adjusted_mode->crtc_htotal; |
1561 | int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; | 1561 | int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; |
1562 | int pixel_size = enabled->fb->bits_per_pixel / 8; | 1562 | int pixel_size = enabled->fb->bits_per_pixel / 8; |
1563 | unsigned long line_time_us; | 1563 | unsigned long line_time_us; |
@@ -1985,8 +1985,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) | |||
1985 | /* The WM are computed with base on how long it takes to fill a single | 1985 | /* The WM are computed with base on how long it takes to fill a single |
1986 | * row at the given clock rate, multiplied by 8. | 1986 | * row at the given clock rate, multiplied by 8. |
1987 | * */ | 1987 | * */ |
1988 | linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); | 1988 | linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, |
1989 | ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, | 1989 | mode->crtc_clock); |
1990 | ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, | ||
1990 | intel_ddi_get_cdclk_freq(dev_priv)); | 1991 | intel_ddi_get_cdclk_freq(dev_priv)); |
1991 | 1992 | ||
1992 | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | | 1993 | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | |
@@ -5722,10 +5723,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
5722 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; | 5723 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; |
5723 | } | 5724 | } |
5724 | 5725 | ||
5725 | void intel_pm_init(struct drm_device *dev) | 5726 | void intel_pm_setup(struct drm_device *dev) |
5726 | { | 5727 | { |
5727 | struct drm_i915_private *dev_priv = dev->dev_private; | 5728 | struct drm_i915_private *dev_priv = dev->dev_private; |
5728 | 5729 | ||
5730 | mutex_init(&dev_priv->rps.hw_lock); | ||
5731 | |||
5732 | mutex_init(&dev_priv->pc8.lock); | ||
5733 | dev_priv->pc8.requirements_met = false; | ||
5734 | dev_priv->pc8.gpu_idle = false; | ||
5735 | dev_priv->pc8.irqs_disabled = false; | ||
5736 | dev_priv->pc8.enabled = false; | ||
5737 | dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ | ||
5738 | INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); | ||
5729 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 5739 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
5730 | intel_gen6_powersave_work); | 5740 | intel_gen6_powersave_work); |
5731 | } | 5741 | } |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index edcf801613e6..b3fa1ba191b7 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o | |||
59 | nouveau-y += core/subdev/clock/nv50.o | 59 | nouveau-y += core/subdev/clock/nv50.o |
60 | nouveau-y += core/subdev/clock/nv84.o | 60 | nouveau-y += core/subdev/clock/nv84.o |
61 | nouveau-y += core/subdev/clock/nva3.o | 61 | nouveau-y += core/subdev/clock/nva3.o |
62 | nouveau-y += core/subdev/clock/nvaa.o | ||
62 | nouveau-y += core/subdev/clock/nvc0.o | 63 | nouveau-y += core/subdev/clock/nvc0.o |
63 | nouveau-y += core/subdev/clock/nve0.o | 64 | nouveau-y += core/subdev/clock/nve0.o |
64 | nouveau-y += core/subdev/clock/pllnv04.o | 65 | nouveau-y += core/subdev/clock/pllnv04.o |
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index 48f06378d3f9..2ea5568b6cf5 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c | |||
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
104 | 104 | ||
105 | if (parent) { | 105 | if (parent) { |
106 | struct nouveau_device *device = nv_device(parent); | 106 | struct nouveau_device *device = nv_device(parent); |
107 | int subidx = nv_hclass(subdev) & 0xff; | ||
108 | |||
109 | subdev->debug = nouveau_dbgopt(device->dbgopt, subname); | 107 | subdev->debug = nouveau_dbgopt(device->dbgopt, subname); |
110 | subdev->mmio = nv_subdev(device)->mmio; | 108 | subdev->mmio = nv_subdev(device)->mmio; |
111 | device->subdev[subidx] = *pobject; | ||
112 | } | 109 | } |
113 | 110 | ||
114 | return 0; | 111 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c index 9135b25a29d0..dd01c6c435d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c | |||
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
268 | if (ret) | 268 | if (ret) |
269 | return ret; | 269 | return ret; |
270 | 270 | ||
271 | device->subdev[i] = devobj->subdev[i]; | ||
272 | |||
271 | /* note: can't init *any* subdevs until devinit has been run | 273 | /* note: can't init *any* subdevs until devinit has been run |
272 | * due to not knowing exactly what the vbios init tables will | 274 | * due to not knowing exactly what the vbios init tables will |
273 | * mess with. devinit also can't be run until all of its | 275 | * mess with. devinit also can't be run until all of its |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c index db139827047c..db3fc7be856a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c | |||
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device) | |||
283 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; | 283 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; |
284 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; | 284 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; |
285 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; | 285 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; |
286 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 286 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; |
287 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 287 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
288 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 288 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
289 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 289 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; |
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device) | |||
311 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; | 311 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; |
312 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; | 312 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; |
313 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; | 313 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; |
314 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 314 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; |
315 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 315 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
316 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 316 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
317 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 317 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 8d06eef2b9ee..dbc5e33de94f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | |||
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device) | |||
161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; |
164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index 5f555788121c..e6352bd5b4ff 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <engine/dmaobj.h> | 33 | #include <engine/dmaobj.h> |
34 | #include <engine/fifo.h> | 34 | #include <engine/fifo.h> |
35 | 35 | ||
36 | #include "nv04.h" | ||
36 | #include "nv50.h" | 37 | #include "nv50.h" |
37 | 38 | ||
38 | /******************************************************************************* | 39 | /******************************************************************************* |
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
460 | nv_subdev(priv)->intr = nv04_fifo_intr; | 461 | nv_subdev(priv)->intr = nv04_fifo_intr; |
461 | nv_engine(priv)->cclass = &nv50_fifo_cclass; | 462 | nv_engine(priv)->cclass = &nv50_fifo_cclass; |
462 | nv_engine(priv)->sclass = nv50_fifo_sclass; | 463 | nv_engine(priv)->sclass = nv50_fifo_sclass; |
464 | priv->base.pause = nv04_fifo_pause; | ||
465 | priv->base.start = nv04_fifo_start; | ||
463 | return 0; | 466 | return 0; |
464 | } | 467 | } |
465 | 468 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 0908dc834c84..fe0f41e65d9b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <engine/dmaobj.h> | 35 | #include <engine/dmaobj.h> |
36 | #include <engine/fifo.h> | 36 | #include <engine/fifo.h> |
37 | 37 | ||
38 | #include "nv04.h" | ||
38 | #include "nv50.h" | 39 | #include "nv50.h" |
39 | 40 | ||
40 | /******************************************************************************* | 41 | /******************************************************************************* |
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
432 | nv_subdev(priv)->intr = nv04_fifo_intr; | 433 | nv_subdev(priv)->intr = nv04_fifo_intr; |
433 | nv_engine(priv)->cclass = &nv84_fifo_cclass; | 434 | nv_engine(priv)->cclass = &nv84_fifo_cclass; |
434 | nv_engine(priv)->sclass = nv84_fifo_sclass; | 435 | nv_engine(priv)->sclass = nv84_fifo_sclass; |
436 | priv->base.pause = nv04_fifo_pause; | ||
437 | priv->base.start = nv04_fifo_start; | ||
435 | return 0; | 438 | return 0; |
436 | } | 439 | } |
437 | 440 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 434bb4b0fa2e..5c8a63dc506a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | |||
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds) | |||
334 | while ((mthd = &mthds[i++]) && (init = mthd->init)) { | 334 | while ((mthd = &mthds[i++]) && (init = mthd->init)) { |
335 | u32 addr = 0x80000000 | mthd->oclass; | 335 | u32 addr = 0x80000000 | mthd->oclass; |
336 | for (data = 0; init->count; init++) { | 336 | for (data = 0; init->count; init++) { |
337 | if (data != init->data) { | 337 | if (init == mthd->init || data != init->data) { |
338 | nv_wr32(priv, 0x40448c, init->data); | 338 | nv_wr32(priv, 0x40448c, init->data); |
339 | data = init->data; | 339 | data = init->data; |
340 | } | 340 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index b574dd4bb828..5ce686ee729e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c | |||
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent, | |||
176 | if (ret) | 176 | if (ret) |
177 | return ret; | 177 | return ret; |
178 | 178 | ||
179 | chan->vblank.nr_event = pdisp->vblank->index_nr; | 179 | chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0; |
180 | chan->vblank.event = kzalloc(chan->vblank.nr_event * | 180 | chan->vblank.event = kzalloc(chan->vblank.nr_event * |
181 | sizeof(*chan->vblank.event), GFP_KERNEL); | 181 | sizeof(*chan->vblank.event), GFP_KERNEL); |
182 | if (!chan->vblank.event) | 182 | if (!chan->vblank.event) |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index e2675bc0edba..8f4ced75444a 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h | |||
@@ -14,6 +14,9 @@ enum nv_clk_src { | |||
14 | nv_clk_src_hclk, | 14 | nv_clk_src_hclk, |
15 | nv_clk_src_hclkm3, | 15 | nv_clk_src_hclkm3, |
16 | nv_clk_src_hclkm3d2, | 16 | nv_clk_src_hclkm3d2, |
17 | nv_clk_src_hclkm2d3, /* NVAA */ | ||
18 | nv_clk_src_hclkm4, /* NVAA */ | ||
19 | nv_clk_src_cclk, /* NVAA */ | ||
17 | 20 | ||
18 | nv_clk_src_host, | 21 | nv_clk_src_host, |
19 | 22 | ||
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass; | |||
127 | extern struct nouveau_oclass nv40_clock_oclass; | 130 | extern struct nouveau_oclass nv40_clock_oclass; |
128 | extern struct nouveau_oclass *nv50_clock_oclass; | 131 | extern struct nouveau_oclass *nv50_clock_oclass; |
129 | extern struct nouveau_oclass *nv84_clock_oclass; | 132 | extern struct nouveau_oclass *nv84_clock_oclass; |
133 | extern struct nouveau_oclass *nvaa_clock_oclass; | ||
130 | extern struct nouveau_oclass nva3_clock_oclass; | 134 | extern struct nouveau_oclass nva3_clock_oclass; |
131 | extern struct nouveau_oclass nvc0_clock_oclass; | 135 | extern struct nouveau_oclass nvc0_clock_oclass; |
132 | extern struct nouveau_oclass nve0_clock_oclass; | 136 | extern struct nouveau_oclass nve0_clock_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index 8541aa382ff2..d89dbdf39b0d 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h | |||
@@ -75,6 +75,11 @@ struct nouveau_fb { | |||
75 | static inline struct nouveau_fb * | 75 | static inline struct nouveau_fb * |
76 | nouveau_fb(void *obj) | 76 | nouveau_fb(void *obj) |
77 | { | 77 | { |
78 | /* fbram uses this before device subdev pointer is valid */ | ||
79 | if (nv_iclass(obj, NV_SUBDEV_CLASS) && | ||
80 | nv_subidx(obj) == NVDEV_SUBDEV_FB) | ||
81 | return obj; | ||
82 | |||
78 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; | 83 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; |
79 | } | 84 | } |
80 | 85 | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h index 9fa5da723871..7f50a858b16f 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | |||
@@ -73,7 +73,7 @@ struct nouveau_i2c { | |||
73 | int (*identify)(struct nouveau_i2c *, int index, | 73 | int (*identify)(struct nouveau_i2c *, int index, |
74 | const char *what, struct nouveau_i2c_board_info *, | 74 | const char *what, struct nouveau_i2c_board_info *, |
75 | bool (*match)(struct nouveau_i2c_port *, | 75 | bool (*match)(struct nouveau_i2c_port *, |
76 | struct i2c_board_info *)); | 76 | struct i2c_board_info *, void *), void *); |
77 | struct list_head ports; | 77 | struct list_head ports; |
78 | }; | 78 | }; |
79 | 79 | ||
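
Editorial aside, not part of the patch: the identify() change above threads an opaque void *data from the caller through to the match callback, so a probe routine can receive its context (for example the therm pointer, as in the therm/ic.c hunks further down) instead of deriving it from global state. A minimal, self-contained sketch of that callback-with-context pattern follows; every name in it (probe_devices, match_cb, my_ctx, wants_addr) is hypothetical and not part of the nouveau API.

        /* Illustrative only: generic callback-with-context, mirroring the extra
         * "void *data" argument added to identify()/match above. */
        #include <stdbool.h>
        #include <stdio.h>

        typedef bool (*match_cb)(int addr, void *data);

        static int probe_devices(const int *addrs, int n, match_cb match, void *data)
        {
                int i;

                for (i = 0; i < n; i++) {
                        if (!match || match(addrs[i], data))
                                return i;       /* first address the caller accepts */
                }
                return -1;
        }

        struct my_ctx { int wanted; };

        static bool wants_addr(int addr, void *data)
        {
                struct my_ctx *ctx = data;      /* context arrives without globals */

                return addr == ctx->wanted;
        }

        int main(void)
        {
                const int addrs[] = { 0x2d, 0x4c, 0x6b };
                struct my_ctx ctx = { .wanted = 0x4c };

                printf("matched index %d\n", probe_devices(addrs, 3, wants_addr, &ctx));
                return 0;       /* prints: matched index 1 */
        }
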
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h index ec7a54e91a08..4aca33887aaa 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | |||
@@ -50,6 +50,13 @@ struct nouveau_instmem { | |||
50 | static inline struct nouveau_instmem * | 50 | static inline struct nouveau_instmem * |
51 | nouveau_instmem(void *obj) | 51 | nouveau_instmem(void *obj) |
52 | { | 52 | { |
53 | /* nv04/nv40 impls need to create objects in their constructor, | ||
54 | * which is before the subdev pointer is valid | ||
55 | */ | ||
56 | if (nv_iclass(obj, NV_SUBDEV_CLASS) && | ||
57 | nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM) | ||
58 | return obj; | ||
59 | |||
53 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; | 60 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; |
54 | } | 61 | } |
55 | 62 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index 420908cb82b6..df1b1b423093 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c | |||
@@ -365,13 +365,13 @@ static u16 | |||
365 | init_script(struct nouveau_bios *bios, int index) | 365 | init_script(struct nouveau_bios *bios, int index) |
366 | { | 366 | { |
367 | struct nvbios_init init = { .bios = bios }; | 367 | struct nvbios_init init = { .bios = bios }; |
368 | u16 data; | 368 | u16 bmp_ver = bmp_version(bios), data; |
369 | 369 | ||
370 | if (bmp_version(bios) && bmp_version(bios) < 0x0510) { | 370 | if (bmp_ver && bmp_ver < 0x0510) { |
371 | if (index > 1) | 371 | if (index > 1 || bmp_ver < 0x0100) |
372 | return 0x0000; | 372 | return 0x0000; |
373 | 373 | ||
374 | data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); | 374 | data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18); |
375 | return nv_ro16(bios, data + (index * 2)); | 375 | return nv_ro16(bios, data + (index * 2)); |
376 | } | 376 | } |
377 | 377 | ||
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init) | |||
1294 | u16 offset = nv_ro16(bios, init->offset + 1); | 1294 | u16 offset = nv_ro16(bios, init->offset + 1); |
1295 | 1295 | ||
1296 | trace("JUMP\t0x%04x\n", offset); | 1296 | trace("JUMP\t0x%04x\n", offset); |
1297 | init->offset = offset; | 1297 | |
1298 | if (init_exec(init)) | ||
1299 | init->offset = offset; | ||
1300 | else | ||
1301 | init->offset += 3; | ||
1298 | } | 1302 | } |
1299 | 1303 | ||
1300 | /** | 1304 | /** |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c index da50c1b12928..30c1f3a4158e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | |||
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1, | |||
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
71 | 71 | ||
72 | static struct nouveau_clocks | ||
73 | nv04_domain[] = { | ||
74 | { nv_clk_src_max } | ||
75 | }; | ||
76 | |||
72 | static int | 77 | static int |
73 | nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 78 | nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
74 | struct nouveau_oclass *oclass, void *data, u32 size, | 79 | struct nouveau_oclass *oclass, void *data, u32 size, |
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
77 | struct nv04_clock_priv *priv; | 82 | struct nv04_clock_priv *priv; |
78 | int ret; | 83 | int ret; |
79 | 84 | ||
80 | ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); | 85 | ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv); |
81 | *pobject = nv_object(priv); | 86 | *pobject = nv_object(priv); |
82 | if (ret) | 87 | if (ret) |
83 | return ret; | 88 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c new file mode 100644 index 000000000000..7a723b4f564d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | |||
@@ -0,0 +1,445 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include <engine/fifo.h> | ||
26 | #include <subdev/bios.h> | ||
27 | #include <subdev/bios/pll.h> | ||
28 | #include <subdev/timer.h> | ||
29 | #include <subdev/clock.h> | ||
30 | |||
31 | #include "pll.h" | ||
32 | |||
33 | struct nvaa_clock_priv { | ||
34 | struct nouveau_clock base; | ||
35 | enum nv_clk_src csrc, ssrc, vsrc; | ||
36 | u32 cctrl, sctrl; | ||
37 | u32 ccoef, scoef; | ||
38 | u32 cpost, spost; | ||
39 | u32 vdiv; | ||
40 | }; | ||
41 | |||
42 | static u32 | ||
43 | read_div(struct nouveau_clock *clk) | ||
44 | { | ||
45 | return nv_rd32(clk, 0x004600); | ||
46 | } | ||
47 | |||
48 | static u32 | ||
49 | read_pll(struct nouveau_clock *clk, u32 base) | ||
50 | { | ||
51 | u32 ctrl = nv_rd32(clk, base + 0); | ||
52 | u32 coef = nv_rd32(clk, base + 4); | ||
53 | u32 ref = clk->read(clk, nv_clk_src_href); | ||
54 | u32 post_div = 0; | ||
55 | u32 clock = 0; | ||
56 | int N1, M1; | ||
57 | |||
58 | switch (base){ | ||
59 | case 0x4020: | ||
60 | post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16); | ||
61 | break; | ||
62 | case 0x4028: | ||
63 | post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16; | ||
64 | break; | ||
65 | default: | ||
66 | break; | ||
67 | } | ||
68 | |||
69 | N1 = (coef & 0x0000ff00) >> 8; | ||
70 | M1 = (coef & 0x000000ff); | ||
71 | if ((ctrl & 0x80000000) && M1) { | ||
72 | clock = ref * N1 / M1; | ||
73 | clock = clock / post_div; | ||
74 | } | ||
75 | |||
76 | return clock; | ||
77 | } | ||
78 | |||
79 | static int | ||
80 | nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) | ||
81 | { | ||
82 | struct nvaa_clock_priv *priv = (void *)clk; | ||
83 | u32 mast = nv_rd32(clk, 0x00c054); | ||
84 | u32 P = 0; | ||
85 | |||
86 | switch (src) { | ||
87 | case nv_clk_src_crystal: | ||
88 | return nv_device(priv)->crystal; | ||
89 | case nv_clk_src_href: | ||
90 | return 100000; /* PCIE reference clock */ | ||
91 | case nv_clk_src_hclkm4: | ||
92 | return clk->read(clk, nv_clk_src_href) * 4; | ||
93 | case nv_clk_src_hclkm2d3: | ||
94 | return clk->read(clk, nv_clk_src_href) * 2 / 3; | ||
95 | case nv_clk_src_host: | ||
96 | switch (mast & 0x000c0000) { | ||
97 | case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3); | ||
98 | case 0x00040000: break; | ||
99 | case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4); | ||
100 | case 0x000c0000: return clk->read(clk, nv_clk_src_cclk); | ||
101 | } | ||
102 | break; | ||
103 | case nv_clk_src_core: | ||
104 | P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16; | ||
105 | |||
106 | switch (mast & 0x00000003) { | ||
107 | case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P; | ||
108 | case 0x00000001: return 0; | ||
109 | case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P; | ||
110 | case 0x00000003: return read_pll(clk, 0x004028) >> P; | ||
111 | } | ||
112 | break; | ||
113 | case nv_clk_src_cclk: | ||
114 | if ((mast & 0x03000000) != 0x03000000) | ||
115 | return clk->read(clk, nv_clk_src_core); | ||
116 | |||
117 | if ((mast & 0x00000200) == 0x00000000) | ||
118 | return clk->read(clk, nv_clk_src_core); | ||
119 | |||
120 | switch (mast & 0x00000c00) { | ||
121 | case 0x00000000: return clk->read(clk, nv_clk_src_href); | ||
122 | case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4); | ||
123 | case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3); | ||
124 | default: return 0; | ||
125 | } | ||
126 | case nv_clk_src_shader: | ||
127 | P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16; | ||
128 | switch (mast & 0x00000030) { | ||
129 | case 0x00000000: | ||
130 | if (mast & 0x00000040) | ||
131 | return clk->read(clk, nv_clk_src_href) >> P; | ||
132 | return clk->read(clk, nv_clk_src_crystal) >> P; | ||
133 | case 0x00000010: break; | ||
134 | case 0x00000020: return read_pll(clk, 0x004028) >> P; | ||
135 | case 0x00000030: return read_pll(clk, 0x004020) >> P; | ||
136 | } | ||
137 | break; | ||
138 | case nv_clk_src_mem: | ||
139 | return 0; | ||
140 | break; | ||
141 | case nv_clk_src_vdec: | ||
142 | P = (read_div(clk) & 0x00000700) >> 8; | ||
143 | |||
144 | switch (mast & 0x00400000) { | ||
145 | case 0x00400000: | ||
146 | return clk->read(clk, nv_clk_src_core) >> P; | ||
147 | break; | ||
148 | default: | ||
149 | return 500000 >> P; | ||
150 | break; | ||
151 | } | ||
152 | break; | ||
153 | default: | ||
154 | break; | ||
155 | } | ||
156 | |||
157 | nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static u32 | ||
162 | calc_pll(struct nvaa_clock_priv *priv, u32 reg, | ||
163 | u32 clock, int *N, int *M, int *P) | ||
164 | { | ||
165 | struct nouveau_bios *bios = nouveau_bios(priv); | ||
166 | struct nvbios_pll pll; | ||
167 | struct nouveau_clock *clk = &priv->base; | ||
168 | int ret; | ||
169 | |||
170 | ret = nvbios_pll_parse(bios, reg, &pll); | ||
171 | if (ret) | ||
172 | return 0; | ||
173 | |||
174 | pll.vco2.max_freq = 0; | ||
175 | pll.refclk = clk->read(clk, nv_clk_src_href); | ||
176 | if (!pll.refclk) | ||
177 | return 0; | ||
178 | |||
179 | return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P); | ||
180 | } | ||
181 | |||
182 | static inline u32 | ||
183 | calc_P(u32 src, u32 target, int *div) | ||
184 | { | ||
185 | u32 clk0 = src, clk1 = src; | ||
186 | for (*div = 0; *div <= 7; (*div)++) { | ||
187 | if (clk0 <= target) { | ||
188 | clk1 = clk0 << (*div ? 1 : 0); | ||
189 | break; | ||
190 | } | ||
191 | clk0 >>= 1; | ||
192 | } | ||
193 | |||
194 | if (target - clk0 <= clk1 - target) | ||
195 | return clk0; | ||
196 | (*div)--; | ||
197 | return clk1; | ||
198 | } | ||
199 | |||
200 | static int | ||
201 | nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) | ||
202 | { | ||
203 | struct nvaa_clock_priv *priv = (void *)clk; | ||
204 | const int shader = cstate->domain[nv_clk_src_shader]; | ||
205 | const int core = cstate->domain[nv_clk_src_core]; | ||
206 | const int vdec = cstate->domain[nv_clk_src_vdec]; | ||
207 | u32 out = 0, clock = 0; | ||
208 | int N, M, P1, P2 = 0; | ||
209 | int divs = 0; | ||
210 | |||
211 | /* cclk: find suitable source, disable PLL if we can */ | ||
212 | if (core < clk->read(clk, nv_clk_src_hclkm4)) | ||
213 | out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs); | ||
214 | |||
215 | /* Calculate clock * 2, so shader clock can use it too */ | ||
216 | clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1); | ||
217 | |||
218 | if (abs(core - out) <= | ||
219 | abs(core - (clock >> 1))) { | ||
220 | priv->csrc = nv_clk_src_hclkm4; | ||
221 | priv->cctrl = divs << 16; | ||
222 | } else { | ||
223 | /* NVCTRL is actually used _after_ NVPOST, and after what we | ||
224 | * call NVPLL. To make matters worse, NVPOST is an integer | ||
225 | * divider instead of a right-shift number. */ | ||
226 | if(P1 > 2) { | ||
227 | P2 = P1 - 2; | ||
228 | P1 = 2; | ||
229 | } | ||
230 | |||
231 | priv->csrc = nv_clk_src_core; | ||
232 | priv->ccoef = (N << 8) | M; | ||
233 | |||
234 | priv->cctrl = (P2 + 1) << 16; | ||
235 | priv->cpost = (1 << P1) << 16; | ||
236 | } | ||
237 | |||
238 | /* sclk: nvpll + divisor, href or spll */ | ||
239 | out = 0; | ||
240 | if (shader == clk->read(clk, nv_clk_src_href)) { | ||
241 | priv->ssrc = nv_clk_src_href; | ||
242 | } else { | ||
243 | clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1); | ||
244 | if (priv->csrc == nv_clk_src_core) { | ||
245 | out = calc_P((core << 1), shader, &divs); | ||
246 | } | ||
247 | |||
248 | if (abs(shader - out) <= | ||
249 | abs(shader - clock) && | ||
250 | (divs + P2) <= 7) { | ||
251 | priv->ssrc = nv_clk_src_core; | ||
252 | priv->sctrl = (divs + P2) << 16; | ||
253 | } else { | ||
254 | priv->ssrc = nv_clk_src_shader; | ||
255 | priv->scoef = (N << 8) | M; | ||
256 | priv->sctrl = P1 << 16; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | /* vclk */ | ||
261 | out = calc_P(core, vdec, &divs); | ||
262 | clock = calc_P(500000, vdec, &P1); | ||
263 | if(abs(vdec - out) <= | ||
264 | abs(vdec - clock)) { | ||
265 | priv->vsrc = nv_clk_src_cclk; | ||
266 | priv->vdiv = divs << 16; | ||
267 | } else { | ||
268 | priv->vsrc = nv_clk_src_vdec; | ||
269 | priv->vdiv = P1 << 16; | ||
270 | } | ||
271 | |||
272 | /* Print strategy! */ | ||
273 | nv_debug(priv, "nvpll: %08x %08x %08x\n", | ||
274 | priv->ccoef, priv->cpost, priv->cctrl); | ||
275 | nv_debug(priv, " spll: %08x %08x %08x\n", | ||
276 | priv->scoef, priv->spost, priv->sctrl); | ||
277 | nv_debug(priv, " vdiv: %08x\n", priv->vdiv); | ||
278 | if (priv->csrc == nv_clk_src_hclkm4) | ||
279 | nv_debug(priv, "core: hrefm4\n"); | ||
280 | else | ||
281 | nv_debug(priv, "core: nvpll\n"); | ||
282 | |||
283 | if (priv->ssrc == nv_clk_src_hclkm4) | ||
284 | nv_debug(priv, "shader: hrefm4\n"); | ||
285 | else if (priv->ssrc == nv_clk_src_core) | ||
286 | nv_debug(priv, "shader: nvpll\n"); | ||
287 | else | ||
288 | nv_debug(priv, "shader: spll\n"); | ||
289 | |||
290 | if (priv->vsrc == nv_clk_src_hclkm4) | ||
291 | nv_debug(priv, "vdec: 500MHz\n"); | ||
292 | else | ||
293 | nv_debug(priv, "vdec: core\n"); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int | ||
299 | nvaa_clock_prog(struct nouveau_clock *clk) | ||
300 | { | ||
301 | struct nvaa_clock_priv *priv = (void *)clk; | ||
302 | struct nouveau_fifo *pfifo = nouveau_fifo(clk); | ||
303 | unsigned long flags; | ||
304 | u32 pllmask = 0, mast, ptherm_gate; | ||
305 | int ret = -EBUSY; | ||
306 | |||
307 | /* halt and idle execution engines */ | ||
308 | ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000); | ||
309 | nv_mask(clk, 0x002504, 0x00000001, 0x00000001); | ||
310 | /* Wait until the interrupt handler is finished */ | ||
311 | if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000)) | ||
312 | goto resume; | ||
313 | |||
314 | if (pfifo) | ||
315 | pfifo->pause(pfifo, &flags); | ||
316 | |||
317 | if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010)) | ||
318 | goto resume; | ||
319 | if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f)) | ||
320 | goto resume; | ||
321 | |||
322 | /* First switch to safe clocks: href */ | ||
323 | mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640); | ||
324 | mast &= ~0x00400e73; | ||
325 | mast |= 0x03000000; | ||
326 | |||
327 | switch (priv->csrc) { | ||
328 | case nv_clk_src_hclkm4: | ||
329 | nv_mask(clk, 0x4028, 0x00070000, priv->cctrl); | ||
330 | mast |= 0x00000002; | ||
331 | break; | ||
332 | case nv_clk_src_core: | ||
333 | nv_wr32(clk, 0x402c, priv->ccoef); | ||
334 | nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl); | ||
335 | nv_wr32(clk, 0x4040, priv->cpost); | ||
336 | pllmask |= (0x3 << 8); | ||
337 | mast |= 0x00000003; | ||
338 | break; | ||
339 | default: | ||
340 | nv_warn(priv,"Reclocking failed: unknown core clock\n"); | ||
341 | goto resume; | ||
342 | } | ||
343 | |||
344 | switch (priv->ssrc) { | ||
345 | case nv_clk_src_href: | ||
346 | nv_mask(clk, 0x4020, 0x00070000, 0x00000000); | ||
347 | /* mast |= 0x00000000; */ | ||
348 | break; | ||
349 | case nv_clk_src_core: | ||
350 | nv_mask(clk, 0x4020, 0x00070000, priv->sctrl); | ||
351 | mast |= 0x00000020; | ||
352 | break; | ||
353 | case nv_clk_src_shader: | ||
354 | nv_wr32(clk, 0x4024, priv->scoef); | ||
355 | nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl); | ||
356 | nv_wr32(clk, 0x4070, priv->spost); | ||
357 | pllmask |= (0x3 << 12); | ||
358 | mast |= 0x00000030; | ||
359 | break; | ||
360 | default: | ||
361 | nv_warn(priv,"Reclocking failed: unknown sclk clock\n"); | ||
362 | goto resume; | ||
363 | } | ||
364 | |||
365 | if (!nv_wait(clk, 0x004080, pllmask, pllmask)) { | ||
366 | nv_warn(priv,"Reclocking failed: unstable PLLs\n"); | ||
367 | goto resume; | ||
368 | } | ||
369 | |||
370 | switch (priv->vsrc) { | ||
371 | case nv_clk_src_cclk: | ||
372 | mast |= 0x00400000; | ||
373 | default: | ||
374 | nv_wr32(clk, 0x4600, priv->vdiv); | ||
375 | } | ||
376 | |||
377 | nv_wr32(clk, 0xc054, mast); | ||
378 | ret = 0; | ||
379 | |||
380 | resume: | ||
381 | if (pfifo) | ||
382 | pfifo->start(pfifo, &flags); | ||
383 | |||
384 | nv_mask(clk, 0x002504, 0x00000001, 0x00000000); | ||
385 | nv_wr32(clk, 0x020060, ptherm_gate); | ||
386 | |||
387 | /* Disable some PLLs and dividers when unused */ | ||
388 | if (priv->csrc != nv_clk_src_core) { | ||
389 | nv_wr32(clk, 0x4040, 0x00000000); | ||
390 | nv_mask(clk, 0x4028, 0x80000000, 0x00000000); | ||
391 | } | ||
392 | |||
393 | if (priv->ssrc != nv_clk_src_shader) { | ||
394 | nv_wr32(clk, 0x4070, 0x00000000); | ||
395 | nv_mask(clk, 0x4020, 0x80000000, 0x00000000); | ||
396 | } | ||
397 | |||
398 | return ret; | ||
399 | } | ||
400 | |||
401 | static void | ||
402 | nvaa_clock_tidy(struct nouveau_clock *clk) | ||
403 | { | ||
404 | } | ||
405 | |||
406 | static struct nouveau_clocks | ||
407 | nvaa_domains[] = { | ||
408 | { nv_clk_src_crystal, 0xff }, | ||
409 | { nv_clk_src_href , 0xff }, | ||
410 | { nv_clk_src_core , 0xff, 0, "core", 1000 }, | ||
411 | { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, | ||
412 | { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 }, | ||
413 | { nv_clk_src_max } | ||
414 | }; | ||
415 | |||
416 | static int | ||
417 | nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
418 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
419 | struct nouveau_object **pobject) | ||
420 | { | ||
421 | struct nvaa_clock_priv *priv; | ||
422 | int ret; | ||
423 | |||
424 | ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv); | ||
425 | *pobject = nv_object(priv); | ||
426 | if (ret) | ||
427 | return ret; | ||
428 | |||
429 | priv->base.read = nvaa_clock_read; | ||
430 | priv->base.calc = nvaa_clock_calc; | ||
431 | priv->base.prog = nvaa_clock_prog; | ||
432 | priv->base.tidy = nvaa_clock_tidy; | ||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | struct nouveau_oclass * | ||
437 | nvaa_clock_oclass = &(struct nouveau_oclass) { | ||
438 | .handle = NV_SUBDEV(CLOCK, 0xaa), | ||
439 | .ofuncs = &(struct nouveau_ofuncs) { | ||
440 | .ctor = nvaa_clock_ctor, | ||
441 | .dtor = _nouveau_clock_dtor, | ||
442 | .init = _nouveau_clock_init, | ||
443 | .fini = _nouveau_clock_fini, | ||
444 | }, | ||
445 | }; | ||
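
Editorial aside, not part of the patch: the calc_P() helper in the new nvaa.c above searches post-divider shifts 0..7 and returns whichever of the two neighbouring power-of-two-divided clocks lands closer to the target, storing the shift in *div. The following standalone restatement of the same arithmetic (frequencies in kHz, as in the driver) can be compiled on its own to check a couple of cases.

        /* Standalone re-statement of the calc_P() arithmetic, for illustration. */
        #include <stdio.h>
        #include <stdint.h>

        static uint32_t calc_p(uint32_t src, uint32_t target, int *div)
        {
                uint32_t clk0 = src, clk1 = src;

                for (*div = 0; *div <= 7; (*div)++) {
                        if (clk0 <= target) {
                                clk1 = clk0 << (*div ? 1 : 0);
                                break;
                        }
                        clk0 >>= 1;             /* try the next power-of-two divider */
                }

                if (target - clk0 <= clk1 - target)
                        return clk0;            /* divided clock is the closer candidate */
                (*div)--;
                return clk1;                    /* one divider step less is closer */
        }

        int main(void)
        {
                int div;

                /* 400 MHz source, 150 MHz target: 400000 >> 2 = 100000, div = 2 */
                printf("%u (div %d)\n", calc_p(400000, 150000, &div), div);
                /* 400 MHz source, 350 MHz target: undivided 400000 is closer, div = 0 */
                printf("%u (div %d)\n", calc_p(400000, 350000, &div), div);
                return 0;
        }
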
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 041fd5edaebf..c33c03d2f4af 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | |||
@@ -197,7 +197,7 @@ static int | |||
197 | nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, | 197 | nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, |
198 | struct nouveau_i2c_board_info *info, | 198 | struct nouveau_i2c_board_info *info, |
199 | bool (*match)(struct nouveau_i2c_port *, | 199 | bool (*match)(struct nouveau_i2c_port *, |
200 | struct i2c_board_info *)) | 200 | struct i2c_board_info *, void *), void *data) |
201 | { | 201 | { |
202 | struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); | 202 | struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); |
203 | int i; | 203 | int i; |
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, | |||
221 | } | 221 | } |
222 | 222 | ||
223 | if (nv_probe_i2c(port, info[i].dev.addr) && | 223 | if (nv_probe_i2c(port, info[i].dev.addr) && |
224 | (!match || match(port, &info[i].dev))) { | 224 | (!match || match(port, &info[i].dev, data))) { |
225 | nv_info(i2c, "detected %s: %s\n", what, | 225 | nv_info(i2c, "detected %s: %s\n", what, |
226 | info[i].dev.type); | 226 | info[i].dev.type); |
227 | return i; | 227 | return i; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e44ed7b93c6d..7610fc5f8fa2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | |||
@@ -29,9 +29,9 @@ | |||
29 | 29 | ||
30 | static bool | 30 | static bool |
31 | probe_monitoring_device(struct nouveau_i2c_port *i2c, | 31 | probe_monitoring_device(struct nouveau_i2c_port *i2c, |
32 | struct i2c_board_info *info) | 32 | struct i2c_board_info *info, void *data) |
33 | { | 33 | { |
34 | struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); | 34 | struct nouveau_therm_priv *priv = data; |
35 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; | 35 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; |
36 | struct i2c_client *client; | 36 | struct i2c_client *client; |
37 | 37 | ||
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) | |||
96 | }; | 96 | }; |
97 | 97 | ||
98 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", | 98 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", |
99 | board, probe_monitoring_device); | 99 | board, probe_monitoring_device, therm); |
100 | if (priv->ic) | 100 | if (priv->ic) |
101 | return; | 101 | return; |
102 | } | 102 | } |
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) | |||
108 | }; | 108 | }; |
109 | 109 | ||
110 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", | 110 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", |
111 | board, probe_monitoring_device); | 111 | board, probe_monitoring_device, therm); |
112 | if (priv->ic) | 112 | if (priv->ic) |
113 | return; | 113 | return; |
114 | } | 114 | } |
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm) | |||
117 | device. Let's try our static list. | 117 | device. Let's try our static list. |
118 | */ | 118 | */ |
119 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", | 119 | i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", |
120 | nv_board_infos, probe_monitoring_device); | 120 | nv_board_infos, probe_monitoring_device, therm); |
121 | } | 121 | } |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 936a71c59080..7fdc51e2a571 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c | |||
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) | |||
643 | get_tmds_slave(encoder)) | 643 | get_tmds_slave(encoder)) |
644 | return; | 644 | return; |
645 | 645 | ||
646 | type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); | 646 | type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL); |
647 | if (type < 0) | 647 | if (type < 0) |
648 | return; | 648 | return; |
649 | 649 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 3618ac6b6316..32e7064b819b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c | |||
@@ -58,8 +58,8 @@ struct nouveau_plane { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | static uint32_t formats[] = { | 60 | static uint32_t formats[] = { |
61 | DRM_FORMAT_NV12, | ||
62 | DRM_FORMAT_UYVY, | 61 | DRM_FORMAT_UYVY, |
62 | DRM_FORMAT_NV12, | ||
63 | }; | 63 | }; |
64 | 64 | ||
65 | /* Sine can be approximated with | 65 | /* Sine can be approximated with |
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
99 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 99 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
100 | struct nouveau_bo *cur = nv_plane->cur; | 100 | struct nouveau_bo *cur = nv_plane->cur; |
101 | bool flip = nv_plane->flip; | 101 | bool flip = nv_plane->flip; |
102 | int format = ALIGN(src_w * 4, 0x100); | ||
103 | int soff = NV_PCRTC0_SIZE * nv_crtc->index; | 102 | int soff = NV_PCRTC0_SIZE * nv_crtc->index; |
104 | int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; | 103 | int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; |
105 | int ret; | 104 | int format, ret; |
105 | |||
106 | /* Source parameters given in 16.16 fixed point, ignore fractional. */ | ||
107 | src_x >>= 16; | ||
108 | src_y >>= 16; | ||
109 | src_w >>= 16; | ||
110 | src_h >>= 16; | ||
111 | |||
112 | format = ALIGN(src_w * 4, 0x100); | ||
106 | 113 | ||
107 | if (format > 0xffff) | 114 | if (format > 0xffff) |
108 | return -EINVAL; | 115 | return -ERANGE; |
116 | |||
117 | if (dev->chipset >= 0x30) { | ||
118 | if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) | ||
119 | return -ERANGE; | ||
120 | } else { | ||
121 | if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3)) | ||
122 | return -ERANGE; | ||
123 | } | ||
109 | 124 | ||
110 | ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); | 125 | ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); |
111 | if (ret) | 126 | if (ret) |
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
113 | 128 | ||
114 | nv_plane->cur = nv_fb->nvbo; | 129 | nv_plane->cur = nv_fb->nvbo; |
115 | 130 | ||
116 | /* Source parameters given in 16.16 fixed point, ignore fractional. */ | ||
117 | src_x = src_x >> 16; | ||
118 | src_y = src_y >> 16; | ||
119 | src_w = src_w >> 16; | ||
120 | src_h = src_h >> 16; | ||
121 | |||
122 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); | 131 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); |
123 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); | 132 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); |
124 | 133 | ||
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device) | |||
245 | { | 254 | { |
246 | struct nouveau_device *dev = nouveau_dev(device); | 255 | struct nouveau_device *dev = nouveau_dev(device); |
247 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); | 256 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); |
257 | int num_formats = ARRAY_SIZE(formats); | ||
248 | int ret; | 258 | int ret; |
249 | 259 | ||
250 | if (!plane) | 260 | if (!plane) |
251 | return; | 261 | return; |
252 | 262 | ||
263 | switch (dev->chipset) { | ||
264 | case 0x10: | ||
265 | case 0x11: | ||
266 | case 0x15: | ||
267 | case 0x1a: | ||
268 | case 0x20: | ||
269 | num_formats = 1; | ||
270 | break; | ||
271 | } | ||
272 | |||
253 | ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, | 273 | ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, |
254 | &nv10_plane_funcs, | 274 | &nv10_plane_funcs, |
255 | formats, ARRAY_SIZE(formats), false); | 275 | formats, num_formats, false); |
256 | if (ret) | 276 | if (ret) |
257 | goto err; | 277 | goto err; |
258 | 278 | ||
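
Editorial aside, not part of the patch: the overlay source rectangle arrives in 16.16 fixed point, and the hunk above moves the >>16 truncation ahead of the pitch and down-scaling checks so that they operate on whole pixels. A tiny self-contained illustration of that conversion:

        /* Illustrative only: dropping the fractional part of a 16.16 fixed-point
         * source width, as the re-ordered code above now does before validating. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t src_w_fixed = (720u << 16) | 0x8000;   /* 720.5 in 16.16 */
                uint32_t src_w = src_w_fixed >> 16;             /* fraction dropped */

                printf("0x%08x -> %u pixels\n", src_w_fixed, src_w);   /* 720 */
                return 0;
        }
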
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index cc4b208ce546..244822df8ffc 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | |||
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index) | |||
59 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 59 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); |
60 | 60 | ||
61 | return i2c->identify(i2c, i2c_index, "TV encoder", | 61 | return i2c->identify(i2c, i2c_index, "TV encoder", |
62 | nv04_tv_encoder_info, NULL); | 62 | nv04_tv_encoder_info, NULL, NULL); |
63 | } | 63 | } |
64 | 64 | ||
65 | 65 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 6828d81ed7b9..900fae01793e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
447 | if (ret) | 447 | if (ret) |
448 | goto done; | 448 | goto done; |
449 | 449 | ||
450 | info->offset = ntfy->node->offset; | ||
451 | |||
450 | done: | 452 | done: |
451 | if (ret) | 453 | if (ret) |
452 | nouveau_abi16_ntfy_fini(chan, ntfy); | 454 | nouveau_abi16_ntfy_fini(chan, ntfy); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 95c740454049..ba0183fb84f3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv { | |||
51 | bool dsm_detected; | 51 | bool dsm_detected; |
52 | bool optimus_detected; | 52 | bool optimus_detected; |
53 | acpi_handle dhandle; | 53 | acpi_handle dhandle; |
54 | acpi_handle other_handle; | ||
54 | acpi_handle rom_handle; | 55 | acpi_handle rom_handle; |
55 | } nouveau_dsm_priv; | 56 | } nouveau_dsm_priv; |
56 | 57 | ||
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
260 | if (!dhandle) | 261 | if (!dhandle) |
261 | return false; | 262 | return false; |
262 | 263 | ||
263 | if (!acpi_has_method(dhandle, "_DSM")) | 264 | if (!acpi_has_method(dhandle, "_DSM")) { |
265 | nouveau_dsm_priv.other_handle = dhandle; | ||
264 | return false; | 266 | return false; |
265 | 267 | } | |
266 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) | 268 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) |
267 | retval |= NOUVEAU_DSM_HAS_MUX; | 269 | retval |= NOUVEAU_DSM_HAS_MUX; |
268 | 270 | ||
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void) | |||
338 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", | 340 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", |
339 | acpi_method_name); | 341 | acpi_method_name); |
340 | nouveau_dsm_priv.dsm_detected = true; | 342 | nouveau_dsm_priv.dsm_detected = true; |
343 | /* | ||
344 | * On some systems hotplug events are generated for the device | ||
345 | * being switched off when _DSM is executed. They cause ACPI | ||
346 | * hotplug to trigger and attempt to remove the device from | ||
347 | * the system, which causes it to break down. Prevent that from | ||
348 | * happening by setting the no_hotplug flag for the involved | ||
349 | * ACPI device objects. | ||
350 | */ | ||
351 | acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle); | ||
352 | acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle); | ||
341 | ret = true; | 353 | ret = true; |
342 | } | 354 | } |
343 | 355 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7809d92183c4..25ea82f8def3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -608,8 +608,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
608 | fence = nouveau_fence_ref(new_bo->bo.sync_obj); | 608 | fence = nouveau_fence_ref(new_bo->bo.sync_obj); |
609 | spin_unlock(&new_bo->bo.bdev->fence_lock); | 609 | spin_unlock(&new_bo->bo.bdev->fence_lock); |
610 | ret = nouveau_fence_sync(fence, chan); | 610 | ret = nouveau_fence_sync(fence, chan); |
611 | nouveau_fence_unref(&fence); | ||
611 | if (ret) | 612 | if (ret) |
612 | return ret; | 613 | goto fail_free; |
613 | 614 | ||
614 | if (new_bo != old_bo) { | 615 | if (new_bo != old_bo) { |
615 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 616 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
701 | 702 | ||
702 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); | 703 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); |
703 | if (s->event) | 704 | if (s->event) |
704 | drm_send_vblank_event(dev, -1, s->event); | 705 | drm_send_vblank_event(dev, s->crtc, s->event); |
705 | 706 | ||
706 | list_del(&s->head); | 707 | list_del(&s->head); |
707 | if (ps) | 708 | if (ps) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7a3759f1c41a..98a22e6e27a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) | |||
858 | if (nouveau_runtime_pm == 0) | 858 | if (nouveau_runtime_pm == 0) |
859 | return -EINVAL; | 859 | return -EINVAL; |
860 | 860 | ||
861 | /* are we optimus enabled? */ | ||
862 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | ||
863 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | ||
864 | return -EINVAL; | ||
865 | } | ||
866 | |||
861 | nv_debug_level(SILENT); | 867 | nv_debug_level(SILENT); |
862 | drm_kms_helper_poll_disable(drm_dev); | 868 | drm_kms_helper_poll_disable(drm_dev); |
863 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); | 869 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f8e66c08b11a..4e384a2f99c3 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, | |||
1265 | uint32_t start, uint32_t size) | 1265 | uint32_t start, uint32_t size) |
1266 | { | 1266 | { |
1267 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1267 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
1268 | u32 end = max(start + size, (u32)256); | 1268 | u32 end = min_t(u32, start + size, 256); |
1269 | u32 i; | 1269 | u32 i; |
1270 | 1270 | ||
1271 | for (i = start; i < end; i++) { | 1271 | for (i = start; i < end; i++) { |
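
Editorial aside, not part of the patch: the gamma LUT holds 256 entries, so the loop bound must be clamped down with min(), not pushed up with max(); with the old code a start/size pair such as 200/100 produced end = 300 and walked past the table. A standalone sketch of the corrected clamp (clamp_end is a made-up name used only here):

        /* Illustrative only: clamping the gamma loop bound to the 256-entry LUT. */
        #include <stdio.h>

        static unsigned clamp_end(unsigned start, unsigned size)
        {
                unsigned end = start + size;

                return end < 256 ? end : 256;   /* min_t(u32, start + size, 256) */
        }

        int main(void)
        {
                printf("end = %u\n", clamp_end(200, 100));      /* prints: end = 256 */
                return 0;
        }
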
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 037d324bf58f..66ac0ff95f5a 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig | |||
@@ -8,5 +8,6 @@ config DRM_QXL | |||
8 | select DRM_KMS_HELPER | 8 | select DRM_KMS_HELPER |
9 | select DRM_KMS_FB_HELPER | 9 | select DRM_KMS_FB_HELPER |
10 | select DRM_TTM | 10 | select DRM_TTM |
11 | select CRC32 | ||
11 | help | 12 | help |
12 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. | 13 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5e827c29d194..d70aafb83307 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -24,7 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | 26 | ||
27 | #include "linux/crc32.h" | 27 | #include <linux/crc32.h> |
28 | 28 | ||
29 | #include "qxl_drv.h" | 29 | #include "qxl_drv.h" |
30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 80a20120e625..0b9621c9aeea 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | if (tiling_flags & RADEON_TILING_MACRO) { | 1145 | if (tiling_flags & RADEON_TILING_MACRO) { |
1146 | if (rdev->family >= CHIP_BONAIRE) | 1146 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); |
1147 | tmp = rdev->config.cik.tile_config; | ||
1148 | else if (rdev->family >= CHIP_TAHITI) | ||
1149 | tmp = rdev->config.si.tile_config; | ||
1150 | else if (rdev->family >= CHIP_CAYMAN) | ||
1151 | tmp = rdev->config.cayman.tile_config; | ||
1152 | else | ||
1153 | tmp = rdev->config.evergreen.tile_config; | ||
1154 | 1147 | ||
1155 | switch ((tmp & 0xf0) >> 4) { | 1148 | /* Set NUM_BANKS. */ |
1156 | case 0: /* 4 banks */ | 1149 | if (rdev->family >= CHIP_BONAIRE) { |
1157 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); | 1150 | unsigned tileb, index, num_banks, tile_split_bytes; |
1158 | break; | 1151 | |
1159 | case 1: /* 8 banks */ | 1152 | /* Calculate the macrotile mode index. */ |
1160 | default: | 1153 | tile_split_bytes = 64 << tile_split; |
1161 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); | 1154 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; |
1162 | break; | 1155 | tileb = min(tile_split_bytes, tileb); |
1163 | case 2: /* 16 banks */ | 1156 | |
1164 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); | 1157 | for (index = 0; tileb > 64; index++) { |
1165 | break; | 1158 | tileb >>= 1; |
1159 | } | ||
1160 | |||
1161 | if (index >= 16) { | ||
1162 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | ||
1163 | target_fb->bits_per_pixel, tile_split); | ||
1164 | return -EINVAL; | ||
1165 | } | ||
1166 | |||
1167 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | ||
1168 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | ||
1169 | } else { | ||
1170 | /* SI and older. */ | ||
1171 | if (rdev->family >= CHIP_TAHITI) | ||
1172 | tmp = rdev->config.si.tile_config; | ||
1173 | else if (rdev->family >= CHIP_CAYMAN) | ||
1174 | tmp = rdev->config.cayman.tile_config; | ||
1175 | else | ||
1176 | tmp = rdev->config.evergreen.tile_config; | ||
1177 | |||
1178 | switch ((tmp & 0xf0) >> 4) { | ||
1179 | case 0: /* 4 banks */ | ||
1180 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); | ||
1181 | break; | ||
1182 | case 1: /* 8 banks */ | ||
1183 | default: | ||
1184 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); | ||
1185 | break; | ||
1186 | case 2: /* 16 banks */ | ||
1187 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); | ||
1188 | break; | ||
1189 | } | ||
1166 | } | 1190 | } |
1167 | 1191 | ||
1168 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); | 1192 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); |
1169 | |||
1170 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | ||
1171 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); | 1193 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); |
1172 | fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); | 1194 | fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); |
1173 | fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); | 1195 | fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); |
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1180 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); | 1202 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); |
1181 | 1203 | ||
1182 | if (rdev->family >= CHIP_BONAIRE) { | 1204 | if (rdev->family >= CHIP_BONAIRE) { |
1183 | u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; | 1205 | /* Read the pipe config from the 2D TILED SCANOUT mode. |
1184 | u32 num_rb = rdev->config.cik.max_backends_per_se; | 1206 | * It should be the same for the other modes too, but not all |
1185 | if (num_pipe_configs > 8) | 1207 | * modes set the pipe config field. */ |
1186 | num_pipe_configs = 8; | 1208 | u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f; |
1187 | if (num_pipe_configs == 8) | 1209 | |
1188 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); | 1210 | fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config); |
1189 | else if (num_pipe_configs == 4) { | ||
1190 | if (num_rb == 4) | ||
1191 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16); | ||
1192 | else if (num_rb < 4) | ||
1193 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16); | ||
1194 | } else if (num_pipe_configs == 2) | ||
1195 | fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2); | ||
1196 | } else if ((rdev->family == CHIP_TAHITI) || | 1211 | } else if ((rdev->family == CHIP_TAHITI) || |
1197 | (rdev->family == CHIP_PITCAIRN)) | 1212 | (rdev->family == CHIP_PITCAIRN)) |
1198 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); | 1213 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); |
1199 | else if (rdev->family == CHIP_VERDE) | 1214 | else if ((rdev->family == CHIP_VERDE) || |
1215 | (rdev->family == CHIP_OLAND) || | ||
1216 | (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ | ||
1200 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); | 1217 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); |
1201 | 1218 | ||
1202 | switch (radeon_crtc->crtc_id) { | 1219 | switch (radeon_crtc->crtc_id) { |
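
Editorial aside, not part of the patch: the CIK path above derives the macrotile mode index from the number of bytes held by an 8x8 tile, capped at the tile-split size and halved until it fits in 64 bytes. Restated as a standalone helper (macrotile_index is a made-up name) so the arithmetic can be checked in isolation:

        /* Illustrative only: the macrotile mode index arithmetic used above. */
        #include <stdio.h>

        static unsigned macrotile_index(unsigned bits_per_pixel, unsigned tile_split)
        {
                unsigned tile_split_bytes = 64u << tile_split;
                unsigned tileb = 8 * 8 * bits_per_pixel / 8;    /* bytes in an 8x8 tile */
                unsigned index;

                if (tileb > tile_split_bytes)
                        tileb = tile_split_bytes;       /* min(tile_split_bytes, tileb) */

                for (index = 0; tileb > 64; index++)
                        tileb >>= 1;

                return index;                           /* caller rejects index >= 16 */
        }

        int main(void)
        {
                printf("index = %u\n", macrotile_index(32, 2)); /* prints: index = 2 */
                return 0;
        }
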
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 0652ee0a2098..f685035dbe39 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c | |||
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, | |||
44 | PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; | 44 | PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; |
45 | int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); | 45 | int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); |
46 | unsigned char *base; | 46 | unsigned char *base; |
47 | u16 out; | 47 | u16 out = cpu_to_le16(0); |
48 | 48 | ||
49 | memset(&args, 0, sizeof(args)); | 49 | memset(&args, 0, sizeof(args)); |
50 | 50 | ||
@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, | |||
55 | DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); | 55 | DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); |
56 | return -EINVAL; | 56 | return -EINVAL; |
57 | } | 57 | } |
58 | args.ucRegIndex = buf[0]; | 58 | if (buf == NULL) |
59 | if (num > 1) { | 59 | args.ucRegIndex = 0; |
60 | else | ||
61 | args.ucRegIndex = buf[0]; | ||
62 | if (num) | ||
60 | num--; | 63 | num--; |
64 | if (num) | ||
61 | memcpy(&out, &buf[1], num); | 65 | memcpy(&out, &buf[1], num); |
62 | } | ||
63 | args.lpI2CDataOut = cpu_to_le16(out); | 66 | args.lpI2CDataOut = cpu_to_le16(out); |
64 | } else { | 67 | } else { |
65 | if (num > ATOM_MAX_HW_I2C_READ) { | 68 | if (num > ATOM_MAX_HW_I2C_READ) { |
@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, | |||
96 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | 99 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); |
97 | struct i2c_msg *p; | 100 | struct i2c_msg *p; |
98 | int i, remaining, current_count, buffer_offset, max_bytes, ret; | 101 | int i, remaining, current_count, buffer_offset, max_bytes, ret; |
99 | u8 buf = 0, flags; | 102 | u8 flags; |
100 | 103 | ||
101 | /* check for bus probe */ | 104 | /* check for bus probe */ |
102 | p = &msgs[0]; | 105 | p = &msgs[0]; |
103 | if ((num == 1) && (p->len == 0)) { | 106 | if ((num == 1) && (p->len == 0)) { |
104 | ret = radeon_process_i2c_ch(i2c, | 107 | ret = radeon_process_i2c_ch(i2c, |
105 | p->addr, HW_I2C_WRITE, | 108 | p->addr, HW_I2C_WRITE, |
106 | &buf, 1); | 109 | NULL, 0); |
107 | if (ret) | 110 | if (ret) |
108 | return ret; | 111 | return ret; |
109 | else | 112 | else |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b43a3a3c9067..e950fabd7f5e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width) | |||
3057 | * Returns the disabled RB bitmask. | 3057 | * Returns the disabled RB bitmask. |
3058 | */ | 3058 | */ |
3059 | static u32 cik_get_rb_disabled(struct radeon_device *rdev, | 3059 | static u32 cik_get_rb_disabled(struct radeon_device *rdev, |
3060 | u32 max_rb_num, u32 se_num, | 3060 | u32 max_rb_num_per_se, |
3061 | u32 sh_per_se) | 3061 | u32 sh_per_se) |
3062 | { | 3062 | { |
3063 | u32 data, mask; | 3063 | u32 data, mask; |
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, | |||
3071 | 3071 | ||
3072 | data >>= BACKEND_DISABLE_SHIFT; | 3072 | data >>= BACKEND_DISABLE_SHIFT; |
3073 | 3073 | ||
3074 | mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); | 3074 | mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se); |
3075 | 3075 | ||
3076 | return data & mask; | 3076 | return data & mask; |
3077 | } | 3077 | } |
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev, | |||
3088 | */ | 3088 | */ |
3089 | static void cik_setup_rb(struct radeon_device *rdev, | 3089 | static void cik_setup_rb(struct radeon_device *rdev, |
3090 | u32 se_num, u32 sh_per_se, | 3090 | u32 se_num, u32 sh_per_se, |
3091 | u32 max_rb_num) | 3091 | u32 max_rb_num_per_se) |
3092 | { | 3092 | { |
3093 | int i, j; | 3093 | int i, j; |
3094 | u32 data, mask; | 3094 | u32 data, mask; |
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev, | |||
3098 | for (i = 0; i < se_num; i++) { | 3098 | for (i = 0; i < se_num; i++) { |
3099 | for (j = 0; j < sh_per_se; j++) { | 3099 | for (j = 0; j < sh_per_se; j++) { |
3100 | cik_select_se_sh(rdev, i, j); | 3100 | cik_select_se_sh(rdev, i, j); |
3101 | data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | 3101 | data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); |
3102 | if (rdev->family == CHIP_HAWAII) | 3102 | if (rdev->family == CHIP_HAWAII) |
3103 | disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); | 3103 | disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); |
3104 | else | 3104 | else |
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev, | |||
3108 | cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 3108 | cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
3109 | 3109 | ||
3110 | mask = 1; | 3110 | mask = 1; |
3111 | for (i = 0; i < max_rb_num; i++) { | 3111 | for (i = 0; i < max_rb_num_per_se * se_num; i++) { |
3112 | if (!(disabled_rbs & mask)) | 3112 | if (!(disabled_rbs & mask)) |
3113 | enabled_rbs |= mask; | 3113 | enabled_rbs |= mask; |
3114 | mask <<= 1; | 3114 | mask <<= 1; |
3115 | } | 3115 | } |
3116 | 3116 | ||
3117 | rdev->config.cik.backend_enable_mask = enabled_rbs; | ||
3118 | |||
3117 | for (i = 0; i < se_num; i++) { | 3119 | for (i = 0; i < se_num; i++) { |
3118 | cik_select_se_sh(rdev, i, 0xffffffff); | 3120 | cik_select_se_sh(rdev, i, 0xffffffff); |
3119 | data = 0; | 3121 | data = 0; |
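
Editorial aside, not part of the patch: with the fix above, the enabled-RB mask walks one bit per render backend across every shader engine (max_rb_num_per_se * se_num iterations) instead of only max_rb_num bits. A self-contained sketch of that mask construction, using made-up example numbers:

        /* Illustrative only: deriving the enabled-RB mask from the disabled-RB mask. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                unsigned se_num = 2, max_rb_num_per_se = 2;     /* 4 RBs in total */
                uint32_t disabled_rbs = 0x4;                    /* RB 2 fused off */
                uint32_t enabled_rbs = 0, mask = 1;
                unsigned i;

                for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                        if (!(disabled_rbs & mask))
                                enabled_rbs |= mask;
                        mask <<= 1;
                }

                printf("enabled_rbs = 0x%x\n", enabled_rbs);    /* prints: 0xb */
                return 0;
        }
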
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 0300727a4f70..d08b83c6267b 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev, | |||
458 | radeon_ring_write(ring, 0); /* src/dst endian swap */ | 458 | radeon_ring_write(ring, 0); /* src/dst endian swap */ |
459 | radeon_ring_write(ring, src_offset & 0xffffffff); | 459 | radeon_ring_write(ring, src_offset & 0xffffffff); |
460 | radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); | 460 | radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); |
461 | radeon_ring_write(ring, dst_offset & 0xfffffffc); | 461 | radeon_ring_write(ring, dst_offset & 0xffffffff); |
462 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); | 462 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); |
463 | src_offset += cur_size_in_bytes; | 463 | src_offset += cur_size_in_bytes; |
464 | dst_offset += cur_size_in_bytes; | 464 | dst_offset += cur_size_in_bytes; |
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 009f46e0ce72..713a5d359901 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) | |||
93 | struct radeon_device *rdev = encoder->dev->dev_private; | 93 | struct radeon_device *rdev = encoder->dev->dev_private; |
94 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 94 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
95 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 95 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
96 | u32 offset = dig->afmt->offset; | 96 | u32 offset; |
97 | 97 | ||
98 | if (!dig->afmt->pin) | 98 | if (!dig || !dig->afmt || !dig->afmt->pin) |
99 | return; | 99 | return; |
100 | 100 | ||
101 | offset = dig->afmt->offset; | ||
102 | |||
101 | WREG32(AFMT_AUDIO_SRC_CONTROL + offset, | 103 | WREG32(AFMT_AUDIO_SRC_CONTROL + offset, |
102 | AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); | 104 | AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); |
103 | } | 105 | } |
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, | |||
112 | struct radeon_connector *radeon_connector = NULL; | 114 | struct radeon_connector *radeon_connector = NULL; |
113 | u32 tmp = 0, offset; | 115 | u32 tmp = 0, offset; |
114 | 116 | ||
115 | if (!dig->afmt->pin) | 117 | if (!dig || !dig->afmt || !dig->afmt->pin) |
116 | return; | 118 | return; |
117 | 119 | ||
118 | offset = dig->afmt->pin->offset; | 120 | offset = dig->afmt->pin->offset; |
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) | |||
156 | u8 *sadb; | 158 | u8 *sadb; |
157 | int sad_count; | 159 | int sad_count; |
158 | 160 | ||
159 | if (!dig->afmt->pin) | 161 | if (!dig || !dig->afmt || !dig->afmt->pin) |
160 | return; | 162 | return; |
161 | 163 | ||
162 | offset = dig->afmt->pin->offset; | 164 | offset = dig->afmt->pin->offset; |
@@ -172,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) | |||
172 | } | 174 | } |
173 | 175 | ||
174 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); | 176 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); |
175 | if (sad_count < 0) { | 177 | if (sad_count <= 0) { |
176 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | 178 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
177 | return; | 179 | return; |
178 | } | 180 | } |
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) | |||
217 | { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, | 219 | { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, |
218 | }; | 220 | }; |
219 | 221 | ||
220 | if (!dig->afmt->pin) | 222 | if (!dig || !dig->afmt || !dig->afmt->pin) |
221 | return; | 223 | return; |
222 | 224 | ||
223 | offset = dig->afmt->pin->offset; | 225 | offset = dig->afmt->pin->offset; |
@@ -233,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) | |||
233 | } | 235 | } |
234 | 236 | ||
235 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); | 237 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); |
236 | if (sad_count < 0) { | 238 | if (sad_count <= 0) { |
237 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | 239 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
238 | return; | 240 | return; |
239 | } | 241 | } |
@@ -306,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev) | |||
306 | rdev->audio.enabled = true; | 308 | rdev->audio.enabled = true; |
307 | 309 | ||
308 | if (ASIC_IS_DCE8(rdev)) | 310 | if (ASIC_IS_DCE8(rdev)) |
309 | rdev->audio.num_pins = 7; | 311 | rdev->audio.num_pins = 6; |
312 | else if (ASIC_IS_DCE61(rdev)) | ||
313 | rdev->audio.num_pins = 4; | ||
310 | else | 314 | else |
311 | rdev->audio.num_pins = 6; | 315 | rdev->audio.num_pins = 6; |
312 | 316 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index aa695c4feb3d..0c6d5cef4cf1 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); | 120 | sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); |
121 | if (sad_count < 0) { | 121 | if (sad_count <= 0) { |
122 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | 122 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
123 | return; | 123 | return; |
124 | } | 124 | } |
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); | 175 | sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); |
176 | if (sad_count < 0) { | 176 | if (sad_count <= 0) { |
177 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | 177 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
178 | return; | 178 | return; |
179 | } | 179 | } |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 11aab2ab54ce..f59a9e9fccf8 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
895 | (rdev->pdev->device == 0x999C)) { | 895 | (rdev->pdev->device == 0x999C)) { |
896 | rdev->config.cayman.max_simds_per_se = 6; | 896 | rdev->config.cayman.max_simds_per_se = 6; |
897 | rdev->config.cayman.max_backends_per_se = 2; | 897 | rdev->config.cayman.max_backends_per_se = 2; |
898 | rdev->config.cayman.max_hw_contexts = 8; | ||
899 | rdev->config.cayman.sx_max_export_size = 256; | ||
900 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
901 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
898 | } else if ((rdev->pdev->device == 0x9903) || | 902 | } else if ((rdev->pdev->device == 0x9903) || |
899 | (rdev->pdev->device == 0x9904) || | 903 | (rdev->pdev->device == 0x9904) || |
900 | (rdev->pdev->device == 0x990A) || | 904 | (rdev->pdev->device == 0x990A) || |
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
905 | (rdev->pdev->device == 0x999D)) { | 909 | (rdev->pdev->device == 0x999D)) { |
906 | rdev->config.cayman.max_simds_per_se = 4; | 910 | rdev->config.cayman.max_simds_per_se = 4; |
907 | rdev->config.cayman.max_backends_per_se = 2; | 911 | rdev->config.cayman.max_backends_per_se = 2; |
912 | rdev->config.cayman.max_hw_contexts = 8; | ||
913 | rdev->config.cayman.sx_max_export_size = 256; | ||
914 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
915 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
908 | } else if ((rdev->pdev->device == 0x9919) || | 916 | } else if ((rdev->pdev->device == 0x9919) || |
909 | (rdev->pdev->device == 0x9990) || | 917 | (rdev->pdev->device == 0x9990) || |
910 | (rdev->pdev->device == 0x9991) || | 918 | (rdev->pdev->device == 0x9991) || |
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
915 | (rdev->pdev->device == 0x99A0)) { | 923 | (rdev->pdev->device == 0x99A0)) { |
916 | rdev->config.cayman.max_simds_per_se = 3; | 924 | rdev->config.cayman.max_simds_per_se = 3; |
917 | rdev->config.cayman.max_backends_per_se = 1; | 925 | rdev->config.cayman.max_backends_per_se = 1; |
926 | rdev->config.cayman.max_hw_contexts = 4; | ||
927 | rdev->config.cayman.sx_max_export_size = 128; | ||
928 | rdev->config.cayman.sx_max_export_pos_size = 32; | ||
929 | rdev->config.cayman.sx_max_export_smx_size = 96; | ||
918 | } else { | 930 | } else { |
919 | rdev->config.cayman.max_simds_per_se = 2; | 931 | rdev->config.cayman.max_simds_per_se = 2; |
920 | rdev->config.cayman.max_backends_per_se = 1; | 932 | rdev->config.cayman.max_backends_per_se = 1; |
933 | rdev->config.cayman.max_hw_contexts = 4; | ||
934 | rdev->config.cayman.sx_max_export_size = 128; | ||
935 | rdev->config.cayman.sx_max_export_pos_size = 32; | ||
936 | rdev->config.cayman.sx_max_export_smx_size = 96; | ||
921 | } | 937 | } |
922 | rdev->config.cayman.max_texture_channel_caches = 2; | 938 | rdev->config.cayman.max_texture_channel_caches = 2; |
923 | rdev->config.cayman.max_gprs = 256; | 939 | rdev->config.cayman.max_gprs = 256; |
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
925 | rdev->config.cayman.max_gs_threads = 32; | 941 | rdev->config.cayman.max_gs_threads = 32; |
926 | rdev->config.cayman.max_stack_entries = 512; | 942 | rdev->config.cayman.max_stack_entries = 512; |
927 | rdev->config.cayman.sx_num_of_sets = 8; | 943 | rdev->config.cayman.sx_num_of_sets = 8; |
928 | rdev->config.cayman.sx_max_export_size = 256; | ||
929 | rdev->config.cayman.sx_max_export_pos_size = 64; | ||
930 | rdev->config.cayman.sx_max_export_smx_size = 192; | ||
931 | rdev->config.cayman.max_hw_contexts = 8; | ||
932 | rdev->config.cayman.sq_num_cf_insts = 2; | 944 | rdev->config.cayman.sq_num_cf_insts = 2; |
933 | 945 | ||
934 | rdev->config.cayman.sc_prim_fifo_size = 0x40; | 946 | rdev->config.cayman.sc_prim_fifo_size = 0x40; |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index cdc003085a76..49c4d48f54d6 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, | |||
785 | struct ni_ps *ps = ni_get_ps(rps); | 785 | struct ni_ps *ps = ni_get_ps(rps); |
786 | struct radeon_clock_and_voltage_limits *max_limits; | 786 | struct radeon_clock_and_voltage_limits *max_limits; |
787 | bool disable_mclk_switching; | 787 | bool disable_mclk_switching; |
788 | u32 mclk, sclk; | 788 | u32 mclk; |
789 | u16 vddc, vddci; | 789 | u16 vddci; |
790 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | 790 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
791 | int i; | 791 | int i; |
792 | 792 | ||
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, | |||
839 | 839 | ||
840 | /* XXX validate the min clocks required for display */ | 840 | /* XXX validate the min clocks required for display */ |
841 | 841 | ||
842 | /* adjust low state */ | ||
842 | if (disable_mclk_switching) { | 843 | if (disable_mclk_switching) { |
843 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; | 844 | ps->performance_levels[0].mclk = |
844 | sclk = ps->performance_levels[0].sclk; | 845 | ps->performance_levels[ps->performance_level_count - 1].mclk; |
845 | vddc = ps->performance_levels[0].vddc; | 846 | ps->performance_levels[0].vddci = |
846 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; | 847 | ps->performance_levels[ps->performance_level_count - 1].vddci; |
847 | } else { | ||
848 | sclk = ps->performance_levels[0].sclk; | ||
849 | mclk = ps->performance_levels[0].mclk; | ||
850 | vddc = ps->performance_levels[0].vddc; | ||
851 | vddci = ps->performance_levels[0].vddci; | ||
852 | } | 848 | } |
853 | 849 | ||
854 | /* adjusted low state */ | ||
855 | ps->performance_levels[0].sclk = sclk; | ||
856 | ps->performance_levels[0].mclk = mclk; | ||
857 | ps->performance_levels[0].vddc = vddc; | ||
858 | ps->performance_levels[0].vddci = vddci; | ||
859 | |||
860 | btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, | 850 | btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, |
861 | &ps->performance_levels[0].sclk, | 851 | &ps->performance_levels[0].sclk, |
862 | &ps->performance_levels[0].mclk); | 852 | &ps->performance_levels[0].mclk); |
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, | |||
868 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | 858 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; |
869 | } | 859 | } |
870 | 860 | ||
861 | /* adjust remaining states */ | ||
871 | if (disable_mclk_switching) { | 862 | if (disable_mclk_switching) { |
872 | mclk = ps->performance_levels[0].mclk; | 863 | mclk = ps->performance_levels[0].mclk; |
864 | vddci = ps->performance_levels[0].vddci; | ||
873 | for (i = 1; i < ps->performance_level_count; i++) { | 865 | for (i = 1; i < ps->performance_level_count; i++) { |
874 | if (mclk < ps->performance_levels[i].mclk) | 866 | if (mclk < ps->performance_levels[i].mclk) |
875 | mclk = ps->performance_levels[i].mclk; | 867 | mclk = ps->performance_levels[i].mclk; |
868 | if (vddci < ps->performance_levels[i].vddci) | ||
869 | vddci = ps->performance_levels[i].vddci; | ||
876 | } | 870 | } |
877 | for (i = 0; i < ps->performance_level_count; i++) { | 871 | for (i = 0; i < ps->performance_level_count; i++) { |
878 | ps->performance_levels[i].mclk = mclk; | 872 | ps->performance_levels[i].mclk = mclk; |
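With memory-clock switching disabled, the rewritten ni_apply_state_adjust_rules() above no longer just copies the top performance level into level 0; it raises vddci along with mclk and then flattens every level to the highest value found across the state. A standalone sketch of that max-then-flatten pass (the struct is illustrative, not the driver's):

    #include <stdio.h>

    struct perf_level { unsigned mclk, vddci; };

    static void flatten_mclk_vddci(struct perf_level *lv, int count)
    {
        unsigned mclk = lv[0].mclk, vddci = lv[0].vddci;
        int i;

        for (i = 1; i < count; i++) {        /* find the maxima across all levels */
            if (mclk < lv[i].mclk)
                mclk = lv[i].mclk;
            if (vddci < lv[i].vddci)
                vddci = lv[i].vddci;
        }
        for (i = 0; i < count; i++) {        /* apply them everywhere */
            lv[i].mclk = mclk;
            lv[i].vddci = vddci;
        }
    }

    int main(void)
    {
        struct perf_level lv[3] = { {15000, 900}, {40000, 950}, {62500, 1000} };
        int i;

        flatten_mclk_vddci(lv, 3);
        for (i = 0; i < 3; i++)
            printf("level %d: mclk=%u vddci=%u\n", i, lv[i].mclk, lv[i].vddci);
        return 0;
    }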
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 4b89262f3f0e..b7d3ecba43e3 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
304 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | 304 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); |
305 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | 305 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
306 | } | 306 | } |
307 | } else if (ASIC_IS_DCE3(rdev)) { | 307 | } else { |
308 | /* according to the reg specs, this should DCE3.2 only, but in | 308 | /* according to the reg specs, this should DCE3.2 only, but in |
309 | * practice it seems to cover DCE3.0/3.1 as well. | 309 | * practice it seems to cover DCE2.0/3.0/3.1 as well. |
310 | */ | 310 | */ |
311 | if (dig->dig_encoder == 0) { | 311 | if (dig->dig_encoder == 0) { |
312 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 312 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); |
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
317 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); | 317 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); |
318 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | 318 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
319 | } | 319 | } |
320 | } else { | ||
321 | /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */ | ||
322 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | ||
323 | AUDIO_DTO_MODULE(clock / 10)); | ||
324 | } | 320 | } |
325 | } | 321 | } |
326 | 322 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ecf2a3960c07..45e1f447bc79 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1940,7 +1940,7 @@ struct si_asic { | |||
1940 | unsigned sc_earlyz_tile_fifo_size; | 1940 | unsigned sc_earlyz_tile_fifo_size; |
1941 | 1941 | ||
1942 | unsigned num_tile_pipes; | 1942 | unsigned num_tile_pipes; |
1943 | unsigned num_backends_per_se; | 1943 | unsigned backend_enable_mask; |
1944 | unsigned backend_disable_mask_per_asic; | 1944 | unsigned backend_disable_mask_per_asic; |
1945 | unsigned backend_map; | 1945 | unsigned backend_map; |
1946 | unsigned num_texture_channel_caches; | 1946 | unsigned num_texture_channel_caches; |
@@ -1970,7 +1970,7 @@ struct cik_asic { | |||
1970 | unsigned sc_earlyz_tile_fifo_size; | 1970 | unsigned sc_earlyz_tile_fifo_size; |
1971 | 1971 | ||
1972 | unsigned num_tile_pipes; | 1972 | unsigned num_tile_pipes; |
1973 | unsigned num_backends_per_se; | 1973 | unsigned backend_enable_mask; |
1974 | unsigned backend_disable_mask_per_asic; | 1974 | unsigned backend_disable_mask_per_asic; |
1975 | unsigned backend_map; | 1975 | unsigned backend_map; |
1976 | unsigned num_texture_channel_caches; | 1976 | unsigned num_texture_channel_caches; |
@@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev, | |||
2710 | struct radeon_vm *vm, | 2710 | struct radeon_vm *vm, |
2711 | struct radeon_fence *fence); | 2711 | struct radeon_fence *fence); |
2712 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); | 2712 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); |
2713 | int radeon_vm_bo_update_pte(struct radeon_device *rdev, | 2713 | int radeon_vm_bo_update(struct radeon_device *rdev, |
2714 | struct radeon_vm *vm, | 2714 | struct radeon_vm *vm, |
2715 | struct radeon_bo *bo, | 2715 | struct radeon_bo *bo, |
2716 | struct ttm_mem_reg *mem); | 2716 | struct ttm_mem_reg *mem); |
2717 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, | 2717 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, |
2718 | struct radeon_bo *bo); | 2718 | struct radeon_bo *bo); |
2719 | struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, | 2719 | struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e354ce94cdd1..c0425bb6223a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = { | |||
2021 | .hdmi_setmode = &evergreen_hdmi_setmode, | 2021 | .hdmi_setmode = &evergreen_hdmi_setmode, |
2022 | }, | 2022 | }, |
2023 | .copy = { | 2023 | .copy = { |
2024 | .blit = NULL, | 2024 | .blit = &cik_copy_cpdma, |
2025 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 2025 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
2026 | .dma = &cik_copy_dma, | 2026 | .dma = &cik_copy_dma, |
2027 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 2027 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = { | |||
2122 | .hdmi_setmode = &evergreen_hdmi_setmode, | 2122 | .hdmi_setmode = &evergreen_hdmi_setmode, |
2123 | }, | 2123 | }, |
2124 | .copy = { | 2124 | .copy = { |
2125 | .blit = NULL, | 2125 | .blit = &cik_copy_cpdma, |
2126 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 2126 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
2127 | .dma = &cik_copy_dma, | 2127 | .dma = &cik_copy_dma, |
2128 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 2128 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f79ee184ffd5..5c39bf7c3d88 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev, | |||
2918 | mpll_param->dll_speed = args.ucDllSpeed; | 2918 | mpll_param->dll_speed = args.ucDllSpeed; |
2919 | mpll_param->bwcntl = args.ucBWCntl; | 2919 | mpll_param->bwcntl = args.ucBWCntl; |
2920 | mpll_param->vco_mode = | 2920 | mpll_param->vco_mode = |
2921 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; | 2921 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK); |
2922 | mpll_param->yclk_sel = | 2922 | mpll_param->yclk_sel = |
2923 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; | 2923 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; |
2924 | mpll_param->qdr = | 2924 | mpll_param->qdr = |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 9d302eaeea15..485848f889f5 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv { | |||
33 | bool atpx_detected; | 33 | bool atpx_detected; |
34 | /* handle for device - and atpx */ | 34 | /* handle for device - and atpx */ |
35 | acpi_handle dhandle; | 35 | acpi_handle dhandle; |
36 | acpi_handle other_handle; | ||
36 | struct radeon_atpx atpx; | 37 | struct radeon_atpx atpx; |
37 | } radeon_atpx_priv; | 38 | } radeon_atpx_priv; |
38 | 39 | ||
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | |||
451 | return false; | 452 | return false; |
452 | 453 | ||
453 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); | 454 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); |
454 | if (ACPI_FAILURE(status)) | 455 | if (ACPI_FAILURE(status)) { |
456 | radeon_atpx_priv.other_handle = dhandle; | ||
455 | return false; | 457 | return false; |
456 | 458 | } | |
457 | radeon_atpx_priv.dhandle = dhandle; | 459 | radeon_atpx_priv.dhandle = dhandle; |
458 | radeon_atpx_priv.atpx.handle = atpx_handle; | 460 | radeon_atpx_priv.atpx.handle = atpx_handle; |
459 | return true; | 461 | return true; |
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void) | |||
530 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", | 532 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", |
531 | acpi_method_name); | 533 | acpi_method_name); |
532 | radeon_atpx_priv.atpx_detected = true; | 534 | radeon_atpx_priv.atpx_detected = true; |
535 | /* | ||
536 | * On some systems hotplug events are generated for the device | ||
537 | * being switched off when ATPX is executed. They cause ACPI | ||
538 | * hotplug to trigger and attempt to remove the device from | ||
539 | * the system, which causes it to break down. Prevent that from | ||
540 | * happening by setting the no_hotplug flag for the involved | ||
541 | * ACPI device objects. | ||
542 | */ | ||
543 | acpi_bus_no_hotplug(radeon_atpx_priv.dhandle); | ||
544 | acpi_bus_no_hotplug(radeon_atpx_priv.other_handle); | ||
533 | return true; | 545 | return true; |
534 | } | 546 | } |
535 | return false; | 547 | return false; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index f41594b2eeac..0b366169d64d 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, | |||
360 | struct radeon_bo *bo; | 360 | struct radeon_bo *bo; |
361 | int r; | 361 | int r; |
362 | 362 | ||
363 | r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); | 363 | r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); |
364 | if (r) { | 364 | if (r) { |
365 | return r; | 365 | return r; |
366 | } | 366 | } |
367 | list_for_each_entry(lobj, &parser->validated, tv.head) { | 367 | list_for_each_entry(lobj, &parser->validated, tv.head) { |
368 | bo = lobj->bo; | 368 | bo = lobj->bo; |
369 | r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); | 369 | r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem); |
370 | if (r) { | 370 | if (r) { |
371 | return r; | 371 | return r; |
372 | } | 372 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 9f5ff28864f6..db39ea36bf22 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -77,9 +77,10 @@ | |||
77 | * 2.33.0 - Add SI tiling mode array query | 77 | * 2.33.0 - Add SI tiling mode array query |
78 | * 2.34.0 - Add CIK tiling mode array query | 78 | * 2.34.0 - Add CIK tiling mode array query |
79 | * 2.35.0 - Add CIK macrotile mode array query | 79 | * 2.35.0 - Add CIK macrotile mode array query |
80 | * 2.36.0 - Fix CIK DCE tiling setup | ||
80 | */ | 81 | */ |
81 | #define KMS_DRIVER_MAJOR 2 | 82 | #define KMS_DRIVER_MAJOR 2 |
82 | #define KMS_DRIVER_MINOR 35 | 83 | #define KMS_DRIVER_MINOR 36 |
83 | #define KMS_DRIVER_PATCHLEVEL 0 | 84 | #define KMS_DRIVER_PATCHLEVEL 0 |
84 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 85 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
85 | int radeon_driver_unload_kms(struct drm_device *dev); | 86 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = { | |||
508 | #endif | 509 | #endif |
509 | }; | 510 | }; |
510 | 511 | ||
511 | |||
512 | static void | ||
513 | radeon_pci_shutdown(struct pci_dev *pdev) | ||
514 | { | ||
515 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
516 | |||
517 | radeon_driver_unload_kms(dev); | ||
518 | } | ||
519 | |||
520 | static struct drm_driver kms_driver = { | 512 | static struct drm_driver kms_driver = { |
521 | .driver_features = | 513 | .driver_features = |
522 | DRIVER_USE_AGP | | 514 | DRIVER_USE_AGP | |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = { | |||
586 | .probe = radeon_pci_probe, | 578 | .probe = radeon_pci_probe, |
587 | .remove = radeon_pci_remove, | 579 | .remove = radeon_pci_remove, |
588 | .driver.pm = &radeon_pm_ops, | 580 | .driver.pm = &radeon_pm_ops, |
589 | .shutdown = radeon_pci_shutdown, | ||
590 | }; | 581 | }; |
591 | 582 | ||
592 | static int __init radeon_init(void) | 583 | static int __init radeon_init(void) |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 543dcfae7e6f..00e0d449021c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -108,9 +108,10 @@ | |||
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | 109 | * 1.32- fixes for rv740 setup |
110 | * 1.33- Add r6xx/r7xx const buffer support | 110 | * 1.33- Add r6xx/r7xx const buffer support |
111 | * 1.34- fix evergreen/cayman GS register | ||
111 | */ | 112 | */ |
112 | #define DRIVER_MAJOR 1 | 113 | #define DRIVER_MAJOR 1 |
113 | #define DRIVER_MINOR 33 | 114 | #define DRIVER_MINOR 34 |
114 | #define DRIVER_PATCHLEVEL 0 | 115 | #define DRIVER_PATCHLEVEL 0 |
115 | 116 | ||
116 | long radeon_drm_ioctl(struct file *filp, | 117 | long radeon_drm_ioctl(struct file *filp, |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 3044e504f4ec..96e440061bdb 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon_trace.h" | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * GART | 35 | * GART |
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | |||
737 | for (i = 0; i < 2; ++i) { | 738 | for (i = 0; i < 2; ++i) { |
738 | if (choices[i]) { | 739 | if (choices[i]) { |
739 | vm->id = choices[i]; | 740 | vm->id = choices[i]; |
741 | trace_radeon_vm_grab_id(vm->id, ring); | ||
740 | return rdev->vm_manager.active[choices[i]]; | 742 | return rdev->vm_manager.active[choices[i]]; |
741 | } | 743 | } |
742 | } | 744 | } |
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, | |||
1116 | } | 1118 | } |
1117 | 1119 | ||
1118 | /** | 1120 | /** |
1119 | * radeon_vm_bo_update_pte - map a bo into the vm page table | 1121 | * radeon_vm_bo_update - map a bo into the vm page table |
1120 | * | 1122 | * |
1121 | * @rdev: radeon_device pointer | 1123 | * @rdev: radeon_device pointer |
1122 | * @vm: requested vm | 1124 | * @vm: requested vm |
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, | |||
1128 | * | 1130 | * |
1129 | * Object have to be reserved & global and local mutex must be locked! | 1131 | * Object have to be reserved & global and local mutex must be locked! |
1130 | */ | 1132 | */ |
1131 | int radeon_vm_bo_update_pte(struct radeon_device *rdev, | 1133 | int radeon_vm_bo_update(struct radeon_device *rdev, |
1132 | struct radeon_vm *vm, | 1134 | struct radeon_vm *vm, |
1133 | struct radeon_bo *bo, | 1135 | struct radeon_bo *bo, |
1134 | struct ttm_mem_reg *mem) | 1136 | struct ttm_mem_reg *mem) |
1135 | { | 1137 | { |
1136 | struct radeon_ib ib; | 1138 | struct radeon_ib ib; |
1137 | struct radeon_bo_va *bo_va; | 1139 | struct radeon_bo_va *bo_va; |
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
1176 | bo_va->valid = false; | 1178 | bo_va->valid = false; |
1177 | } | 1179 | } |
1178 | 1180 | ||
1181 | trace_radeon_vm_bo_update(bo_va); | ||
1182 | |||
1179 | nptes = radeon_bo_ngpu_pages(bo); | 1183 | nptes = radeon_bo_ngpu_pages(bo); |
1180 | 1184 | ||
1181 | /* assume two extra pdes in case the mapping overlaps the borders */ | 1185 | /* assume two extra pdes in case the mapping overlaps the borders */ |
@@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, | |||
1257 | mutex_lock(&rdev->vm_manager.lock); | 1261 | mutex_lock(&rdev->vm_manager.lock); |
1258 | mutex_lock(&bo_va->vm->mutex); | 1262 | mutex_lock(&bo_va->vm->mutex); |
1259 | if (bo_va->soffset) { | 1263 | if (bo_va->soffset) { |
1260 | r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); | 1264 | r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); |
1261 | } | 1265 | } |
1262 | mutex_unlock(&rdev->vm_manager.lock); | 1266 | mutex_unlock(&rdev->vm_manager.lock); |
1263 | list_del(&bo_va->vm_list); | 1267 | list_del(&bo_va->vm_list); |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 55d0b474bd37..21d593c0ecaf 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
461 | case RADEON_INFO_SI_CP_DMA_COMPUTE: | 461 | case RADEON_INFO_SI_CP_DMA_COMPUTE: |
462 | *value = 1; | 462 | *value = 1; |
463 | break; | 463 | break; |
464 | case RADEON_INFO_SI_BACKEND_ENABLED_MASK: | ||
465 | if (rdev->family >= CHIP_BONAIRE) { | ||
466 | *value = rdev->config.cik.backend_enable_mask; | ||
467 | } else if (rdev->family >= CHIP_TAHITI) { | ||
468 | *value = rdev->config.si.backend_enable_mask; | ||
469 | } else { | ||
470 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); | ||
471 | } | ||
472 | break; | ||
464 | default: | 473 | default: |
465 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 474 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
466 | return -EINVAL; | 475 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d1385ccc672c..984097b907ef 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
537 | struct device_attribute *attr, | 537 | struct device_attribute *attr, |
538 | char *buf) | 538 | char *buf) |
539 | { | 539 | { |
540 | struct drm_device *ddev = dev_get_drvdata(dev); | 540 | struct radeon_device *rdev = dev_get_drvdata(dev); |
541 | struct radeon_device *rdev = ddev->dev_private; | ||
542 | int temp; | 541 | int temp; |
543 | 542 | ||
544 | if (rdev->asic->pm.get_temperature) | 543 | if (rdev->asic->pm.get_temperature) |
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, | |||
553 | struct device_attribute *attr, | 552 | struct device_attribute *attr, |
554 | char *buf) | 553 | char *buf) |
555 | { | 554 | { |
556 | struct drm_device *ddev = dev_get_drvdata(dev); | 555 | struct radeon_device *rdev = dev_get_drvdata(dev); |
557 | struct radeon_device *rdev = ddev->dev_private; | ||
558 | int hyst = to_sensor_dev_attr(attr)->index; | 556 | int hyst = to_sensor_dev_attr(attr)->index; |
559 | int temp; | 557 | int temp; |
560 | 558 | ||
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, | |||
566 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | 564 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); |
567 | } | 565 | } |
568 | 566 | ||
569 | static ssize_t radeon_hwmon_show_name(struct device *dev, | ||
570 | struct device_attribute *attr, | ||
571 | char *buf) | ||
572 | { | ||
573 | return sprintf(buf, "radeon\n"); | ||
574 | } | ||
575 | |||
576 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); | 567 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); |
577 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); | 568 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); |
578 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); | 569 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); |
579 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); | ||
580 | 570 | ||
581 | static struct attribute *hwmon_attributes[] = { | 571 | static struct attribute *hwmon_attributes[] = { |
582 | &sensor_dev_attr_temp1_input.dev_attr.attr, | 572 | &sensor_dev_attr_temp1_input.dev_attr.attr, |
583 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | 573 | &sensor_dev_attr_temp1_crit.dev_attr.attr, |
584 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, | 574 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, |
585 | &sensor_dev_attr_name.dev_attr.attr, | ||
586 | NULL | 575 | NULL |
587 | }; | 576 | }; |
588 | 577 | ||
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, | |||
590 | struct attribute *attr, int index) | 579 | struct attribute *attr, int index) |
591 | { | 580 | { |
592 | struct device *dev = container_of(kobj, struct device, kobj); | 581 | struct device *dev = container_of(kobj, struct device, kobj); |
593 | struct drm_device *ddev = dev_get_drvdata(dev); | 582 | struct radeon_device *rdev = dev_get_drvdata(dev); |
594 | struct radeon_device *rdev = ddev->dev_private; | ||
595 | 583 | ||
596 | /* Skip limit attributes if DPM is not enabled */ | 584 | /* Skip limit attributes if DPM is not enabled */ |
597 | if (rdev->pm.pm_method != PM_METHOD_DPM && | 585 | if (rdev->pm.pm_method != PM_METHOD_DPM && |
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = { | |||
607 | .is_visible = hwmon_attributes_visible, | 595 | .is_visible = hwmon_attributes_visible, |
608 | }; | 596 | }; |
609 | 597 | ||
598 | static const struct attribute_group *hwmon_groups[] = { | ||
599 | &hwmon_attrgroup, | ||
600 | NULL | ||
601 | }; | ||
602 | |||
610 | static int radeon_hwmon_init(struct radeon_device *rdev) | 603 | static int radeon_hwmon_init(struct radeon_device *rdev) |
611 | { | 604 | { |
612 | int err = 0; | 605 | int err = 0; |
613 | 606 | struct device *hwmon_dev; | |
614 | rdev->pm.int_hwmon_dev = NULL; | ||
615 | 607 | ||
616 | switch (rdev->pm.int_thermal_type) { | 608 | switch (rdev->pm.int_thermal_type) { |
617 | case THERMAL_TYPE_RV6XX: | 609 | case THERMAL_TYPE_RV6XX: |
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev) | |||
624 | case THERMAL_TYPE_KV: | 616 | case THERMAL_TYPE_KV: |
625 | if (rdev->asic->pm.get_temperature == NULL) | 617 | if (rdev->asic->pm.get_temperature == NULL) |
626 | return err; | 618 | return err; |
627 | rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); | 619 | hwmon_dev = hwmon_device_register_with_groups(rdev->dev, |
628 | if (IS_ERR(rdev->pm.int_hwmon_dev)) { | 620 | "radeon", rdev, |
629 | err = PTR_ERR(rdev->pm.int_hwmon_dev); | 621 | hwmon_groups); |
622 | if (IS_ERR(hwmon_dev)) { | ||
623 | err = PTR_ERR(hwmon_dev); | ||
630 | dev_err(rdev->dev, | 624 | dev_err(rdev->dev, |
631 | "Unable to register hwmon device: %d\n", err); | 625 | "Unable to register hwmon device: %d\n", err); |
632 | break; | ||
633 | } | ||
634 | dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); | ||
635 | err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, | ||
636 | &hwmon_attrgroup); | ||
637 | if (err) { | ||
638 | dev_err(rdev->dev, | ||
639 | "Unable to create hwmon sysfs file: %d\n", err); | ||
640 | hwmon_device_unregister(rdev->dev); | ||
641 | } | 626 | } |
642 | break; | 627 | break; |
643 | default: | 628 | default: |
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev) | |||
647 | return err; | 632 | return err; |
648 | } | 633 | } |
649 | 634 | ||
650 | static void radeon_hwmon_fini(struct radeon_device *rdev) | ||
651 | { | ||
652 | if (rdev->pm.int_hwmon_dev) { | ||
653 | sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); | ||
654 | hwmon_device_unregister(rdev->pm.int_hwmon_dev); | ||
655 | } | ||
656 | } | ||
657 | |||
658 | static void radeon_dpm_thermal_work_handler(struct work_struct *work) | 635 | static void radeon_dpm_thermal_work_handler(struct work_struct *work) |
659 | { | 636 | { |
660 | struct radeon_device *rdev = | 637 | struct radeon_device *rdev = |
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev) | |||
1337 | 1314 | ||
1338 | if (rdev->pm.power_state) | 1315 | if (rdev->pm.power_state) |
1339 | kfree(rdev->pm.power_state); | 1316 | kfree(rdev->pm.power_state); |
1340 | |||
1341 | radeon_hwmon_fini(rdev); | ||
1342 | } | 1317 | } |
1343 | 1318 | ||
1344 | static void radeon_pm_fini_dpm(struct radeon_device *rdev) | 1319 | static void radeon_pm_fini_dpm(struct radeon_device *rdev) |
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev) | |||
1358 | 1333 | ||
1359 | if (rdev->pm.power_state) | 1334 | if (rdev->pm.power_state) |
1360 | kfree(rdev->pm.power_state); | 1335 | kfree(rdev->pm.power_state); |
1361 | |||
1362 | radeon_hwmon_fini(rdev); | ||
1363 | } | 1336 | } |
1364 | 1337 | ||
1365 | void radeon_pm_fini(struct radeon_device *rdev) | 1338 | void radeon_pm_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 9f0e18172b6e..0473257d4078 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h | |||
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs, | |||
47 | __entry->fences) | 47 | __entry->fences) |
48 | ); | 48 | ); |
49 | 49 | ||
50 | TRACE_EVENT(radeon_vm_grab_id, | ||
51 | TP_PROTO(unsigned vmid, int ring), | ||
52 | TP_ARGS(vmid, ring), | ||
53 | TP_STRUCT__entry( | ||
54 | __field(u32, vmid) | ||
55 | __field(u32, ring) | ||
56 | ), | ||
57 | |||
58 | TP_fast_assign( | ||
59 | __entry->vmid = vmid; | ||
60 | __entry->ring = ring; | ||
61 | ), | ||
62 | TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) | ||
63 | ); | ||
64 | |||
65 | TRACE_EVENT(radeon_vm_bo_update, | ||
66 | TP_PROTO(struct radeon_bo_va *bo_va), | ||
67 | TP_ARGS(bo_va), | ||
68 | TP_STRUCT__entry( | ||
69 | __field(u64, soffset) | ||
70 | __field(u64, eoffset) | ||
71 | __field(u32, flags) | ||
72 | ), | ||
73 | |||
74 | TP_fast_assign( | ||
75 | __entry->soffset = bo_va->soffset; | ||
76 | __entry->eoffset = bo_va->eoffset; | ||
77 | __entry->flags = bo_va->flags; | ||
78 | ), | ||
79 | TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", | ||
80 | __entry->soffset, __entry->eoffset, __entry->flags) | ||
81 | ); | ||
82 | |||
50 | TRACE_EVENT(radeon_vm_set_page, | 83 | TRACE_EVENT(radeon_vm_set_page, |
51 | TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, | 84 | TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, |
52 | uint32_t incr, uint32_t flags), | 85 | uint32_t incr, uint32_t flags), |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 373d088bac66..b9c0529b4a2e 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
473 | return -EINVAL; | 473 | return -EINVAL; |
474 | } | 474 | } |
475 | 475 | ||
476 | if ((start >> 28) != (end >> 28)) { | 476 | if ((start >> 28) != ((end - 1) >> 28)) { |
477 | DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", | 477 | DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", |
478 | start, end); | 478 | start, end); |
479 | return -EINVAL; | 479 | return -EINVAL; |
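The reloc check above was off by one for buffers that end exactly on a 256MB boundary: end is exclusive, so the last byte actually addressed is end - 1, and that is the byte that has to share the 256MB segment with start. A quick demonstration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    /* 1 if [start, end) crosses a 256MB segment boundary, using the fixed test */
    static int crosses_256mb(uint64_t start, uint64_t end)
    {
        return (start >> 28) != ((end - 1) >> 28);
    }

    int main(void)
    {
        uint64_t seg = 1ULL << 28;   /* 256MB */

        /* buffer ending exactly on the boundary: old test rejected it, new one passes */
        printf("old: %d  new: %d\n",
               (int)(((seg - 4096) >> 28) != (seg >> 28)),
               crosses_256mb(seg - 4096, seg));

        /* a buffer genuinely straddling the boundary is still rejected */
        printf("straddling: %d\n", crosses_256mb(seg - 4096, seg + 4096));
        return 0;
    }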
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index a072fa8c46b0..d46b58d078aa 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -21,7 +21,7 @@ cayman 0x9400 | |||
21 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE | 21 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE |
22 | 0x000089B0 VGT_HS_OFFCHIP_PARAM | 22 | 0x000089B0 VGT_HS_OFFCHIP_PARAM |
23 | 0x00008A14 PA_CL_ENHANCE | 23 | 0x00008A14 PA_CL_ENHANCE |
24 | 0x00008A60 PA_SC_LINE_STIPPLE_VALUE | 24 | 0x00008A60 PA_SU_LINE_STIPPLE_VALUE |
25 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | 25 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE |
26 | 0x00008BF0 PA_SC_ENHANCE | 26 | 0x00008BF0 PA_SC_ENHANCE |
27 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | 27 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ |
@@ -532,7 +532,7 @@ cayman 0x9400 | |||
532 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET | 532 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET |
533 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE | 533 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE |
534 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET | 534 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET |
535 | 0x00028B74 VGT_GS_INSTANCE_CNT | 535 | 0x00028B90 VGT_GS_INSTANCE_CNT |
536 | 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 | 536 | 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 |
537 | 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 | 537 | 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 |
538 | 0x00028BDC PA_SC_LINE_CNTL | 538 | 0x00028BDC PA_SC_LINE_CNTL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index b912a37689bf..57745c8761c8 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -22,7 +22,7 @@ evergreen 0x9400 | |||
22 | 0x000089A4 VGT_COMPUTE_START_Z | 22 | 0x000089A4 VGT_COMPUTE_START_Z |
23 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE | 23 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE |
24 | 0x00008A14 PA_CL_ENHANCE | 24 | 0x00008A14 PA_CL_ENHANCE |
25 | 0x00008A60 PA_SC_LINE_STIPPLE_VALUE | 25 | 0x00008A60 PA_SU_LINE_STIPPLE_VALUE |
26 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | 26 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE |
27 | 0x00008BF0 PA_SC_ENHANCE | 27 | 0x00008BF0 PA_SC_ENHANCE |
28 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | 28 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ |
@@ -545,7 +545,7 @@ evergreen 0x9400 | |||
545 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET | 545 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET |
546 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE | 546 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE |
547 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET | 547 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET |
548 | 0x00028B74 VGT_GS_INSTANCE_CNT | 548 | 0x00028B90 VGT_GS_INSTANCE_CNT |
549 | 0x00028C00 PA_SC_LINE_CNTL | 549 | 0x00028C00 PA_SC_LINE_CNTL |
550 | 0x00028C08 PA_SU_VTX_CNTL | 550 | 0x00028C08 PA_SU_VTX_CNTL |
551 | 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ | 551 | 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1c560629575a..e7dab069cccf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev) | |||
162 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 162 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
163 | base = G_000100_MC_FB_START(base) << 16; | 163 | base = G_000100_MC_FB_START(base) << 16; |
164 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 164 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
165 | /* Some boards seem to be configured for 128MB of sideport memory, | ||
166 | * but really only have 64MB. Just skip the sideport and use | ||
167 | * UMA memory. | ||
168 | */ | ||
169 | if (rdev->mc.igp_sideport_enabled && | ||
170 | (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { | ||
171 | base += 128 * 1024 * 1024; | ||
172 | rdev->mc.real_vram_size -= 128 * 1024 * 1024; | ||
173 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
174 | } | ||
165 | 175 | ||
166 | /* Use K8 direct mapping for fast fb access. */ | 176 | /* Use K8 direct mapping for fast fb access. */ |
167 | rdev->fastfb_working = false; | 177 | rdev->fastfb_working = false; |
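The rs690 hunk above works around boards whose BIOS claims 128MB of sideport memory when only 64MB is actually usable: if sideport is enabled and the reported VRAM is exactly 384MB, the driver skips the sideport window and falls back to UMA memory. The arithmetic, as a standalone sketch:

    #include <stdio.h>

    #define MB (1024ULL * 1024ULL)

    int main(void)
    {
        unsigned long long base = 0, real_vram = 384 * MB, mc_vram;
        int sideport_enabled = 1;

        /* misconfigured sideport: skip its 128MB window and use UMA only */
        if (sideport_enabled && real_vram == 384 * MB) {
            base      += 128 * MB;
            real_vram -= 128 * MB;
        }
        mc_vram = real_vram;

        printf("base=%lluMB real_vram=%lluMB mc_vram=%lluMB\n",
               base / MB, real_vram / MB, mc_vram / MB);
        return 0;
    }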
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 913b025ae9b3..374499db20c7 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) | |||
2328 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | 2328 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
2329 | ASIC_INTERNAL_MEMORY_SS, 0); | 2329 | ASIC_INTERNAL_MEMORY_SS, 0); |
2330 | 2330 | ||
2331 | /* disable ss, causes hangs on some cayman boards */ | ||
2332 | if (rdev->family == CHIP_CAYMAN) { | ||
2333 | pi->sclk_ss = false; | ||
2334 | pi->mclk_ss = false; | ||
2335 | } | ||
2336 | |||
2331 | if (pi->sclk_ss || pi->mclk_ss) | 2337 | if (pi->sclk_ss || pi->mclk_ss) |
2332 | pi->dynamic_ss = true; | 2338 | pi->dynamic_ss = true; |
2333 | else | 2339 | else |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6a64ccaa0695..85e1edfaa3be 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev, | |||
2811 | } | 2811 | } |
2812 | 2812 | ||
2813 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | 2813 | static u32 si_get_rb_disabled(struct radeon_device *rdev, |
2814 | u32 max_rb_num, u32 se_num, | 2814 | u32 max_rb_num_per_se, |
2815 | u32 sh_per_se) | 2815 | u32 sh_per_se) |
2816 | { | 2816 | { |
2817 | u32 data, mask; | 2817 | u32 data, mask; |
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev, | |||
2825 | 2825 | ||
2826 | data >>= BACKEND_DISABLE_SHIFT; | 2826 | data >>= BACKEND_DISABLE_SHIFT; |
2827 | 2827 | ||
2828 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | 2828 | mask = si_create_bitmask(max_rb_num_per_se / sh_per_se); |
2829 | 2829 | ||
2830 | return data & mask; | 2830 | return data & mask; |
2831 | } | 2831 | } |
2832 | 2832 | ||
2833 | static void si_setup_rb(struct radeon_device *rdev, | 2833 | static void si_setup_rb(struct radeon_device *rdev, |
2834 | u32 se_num, u32 sh_per_se, | 2834 | u32 se_num, u32 sh_per_se, |
2835 | u32 max_rb_num) | 2835 | u32 max_rb_num_per_se) |
2836 | { | 2836 | { |
2837 | int i, j; | 2837 | int i, j; |
2838 | u32 data, mask; | 2838 | u32 data, mask; |
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev, | |||
2842 | for (i = 0; i < se_num; i++) { | 2842 | for (i = 0; i < se_num; i++) { |
2843 | for (j = 0; j < sh_per_se; j++) { | 2843 | for (j = 0; j < sh_per_se; j++) { |
2844 | si_select_se_sh(rdev, i, j); | 2844 | si_select_se_sh(rdev, i, j); |
2845 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | 2845 | data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); |
2846 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | 2846 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); |
2847 | } | 2847 | } |
2848 | } | 2848 | } |
2849 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 2849 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
2850 | 2850 | ||
2851 | mask = 1; | 2851 | mask = 1; |
2852 | for (i = 0; i < max_rb_num; i++) { | 2852 | for (i = 0; i < max_rb_num_per_se * se_num; i++) { |
2853 | if (!(disabled_rbs & mask)) | 2853 | if (!(disabled_rbs & mask)) |
2854 | enabled_rbs |= mask; | 2854 | enabled_rbs |= mask; |
2855 | mask <<= 1; | 2855 | mask <<= 1; |
2856 | } | 2856 | } |
2857 | 2857 | ||
2858 | rdev->config.si.backend_enable_mask = enabled_rbs; | ||
2859 | |||
2858 | for (i = 0; i < se_num; i++) { | 2860 | for (i = 0; i < se_num; i++) { |
2859 | si_select_se_sh(rdev, i, 0xffffffff); | 2861 | si_select_se_sh(rdev, i, 0xffffffff); |
2860 | data = 0; | 2862 | data = 0; |
@@ -3882,8 +3884,15 @@ static int si_mc_init(struct radeon_device *rdev) | |||
3882 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 3884 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
3883 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 3885 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
3884 | /* size in MB on si */ | 3886 | /* size in MB on si */ |
3885 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; | 3887 | tmp = RREG32(CONFIG_MEMSIZE); |
3886 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; | 3888 | /* some boards may have garbage in the upper 16 bits */ |
3889 | if (tmp & 0xffff0000) { | ||
3890 | DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); | ||
3891 | if (tmp & 0xffff) | ||
3892 | tmp &= 0xffff; | ||
3893 | } | ||
3894 | rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL; | ||
3895 | rdev->mc.real_vram_size = rdev->mc.mc_vram_size; | ||
3887 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 3896 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
3888 | si_vram_gtt_location(rdev, &rdev->mc); | 3897 | si_vram_gtt_location(rdev, &rdev->mc); |
3889 | radeon_update_bandwidth_info(rdev); | 3898 | radeon_update_bandwidth_info(rdev); |
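si_mc_init() now reads CONFIG_MEMSIZE once and sanitizes it before converting to bytes: on some boards the register's upper 16 bits contain garbage, so when they are set and the low 16 bits still hold a non-zero size in MB, only the low half is kept. A standalone sketch of the same sanitization:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t vram_bytes(uint32_t memsize_reg)
    {
        /* some boards leave garbage in the upper 16 bits of the size-in-MB register */
        if (memsize_reg & 0xffff0000) {
            printf("Probable bad vram size: 0x%08x\n", memsize_reg);
            if (memsize_reg & 0xffff)
                memsize_reg &= 0xffff;   /* keep the plausible low half */
        }
        return (uint64_t)memsize_reg * 1024ULL * 1024ULL;
    }

    int main(void)
    {
        printf("%llu MB\n", (unsigned long long)(vram_bytes(0x00000800) >> 20));
        printf("%llu MB\n", (unsigned long long)(vram_bytes(0xdead0800) >> 20));
        return 0;
    }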
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 28e178137718..07eba596d458 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context, | |||
135 | unsigned int num_relocs = args->num_relocs; | 135 | unsigned int num_relocs = args->num_relocs; |
136 | unsigned int num_waitchks = args->num_waitchks; | 136 | unsigned int num_waitchks = args->num_waitchks; |
137 | struct drm_tegra_cmdbuf __user *cmdbufs = | 137 | struct drm_tegra_cmdbuf __user *cmdbufs = |
138 | (void * __user)(uintptr_t)args->cmdbufs; | 138 | (void __user *)(uintptr_t)args->cmdbufs; |
139 | struct drm_tegra_reloc __user *relocs = | 139 | struct drm_tegra_reloc __user *relocs = |
140 | (void * __user)(uintptr_t)args->relocs; | 140 | (void __user *)(uintptr_t)args->relocs; |
141 | struct drm_tegra_waitchk __user *waitchks = | 141 | struct drm_tegra_waitchk __user *waitchks = |
142 | (void * __user)(uintptr_t)args->waitchks; | 142 | (void __user *)(uintptr_t)args->waitchks; |
143 | struct drm_tegra_syncpt syncpt; | 143 | struct drm_tegra_syncpt syncpt; |
144 | struct host1x_job *job; | 144 | struct host1x_job *job; |
145 | int err; | 145 | int err; |
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context, | |||
163 | struct drm_tegra_cmdbuf cmdbuf; | 163 | struct drm_tegra_cmdbuf cmdbuf; |
164 | struct host1x_bo *bo; | 164 | struct host1x_bo *bo; |
165 | 165 | ||
166 | err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); | 166 | if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) { |
167 | if (err) | 167 | err = -EFAULT; |
168 | goto fail; | 168 | goto fail; |
169 | } | ||
169 | 170 | ||
170 | bo = host1x_bo_lookup(drm, file, cmdbuf.handle); | 171 | bo = host1x_bo_lookup(drm, file, cmdbuf.handle); |
171 | if (!bo) { | 172 | if (!bo) { |
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context, | |||
178 | cmdbufs++; | 179 | cmdbufs++; |
179 | } | 180 | } |
180 | 181 | ||
181 | err = copy_from_user(job->relocarray, relocs, | 182 | if (copy_from_user(job->relocarray, relocs, |
182 | sizeof(*relocs) * num_relocs); | 183 | sizeof(*relocs) * num_relocs)) { |
183 | if (err) | 184 | err = -EFAULT; |
184 | goto fail; | 185 | goto fail; |
186 | } | ||
185 | 187 | ||
186 | while (num_relocs--) { | 188 | while (num_relocs--) { |
187 | struct host1x_reloc *reloc = &job->relocarray[num_relocs]; | 189 | struct host1x_reloc *reloc = &job->relocarray[num_relocs]; |
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context, | |||
199 | } | 201 | } |
200 | } | 202 | } |
201 | 203 | ||
202 | err = copy_from_user(job->waitchk, waitchks, | 204 | if (copy_from_user(job->waitchk, waitchks, |
203 | sizeof(*waitchks) * num_waitchks); | 205 | sizeof(*waitchks) * num_waitchks)) { |
204 | if (err) | 206 | err = -EFAULT; |
205 | goto fail; | 207 | goto fail; |
208 | } | ||
206 | 209 | ||
207 | err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, | 210 | if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts, |
208 | sizeof(syncpt)); | 211 | sizeof(syncpt))) { |
209 | if (err) | 212 | err = -EFAULT; |
210 | goto fail; | 213 | goto fail; |
214 | } | ||
211 | 215 | ||
212 | job->is_addr_reg = context->client->ops->is_addr_reg; | 216 | job->is_addr_reg = context->client->ops->is_addr_reg; |
213 | job->syncpt_incrs = syncpt.incrs; | 217 | job->syncpt_incrs = syncpt.incrs; |
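The copy_from_user() call sites rewritten above all had the same problem: copy_from_user() returns the number of bytes it could not copy, not a negative errno, so storing that count in err and returning it would hand userspace a positive value instead of a fault. Each site now maps any non-zero return to -EFAULT. A userspace sketch of the pattern (the copy helper is a stand-in, not the kernel API):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* stand-in for copy_from_user(): returns the number of bytes NOT copied */
    static unsigned long copy_stub(void *dst, const void *src, unsigned long n,
                                   unsigned long simulate_uncopied)
    {
        memcpy(dst, src, n - simulate_uncopied);
        return simulate_uncopied;
    }

    static int submit(unsigned long simulate_uncopied)
    {
        const char user_buf[16] = "cmdbuf";
        char cmdbuf[16];
        int err;

        if (copy_stub(cmdbuf, user_buf, sizeof(cmdbuf), simulate_uncopied)) {
            err = -EFAULT;          /* map "bytes left over" to a real error code */
            goto fail;
        }
        return 0;

    fail:
        return err;
    }

    int main(void)
    {
        printf("success path: %d\n", submit(0));
        printf("fault path:   %d\n", submit(4));
        return 0;
    }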
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor) | |||
573 | } | 577 | } |
574 | #endif | 578 | #endif |
575 | 579 | ||
576 | struct drm_driver tegra_drm_driver = { | 580 | static struct drm_driver tegra_drm_driver = { |
577 | .driver_features = DRIVER_MODESET | DRIVER_GEM, | 581 | .driver_features = DRIVER_MODESET | DRIVER_GEM, |
578 | .load = tegra_drm_load, | 582 | .load = tegra_drm_load, |
579 | .unload = tegra_drm_unload, | 583 | .unload = tegra_drm_unload, |
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index fdfe259ed7f8..7da0b923131f 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h | |||
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client) | |||
116 | 116 | ||
117 | static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) | 117 | static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) |
118 | { | 118 | { |
119 | return container_of(crtc, struct tegra_dc, base); | 119 | return crtc ? container_of(crtc, struct tegra_dc, base) : NULL; |
120 | } | 120 | } |
121 | 121 | ||
122 | static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, | 122 | static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, |
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 490f7719e317..a3835e7de184 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c | |||
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, | |||
247 | info->var.yoffset * fb->pitches[0]; | 247 | info->var.yoffset * fb->pitches[0]; |
248 | 248 | ||
249 | drm->mode_config.fb_base = (resource_size_t)bo->paddr; | 249 | drm->mode_config.fb_base = (resource_size_t)bo->paddr; |
250 | info->screen_base = bo->vaddr + offset; | 250 | info->screen_base = (void __iomem *)bo->vaddr + offset; |
251 | info->screen_size = size; | 251 | info->screen_size = size; |
252 | info->fix.smem_start = (unsigned long)(bo->paddr + offset); | 252 | info->fix.smem_start = (unsigned long)(bo->paddr + offset); |
253 | info->fix.smem_len = size; | 253 | info->fix.smem_len = size; |
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index ba47ca4fb880..3b29018913a5 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | struct tegra_rgb { | 15 | struct tegra_rgb { |
16 | struct tegra_output output; | 16 | struct tegra_output output; |
17 | struct tegra_dc *dc; | ||
18 | |||
17 | struct clk *clk_parent; | 19 | struct clk *clk_parent; |
18 | struct clk *clk; | 20 | struct clk *clk; |
19 | }; | 21 | }; |
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, | |||
84 | 86 | ||
85 | static int tegra_output_rgb_enable(struct tegra_output *output) | 87 | static int tegra_output_rgb_enable(struct tegra_output *output) |
86 | { | 88 | { |
87 | struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); | 89 | struct tegra_rgb *rgb = to_rgb(output); |
88 | 90 | ||
89 | tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); | 91 | tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); |
90 | 92 | ||
91 | return 0; | 93 | return 0; |
92 | } | 94 | } |
93 | 95 | ||
94 | static int tegra_output_rgb_disable(struct tegra_output *output) | 96 | static int tegra_output_rgb_disable(struct tegra_output *output) |
95 | { | 97 | { |
96 | struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); | 98 | struct tegra_rgb *rgb = to_rgb(output); |
97 | 99 | ||
98 | tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); | 100 | tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); |
99 | 101 | ||
100 | return 0; | 102 | return 0; |
101 | } | 103 | } |
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) | |||
146 | 148 | ||
147 | rgb->output.dev = dc->dev; | 149 | rgb->output.dev = dc->dev; |
148 | rgb->output.of_node = np; | 150 | rgb->output.of_node = np; |
151 | rgb->dc = dc; | ||
149 | 152 | ||
150 | err = tegra_output_probe(&rgb->output); | 153 | err = tegra_output_probe(&rgb->output); |
151 | if (err < 0) | 154 | if (err < 0) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 15b86a94949d..406152152315 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
353 | * Don't move nonexistent data. Clear destination instead. | 353 | * Don't move nonexistent data. Clear destination instead. |
354 | */ | 354 | */ |
355 | if (old_iomap == NULL && | 355 | if (old_iomap == NULL && |
356 | (ttm == NULL || ttm->state == tt_unpopulated)) { | 356 | (ttm == NULL || (ttm->state == tt_unpopulated && |
357 | !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { | ||
357 | memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); | 358 | memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); |
358 | goto out2; | 359 | goto out2; |
359 | } | 360 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b249ab9b1eb2..6440eeac22d2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
169 | } | 169 | } |
170 | 170 | ||
171 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + | 171 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + |
172 | drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; | 172 | vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); |
173 | page_last = vma_pages(vma) + | 173 | page_last = vma_pages(vma) + vma->vm_pgoff - |
174 | drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; | 174 | drm_vma_node_start(&bo->vma_node); |
175 | 175 | ||
176 | if (unlikely(page_offset >= bo->num_pages)) { | 176 | if (unlikely(page_offset >= bo->num_pages)) { |
177 | retval = VM_FAULT_SIGBUS; | 177 | retval = VM_FAULT_SIGBUS; |
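The fault-handler hunk above fixes inverted offset arithmetic: the index of the faulting page inside the buffer object is the page offset within the mapping plus the mapping's starting page in the DRM address space (vm_pgoff), minus the object's starting page there (drm_vma_node_start()), not the other way around. The corrected computation, as a small sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* page index inside the buffer object for a faulting address */
    static uint64_t bo_page_offset(uint64_t address, uint64_t vm_start,
                                   uint64_t vm_pgoff, uint64_t node_start)
    {
        return ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff - node_start;
    }

    int main(void)
    {
        /* object occupies pages [1000, 1016) of the address space; the mapping
         * starts 4 pages into the object and the fault hits 2 pages after that */
        uint64_t node_start = 1000, vm_pgoff = 1004;
        uint64_t vm_start = 0x7f0000000000ULL;
        uint64_t address  = vm_start + (2 << PAGE_SHIFT);

        printf("page inside bo: %llu\n",   /* expect 6 */
               (unsigned long long)bo_page_offset(address, vm_start,
                                                  vm_pgoff, node_start));
        return 0;
    }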
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 24ffbe990736..8d67b943ac05 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) | |||
125 | 125 | ||
126 | static void udl_gem_put_pages(struct udl_gem_object *obj) | 126 | static void udl_gem_put_pages(struct udl_gem_object *obj) |
127 | { | 127 | { |
128 | if (obj->base.import_attach) { | ||
129 | drm_free_large(obj->pages); | ||
130 | obj->pages = NULL; | ||
131 | return; | ||
132 | } | ||
133 | |||
128 | drm_gem_put_pages(&obj->base, obj->pages, false, false); | 134 | drm_gem_put_pages(&obj->base, obj->pages, false, false); |
129 | obj->pages = NULL; | 135 | obj->pages = NULL; |
130 | } | 136 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 7776e6f0aef6..0489c6152482 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -150,6 +150,8 @@ struct vmw_ttm_tt { | |||
150 | bool mapped; | 150 | bool mapped; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); | ||
154 | |||
153 | /** | 155 | /** |
154 | * Helper functions to advance a struct vmw_piter iterator. | 156 | * Helper functions to advance a struct vmw_piter iterator. |
155 | * | 157 | * |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index db85985c7086..20890ad8408b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | |||
615 | * TTM buffer object driver - vmwgfx_buffer.c | 615 | * TTM buffer object driver - vmwgfx_buffer.c |
616 | */ | 616 | */ |
617 | 617 | ||
618 | extern const size_t vmw_tt_size; | ||
618 | extern struct ttm_placement vmw_vram_placement; | 619 | extern struct ttm_placement vmw_vram_placement; |
619 | extern struct ttm_placement vmw_vram_ne_placement; | 620 | extern struct ttm_placement vmw_vram_ne_placement; |
620 | extern struct ttm_placement vmw_vram_sys_placement; | 621 | extern struct ttm_placement vmw_vram_sys_placement; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a51f48e3e917..45d5b5ab6ca9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
68 | SVGA_FIFO_3D_HWVERSION)); | 68 | SVGA_FIFO_3D_HWVERSION)); |
69 | break; | 69 | break; |
70 | } | 70 | } |
71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | ||
72 | param->value = dev_priv->memory_size; | ||
73 | break; | ||
71 | default: | 74 | default: |
72 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 75 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
73 | param->param); | 76 | param->param); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index ecb3d867b426..03f1c2038631 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du) | |||
75 | vmw_surface_unreference(&du->cursor_surface); | 75 | vmw_surface_unreference(&du->cursor_surface); |
76 | if (du->cursor_dmabuf) | 76 | if (du->cursor_dmabuf) |
77 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | 77 | vmw_dmabuf_unreference(&du->cursor_dmabuf); |
78 | drm_sysfs_connector_remove(&du->connector); | ||
78 | drm_crtc_cleanup(&du->crtc); | 79 | drm_crtc_cleanup(&du->crtc); |
79 | drm_encoder_cleanup(&du->encoder); | 80 | drm_encoder_cleanup(&du->encoder); |
80 | drm_connector_cleanup(&du->connector); | 81 | drm_connector_cleanup(&du->connector); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 79f7e8e60529..a055a26819c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
260 | connector->encoder = NULL; | 260 | connector->encoder = NULL; |
261 | encoder->crtc = NULL; | 261 | encoder->crtc = NULL; |
262 | crtc->fb = NULL; | 262 | crtc->fb = NULL; |
263 | crtc->enabled = false; | ||
263 | 264 | ||
264 | vmw_ldu_del_active(dev_priv, ldu); | 265 | vmw_ldu_del_active(dev_priv, ldu); |
265 | 266 | ||
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
285 | crtc->x = set->x; | 286 | crtc->x = set->x; |
286 | crtc->y = set->y; | 287 | crtc->y = set->y; |
287 | crtc->mode = *mode; | 288 | crtc->mode = *mode; |
289 | crtc->enabled = true; | ||
288 | 290 | ||
289 | vmw_ldu_add_active(dev_priv, ldu, vfb); | 291 | vmw_ldu_add_active(dev_priv, ldu, vfb); |
290 | 292 | ||
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
369 | encoder->possible_crtcs = (1 << unit); | 371 | encoder->possible_crtcs = (1 << unit); |
370 | encoder->possible_clones = 0; | 372 | encoder->possible_clones = 0; |
371 | 373 | ||
374 | (void) drm_sysfs_connector_add(connector); | ||
375 | |||
372 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); | 376 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); |
373 | 377 | ||
374 | drm_mode_crtc_set_gamma_size(crtc, 256); | 378 | drm_mode_crtc_set_gamma_size(crtc, 256); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index efe2b74c5eb1..9b5ea2ac7ddf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, | |||
352 | /** | 352 | /** |
353 | * Buffer management. | 353 | * Buffer management. |
354 | */ | 354 | */ |
355 | |||
356 | /** | ||
357 | * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers | ||
358 | * | ||
359 | * @dev_priv: Pointer to a struct vmw_private identifying the device. | ||
360 | * @size: The requested buffer size. | ||
361 | * @user: Whether this is an ordinary dma buffer or a user dma buffer. | ||
362 | */ | ||
363 | static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size, | ||
364 | bool user) | ||
365 | { | ||
366 | static size_t struct_size, user_struct_size; | ||
367 | size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
368 | size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); | ||
369 | |||
370 | if (unlikely(struct_size == 0)) { | ||
371 | size_t backend_size = ttm_round_pot(vmw_tt_size); | ||
372 | |||
373 | struct_size = backend_size + | ||
374 | ttm_round_pot(sizeof(struct vmw_dma_buffer)); | ||
375 | user_struct_size = backend_size + | ||
376 | ttm_round_pot(sizeof(struct vmw_user_dma_buffer)); | ||
377 | } | ||
378 | |||
379 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) | ||
380 | page_array_size += | ||
381 | ttm_round_pot(num_pages * sizeof(dma_addr_t)); | ||
382 | |||
383 | return ((user) ? user_struct_size : struct_size) + | ||
384 | page_array_size; | ||
385 | } | ||
386 | |||
355 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | 387 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) |
356 | { | 388 | { |
357 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | 389 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); |
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | |||
359 | kfree(vmw_bo); | 391 | kfree(vmw_bo); |
360 | } | 392 | } |
361 | 393 | ||
394 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | ||
395 | { | ||
396 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | ||
397 | |||
398 | ttm_prime_object_kfree(vmw_user_bo, prime); | ||
399 | } | ||
400 | |||
362 | int vmw_dmabuf_init(struct vmw_private *dev_priv, | 401 | int vmw_dmabuf_init(struct vmw_private *dev_priv, |
363 | struct vmw_dma_buffer *vmw_bo, | 402 | struct vmw_dma_buffer *vmw_bo, |
364 | size_t size, struct ttm_placement *placement, | 403 | size_t size, struct ttm_placement *placement, |
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
368 | struct ttm_bo_device *bdev = &dev_priv->bdev; | 407 | struct ttm_bo_device *bdev = &dev_priv->bdev; |
369 | size_t acc_size; | 408 | size_t acc_size; |
370 | int ret; | 409 | int ret; |
410 | bool user = (bo_free == &vmw_user_dmabuf_destroy); | ||
371 | 411 | ||
372 | BUG_ON(!bo_free); | 412 | BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free))); |
373 | 413 | ||
374 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); | 414 | acc_size = vmw_dmabuf_acc_size(dev_priv, size, user); |
375 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | 415 | memset(vmw_bo, 0, sizeof(*vmw_bo)); |
376 | 416 | ||
377 | INIT_LIST_HEAD(&vmw_bo->res_list); | 417 | INIT_LIST_HEAD(&vmw_bo->res_list); |
378 | 418 | ||
379 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 419 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
380 | ttm_bo_type_device, placement, | 420 | (user) ? ttm_bo_type_device : |
421 | ttm_bo_type_kernel, placement, | ||
381 | 0, interruptible, | 422 | 0, interruptible, |
382 | NULL, acc_size, NULL, bo_free); | 423 | NULL, acc_size, NULL, bo_free); |
383 | return ret; | 424 | return ret; |
384 | } | 425 | } |
385 | 426 | ||
386 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | ||
387 | { | ||
388 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | ||
389 | |||
390 | ttm_prime_object_kfree(vmw_user_bo, prime); | ||
391 | } | ||
392 | |||
393 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | 427 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) |
394 | { | 428 | { |
395 | struct vmw_user_dma_buffer *vmw_user_bo; | 429 | struct vmw_user_dma_buffer *vmw_user_bo; |
@@ -781,54 +815,55 @@ err_ref: | |||
781 | } | 815 | } |
782 | 816 | ||
783 | 817 | ||
818 | /** | ||
819 | * vmw_dumb_create - Create a dumb kms buffer | ||
820 | * | ||
821 | * @file_priv: Pointer to a struct drm_file identifying the caller. | ||
822 | * @dev: Pointer to the drm device. | ||
823 | * @args: Pointer to a struct drm_mode_create_dumb structure | ||
824 | * | ||
825 | * This is a driver callback for the core drm create_dumb functionality. | ||
826 | * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except | ||
827 | * that the arguments have a different format. | ||
828 | */ | ||
784 | int vmw_dumb_create(struct drm_file *file_priv, | 829 | int vmw_dumb_create(struct drm_file *file_priv, |
785 | struct drm_device *dev, | 830 | struct drm_device *dev, |
786 | struct drm_mode_create_dumb *args) | 831 | struct drm_mode_create_dumb *args) |
787 | { | 832 | { |
788 | struct vmw_private *dev_priv = vmw_priv(dev); | 833 | struct vmw_private *dev_priv = vmw_priv(dev); |
789 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 834 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
790 | struct vmw_user_dma_buffer *vmw_user_bo; | 835 | struct vmw_dma_buffer *dma_buf; |
791 | struct ttm_buffer_object *tmp; | ||
792 | int ret; | 836 | int ret; |
793 | 837 | ||
794 | args->pitch = args->width * ((args->bpp + 7) / 8); | 838 | args->pitch = args->width * ((args->bpp + 7) / 8); |
795 | args->size = args->pitch * args->height; | 839 | args->size = args->pitch * args->height; |
796 | 840 | ||
797 | vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); | ||
798 | if (vmw_user_bo == NULL) | ||
799 | return -ENOMEM; | ||
800 | |||
801 | ret = ttm_read_lock(&vmaster->lock, true); | 841 | ret = ttm_read_lock(&vmaster->lock, true); |
802 | if (ret != 0) { | 842 | if (unlikely(ret != 0)) |
803 | kfree(vmw_user_bo); | ||
804 | return ret; | 843 | return ret; |
805 | } | ||
806 | 844 | ||
807 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, | 845 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
808 | &vmw_vram_sys_placement, true, | 846 | args->size, false, &args->handle, |
809 | &vmw_user_dmabuf_destroy); | 847 | &dma_buf); |
810 | if (ret != 0) | ||
811 | goto out_no_dmabuf; | ||
812 | |||
813 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | ||
814 | ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile, | ||
815 | args->size, | ||
816 | &vmw_user_bo->prime, | ||
817 | false, | ||
818 | ttm_buffer_type, | ||
819 | &vmw_user_dmabuf_release, NULL); | ||
820 | if (unlikely(ret != 0)) | 848 | if (unlikely(ret != 0)) |
821 | goto out_no_base_object; | 849 | goto out_no_dmabuf; |
822 | |||
823 | args->handle = vmw_user_bo->prime.base.hash.key; | ||
824 | 850 | ||
825 | out_no_base_object: | 851 | vmw_dmabuf_unreference(&dma_buf); |
826 | ttm_bo_unref(&tmp); | ||
827 | out_no_dmabuf: | 852 | out_no_dmabuf: |
828 | ttm_read_unlock(&vmaster->lock); | 853 | ttm_read_unlock(&vmaster->lock); |
829 | return ret; | 854 | return ret; |
830 | } | 855 | } |
831 | 856 | ||
857 | /** | ||
858 | * vmw_dumb_map_offset - Return the address space offset of a dumb buffer | ||
859 | * | ||
860 | * @file_priv: Pointer to a struct drm_file identifying the caller. | ||
861 | * @dev: Pointer to the drm device. | ||
862 | * @handle: Handle identifying the dumb buffer. | ||
863 | * @offset: The address space offset returned. | ||
864 | * | ||
865 | * This is a driver callback for the core drm dumb_map_offset functionality. | ||
866 | */ | ||
832 | int vmw_dumb_map_offset(struct drm_file *file_priv, | 867 | int vmw_dumb_map_offset(struct drm_file *file_priv, |
833 | struct drm_device *dev, uint32_t handle, | 868 | struct drm_device *dev, uint32_t handle, |
834 | uint64_t *offset) | 869 | uint64_t *offset) |
@@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, | |||
846 | return 0; | 881 | return 0; |
847 | } | 882 | } |
848 | 883 | ||
884 | /** | ||
885 | * vmw_dumb_destroy - Destroy a dumb buffer | ||
886 | * | ||
887 | * @file_priv: Pointer to a struct drm_file identifying the caller. | ||
888 | * @dev: Pointer to the drm device. | ||
889 | * @handle: Handle identifying the dumb buffer. | ||
890 | * | ||
891 | * This is a driver callback for the core drm dumb_destroy functionality. | ||
892 | */ | ||
849 | int vmw_dumb_destroy(struct drm_file *file_priv, | 893 | int vmw_dumb_destroy(struct drm_file *file_priv, |
850 | struct drm_device *dev, | 894 | struct drm_device *dev, |
851 | uint32_t handle) | 895 | uint32_t handle) |
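vmw_dmabuf_acc_size() above charges each buffer for its fixed bookkeeping (the TTM backend plus the vmw_dma_buffer or vmw_user_dma_buffer struct) and for a page-pointer array, with an extra dma_addr_t array when the device maps memory coherently. A minimal user-space sketch of that arithmetic follows; round_pot() here merely approximates ttm_round_pot() as round-up-to-the-next-power-of-two, and the 8-byte dma_addr_t and the struct_size argument are illustrative assumptions rather than the driver's real values.

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* Approximation of ttm_round_pot(), for illustration only. */
	static size_t round_pot(size_t sz)
	{
		size_t pot = 1;

		while (pot < sz)
			pot <<= 1;
		return pot;
	}

	static size_t acc_size_model(size_t bytes, size_t struct_size, int coherent)
	{
		size_t num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t page_array = round_pot(num_pages * sizeof(void *));

		if (coherent)	/* mirrors the vmw_dma_alloc_coherent case */
			page_array += round_pot(num_pages * sizeof(unsigned long long));

		return struct_size + page_array;
	}

	int main(void)
	{
		/* A 1 MiB buffer: 256 pages -> a 2 KiB page-pointer array. */
		printf("%zu\n", acc_size_model(1UL << 20, 512, 0));
		return 0;
	}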
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 26387c3d5a21..22406c8651ea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | |||
310 | crtc->fb = NULL; | 310 | crtc->fb = NULL; |
311 | crtc->x = 0; | 311 | crtc->x = 0; |
312 | crtc->y = 0; | 312 | crtc->y = 0; |
313 | crtc->enabled = false; | ||
313 | 314 | ||
314 | vmw_sou_del_active(dev_priv, sou); | 315 | vmw_sou_del_active(dev_priv, sou); |
315 | 316 | ||
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | |||
370 | crtc->fb = NULL; | 371 | crtc->fb = NULL; |
371 | crtc->x = 0; | 372 | crtc->x = 0; |
372 | crtc->y = 0; | 373 | crtc->y = 0; |
374 | crtc->enabled = false; | ||
373 | 375 | ||
374 | return ret; | 376 | return ret; |
375 | } | 377 | } |
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | |||
382 | crtc->fb = fb; | 384 | crtc->fb = fb; |
383 | crtc->x = set->x; | 385 | crtc->x = set->x; |
384 | crtc->y = set->y; | 386 | crtc->y = set->y; |
387 | crtc->enabled = true; | ||
385 | 388 | ||
386 | return 0; | 389 | return 0; |
387 | } | 390 | } |
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) | |||
464 | encoder->possible_crtcs = (1 << unit); | 467 | encoder->possible_crtcs = (1 << unit); |
465 | encoder->possible_clones = 0; | 468 | encoder->possible_clones = 0; |
466 | 469 | ||
470 | (void) drm_sysfs_connector_add(connector); | ||
471 | |||
467 | drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); | 472 | drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); |
468 | 473 | ||
469 | drm_mode_crtc_set_gamma_size(crtc, 256); | 474 | drm_mode_crtc_set_gamma_size(crtc, 256); |
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 509383f8be03..6a929591aa73 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | 21 | ||
22 | #include "bus.h" | ||
22 | #include "dev.h" | 23 | #include "dev.h" |
23 | 24 | ||
24 | static DEFINE_MUTEX(clients_lock); | 25 | static DEFINE_MUTEX(clients_lock); |
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x, | |||
257 | return -ENODEV; | 258 | return -ENODEV; |
258 | } | 259 | } |
259 | 260 | ||
260 | struct bus_type host1x_bus_type = { | 261 | static struct bus_type host1x_bus_type = { |
261 | .name = "host1x", | 262 | .name = "host1x", |
262 | }; | 263 | }; |
263 | 264 | ||
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x, | |||
301 | device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; | 302 | device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; |
302 | device->dev.dma_mask = &device->dev.coherent_dma_mask; | 303 | device->dev.dma_mask = &device->dev.coherent_dma_mask; |
303 | device->dev.release = host1x_device_release; | 304 | device->dev.release = host1x_device_release; |
304 | dev_set_name(&device->dev, driver->name); | 305 | dev_set_name(&device->dev, "%s", driver->name); |
305 | device->dev.bus = &host1x_bus_type; | 306 | device->dev.bus = &host1x_bus_type; |
306 | device->dev.parent = host1x->dev; | 307 | device->dev.parent = host1x->dev; |
307 | 308 | ||
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c index 37e2a63241a9..6b09b71940c2 100644 --- a/drivers/gpu/host1x/hw/cdma_hw.c +++ b/drivers/gpu/host1x/hw/cdma_hw.c | |||
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr, | |||
54 | u32 *p = (u32 *)((u32)pb->mapped + getptr); | 54 | u32 *p = (u32 *)((u32)pb->mapped + getptr); |
55 | *(p++) = HOST1X_OPCODE_NOP; | 55 | *(p++) = HOST1X_OPCODE_NOP; |
56 | *(p++) = HOST1X_OPCODE_NOP; | 56 | *(p++) = HOST1X_OPCODE_NOP; |
57 | dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, | 57 | dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__, |
58 | pb->phys + getptr); | 58 | (u64)pb->phys + getptr); |
59 | getptr = (getptr + 8) & (pb->size_bytes - 1); | 59 | getptr = (getptr + 8) & (pb->size_bytes - 1); |
60 | } | 60 | } |
61 | wmb(); | 61 | wmb(); |
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c index 640c75ca5a8b..f72c873eff81 100644 --- a/drivers/gpu/host1x/hw/debug_hw.c +++ b/drivers/gpu/host1x/hw/debug_hw.c | |||
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) | |||
163 | continue; | 163 | continue; |
164 | } | 164 | } |
165 | 165 | ||
166 | host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", | 166 | host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n", |
167 | g->base, g->offset, g->words); | 167 | (u64)g->base, g->offset, g->words); |
168 | 168 | ||
169 | show_gather(o, g->base + g->offset, g->words, cdma, | 169 | show_gather(o, g->base + g->offset, g->words, cdma, |
170 | g->base, mapped); | 170 | g->base, mapped); |
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index ecb5ca669e97..e77696367591 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c | |||
@@ -341,6 +341,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
341 | case USB_DEVICE_ID_GENIUS_GX_IMPERATOR: | 341 | case USB_DEVICE_ID_GENIUS_GX_IMPERATOR: |
342 | rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, | 342 | rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, |
343 | "Genius Gx Imperator Keyboard"); | 343 | "Genius Gx Imperator Keyboard"); |
344 | break; | ||
344 | case USB_DEVICE_ID_GENIUS_MANTICORE: | 345 | case USB_DEVICE_ID_GENIUS_MANTICORE: |
345 | rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, | 346 | rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, |
346 | "Genius Manticore Keyboard"); | 347 | "Genius Manticore Keyboard"); |
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index a184e1921c11..8fab82829f8b 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
@@ -112,13 +112,15 @@ static int sensor_hub_get_physical_device_count( | |||
112 | 112 | ||
113 | static void sensor_hub_fill_attr_info( | 113 | static void sensor_hub_fill_attr_info( |
114 | struct hid_sensor_hub_attribute_info *info, | 114 | struct hid_sensor_hub_attribute_info *info, |
115 | s32 index, s32 report_id, s32 units, s32 unit_expo, s32 size) | 115 | s32 index, s32 report_id, struct hid_field *field) |
116 | { | 116 | { |
117 | info->index = index; | 117 | info->index = index; |
118 | info->report_id = report_id; | 118 | info->report_id = report_id; |
119 | info->units = units; | 119 | info->units = field->unit; |
120 | info->unit_expo = unit_expo; | 120 | info->unit_expo = field->unit_exponent; |
121 | info->size = size/8; | 121 | info->size = (field->report_size * field->report_count)/8; |
122 | info->logical_minimum = field->logical_minimum; | ||
123 | info->logical_maximum = field->logical_maximum; | ||
122 | } | 124 | } |
123 | 125 | ||
124 | static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( | 126 | static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( |
@@ -325,9 +327,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, | |||
325 | if (field->physical == usage_id && | 327 | if (field->physical == usage_id && |
326 | field->logical == attr_usage_id) { | 328 | field->logical == attr_usage_id) { |
327 | sensor_hub_fill_attr_info(info, i, report->id, | 329 | sensor_hub_fill_attr_info(info, i, report->id, |
328 | field->unit, field->unit_exponent, | 330 | field); |
329 | field->report_size * | ||
330 | field->report_count); | ||
331 | ret = 0; | 331 | ret = 0; |
332 | } else { | 332 | } else { |
333 | for (j = 0; j < field->maxusage; ++j) { | 333 | for (j = 0; j < field->maxusage; ++j) { |
@@ -336,11 +336,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, | |||
336 | field->usage[j].collection_index == | 336 | field->usage[j].collection_index == |
337 | collection_index) { | 337 | collection_index) { |
338 | sensor_hub_fill_attr_info(info, | 338 | sensor_hub_fill_attr_info(info, |
339 | i, report->id, | 339 | i, report->id, field); |
340 | field->unit, | ||
341 | field->unit_exponent, | ||
342 | field->report_size * | ||
343 | field->report_count); | ||
344 | ret = 0; | 340 | ret = 0; |
345 | break; | 341 | break; |
346 | } | 342 | } |
@@ -573,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev, | |||
573 | goto err_free_names; | 569 | goto err_free_names; |
574 | } | 570 | } |
575 | sd->hid_sensor_hub_client_devs[ | 571 | sd->hid_sensor_hub_client_devs[ |
572 | sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO; | ||
573 | sd->hid_sensor_hub_client_devs[ | ||
576 | sd->hid_sensor_client_cnt].name = name; | 574 | sd->hid_sensor_client_cnt].name = name; |
577 | sd->hid_sensor_hub_client_devs[ | 575 | sd->hid_sensor_hub_client_devs[ |
578 | sd->hid_sensor_client_cnt].platform_data = | 576 | sd->hid_sensor_client_cnt].platform_data = |
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c index 2dc37c7c6947..7d68a08baaa8 100644 --- a/drivers/hwmon/hih6130.c +++ b/drivers/hwmon/hih6130.c | |||
@@ -43,6 +43,7 @@ | |||
43 | * @last_update: time of last update (jiffies) | 43 | * @last_update: time of last update (jiffies) |
44 | * @temperature: cached temperature measurement value | 44 | * @temperature: cached temperature measurement value |
45 | * @humidity: cached humidity measurement value | 45 | * @humidity: cached humidity measurement value |
46 | * @write_length: length for I2C measurement request | ||
46 | */ | 47 | */ |
47 | struct hih6130 { | 48 | struct hih6130 { |
48 | struct device *hwmon_dev; | 49 | struct device *hwmon_dev; |
@@ -51,6 +52,7 @@ struct hih6130 { | |||
51 | unsigned long last_update; | 52 | unsigned long last_update; |
52 | int temperature; | 53 | int temperature; |
53 | int humidity; | 54 | int humidity; |
55 | size_t write_length; | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | /** | 58 | /** |
@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client) | |||
121 | */ | 123 | */ |
122 | if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { | 124 | if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { |
123 | 125 | ||
124 | /* write to slave address, no data, to request a measurement */ | 126 | /* |
125 | ret = i2c_master_send(client, tmp, 0); | 127 | * Write to slave address to request a measurement. |
128 | * According to the datasheet it should be sent with no data, but | ||
129 | * for systems with I2C bus drivers that do not allow zero | ||
130 | * length packets we write one dummy byte to allow sensor | ||
131 | * measurements on them. | ||
132 | */ | ||
133 | tmp[0] = 0; | ||
134 | ret = i2c_master_send(client, tmp, hih6130->write_length); | ||
126 | if (ret < 0) | 135 | if (ret < 0) |
127 | goto out; | 136 | goto out; |
128 | 137 | ||
@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client, | |||
252 | goto fail_remove_sysfs; | 261 | goto fail_remove_sysfs; |
253 | } | 262 | } |
254 | 263 | ||
264 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK)) | ||
265 | hih6130->write_length = 1; | ||
266 | |||
255 | return 0; | 267 | return 0; |
256 | 268 | ||
257 | fail_remove_sysfs: | 269 | fail_remove_sysfs: |
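The hih6130 comment above explains the fallback: the sensor expects a zero-length write to trigger a measurement, but some I2C bus drivers reject zero-length transfers, so the driver checks for I2C_FUNC_SMBUS_QUICK at probe time and otherwise sends a single dummy byte. The request path then reduces to the sketch below; the helper name is hypothetical and only restates the pattern.

	/*
	 * Hypothetical helper, illustration only: write_length is 0 on
	 * adapters that support zero-length (SMBus Quick) transfers and
	 * 1 otherwise, as chosen at probe time.
	 */
	static int hih6130_request_measurement(struct i2c_client *client,
					       size_t write_length)
	{
		u8 dummy[1] = { 0 };

		return i2c_master_send(client, dummy, write_length);
	}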
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 6cf6bff79003..a2f3b4a365e4 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c | |||
@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) | |||
94 | { | 94 | { |
95 | if (rpm <= 0) | 95 | if (rpm <= 0) |
96 | return 255; | 96 | return 255; |
97 | if (rpm > 1350000) | ||
98 | return 1; | ||
97 | return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); | 99 | return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); |
98 | } | 100 | } |
99 | 101 | ||
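FAN_TO_REG() stores 1,350,000 divided by rpm * div (rounded), clamped to 1..254. The new early return matters because any rpm above 1,350,000 would round below the lower clamp anyway, and a very large user-supplied rpm could overflow the rpm * div product before the clamp is applied. A small stand-alone check of the conversion, using the same constants:

	#include <stdio.h>

	static unsigned char fan_to_reg(long rpm, int div)
	{
		long val;

		if (rpm <= 0)
			return 255;
		if (rpm > 1350000)
			return 1;
		val = (1350000 + rpm * div / 2) / (rpm * div);
		return val < 1 ? 1 : (val > 254 ? 254 : val);
	}

	int main(void)
	{
		printf("%u\n", fan_to_reg(4500, 2));	/* 1350000 / 9000 -> 150 */
		printf("%u\n", fan_to_reg(2000000, 8));	/* out of range -> 1 */
		return 0;
	}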
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 4c4c1421bf28..8b8f3aa49726 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c | |||
@@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client, | |||
1610 | "lm90", client); | 1610 | "lm90", client); |
1611 | if (err < 0) { | 1611 | if (err < 0) { |
1612 | dev_err(dev, "cannot request IRQ %d\n", client->irq); | 1612 | dev_err(dev, "cannot request IRQ %d\n", client->irq); |
1613 | goto exit_remove_files; | 1613 | goto exit_unregister; |
1614 | } | 1614 | } |
1615 | } | 1615 | } |
1616 | 1616 | ||
1617 | return 0; | 1617 | return 0; |
1618 | 1618 | ||
1619 | exit_unregister: | ||
1620 | hwmon_device_unregister(data->hwmon_dev); | ||
1619 | exit_remove_files: | 1621 | exit_remove_files: |
1620 | lm90_remove_files(client, data); | 1622 | lm90_remove_files(client, data); |
1621 | exit_restore: | 1623 | exit_restore: |
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 1404e6319deb..72a889702f0d 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c | |||
@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) | |||
141 | { | 141 | { |
142 | if (rpm <= 0) | 142 | if (rpm <= 0) |
143 | return 255; | 143 | return 255; |
144 | if (rpm > 1350000) | ||
145 | return 1; | ||
144 | return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); | 146 | return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); |
145 | } | 147 | } |
146 | 148 | ||
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 0e7017841f7d..aee14e2192f8 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c | |||
@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; | |||
145 | */ | 145 | */ |
146 | static inline u8 FAN_TO_REG(long rpm, int div) | 146 | static inline u8 FAN_TO_REG(long rpm, int div) |
147 | { | 147 | { |
148 | if (rpm == 0) | 148 | if (rpm <= 0 || rpm > 1310720) |
149 | return 0; | 149 | return 0; |
150 | return clamp_val(1310720 / (rpm * div), 1, 255); | 150 | return clamp_val(1310720 / (rpm * div), 1, 255); |
151 | } | 151 | } |
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index edb06cda5a68..6ed76ceb9270 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c | |||
@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr, | |||
481 | if (err) | 481 | if (err) |
482 | return err; | 482 | return err; |
483 | val = clamp_val(val, 0, 255); | 483 | val = clamp_val(val, 0, 255); |
484 | val = DIV_ROUND_CLOSEST(val, 0x11); | ||
484 | 485 | ||
485 | mutex_lock(&data->update_lock); | 486 | mutex_lock(&data->update_lock); |
486 | data->pwm[nr] = val; | 487 | data->pwm[nr] = val * 0x11; |
488 | val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0; | ||
487 | w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); | 489 | w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); |
488 | mutex_unlock(&data->update_lock); | 490 | mutex_unlock(&data->update_lock); |
489 | return count; | 491 | return count; |
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, | |||
510 | mutex_lock(&data->update_lock); | 512 | mutex_lock(&data->update_lock); |
511 | reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); | 513 | reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); |
512 | data->pwm_enable[nr] = val; | 514 | data->pwm_enable[nr] = val; |
513 | reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]); | 515 | reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]); |
514 | reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; | 516 | reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; |
515 | w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); | 517 | w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); |
516 | mutex_unlock(&data->update_lock); | 518 | mutex_unlock(&data->update_lock); |
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev) | |||
776 | ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) | 778 | ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) |
777 | ? 0 : 1; | 779 | ? 0 : 1; |
778 | data->pwm_enable[i] = | 780 | data->pwm_enable[i] = |
779 | ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1; | 781 | ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1; |
780 | data->pwm[i] = w83l786ng_read_value(client, | 782 | data->pwm[i] = |
781 | W83L786NG_REG_PWM[i]); | 783 | (w83l786ng_read_value(client, W83L786NG_REG_PWM[i]) |
784 | & 0x0f) * 0x11; | ||
782 | } | 785 | } |
783 | 786 | ||
784 | 787 | ||
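The pwm rework above follows from the register layout visible in the masks: only the low nibble of W83L786NG_REG_PWM holds the duty cycle (hence the & 0x0f on read and the preserved & 0xf0 bits on write), while sysfs exposes the usual 0..255 range, and 0x11 (17) is the scale factor because 15 * 17 = 255. A short user-space round trip of that arithmetic:

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		unsigned int sysfs_val = 200;				/* written via sysfs, 0..255 */
		unsigned int nibble = DIV_ROUND_CLOSEST(sysfs_val, 0x11);	/* 200 / 17 -> 12 */
		unsigned int readback = nibble * 0x11;			/* 12 * 17 = 204 */

		printf("stored nibble %u, reported duty %u\n", nibble, readback);
		return 0;
	}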
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1d7efa3169cd..d0cfbb4cb964 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) | |||
312 | 312 | ||
313 | dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); | 313 | dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); |
314 | 314 | ||
315 | clk_prepare_enable(i2c_imx->clk); | 315 | result = clk_prepare_enable(i2c_imx->clk); |
316 | if (result) | ||
317 | return result; | ||
316 | imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); | 318 | imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); |
317 | /* Enable I2C controller */ | 319 | /* Enable I2C controller */ |
318 | imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); | 320 | imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); |
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 797e3117bef7..2d0847b6be62 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
@@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, | |||
139 | priv->adap.algo = &priv->algo; | 139 | priv->adap.algo = &priv->algo; |
140 | priv->adap.algo_data = priv; | 140 | priv->adap.algo_data = priv; |
141 | priv->adap.dev.parent = &parent->dev; | 141 | priv->adap.dev.parent = &parent->dev; |
142 | priv->adap.retries = parent->retries; | ||
143 | priv->adap.timeout = parent->timeout; | ||
142 | 144 | ||
143 | /* Sanity check on class */ | 145 | /* Sanity check on class */ |
144 | if (i2c_mux_parent_classes(parent) & class) | 146 | if (i2c_mux_parent_classes(parent) & class) |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 92d1206482a6..797ed29a36ea 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table; | |||
123 | * which is also the index into the MWAIT hint array. | 123 | * which is also the index into the MWAIT hint array. |
124 | * Thus C0 is a dummy. | 124 | * Thus C0 is a dummy. |
125 | */ | 125 | */ |
126 | static struct cpuidle_state nehalem_cstates[] __initdata = { | 126 | static struct cpuidle_state nehalem_cstates[] = { |
127 | { | 127 | { |
128 | .name = "C1-NHM", | 128 | .name = "C1-NHM", |
129 | .desc = "MWAIT 0x00", | 129 | .desc = "MWAIT 0x00", |
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { | |||
156 | .enter = NULL } | 156 | .enter = NULL } |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static struct cpuidle_state snb_cstates[] __initdata = { | 159 | static struct cpuidle_state snb_cstates[] = { |
160 | { | 160 | { |
161 | .name = "C1-SNB", | 161 | .name = "C1-SNB", |
162 | .desc = "MWAIT 0x00", | 162 | .desc = "MWAIT 0x00", |
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { | |||
196 | .enter = NULL } | 196 | .enter = NULL } |
197 | }; | 197 | }; |
198 | 198 | ||
199 | static struct cpuidle_state ivb_cstates[] __initdata = { | 199 | static struct cpuidle_state ivb_cstates[] = { |
200 | { | 200 | { |
201 | .name = "C1-IVB", | 201 | .name = "C1-IVB", |
202 | .desc = "MWAIT 0x00", | 202 | .desc = "MWAIT 0x00", |
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { | |||
236 | .enter = NULL } | 236 | .enter = NULL } |
237 | }; | 237 | }; |
238 | 238 | ||
239 | static struct cpuidle_state hsw_cstates[] __initdata = { | 239 | static struct cpuidle_state hsw_cstates[] = { |
240 | { | 240 | { |
241 | .name = "C1-HSW", | 241 | .name = "C1-HSW", |
242 | .desc = "MWAIT 0x00", | 242 | .desc = "MWAIT 0x00", |
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { | |||
297 | .enter = NULL } | 297 | .enter = NULL } |
298 | }; | 298 | }; |
299 | 299 | ||
300 | static struct cpuidle_state atom_cstates[] __initdata = { | 300 | static struct cpuidle_state atom_cstates[] = { |
301 | { | 301 | { |
302 | .name = "C1E-ATM", | 302 | .name = "C1E-ATM", |
303 | .desc = "MWAIT 0x00", | 303 | .desc = "MWAIT 0x00", |
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { | |||
329 | { | 329 | { |
330 | .enter = NULL } | 330 | .enter = NULL } |
331 | }; | 331 | }; |
332 | static struct cpuidle_state avn_cstates[] __initdata = { | 332 | static struct cpuidle_state avn_cstates[] = { |
333 | { | 333 | { |
334 | .name = "C1-AVN", | 334 | .name = "C1-AVN", |
335 | .desc = "MWAIT 0x00", | 335 | .desc = "MWAIT 0x00", |
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = { | |||
344 | .exit_latency = 15, | 344 | .exit_latency = 15, |
345 | .target_residency = 45, | 345 | .target_residency = 45, |
346 | .enter = &intel_idle }, | 346 | .enter = &intel_idle }, |
347 | { | ||
348 | .enter = NULL } | ||
347 | }; | 349 | }; |
348 | 350 | ||
349 | /** | 351 | /** |
@@ -377,6 +379,9 @@ static int intel_idle(struct cpuidle_device *dev, | |||
377 | 379 | ||
378 | if (!current_set_polling_and_test()) { | 380 | if (!current_set_polling_and_test()) { |
379 | 381 | ||
382 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) | ||
383 | clflush((void *)¤t_thread_info()->flags); | ||
384 | |||
380 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 385 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
381 | smp_mb(); | 386 | smp_mb(); |
382 | if (!need_resched()) | 387 | if (!need_resched()) |
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index acb7f90359a3..749a6cadab8b 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c | |||
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { | |||
200 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 200 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
201 | .address = 1, | 201 | .address = 1, |
202 | .scan_index = 1, | 202 | .scan_index = 1, |
203 | .scan_type = IIO_ST('u', 12, 16, 0), | 203 | .scan_type = { |
204 | .sign = 'u', | ||
205 | .realbits = 12, | ||
206 | .storagebits = 16, | ||
207 | .shift = 0, | ||
208 | .endianness = IIO_BE, | ||
209 | }, | ||
204 | }, | 210 | }, |
205 | .channel[1] = { | 211 | .channel[1] = { |
206 | .type = IIO_VOLTAGE, | 212 | .type = IIO_VOLTAGE, |
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { | |||
210 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 216 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
211 | .address = 0, | 217 | .address = 0, |
212 | .scan_index = 0, | 218 | .scan_index = 0, |
213 | .scan_type = IIO_ST('u', 12, 16, 0), | 219 | .scan_type = { |
220 | .sign = 'u', | ||
221 | .realbits = 12, | ||
222 | .storagebits = 16, | ||
223 | .shift = 0, | ||
224 | .endianness = IIO_BE, | ||
225 | }, | ||
214 | }, | 226 | }, |
215 | .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), | 227 | .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), |
216 | .int_vref_mv = 2500, | 228 | .int_vref_mv = 2500, |
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig index 1178121b55b0..39188b72cd3b 100644 --- a/drivers/iio/common/hid-sensors/Kconfig +++ b/drivers/iio/common/hid-sensors/Kconfig | |||
@@ -25,13 +25,4 @@ config HID_SENSOR_IIO_TRIGGER | |||
25 | If this driver is compiled as a module, it will be named | 25 | If this driver is compiled as a module, it will be named |
26 | hid-sensor-trigger. | 26 | hid-sensor-trigger. |
27 | 27 | ||
28 | config HID_SENSOR_ENUM_BASE_QUIRKS | ||
29 | bool "ENUM base quirks for HID Sensor IIO drivers" | ||
30 | depends on HID_SENSOR_IIO_COMMON | ||
31 | help | ||
32 | Say yes here to build support for sensor hub FW using | ||
33 | enumeration, which is using 1 as base instead of 0. | ||
34 | Since logical minimum is still set 0 instead of 1, | ||
35 | there is no easy way to differentiate. | ||
36 | |||
37 | endmenu | 28 | endmenu |
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index bbd6426c9726..7dcf83998e6f 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c | |||
@@ -33,24 +33,34 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, | |||
33 | { | 33 | { |
34 | struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); | 34 | struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); |
35 | int state_val; | 35 | int state_val; |
36 | int report_val; | ||
36 | 37 | ||
37 | if (state) { | 38 | if (state) { |
38 | if (sensor_hub_device_open(st->hsdev)) | 39 | if (sensor_hub_device_open(st->hsdev)) |
39 | return -EIO; | 40 | return -EIO; |
40 | } else | 41 | state_val = |
42 | HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM; | ||
43 | report_val = | ||
44 | HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM; | ||
45 | |||
46 | } else { | ||
41 | sensor_hub_device_close(st->hsdev); | 47 | sensor_hub_device_close(st->hsdev); |
48 | state_val = | ||
49 | HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM; | ||
50 | report_val = | ||
51 | HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM; | ||
52 | } | ||
42 | 53 | ||
43 | state_val = state ? 1 : 0; | ||
44 | if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS)) | ||
45 | ++state_val; | ||
46 | st->data_ready = state; | 54 | st->data_ready = state; |
55 | state_val += st->power_state.logical_minimum; | ||
56 | report_val += st->report_state.logical_minimum; | ||
47 | sensor_hub_set_feature(st->hsdev, st->power_state.report_id, | 57 | sensor_hub_set_feature(st->hsdev, st->power_state.report_id, |
48 | st->power_state.index, | 58 | st->power_state.index, |
49 | (s32)state_val); | 59 | (s32)state_val); |
50 | 60 | ||
51 | sensor_hub_set_feature(st->hsdev, st->report_state.report_id, | 61 | sensor_hub_set_feature(st->hsdev, st->report_state.report_id, |
52 | st->report_state.index, | 62 | st->report_state.index, |
53 | (s32)state_val); | 63 | (s32)report_val); |
54 | 64 | ||
55 | return 0; | 65 | return 0; |
56 | } | 66 | } |
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 3fb7757a1028..368660dfe135 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = { | |||
651 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | 651 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), |
652 | .address = ADIS16448_BARO_OUT, | 652 | .address = ADIS16448_BARO_OUT, |
653 | .scan_index = ADIS16400_SCAN_BARO, | 653 | .scan_index = ADIS16400_SCAN_BARO, |
654 | .scan_type = IIO_ST('s', 16, 16, 0), | 654 | .scan_type = { |
655 | .sign = 's', | ||
656 | .realbits = 16, | ||
657 | .storagebits = 16, | ||
658 | .endianness = IIO_BE, | ||
659 | }, | ||
655 | }, | 660 | }, |
656 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), | 661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), |
657 | IIO_CHAN_SOFT_TIMESTAMP(11) | 662 | IIO_CHAN_SOFT_TIMESTAMP(11) |
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index b0d65df3ede2..a022f27c6690 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig | |||
@@ -43,6 +43,7 @@ config GP2AP020A00F | |||
43 | depends on I2C | 43 | depends on I2C |
44 | select IIO_BUFFER | 44 | select IIO_BUFFER |
45 | select IIO_TRIGGERED_BUFFER | 45 | select IIO_TRIGGERED_BUFFER |
46 | select IRQ_WORK | ||
46 | help | 47 | help |
47 | Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip | 48 | Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip |
48 | hooked to an I2C bus. | 49 | hooked to an I2C bus. |
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 21df57130018..0922e39b0ea9 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c | |||
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651, | |||
387 | return -EINVAL; | 387 | return -EINVAL; |
388 | } | 388 | } |
389 | 389 | ||
390 | return IIO_VAL_INT_PLUS_MICRO; | 390 | return IIO_VAL_INT; |
391 | } | 391 | } |
392 | 392 | ||
393 | static int cm36651_write_int_time(struct cm36651_data *cm36651, | 393 | static int cm36651_write_int_time(struct cm36651_data *cm36651, |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index c47c2034ca71..0717940ec3b5 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id) | |||
181 | static void rem_ref(struct iw_cm_id *cm_id) | 181 | static void rem_ref(struct iw_cm_id *cm_id) |
182 | { | 182 | { |
183 | struct iwcm_id_private *cm_id_priv; | 183 | struct iwcm_id_private *cm_id_priv; |
184 | int cb_destroy; | ||
185 | |||
184 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 186 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
185 | if (iwcm_deref_id(cm_id_priv) && | 187 | |
186 | test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { | 188 | /* |
189 | * Test bit before deref in case the cm_id gets freed on another | ||
190 | * thread. | ||
191 | */ | ||
192 | cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
193 | if (iwcm_deref_id(cm_id_priv) && cb_destroy) { | ||
187 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | 194 | BUG_ON(!list_empty(&cm_id_priv->work_list)); |
188 | free_cm_id(cm_id_priv); | 195 | free_cm_id(cm_id_priv); |
189 | } | 196 | } |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index bdc842e9faef..a283274a5a09 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
@@ -49,12 +49,20 @@ | |||
49 | 49 | ||
50 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ | 50 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ |
51 | do { \ | 51 | do { \ |
52 | (udata)->inbuf = (void __user *) (ibuf); \ | 52 | (udata)->inbuf = (const void __user *) (ibuf); \ |
53 | (udata)->outbuf = (void __user *) (obuf); \ | 53 | (udata)->outbuf = (void __user *) (obuf); \ |
54 | (udata)->inlen = (ilen); \ | 54 | (udata)->inlen = (ilen); \ |
55 | (udata)->outlen = (olen); \ | 55 | (udata)->outlen = (olen); \ |
56 | } while (0) | 56 | } while (0) |
57 | 57 | ||
58 | #define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \ | ||
59 | do { \ | ||
60 | (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \ | ||
61 | (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \ | ||
62 | (udata)->inlen = (ilen); \ | ||
63 | (udata)->outlen = (olen); \ | ||
64 | } while (0) | ||
65 | |||
58 | /* | 66 | /* |
59 | * Our lifetime rules for these structs are the following: | 67 | * Our lifetime rules for these structs are the following: |
60 | * | 68 | * |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 65f6e7dc380c..f1cc83855af6 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -2593,6 +2593,9 @@ out_put: | |||
2593 | static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, | 2593 | static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, |
2594 | union ib_flow_spec *ib_spec) | 2594 | union ib_flow_spec *ib_spec) |
2595 | { | 2595 | { |
2596 | if (kern_spec->reserved) | ||
2597 | return -EINVAL; | ||
2598 | |||
2596 | ib_spec->type = kern_spec->type; | 2599 | ib_spec->type = kern_spec->type; |
2597 | 2600 | ||
2598 | switch (ib_spec->type) { | 2601 | switch (ib_spec->type) { |
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
2646 | void *ib_spec; | 2649 | void *ib_spec; |
2647 | int i; | 2650 | int i; |
2648 | 2651 | ||
2652 | if (ucore->inlen < sizeof(cmd)) | ||
2653 | return -EINVAL; | ||
2654 | |||
2649 | if (ucore->outlen < sizeof(resp)) | 2655 | if (ucore->outlen < sizeof(resp)) |
2650 | return -ENOSPC; | 2656 | return -ENOSPC; |
2651 | 2657 | ||
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
2671 | (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) | 2677 | (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) |
2672 | return -EINVAL; | 2678 | return -EINVAL; |
2673 | 2679 | ||
2680 | if (cmd.flow_attr.reserved[0] || | ||
2681 | cmd.flow_attr.reserved[1]) | ||
2682 | return -EINVAL; | ||
2683 | |||
2674 | if (cmd.flow_attr.num_of_specs) { | 2684 | if (cmd.flow_attr.num_of_specs) { |
2675 | kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, | 2685 | kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, |
2676 | GFP_KERNEL); | 2686 | GFP_KERNEL); |
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
2731 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { | 2741 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { |
2732 | pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", | 2742 | pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", |
2733 | i, cmd.flow_attr.size); | 2743 | i, cmd.flow_attr.size); |
2744 | err = -EINVAL; | ||
2734 | goto err_free; | 2745 | goto err_free; |
2735 | } | 2746 | } |
2736 | flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); | 2747 | flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); |
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, | |||
2791 | struct ib_uobject *uobj; | 2802 | struct ib_uobject *uobj; |
2792 | int ret; | 2803 | int ret; |
2793 | 2804 | ||
2805 | if (ucore->inlen < sizeof(cmd)) | ||
2806 | return -EINVAL; | ||
2807 | |||
2794 | ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); | 2808 | ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); |
2795 | if (ret) | 2809 | if (ret) |
2796 | return ret; | 2810 | return ret; |
2797 | 2811 | ||
2812 | if (cmd.comp_mask) | ||
2813 | return -EINVAL; | ||
2814 | |||
2798 | uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, | 2815 | uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, |
2799 | file->ucontext); | 2816 | file->ucontext); |
2800 | if (!uobj) | 2817 | if (!uobj) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 34386943ebcf..08219fb3338b 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
668 | if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) | 668 | if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) |
669 | return -EINVAL; | 669 | return -EINVAL; |
670 | 670 | ||
671 | if (ex_hdr.cmd_hdr_reserved) | ||
672 | return -EINVAL; | ||
673 | |||
671 | if (ex_hdr.response) { | 674 | if (ex_hdr.response) { |
672 | if (!hdr.out_words && !ex_hdr.provider_out_words) | 675 | if (!hdr.out_words && !ex_hdr.provider_out_words) |
673 | return -EINVAL; | 676 | return -EINVAL; |
677 | |||
678 | if (!access_ok(VERIFY_WRITE, | ||
679 | (void __user *) (unsigned long) ex_hdr.response, | ||
680 | (hdr.out_words + ex_hdr.provider_out_words) * 8)) | ||
681 | return -EFAULT; | ||
674 | } else { | 682 | } else { |
675 | if (hdr.out_words || ex_hdr.provider_out_words) | 683 | if (hdr.out_words || ex_hdr.provider_out_words) |
676 | return -EINVAL; | 684 | return -EINVAL; |
677 | } | 685 | } |
678 | 686 | ||
679 | INIT_UDATA(&ucore, | 687 | INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response, |
680 | (hdr.in_words) ? buf : 0, | 688 | hdr.in_words * 8, hdr.out_words * 8); |
681 | (unsigned long)ex_hdr.response, | 689 | |
682 | hdr.in_words * 8, | 690 | INIT_UDATA_BUF_OR_NULL(&uhw, |
683 | hdr.out_words * 8); | 691 | buf + ucore.inlen, |
684 | 692 | (unsigned long) ex_hdr.response + ucore.outlen, | |
685 | INIT_UDATA(&uhw, | 693 | ex_hdr.provider_in_words * 8, |
686 | (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, | 694 | ex_hdr.provider_out_words * 8); |
687 | (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0, | ||
688 | ex_hdr.provider_in_words * 8, | ||
689 | ex_hdr.provider_out_words * 8); | ||
690 | 695 | ||
691 | err = uverbs_ex_cmd_table[command](file, | 696 | err = uverbs_ex_cmd_table[command](file, |
692 | &ucore, | 697 | &ucore, |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 12fef76c791c..45126879ad28 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |||
524 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | 524 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
525 | } | 525 | } |
526 | 526 | ||
527 | #define VLAN_NONE 0xfff | ||
528 | #define FILTER_SEL_VLAN_NONE 0xffff | ||
529 | #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ | ||
530 | #define FILTER_SEL_WIDTH_VIN_P_FC \ | ||
531 | (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ | ||
532 | #define FILTER_SEL_WIDTH_TAG_P_FC \ | ||
533 | (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ | ||
534 | #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) | ||
535 | |||
536 | static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, | ||
537 | struct l2t_entry *l2t) | ||
538 | { | ||
539 | unsigned int ntuple = 0; | ||
540 | u32 viid; | ||
541 | |||
542 | switch (dev->rdev.lldi.filt_mode) { | ||
543 | |||
544 | /* default filter mode */ | ||
545 | case HW_TPL_FR_MT_PR_IV_P_FC: | ||
546 | if (l2t->vlan == VLAN_NONE) | ||
547 | ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; | ||
548 | else { | ||
549 | ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; | ||
550 | ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC; | ||
551 | } | ||
552 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | ||
553 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | ||
554 | break; | ||
555 | case HW_TPL_FR_MT_PR_OV_P_FC: { | ||
556 | viid = cxgb4_port_viid(l2t->neigh->dev); | ||
557 | |||
558 | ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; | ||
559 | ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; | ||
560 | ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; | ||
561 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | ||
562 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | ||
563 | break; | ||
564 | } | ||
565 | default: | ||
566 | break; | ||
567 | } | ||
568 | return ntuple; | ||
569 | } | ||
570 | |||
571 | static int send_connect(struct c4iw_ep *ep) | 527 | static int send_connect(struct c4iw_ep *ep) |
572 | { | 528 | { |
573 | struct cpl_act_open_req *req; | 529 | struct cpl_act_open_req *req; |
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
641 | req->local_ip = la->sin_addr.s_addr; | 597 | req->local_ip = la->sin_addr.s_addr; |
642 | req->peer_ip = ra->sin_addr.s_addr; | 598 | req->peer_ip = ra->sin_addr.s_addr; |
643 | req->opt0 = cpu_to_be64(opt0); | 599 | req->opt0 = cpu_to_be64(opt0); |
644 | req->params = cpu_to_be32(select_ntuple(ep->com.dev, | 600 | req->params = cpu_to_be32(cxgb4_select_ntuple( |
645 | ep->dst, ep->l2t)); | 601 | ep->com.dev->rdev.lldi.ports[0], |
602 | ep->l2t)); | ||
646 | req->opt2 = cpu_to_be32(opt2); | 603 | req->opt2 = cpu_to_be32(opt2); |
647 | } else { | 604 | } else { |
648 | req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); | 605 | req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); |
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
662 | req6->peer_ip_lo = *((__be64 *) | 619 | req6->peer_ip_lo = *((__be64 *) |
663 | (ra6->sin6_addr.s6_addr + 8)); | 620 | (ra6->sin6_addr.s6_addr + 8)); |
664 | req6->opt0 = cpu_to_be64(opt0); | 621 | req6->opt0 = cpu_to_be64(opt0); |
665 | req6->params = cpu_to_be32( | 622 | req6->params = cpu_to_be32(cxgb4_select_ntuple( |
666 | select_ntuple(ep->com.dev, ep->dst, | 623 | ep->com.dev->rdev.lldi.ports[0], |
667 | ep->l2t)); | 624 | ep->l2t)); |
668 | req6->opt2 = cpu_to_be32(opt2); | 625 | req6->opt2 = cpu_to_be32(opt2); |
669 | } | 626 | } |
670 | } else { | 627 | } else { |
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
681 | t5_req->peer_ip = ra->sin_addr.s_addr; | 638 | t5_req->peer_ip = ra->sin_addr.s_addr; |
682 | t5_req->opt0 = cpu_to_be64(opt0); | 639 | t5_req->opt0 = cpu_to_be64(opt0); |
683 | t5_req->params = cpu_to_be64(V_FILTER_TUPLE( | 640 | t5_req->params = cpu_to_be64(V_FILTER_TUPLE( |
684 | select_ntuple(ep->com.dev, | 641 | cxgb4_select_ntuple( |
685 | ep->dst, ep->l2t))); | 642 | ep->com.dev->rdev.lldi.ports[0], |
643 | ep->l2t))); | ||
686 | t5_req->opt2 = cpu_to_be32(opt2); | 644 | t5_req->opt2 = cpu_to_be32(opt2); |
687 | } else { | 645 | } else { |
688 | t5_req6 = (struct cpl_t5_act_open_req6 *) | 646 | t5_req6 = (struct cpl_t5_act_open_req6 *) |
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep) | |||
703 | (ra6->sin6_addr.s6_addr + 8)); | 661 | (ra6->sin6_addr.s6_addr + 8)); |
704 | t5_req6->opt0 = cpu_to_be64(opt0); | 662 | t5_req6->opt0 = cpu_to_be64(opt0); |
705 | t5_req6->params = (__force __be64)cpu_to_be32( | 663 | t5_req6->params = (__force __be64)cpu_to_be32( |
706 | select_ntuple(ep->com.dev, ep->dst, ep->l2t)); | 664 | cxgb4_select_ntuple( |
665 | ep->com.dev->rdev.lldi.ports[0], | ||
666 | ep->l2t)); | ||
707 | t5_req6->opt2 = cpu_to_be32(opt2); | 667 | t5_req6->opt2 = cpu_to_be32(opt2); |
708 | } | 668 | } |
709 | } | 669 | } |
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) | |||
1630 | memset(req, 0, sizeof(*req)); | 1590 | memset(req, 0, sizeof(*req)); |
1631 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); | 1591 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); |
1632 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); | 1592 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); |
1633 | req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, | 1593 | req->le.filter = cpu_to_be32(cxgb4_select_ntuple( |
1594 | ep->com.dev->rdev.lldi.ports[0], | ||
1634 | ep->l2t)); | 1595 | ep->l2t)); |
1635 | sin = (struct sockaddr_in *)&ep->com.local_addr; | 1596 | sin = (struct sockaddr_in *)&ep->com.local_addr; |
1636 | req->le.lport = sin->sin_port; | 1597 | req->le.lport = sin->sin_port; |
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
2938 | /* | 2899 | /* |
2939 | * Allocate a server TID. | 2900 | * Allocate a server TID. |
2940 | */ | 2901 | */ |
2941 | if (dev->rdev.lldi.enable_fw_ofld_conn) | 2902 | if (dev->rdev.lldi.enable_fw_ofld_conn && |
2903 | ep->com.local_addr.ss_family == AF_INET) | ||
2942 | ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, | 2904 | ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, |
2943 | cm_id->local_addr.ss_family, ep); | 2905 | cm_id->local_addr.ss_family, ep); |
2944 | else | 2906 | else |
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3323 | /* | 3285 | /* |
3324 | * Calculate the server tid from filter hit index from cpl_rx_pkt. | 3286 | * Calculate the server tid from filter hit index from cpl_rx_pkt. |
3325 | */ | 3287 | */ |
3326 | stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) | 3288 | stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); |
3327 | - dev->rdev.lldi.tids->sftid_base | ||
3328 | + dev->rdev.lldi.tids->nstids; | ||
3329 | 3289 | ||
3330 | lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); | 3290 | lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); |
3331 | if (!lep) { | 3291 | if (!lep) { |
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3397 | window = (__force u16) htons((__force u16)tcph->window); | 3357 | window = (__force u16) htons((__force u16)tcph->window); |
3398 | 3358 | ||
3399 | /* Calculate filter portion for LE region. */ | 3359 | /* Calculate filter portion for LE region. */ |
3400 | filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); | 3360 | filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( |
3361 | dev->rdev.lldi.ports[0], | ||
3362 | e)); | ||
3401 | 3363 | ||
3402 | /* | 3364 | /* |
3403 | * Synthesize the cpl_pass_accept_req. We have everything except the | 3365 | * Synthesize the cpl_pass_accept_req. We have everything except the |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 4cb8eb24497c..84e45006451c 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
173 | return ret; | 173 | return ret; |
174 | } | 174 | } |
175 | 175 | ||
176 | int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) | 176 | static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) |
177 | { | 177 | { |
178 | u32 remain = len; | 178 | u32 remain = len; |
179 | u32 dmalen; | 179 | u32 dmalen; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index c29b5c838833..cdc7df4fdb8a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c | |||
@@ -31,6 +31,7 @@ | |||
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/netdevice.h> | 33 | #include <linux/netdevice.h> |
34 | #include <linux/if_arp.h> /* For ARPHRD_xxx */ | ||
34 | #include <linux/module.h> | 35 | #include <linux/module.h> |
35 | #include <net/rtnetlink.h> | 36 | #include <net/rtnetlink.h> |
36 | #include "ipoib.h" | 37 | #include "ipoib.h" |
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, | |||
103 | return -EINVAL; | 104 | return -EINVAL; |
104 | 105 | ||
105 | pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); | 106 | pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); |
106 | if (!pdev) | 107 | if (!pdev || pdev->type != ARPHRD_INFINIBAND) |
107 | return -ENODEV; | 108 | return -ENODEV; |
108 | 109 | ||
109 | ppriv = netdev_priv(pdev); | 110 | ppriv = netdev_priv(pdev); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6be57c38638d..9804fca6bf06 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) | |||
207 | isert_conn->conn_rx_descs = NULL; | 207 | isert_conn->conn_rx_descs = NULL; |
208 | } | 208 | } |
209 | 209 | ||
210 | static void isert_cq_tx_work(struct work_struct *); | ||
210 | static void isert_cq_tx_callback(struct ib_cq *, void *); | 211 | static void isert_cq_tx_callback(struct ib_cq *, void *); |
212 | static void isert_cq_rx_work(struct work_struct *); | ||
211 | static void isert_cq_rx_callback(struct ib_cq *, void *); | 213 | static void isert_cq_rx_callback(struct ib_cq *, void *); |
212 | 214 | ||
213 | static int | 215 | static int |
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device) | |||
259 | cq_desc[i].device = device; | 261 | cq_desc[i].device = device; |
260 | cq_desc[i].cq_index = i; | 262 | cq_desc[i].cq_index = i; |
261 | 263 | ||
264 | INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work); | ||
262 | device->dev_rx_cq[i] = ib_create_cq(device->ib_device, | 265 | device->dev_rx_cq[i] = ib_create_cq(device->ib_device, |
263 | isert_cq_rx_callback, | 266 | isert_cq_rx_callback, |
264 | isert_cq_event_callback, | 267 | isert_cq_event_callback, |
265 | (void *)&cq_desc[i], | 268 | (void *)&cq_desc[i], |
266 | ISER_MAX_RX_CQ_LEN, i); | 269 | ISER_MAX_RX_CQ_LEN, i); |
267 | if (IS_ERR(device->dev_rx_cq[i])) | 270 | if (IS_ERR(device->dev_rx_cq[i])) { |
271 | ret = PTR_ERR(device->dev_rx_cq[i]); | ||
272 | device->dev_rx_cq[i] = NULL; | ||
268 | goto out_cq; | 273 | goto out_cq; |
274 | } | ||
269 | 275 | ||
276 | INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); | ||
270 | device->dev_tx_cq[i] = ib_create_cq(device->ib_device, | 277 | device->dev_tx_cq[i] = ib_create_cq(device->ib_device, |
271 | isert_cq_tx_callback, | 278 | isert_cq_tx_callback, |
272 | isert_cq_event_callback, | 279 | isert_cq_event_callback, |
273 | (void *)&cq_desc[i], | 280 | (void *)&cq_desc[i], |
274 | ISER_MAX_TX_CQ_LEN, i); | 281 | ISER_MAX_TX_CQ_LEN, i); |
275 | if (IS_ERR(device->dev_tx_cq[i])) | 282 | if (IS_ERR(device->dev_tx_cq[i])) { |
283 | ret = PTR_ERR(device->dev_tx_cq[i]); | ||
284 | device->dev_tx_cq[i] = NULL; | ||
276 | goto out_cq; | 285 | goto out_cq; |
286 | } | ||
277 | 287 | ||
278 | if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) | 288 | ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); |
289 | if (ret) | ||
279 | goto out_cq; | 290 | goto out_cq; |
280 | 291 | ||
281 | if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) | 292 | ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); |
293 | if (ret) | ||
282 | goto out_cq; | 294 | goto out_cq; |
283 | } | 295 | } |
284 | 296 | ||
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context) | |||
1724 | { | 1736 | { |
1725 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 1737 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; |
1726 | 1738 | ||
1727 | INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work); | ||
1728 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); | 1739 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); |
1729 | } | 1740 | } |
1730 | 1741 | ||
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context) | |||
1768 | { | 1779 | { |
1769 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 1780 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; |
1770 | 1781 | ||
1771 | INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work); | ||
1772 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); | 1782 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); |
1773 | } | 1783 | } |
1774 | 1784 | ||
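The ib_isert hunks above move INIT_WORK() out of the completion callbacks and into isert_create_device_ib_res(), so each work item is initialized exactly once at setup rather than on every interrupt. A minimal sketch of that pattern, with illustrative my_* names that are not from the patch:

#include <linux/workqueue.h>

struct my_cq_desc {
	struct work_struct rx_work;
};

static void my_rx_work_fn(struct work_struct *work)
{
	struct my_cq_desc *desc = container_of(work, struct my_cq_desc, rx_work);
	/* drain completions for desc here; runs in process context */
	(void)desc;
}

static void my_setup(struct my_cq_desc *desc)
{
	INIT_WORK(&desc->rx_work, my_rx_work_fn);	/* once, at setup time */
}

static void my_cq_callback(struct my_cq_desc *desc)
{
	queue_work(system_wq, &desc->rx_work);		/* hot path only queues */
}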
diff --git a/drivers/input/input.c b/drivers/input/input.c index 846ccdd905b1..d2965e4b3224 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int | |||
1871 | break; | 1871 | break; |
1872 | 1872 | ||
1873 | case EV_ABS: | 1873 | case EV_ABS: |
1874 | input_alloc_absinfo(dev); | ||
1875 | if (!dev->absinfo) | ||
1876 | return; | ||
1877 | |||
1874 | __set_bit(code, dev->absbit); | 1878 | __set_bit(code, dev->absbit); |
1875 | break; | 1879 | break; |
1876 | 1880 | ||
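With the input core hunk above, calling input_alloc_absinfo() before setting the ABS bit means a device that declares an axis through input_set_capability() alone ends up with dev->absinfo allocated. A hedged usage sketch (the axis and range below are illustrative, not taken from the patch):

input_set_capability(input, EV_ABS, ABS_PRESSURE);
input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);	/* range still set explicitly */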
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index dbd2047f1641..3ed23513d881 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c | |||
@@ -536,7 +536,8 @@ static int adp5588_probe(struct i2c_client *client, | |||
536 | __set_bit(EV_REP, input->evbit); | 536 | __set_bit(EV_REP, input->evbit); |
537 | 537 | ||
538 | for (i = 0; i < input->keycodemax; i++) | 538 | for (i = 0; i < input->keycodemax; i++) |
539 | __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); | 539 | if (kpad->keycode[i] <= KEY_MAX) |
540 | __set_bit(kpad->keycode[i], input->keybit); | ||
540 | __clear_bit(KEY_RESERVED, input->keybit); | 541 | __clear_bit(KEY_RESERVED, input->keybit); |
541 | 542 | ||
542 | if (kpad->gpimapsize) | 543 | if (kpad->gpimapsize) |
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 67d12b3427c9..60dafd4fa692 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c | |||
@@ -992,7 +992,8 @@ static int adp5589_probe(struct i2c_client *client, | |||
992 | __set_bit(EV_REP, input->evbit); | 992 | __set_bit(EV_REP, input->evbit); |
993 | 993 | ||
994 | for (i = 0; i < input->keycodemax; i++) | 994 | for (i = 0; i < input->keycodemax; i++) |
995 | __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); | 995 | if (kpad->keycode[i] <= KEY_MAX) |
996 | __set_bit(kpad->keycode[i], input->keybit); | ||
996 | __clear_bit(KEY_RESERVED, input->keybit); | 997 | __clear_bit(KEY_RESERVED, input->keybit); |
997 | 998 | ||
998 | if (kpad->gpimapsize) | 999 | if (kpad->gpimapsize) |
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c index fc88fb48d70d..09b91d093087 100644 --- a/drivers/input/keyboard/bf54x-keys.c +++ b/drivers/input/keyboard/bf54x-keys.c | |||
@@ -289,7 +289,8 @@ static int bfin_kpad_probe(struct platform_device *pdev) | |||
289 | __set_bit(EV_REP, input->evbit); | 289 | __set_bit(EV_REP, input->evbit); |
290 | 290 | ||
291 | for (i = 0; i < input->keycodemax; i++) | 291 | for (i = 0; i < input->keycodemax; i++) |
292 | __set_bit(bf54x_kpad->keycode[i] & KEY_MAX, input->keybit); | 292 | if (bf54x_kpad->keycode[i] <= KEY_MAX) |
293 | __set_bit(bf54x_kpad->keycode[i], input->keybit); | ||
293 | __clear_bit(KEY_RESERVED, input->keybit); | 294 | __clear_bit(KEY_RESERVED, input->keybit); |
294 | 295 | ||
295 | error = input_register_device(input); | 296 | error = input_register_device(input); |
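The three keypad hunks above (adp5588, adp5589, bf54x) share one fix: masking a keycode with KEY_MAX does not reject an out-of-range value, it silently folds it onto a different, valid code. A small worked example of the difference (the 0x300 value is illustrative; KEY_MAX is 0x2ff):

/* keycode = 0x300, out of range since KEY_MAX == 0x2ff */
__set_bit(0x300 & KEY_MAX, keybit);	/* old: sets bit 0x200 -- wrong key */

if (0x300 <= KEY_MAX)			/* new: condition is false ...      */
	__set_bit(0x300, keybit);	/* ... so nothing gets set          */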
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 0735de3a6468..1cb1da294419 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c | |||
@@ -158,7 +158,7 @@ | |||
158 | 158 | ||
159 | /* ORIENT ADXL346 only */ | 159 | /* ORIENT ADXL346 only */ |
160 | #define ADXL346_2D_VALID (1 << 6) | 160 | #define ADXL346_2D_VALID (1 << 6) |
161 | #define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) | 161 | #define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4) |
162 | #define ADXL346_3D_VALID (1 << 3) | 162 | #define ADXL346_3D_VALID (1 << 3) |
163 | #define ADXL346_3D_ORIENT(x) ((x) & 0x7) | 163 | #define ADXL346_3D_ORIENT(x) ((x) & 0x7) |
164 | #define ADXL346_2D_PORTRAIT_POS 0 /* +X */ | 164 | #define ADXL346_2D_PORTRAIT_POS 0 /* +X */ |
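The adxl34x change is a one-character mask fix: the 2-D orientation field sits in bits 5:4, so masking with 0x3 before shifting right by 4 always produced 0. A worked example with an illustrative register value:

/* ORIENT register reads 0x25 (illustrative) */
(0x25 & 0x03) >> 4;	/* old macro body: 0x01 >> 4 == 0, field lost    */
(0x25 & 0x30) >> 4;	/* new macro body: 0x20 >> 4 == 2, bits 5:4 kept */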
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c index e37392976fdd..0deca5a3c87f 100644 --- a/drivers/input/misc/pcf8574_keypad.c +++ b/drivers/input/misc/pcf8574_keypad.c | |||
@@ -113,9 +113,12 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i | |||
113 | idev->keycodemax = ARRAY_SIZE(lp->btncode); | 113 | idev->keycodemax = ARRAY_SIZE(lp->btncode); |
114 | 114 | ||
115 | for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) { | 115 | for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) { |
116 | lp->btncode[i] = pcf8574_kp_btncode[i]; | 116 | if (lp->btncode[i] <= KEY_MAX) { |
117 | __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit); | 117 | lp->btncode[i] = pcf8574_kp_btncode[i]; |
118 | __set_bit(lp->btncode[i], idev->keybit); | ||
119 | } | ||
118 | } | 120 | } |
121 | __clear_bit(KEY_RESERVED, idev->keybit); | ||
119 | 122 | ||
120 | sprintf(lp->name, DRV_NAME); | 123 | sprintf(lp->name, DRV_NAME); |
121 | sprintf(lp->phys, "kp_data/input0"); | 124 | sprintf(lp->phys, "kp_data/input0"); |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index ca7a26f1dce8..5cf62e315218 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -70,6 +70,25 @@ static const struct alps_nibble_commands alps_v4_nibble_commands[] = { | |||
70 | { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ | 70 | { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static const struct alps_nibble_commands alps_v6_nibble_commands[] = { | ||
74 | { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */ | ||
75 | { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */ | ||
76 | { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */ | ||
77 | { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */ | ||
78 | { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */ | ||
79 | { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */ | ||
80 | { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */ | ||
81 | { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */ | ||
82 | { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */ | ||
83 | { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */ | ||
84 | { PSMOUSE_CMD_SETRES, 0x00 }, /* a */ | ||
85 | { PSMOUSE_CMD_SETRES, 0x01 }, /* b */ | ||
86 | { PSMOUSE_CMD_SETRES, 0x02 }, /* c */ | ||
87 | { PSMOUSE_CMD_SETRES, 0x03 }, /* d */ | ||
88 | { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */ | ||
89 | { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ | ||
90 | }; | ||
91 | |||
73 | 92 | ||
74 | #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ | 93 | #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ |
75 | #define ALPS_PASS 0x04 /* device has a pass-through port */ | 94 | #define ALPS_PASS 0x04 /* device has a pass-through port */ |
@@ -103,6 +122,7 @@ static const struct alps_model_info alps_model_data[] = { | |||
103 | /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ | 122 | /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ |
104 | { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, | 123 | { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, |
105 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, | 124 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, |
125 | { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */ | ||
106 | { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ | 126 | { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ |
107 | { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, | 127 | { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, |
108 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ | 128 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ |
@@ -645,6 +665,76 @@ static void alps_process_packet_v3(struct psmouse *psmouse) | |||
645 | alps_process_touchpad_packet_v3(psmouse); | 665 | alps_process_touchpad_packet_v3(psmouse); |
646 | } | 666 | } |
647 | 667 | ||
668 | static void alps_process_packet_v6(struct psmouse *psmouse) | ||
669 | { | ||
670 | struct alps_data *priv = psmouse->private; | ||
671 | unsigned char *packet = psmouse->packet; | ||
672 | struct input_dev *dev = psmouse->dev; | ||
673 | struct input_dev *dev2 = priv->dev2; | ||
674 | int x, y, z, left, right, middle; | ||
675 | |||
676 | /* | ||
677 | * We can use Byte5 to distinguish if the packet is from Touchpad | ||
678 | * or Trackpoint. | ||
679 | * Touchpad: 0 - 0x7E | ||
680 | * Trackpoint: 0x7F | ||
681 | */ | ||
682 | if (packet[5] == 0x7F) { | ||
683 | /* It should be a DualPoint when received Trackpoint packet */ | ||
684 | if (!(priv->flags & ALPS_DUALPOINT)) | ||
685 | return; | ||
686 | |||
687 | /* Trackpoint packet */ | ||
688 | x = packet[1] | ((packet[3] & 0x20) << 2); | ||
689 | y = packet[2] | ((packet[3] & 0x40) << 1); | ||
690 | z = packet[4]; | ||
691 | left = packet[3] & 0x01; | ||
692 | right = packet[3] & 0x02; | ||
693 | middle = packet[3] & 0x04; | ||
694 | |||
695 | /* To prevent the cursor jump when finger lifted */ | ||
696 | if (x == 0x7F && y == 0x7F && z == 0x7F) | ||
697 | x = y = z = 0; | ||
698 | |||
699 | /* Divide 4 since trackpoint's speed is too fast */ | ||
700 | input_report_rel(dev2, REL_X, (char)x / 4); | ||
701 | input_report_rel(dev2, REL_Y, -((char)y / 4)); | ||
702 | |||
703 | input_report_key(dev2, BTN_LEFT, left); | ||
704 | input_report_key(dev2, BTN_RIGHT, right); | ||
705 | input_report_key(dev2, BTN_MIDDLE, middle); | ||
706 | |||
707 | input_sync(dev2); | ||
708 | return; | ||
709 | } | ||
710 | |||
711 | /* Touchpad packet */ | ||
712 | x = packet[1] | ((packet[3] & 0x78) << 4); | ||
713 | y = packet[2] | ((packet[4] & 0x78) << 4); | ||
714 | z = packet[5]; | ||
715 | left = packet[3] & 0x01; | ||
716 | right = packet[3] & 0x02; | ||
717 | |||
718 | if (z > 30) | ||
719 | input_report_key(dev, BTN_TOUCH, 1); | ||
720 | if (z < 25) | ||
721 | input_report_key(dev, BTN_TOUCH, 0); | ||
722 | |||
723 | if (z > 0) { | ||
724 | input_report_abs(dev, ABS_X, x); | ||
725 | input_report_abs(dev, ABS_Y, y); | ||
726 | } | ||
727 | |||
728 | input_report_abs(dev, ABS_PRESSURE, z); | ||
729 | input_report_key(dev, BTN_TOOL_FINGER, z > 0); | ||
730 | |||
731 | /* v6 touchpad does not have middle button */ | ||
732 | input_report_key(dev, BTN_LEFT, left); | ||
733 | input_report_key(dev, BTN_RIGHT, right); | ||
734 | |||
735 | input_sync(dev); | ||
736 | } | ||
737 | |||
648 | static void alps_process_packet_v4(struct psmouse *psmouse) | 738 | static void alps_process_packet_v4(struct psmouse *psmouse) |
649 | { | 739 | { |
650 | struct alps_data *priv = psmouse->private; | 740 | struct alps_data *priv = psmouse->private; |
@@ -897,7 +987,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) | |||
897 | } | 987 | } |
898 | 988 | ||
899 | /* Bytes 2 - pktsize should have 0 in the highest bit */ | 989 | /* Bytes 2 - pktsize should have 0 in the highest bit */ |
900 | if (priv->proto_version != ALPS_PROTO_V5 && | 990 | if ((priv->proto_version < ALPS_PROTO_V5) && |
901 | psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && | 991 | psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && |
902 | (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { | 992 | (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { |
903 | psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", | 993 | psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", |
@@ -1085,6 +1175,80 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse) | |||
1085 | return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); | 1175 | return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); |
1086 | } | 1176 | } |
1087 | 1177 | ||
1178 | static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word) | ||
1179 | { | ||
1180 | int i, nibble; | ||
1181 | |||
1182 | /* | ||
1183 | * b0-b11 are valid bits, send sequence is inverse. | ||
1184 | * e.g. when word = 0x0123, nibble send sequence is 3, 2, 1 | ||
1185 | */ | ||
1186 | for (i = 0; i <= 8; i += 4) { | ||
1187 | nibble = (word >> i) & 0xf; | ||
1188 | if (alps_command_mode_send_nibble(psmouse, nibble)) | ||
1189 | return -1; | ||
1190 | } | ||
1191 | |||
1192 | return 0; | ||
1193 | } | ||
1194 | |||
1195 | static int alps_monitor_mode_write_reg(struct psmouse *psmouse, | ||
1196 | u16 addr, u16 value) | ||
1197 | { | ||
1198 | struct ps2dev *ps2dev = &psmouse->ps2dev; | ||
1199 | |||
1200 | /* 0x0A0 is the command to write the word */ | ||
1201 | if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) || | ||
1202 | alps_monitor_mode_send_word(psmouse, 0x0A0) || | ||
1203 | alps_monitor_mode_send_word(psmouse, addr) || | ||
1204 | alps_monitor_mode_send_word(psmouse, value) || | ||
1205 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE)) | ||
1206 | return -1; | ||
1207 | |||
1208 | return 0; | ||
1209 | } | ||
1210 | |||
1211 | static int alps_monitor_mode(struct psmouse *psmouse, bool enable) | ||
1212 | { | ||
1213 | struct ps2dev *ps2dev = &psmouse->ps2dev; | ||
1214 | |||
1215 | if (enable) { | ||
1216 | /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */ | ||
1217 | if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || | ||
1218 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) || | ||
1219 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || | ||
1220 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || | ||
1221 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || | ||
1222 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || | ||
1223 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || | ||
1224 | ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO)) | ||
1225 | return -1; | ||
1226 | } else { | ||
1227 | /* EC to exit monitor mode */ | ||
1228 | if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP)) | ||
1229 | return -1; | ||
1230 | } | ||
1231 | |||
1232 | return 0; | ||
1233 | } | ||
1234 | |||
1235 | static int alps_absolute_mode_v6(struct psmouse *psmouse) | ||
1236 | { | ||
1237 | u16 reg_val = 0x181; | ||
1238 | int ret = -1; | ||
1239 | |||
1240 | /* enter monitor mode, to write the register */ | ||
1241 | if (alps_monitor_mode(psmouse, true)) | ||
1242 | return -1; | ||
1243 | |||
1244 | ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val); | ||
1245 | |||
1246 | if (alps_monitor_mode(psmouse, false)) | ||
1247 | ret = -1; | ||
1248 | |||
1249 | return ret; | ||
1250 | } | ||
1251 | |||
1088 | static int alps_get_status(struct psmouse *psmouse, char *param) | 1252 | static int alps_get_status(struct psmouse *psmouse, char *param) |
1089 | { | 1253 | { |
1090 | /* Get status: 0xF5 0xF5 0xF5 0xE9 */ | 1254 | /* Get status: 0xF5 0xF5 0xF5 0xE9 */ |
@@ -1189,6 +1353,32 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse) | |||
1189 | return 0; | 1353 | return 0; |
1190 | } | 1354 | } |
1191 | 1355 | ||
1356 | static int alps_hw_init_v6(struct psmouse *psmouse) | ||
1357 | { | ||
1358 | unsigned char param[2] = {0xC8, 0x14}; | ||
1359 | |||
1360 | /* Enter passthrough mode to let trackpoint enter 6byte raw mode */ | ||
1361 | if (alps_passthrough_mode_v2(psmouse, true)) | ||
1362 | return -1; | ||
1363 | |||
1364 | if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || | ||
1365 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || | ||
1366 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || | ||
1367 | ps2_command(&psmouse->ps2dev, ¶m[0], PSMOUSE_CMD_SETRATE) || | ||
1368 | ps2_command(&psmouse->ps2dev, ¶m[1], PSMOUSE_CMD_SETRATE)) | ||
1369 | return -1; | ||
1370 | |||
1371 | if (alps_passthrough_mode_v2(psmouse, false)) | ||
1372 | return -1; | ||
1373 | |||
1374 | if (alps_absolute_mode_v6(psmouse)) { | ||
1375 | psmouse_err(psmouse, "Failed to enable absolute mode\n"); | ||
1376 | return -1; | ||
1377 | } | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1192 | /* | 1382 | /* |
1193 | * Enable or disable passthrough mode to the trackstick. | 1383 | * Enable or disable passthrough mode to the trackstick. |
1194 | */ | 1384 | */ |
@@ -1553,6 +1743,8 @@ static void alps_set_defaults(struct alps_data *priv) | |||
1553 | priv->hw_init = alps_hw_init_v1_v2; | 1743 | priv->hw_init = alps_hw_init_v1_v2; |
1554 | priv->process_packet = alps_process_packet_v1_v2; | 1744 | priv->process_packet = alps_process_packet_v1_v2; |
1555 | priv->set_abs_params = alps_set_abs_params_st; | 1745 | priv->set_abs_params = alps_set_abs_params_st; |
1746 | priv->x_max = 1023; | ||
1747 | priv->y_max = 767; | ||
1556 | break; | 1748 | break; |
1557 | case ALPS_PROTO_V3: | 1749 | case ALPS_PROTO_V3: |
1558 | priv->hw_init = alps_hw_init_v3; | 1750 | priv->hw_init = alps_hw_init_v3; |
@@ -1584,6 +1776,14 @@ static void alps_set_defaults(struct alps_data *priv) | |||
1584 | priv->x_bits = 23; | 1776 | priv->x_bits = 23; |
1585 | priv->y_bits = 12; | 1777 | priv->y_bits = 12; |
1586 | break; | 1778 | break; |
1779 | case ALPS_PROTO_V6: | ||
1780 | priv->hw_init = alps_hw_init_v6; | ||
1781 | priv->process_packet = alps_process_packet_v6; | ||
1782 | priv->set_abs_params = alps_set_abs_params_st; | ||
1783 | priv->nibble_commands = alps_v6_nibble_commands; | ||
1784 | priv->x_max = 2047; | ||
1785 | priv->y_max = 1535; | ||
1786 | break; | ||
1587 | } | 1787 | } |
1588 | } | 1788 | } |
1589 | 1789 | ||
@@ -1705,8 +1905,8 @@ static void alps_disconnect(struct psmouse *psmouse) | |||
1705 | static void alps_set_abs_params_st(struct alps_data *priv, | 1905 | static void alps_set_abs_params_st(struct alps_data *priv, |
1706 | struct input_dev *dev1) | 1906 | struct input_dev *dev1) |
1707 | { | 1907 | { |
1708 | input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0); | 1908 | input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0); |
1709 | input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0); | 1909 | input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0); |
1710 | } | 1910 | } |
1711 | 1911 | ||
1712 | static void alps_set_abs_params_mt(struct alps_data *priv, | 1912 | static void alps_set_abs_params_mt(struct alps_data *priv, |
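On the ALPS v6 support above: the touchpad packet assembles X from packet[1] plus bits 0x78 of packet[3] shifted left by 4, so the largest representable X is 0x7ff, which matches the new priv->x_max = 2047 (the 1535 Y maximum presumably reflects the sensor geometry rather than the encoding limit). A quick check of that bound:

/* maximum X encodable by the v6 touchpad packet layout */
x_max = 0xff | (0x78 << 4);	/* 0xff | 0x780 == 0x7ff == 2047 */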
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index eee59853b9ce..704f0f924307 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define ALPS_PROTO_V3 3 | 17 | #define ALPS_PROTO_V3 3 |
18 | #define ALPS_PROTO_V4 4 | 18 | #define ALPS_PROTO_V4 4 |
19 | #define ALPS_PROTO_V5 5 | 19 | #define ALPS_PROTO_V5 5 |
20 | #define ALPS_PROTO_V6 6 | ||
20 | 21 | ||
21 | /** | 22 | /** |
22 | * struct alps_model_info - touchpad ID table | 23 | * struct alps_model_info - touchpad ID table |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 8551dcaf24db..597e9b8fc18d 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
1313 | break; | 1313 | break; |
1314 | case 6: | 1314 | case 6: |
1315 | case 7: | 1315 | case 7: |
1316 | case 8: | ||
1316 | etd->hw_version = 4; | 1317 | etd->hw_version = 4; |
1317 | break; | 1318 | break; |
1318 | default: | 1319 | default: |
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 98707fb2cb5d..8f4c4ab04bc2 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
@@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type); | |||
455 | static DEVICE_ATTR_RO(proto); | 455 | static DEVICE_ATTR_RO(proto); |
456 | static DEVICE_ATTR_RO(id); | 456 | static DEVICE_ATTR_RO(id); |
457 | static DEVICE_ATTR_RO(extra); | 457 | static DEVICE_ATTR_RO(extra); |
458 | static DEVICE_ATTR_RO(modalias); | ||
459 | static DEVICE_ATTR_WO(drvctl); | ||
460 | static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); | ||
461 | static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); | ||
462 | 458 | ||
463 | static struct attribute *serio_device_id_attrs[] = { | 459 | static struct attribute *serio_device_id_attrs[] = { |
464 | &dev_attr_type.attr, | 460 | &dev_attr_type.attr, |
465 | &dev_attr_proto.attr, | 461 | &dev_attr_proto.attr, |
466 | &dev_attr_id.attr, | 462 | &dev_attr_id.attr, |
467 | &dev_attr_extra.attr, | 463 | &dev_attr_extra.attr, |
464 | NULL | ||
465 | }; | ||
466 | |||
467 | static struct attribute_group serio_id_attr_group = { | ||
468 | .name = "id", | ||
469 | .attrs = serio_device_id_attrs, | ||
470 | }; | ||
471 | |||
472 | static DEVICE_ATTR_RO(modalias); | ||
473 | static DEVICE_ATTR_WO(drvctl); | ||
474 | static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); | ||
475 | static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); | ||
476 | |||
477 | static struct attribute *serio_device_attrs[] = { | ||
468 | &dev_attr_modalias.attr, | 478 | &dev_attr_modalias.attr, |
469 | &dev_attr_description.attr, | 479 | &dev_attr_description.attr, |
470 | &dev_attr_drvctl.attr, | 480 | &dev_attr_drvctl.attr, |
@@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = { | |||
472 | NULL | 482 | NULL |
473 | }; | 483 | }; |
474 | 484 | ||
475 | static struct attribute_group serio_id_attr_group = { | 485 | static struct attribute_group serio_device_attr_group = { |
476 | .name = "id", | 486 | .attrs = serio_device_attrs, |
477 | .attrs = serio_device_id_attrs, | ||
478 | }; | 487 | }; |
479 | 488 | ||
480 | static const struct attribute_group *serio_device_attr_groups[] = { | 489 | static const struct attribute_group *serio_device_attr_groups[] = { |
481 | &serio_id_attr_group, | 490 | &serio_id_attr_group, |
491 | &serio_device_attr_group, | ||
482 | NULL | 492 | NULL |
483 | }; | 493 | }; |
484 | 494 | ||
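The serio rework splits the sysfs attributes into two groups because a named attribute_group creates a subdirectory: only type, proto, id and extra belong under the "id" group, while modalias, description, drvctl and bind_mode stay at the device's top level through an unnamed group. A generic sketch of the named-vs-unnamed distinction (the identifiers below are illustrative):

static struct attribute_group id_group = {
	.name  = "id",		/* attrs appear as <device>/id/<name> */
	.attrs = id_attrs,
};

static struct attribute_group dev_group = {
	.attrs = dev_attrs,	/* attrs appear as <device>/<name>    */
};

static const struct attribute_group *groups[] = {
	&id_group,
	&dev_group,
	NULL,
};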
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index cfd1b7e8c001..f1cb05148b46 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c | |||
@@ -251,7 +251,7 @@ static void sur40_poll(struct input_polled_dev *polldev) | |||
251 | struct sur40_state *sur40 = polldev->private; | 251 | struct sur40_state *sur40 = polldev->private; |
252 | struct input_dev *input = polldev->input; | 252 | struct input_dev *input = polldev->input; |
253 | int result, bulk_read, need_blobs, packet_blobs, i; | 253 | int result, bulk_read, need_blobs, packet_blobs, i; |
254 | u32 packet_id; | 254 | u32 uninitialized_var(packet_id); |
255 | 255 | ||
256 | struct sur40_header *header = &sur40->bulk_in_buffer->header; | 256 | struct sur40_header *header = &sur40->bulk_in_buffer->header; |
257 | struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; | 257 | struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; |
@@ -286,7 +286,7 @@ static void sur40_poll(struct input_polled_dev *polldev) | |||
286 | if (need_blobs == -1) { | 286 | if (need_blobs == -1) { |
287 | need_blobs = le16_to_cpu(header->count); | 287 | need_blobs = le16_to_cpu(header->count); |
288 | dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); | 288 | dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); |
289 | packet_id = header->packet_id; | 289 | packet_id = le32_to_cpu(header->packet_id); |
290 | } | 290 | } |
291 | 291 | ||
292 | /* | 292 | /* |
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index ae4b6b903629..5f87bed05467 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c | |||
@@ -106,6 +106,7 @@ struct usbtouch_device_info { | |||
106 | struct usbtouch_usb { | 106 | struct usbtouch_usb { |
107 | unsigned char *data; | 107 | unsigned char *data; |
108 | dma_addr_t data_dma; | 108 | dma_addr_t data_dma; |
109 | int data_size; | ||
109 | unsigned char *buffer; | 110 | unsigned char *buffer; |
110 | int buf_len; | 111 | int buf_len; |
111 | struct urb *irq; | 112 | struct urb *irq; |
@@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf) | |||
1521 | static void usbtouch_free_buffers(struct usb_device *udev, | 1522 | static void usbtouch_free_buffers(struct usb_device *udev, |
1522 | struct usbtouch_usb *usbtouch) | 1523 | struct usbtouch_usb *usbtouch) |
1523 | { | 1524 | { |
1524 | usb_free_coherent(udev, usbtouch->type->rept_size, | 1525 | usb_free_coherent(udev, usbtouch->data_size, |
1525 | usbtouch->data, usbtouch->data_dma); | 1526 | usbtouch->data, usbtouch->data_dma); |
1526 | kfree(usbtouch->buffer); | 1527 | kfree(usbtouch->buffer); |
1527 | } | 1528 | } |
@@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf, | |||
1566 | if (!type->process_pkt) | 1567 | if (!type->process_pkt) |
1567 | type->process_pkt = usbtouch_process_pkt; | 1568 | type->process_pkt = usbtouch_process_pkt; |
1568 | 1569 | ||
1569 | usbtouch->data = usb_alloc_coherent(udev, type->rept_size, | 1570 | usbtouch->data_size = type->rept_size; |
1571 | if (type->get_pkt_len) { | ||
1572 | /* | ||
1573 | * When dealing with variable-length packets we should | ||
1574 | * not request more than wMaxPacketSize bytes at once | ||
1575 | * as we do not know if there is more data coming or | ||
1576 | * we filled exactly wMaxPacketSize bytes and there is | ||
1577 | * nothing else. | ||
1578 | */ | ||
1579 | usbtouch->data_size = min(usbtouch->data_size, | ||
1580 | usb_endpoint_maxp(endpoint)); | ||
1581 | } | ||
1582 | |||
1583 | usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size, | ||
1570 | GFP_KERNEL, &usbtouch->data_dma); | 1584 | GFP_KERNEL, &usbtouch->data_dma); |
1571 | if (!usbtouch->data) | 1585 | if (!usbtouch->data) |
1572 | goto out_free; | 1586 | goto out_free; |
@@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf, | |||
1626 | if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) | 1640 | if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) |
1627 | usb_fill_int_urb(usbtouch->irq, udev, | 1641 | usb_fill_int_urb(usbtouch->irq, udev, |
1628 | usb_rcvintpipe(udev, endpoint->bEndpointAddress), | 1642 | usb_rcvintpipe(udev, endpoint->bEndpointAddress), |
1629 | usbtouch->data, type->rept_size, | 1643 | usbtouch->data, usbtouch->data_size, |
1630 | usbtouch_irq, usbtouch, endpoint->bInterval); | 1644 | usbtouch_irq, usbtouch, endpoint->bInterval); |
1631 | else | 1645 | else |
1632 | usb_fill_bulk_urb(usbtouch->irq, udev, | 1646 | usb_fill_bulk_urb(usbtouch->irq, udev, |
1633 | usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), | 1647 | usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), |
1634 | usbtouch->data, type->rept_size, | 1648 | usbtouch->data, usbtouch->data_size, |
1635 | usbtouch_irq, usbtouch); | 1649 | usbtouch_irq, usbtouch); |
1636 | 1650 | ||
1637 | usbtouch->irq->dev = udev; | 1651 | usbtouch->irq->dev = udev; |
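On the usbtouchscreen hunks: when a device has a get_pkt_len() handler the receive buffer is capped at the endpoint's wMaxPacketSize, so the allocation, the URB length and the eventual free must all use the same usbtouch->data_size. A hedged numeric illustration (rept_size 16 and wMaxPacketSize 8 are made-up values):

/* rept_size = 16, usb_endpoint_maxp(endpoint) = 8  ->  data_size = 8 */
usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
				    GFP_KERNEL, &usbtouch->data_dma);
/* ... URBs are filled with usbtouch->data_size bytes ... */
usb_free_coherent(udev, usbtouch->data_size,
		  usbtouch->data, usbtouch->data_dma);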
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 75762d6ff3ba..aa127ba392a4 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c | |||
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result) | |||
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | static irqreturn_t zforce_interrupt(int irq, void *dev_id) | 458 | static irqreturn_t zforce_irq(int irq, void *dev_id) |
459 | { | ||
460 | struct zforce_ts *ts = dev_id; | ||
461 | struct i2c_client *client = ts->client; | ||
462 | |||
463 | if (ts->suspended && device_may_wakeup(&client->dev)) | ||
464 | pm_wakeup_event(&client->dev, 500); | ||
465 | |||
466 | return IRQ_WAKE_THREAD; | ||
467 | } | ||
468 | |||
469 | static irqreturn_t zforce_irq_thread(int irq, void *dev_id) | ||
459 | { | 470 | { |
460 | struct zforce_ts *ts = dev_id; | 471 | struct zforce_ts *ts = dev_id; |
461 | struct i2c_client *client = ts->client; | 472 | struct i2c_client *client = ts->client; |
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id) | |||
465 | u8 *payload; | 476 | u8 *payload; |
466 | 477 | ||
467 | /* | 478 | /* |
468 | * When suspended, emit a wakeup signal if necessary and return. | 479 | * When still suspended, return. |
469 | * Due to the level-interrupt we will get re-triggered later. | 480 | * Due to the level-interrupt we will get re-triggered later. |
470 | */ | 481 | */ |
471 | if (ts->suspended) { | 482 | if (ts->suspended) { |
472 | if (device_may_wakeup(&client->dev)) | ||
473 | pm_wakeup_event(&client->dev, 500); | ||
474 | msleep(20); | 483 | msleep(20); |
475 | return IRQ_HANDLED; | 484 | return IRQ_HANDLED; |
476 | } | 485 | } |
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client, | |||
763 | * Therefore we can trigger the interrupt anytime it is low and do | 772 | * Therefore we can trigger the interrupt anytime it is low and do |
764 | * not need to limit it to the interrupt edge. | 773 | * not need to limit it to the interrupt edge. |
765 | */ | 774 | */ |
766 | ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, | 775 | ret = devm_request_threaded_irq(&client->dev, client->irq, |
767 | zforce_interrupt, | 776 | zforce_irq, zforce_irq_thread, |
768 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | 777 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
769 | input_dev->name, ts); | 778 | input_dev->name, ts); |
770 | if (ret) { | 779 | if (ret) { |
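The zforce split gives the driver a hard handler plus a threaded handler: the hard handler only signals the wakeup event, which is safe in interrupt context, and returns IRQ_WAKE_THREAD, while everything that may sleep (I2C transfers, msleep) stays in the thread. A minimal sketch of the pairing, with illustrative names:

#include <linux/interrupt.h>

static irqreturn_t my_hard_irq(int irq, void *dev_id)
{
	/* atomic context: signal the wakeup source, nothing that sleeps */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	/* process context: I2C transfers, msleep(), etc. */
	return IRQ_HANDLED;
}

static int my_request(struct device *dev, int irq, void *priv)
{
	return devm_request_threaded_irq(dev, irq, my_hard_irq, my_irq_thread,
					 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					 "my-ts", priv);
}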
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1abfb5684ab7..e46a88700b68 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -392,7 +392,7 @@ struct arm_smmu_domain { | |||
392 | struct arm_smmu_cfg root_cfg; | 392 | struct arm_smmu_cfg root_cfg; |
393 | phys_addr_t output_mask; | 393 | phys_addr_t output_mask; |
394 | 394 | ||
395 | spinlock_t lock; | 395 | struct mutex lock; |
396 | }; | 396 | }; |
397 | 397 | ||
398 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | 398 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); |
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) | |||
900 | goto out_free_domain; | 900 | goto out_free_domain; |
901 | smmu_domain->root_cfg.pgd = pgd; | 901 | smmu_domain->root_cfg.pgd = pgd; |
902 | 902 | ||
903 | spin_lock_init(&smmu_domain->lock); | 903 | mutex_init(&smmu_domain->lock); |
904 | domain->priv = smmu_domain; | 904 | domain->priv = smmu_domain; |
905 | return 0; | 905 | return 0; |
906 | 906 | ||
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1137 | * Sanity check the domain. We don't currently support domains | 1137 | * Sanity check the domain. We don't currently support domains |
1138 | * that cross between different SMMU chains. | 1138 | * that cross between different SMMU chains. |
1139 | */ | 1139 | */ |
1140 | spin_lock(&smmu_domain->lock); | 1140 | mutex_lock(&smmu_domain->lock); |
1141 | if (!smmu_domain->leaf_smmu) { | 1141 | if (!smmu_domain->leaf_smmu) { |
1142 | /* Now that we have a master, we can finalise the domain */ | 1142 | /* Now that we have a master, we can finalise the domain */ |
1143 | ret = arm_smmu_init_domain_context(domain, dev); | 1143 | ret = arm_smmu_init_domain_context(domain, dev); |
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1152 | dev_name(device_smmu->dev)); | 1152 | dev_name(device_smmu->dev)); |
1153 | goto err_unlock; | 1153 | goto err_unlock; |
1154 | } | 1154 | } |
1155 | spin_unlock(&smmu_domain->lock); | 1155 | mutex_unlock(&smmu_domain->lock); |
1156 | 1156 | ||
1157 | /* Looks ok, so add the device to the domain */ | 1157 | /* Looks ok, so add the device to the domain */ |
1158 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | 1158 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); |
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1162 | return arm_smmu_domain_add_master(smmu_domain, master); | 1162 | return arm_smmu_domain_add_master(smmu_domain, master); |
1163 | 1163 | ||
1164 | err_unlock: | 1164 | err_unlock: |
1165 | spin_unlock(&smmu_domain->lock); | 1165 | mutex_unlock(&smmu_domain->lock); |
1166 | return ret; | 1166 | return ret; |
1167 | } | 1167 | } |
1168 | 1168 | ||
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
1394 | if (paddr & ~output_mask) | 1394 | if (paddr & ~output_mask) |
1395 | return -ERANGE; | 1395 | return -ERANGE; |
1396 | 1396 | ||
1397 | spin_lock(&smmu_domain->lock); | 1397 | mutex_lock(&smmu_domain->lock); |
1398 | pgd += pgd_index(iova); | 1398 | pgd += pgd_index(iova); |
1399 | end = iova + size; | 1399 | end = iova + size; |
1400 | do { | 1400 | do { |
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
1410 | } while (pgd++, iova != end); | 1410 | } while (pgd++, iova != end); |
1411 | 1411 | ||
1412 | out_unlock: | 1412 | out_unlock: |
1413 | spin_unlock(&smmu_domain->lock); | 1413 | mutex_unlock(&smmu_domain->lock); |
1414 | 1414 | ||
1415 | /* Ensure new page tables are visible to the hardware walker */ | 1415 | /* Ensure new page tables are visible to the hardware walker */ |
1416 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | 1416 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) |
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
1423 | phys_addr_t paddr, size_t size, int flags) | 1423 | phys_addr_t paddr, size_t size, int flags) |
1424 | { | 1424 | { |
1425 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1425 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1426 | struct arm_smmu_device *smmu = smmu_domain->leaf_smmu; | ||
1427 | 1426 | ||
1428 | if (!smmu_domain || !smmu) | 1427 | if (!smmu_domain) |
1429 | return -ENODEV; | 1428 | return -ENODEV; |
1430 | 1429 | ||
1431 | /* Check for silent address truncation up the SMMU chain. */ | 1430 | /* Check for silent address truncation up the SMMU chain. */ |
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
1449 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | 1448 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, |
1450 | dma_addr_t iova) | 1449 | dma_addr_t iova) |
1451 | { | 1450 | { |
1452 | pgd_t *pgd; | 1451 | pgd_t *pgdp, pgd; |
1453 | pud_t *pud; | 1452 | pud_t pud; |
1454 | pmd_t *pmd; | 1453 | pmd_t pmd; |
1455 | pte_t *pte; | 1454 | pte_t pte; |
1456 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1455 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1457 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 1456 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; |
1458 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1459 | 1457 | ||
1460 | spin_lock(&smmu_domain->lock); | 1458 | pgdp = root_cfg->pgd; |
1461 | pgd = root_cfg->pgd; | 1459 | if (!pgdp) |
1462 | if (!pgd) | 1460 | return 0; |
1463 | goto err_unlock; | ||
1464 | 1461 | ||
1465 | pgd += pgd_index(iova); | 1462 | pgd = *(pgdp + pgd_index(iova)); |
1466 | if (pgd_none_or_clear_bad(pgd)) | 1463 | if (pgd_none(pgd)) |
1467 | goto err_unlock; | 1464 | return 0; |
1468 | 1465 | ||
1469 | pud = pud_offset(pgd, iova); | 1466 | pud = *pud_offset(&pgd, iova); |
1470 | if (pud_none_or_clear_bad(pud)) | 1467 | if (pud_none(pud)) |
1471 | goto err_unlock; | 1468 | return 0; |
1472 | 1469 | ||
1473 | pmd = pmd_offset(pud, iova); | 1470 | pmd = *pmd_offset(&pud, iova); |
1474 | if (pmd_none_or_clear_bad(pmd)) | 1471 | if (pmd_none(pmd)) |
1475 | goto err_unlock; | 1472 | return 0; |
1476 | 1473 | ||
1477 | pte = pmd_page_vaddr(*pmd) + pte_index(iova); | 1474 | pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); |
1478 | if (pte_none(pte)) | 1475 | if (pte_none(pte)) |
1479 | goto err_unlock; | 1476 | return 0; |
1480 | |||
1481 | spin_unlock(&smmu_domain->lock); | ||
1482 | return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); | ||
1483 | 1477 | ||
1484 | err_unlock: | 1478 | return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); |
1485 | spin_unlock(&smmu_domain->lock); | ||
1486 | dev_warn(smmu->dev, | ||
1487 | "invalid (corrupt?) page tables detected for iova 0x%llx\n", | ||
1488 | (unsigned long long)iova); | ||
1489 | return -EINVAL; | ||
1490 | } | 1479 | } |
1491 | 1480 | ||
1492 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, | 1481 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, |
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1863 | dev_err(dev, | 1852 | dev_err(dev, |
1864 | "found only %d context interrupt(s) but %d required\n", | 1853 | "found only %d context interrupt(s) but %d required\n", |
1865 | smmu->num_context_irqs, smmu->num_context_banks); | 1854 | smmu->num_context_irqs, smmu->num_context_banks); |
1855 | err = -ENODEV; | ||
1866 | goto out_put_parent; | 1856 | goto out_put_parent; |
1867 | } | 1857 | } |
1868 | 1858 | ||
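A hedged reading of the arm-smmu locking change: the domain lock becomes a mutex because the mapping path that runs under it can sleep (page-table pages are allocated with GFP_KERNEL), which a spinlock would not allow, while iova_to_phys is reworked to walk copied entries so it no longer needs the lock at all. A simplified sketch of the sleeping section, not taken verbatim from the driver:

struct my_domain {
	struct mutex lock;
};

static int my_map_section(struct my_domain *dom)
{
	struct page *page;

	mutex_lock(&dom->lock);			/* may sleep below       */
	page = alloc_pages(GFP_KERNEL, 0);	/* allocation can sleep  */
	/* ... install page-table entries ... */
	mutex_unlock(&dom->lock);
	return page ? 0 : -ENOMEM;
}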
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 82cec63a9011..3ee78f02e5d7 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, | |||
149 | static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, | 149 | static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, |
150 | int irq, int do_mask) | 150 | int irq, int do_mask) |
151 | { | 151 | { |
152 | int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ | 152 | /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ |
153 | int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ | 153 | int bitfield_width = 4; |
154 | int shift = 32 - (irq + 1) * bitfield_width; | ||
154 | 155 | ||
155 | intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, | 156 | intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, |
156 | shift, bitfield_width, | 157 | shift, bitfield_width, |
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, | |||
159 | 160 | ||
160 | static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) | 161 | static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) |
161 | { | 162 | { |
163 | /* The SENSE register is assumed to be 32-bit. */ | ||
162 | int bitfield_width = p->config.sense_bitfield_width; | 164 | int bitfield_width = p->config.sense_bitfield_width; |
163 | int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ | 165 | int shift = 32 - (irq + 1) * bitfield_width; |
164 | 166 | ||
165 | dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); | 167 | dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); |
166 | 168 | ||
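The irq-renesas-intc-irqpin fix: the old shift (7 - irq) * width is only correct when the field width is 4; the new 32 - (irq + 1) * width places the fields from the register's MSB downwards for any width. A worked comparison for irq = 1:

/* 4-bit fields: old (7 - 1) * 4 = 24, new 32 - (1 + 1) * 4 = 24  (agree)  */
/* 2-bit fields: old (7 - 1) * 2 = 12, new 32 - (1 + 1) * 2 = 28  (differ) */
shift = 32 - (irq + 1) * bitfield_width;	/* irq 0 occupies the top field */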
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 497bd026c237..4a4825528188 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card) | |||
1643 | int i; | 1643 | int i; |
1644 | struct pci_dev *tmp_hfcpci = NULL; | 1644 | struct pci_dev *tmp_hfcpci = NULL; |
1645 | 1645 | ||
1646 | #ifdef __BIG_ENDIAN | ||
1647 | #error "not running on big endian machines now" | ||
1648 | #endif | ||
1649 | |||
1650 | strcpy(tmp, hfcpci_revision); | 1646 | strcpy(tmp, hfcpci_revision); |
1651 | printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); | 1647 | printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); |
1652 | 1648 | ||
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c index f6ab63aa6995..33eeb4602c7e 100644 --- a/drivers/isdn/hisax/telespci.c +++ b/drivers/isdn/hisax/telespci.c | |||
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card) | |||
290 | struct IsdnCardState *cs = card->cs; | 290 | struct IsdnCardState *cs = card->cs; |
291 | char tmp[64]; | 291 | char tmp[64]; |
292 | 292 | ||
293 | #ifdef __BIG_ENDIAN | ||
294 | #error "not running on big endian machines now" | ||
295 | #endif | ||
296 | |||
297 | strcpy(tmp, telespci_revision); | 293 | strcpy(tmp, telespci_revision); |
298 | printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); | 294 | printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); |
299 | if (cs->typ != ISDN_CTYPE_TELESPCI) | 295 | if (cs->typ != ISDN_CTYPE_TELESPCI) |
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 05188351711d..a97263e902ff 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c | |||
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip, | |||
244 | if (i % 2) | 244 | if (i % 2) |
245 | goto err; | 245 | goto err; |
246 | 246 | ||
247 | mutex_lock(&chip->lock); | ||
248 | |||
249 | for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { | 247 | for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { |
250 | ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); | 248 | ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); |
251 | if (ret) { | 249 | if (ret) |
252 | mutex_unlock(&chip->lock); | ||
253 | return -EINVAL; | 250 | return -EINVAL; |
254 | } | ||
255 | } | 251 | } |
256 | 252 | ||
257 | mutex_unlock(&chip->lock); | ||
258 | |||
259 | return size; | 253 | return size; |
260 | 254 | ||
261 | err: | 255 | err: |
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev, | |||
427 | { | 421 | { |
428 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); | 422 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); |
429 | struct lp55xx_chip *chip = led->chip; | 423 | struct lp55xx_chip *chip = led->chip; |
424 | int ret; | ||
430 | 425 | ||
431 | mutex_lock(&chip->lock); | 426 | mutex_lock(&chip->lock); |
432 | 427 | ||
433 | chip->engine_idx = nr; | 428 | chip->engine_idx = nr; |
434 | lp5521_load_engine(chip); | 429 | lp5521_load_engine(chip); |
430 | ret = lp5521_update_program_memory(chip, buf, len); | ||
435 | 431 | ||
436 | mutex_unlock(&chip->lock); | 432 | mutex_unlock(&chip->lock); |
437 | 433 | ||
438 | return lp5521_update_program_memory(chip, buf, len); | 434 | return ret; |
439 | } | 435 | } |
440 | store_load(1) | 436 | store_load(1) |
441 | store_load(2) | 437 | store_load(2) |
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 6b553d9f4266..fd9ab5f61441 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip, | |||
337 | if (i % 2) | 337 | if (i % 2) |
338 | goto err; | 338 | goto err; |
339 | 339 | ||
340 | mutex_lock(&chip->lock); | ||
341 | |||
342 | for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { | 340 | for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { |
343 | ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); | 341 | ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); |
344 | if (ret) { | 342 | if (ret) |
345 | mutex_unlock(&chip->lock); | ||
346 | return -EINVAL; | 343 | return -EINVAL; |
347 | } | ||
348 | } | 344 | } |
349 | 345 | ||
350 | mutex_unlock(&chip->lock); | ||
351 | |||
352 | return size; | 346 | return size; |
353 | 347 | ||
354 | err: | 348 | err: |
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev, | |||
548 | { | 542 | { |
549 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); | 543 | struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); |
550 | struct lp55xx_chip *chip = led->chip; | 544 | struct lp55xx_chip *chip = led->chip; |
545 | int ret; | ||
551 | 546 | ||
552 | mutex_lock(&chip->lock); | 547 | mutex_lock(&chip->lock); |
553 | 548 | ||
554 | chip->engine_idx = nr; | 549 | chip->engine_idx = nr; |
555 | lp5523_load_engine_and_select_page(chip); | 550 | lp5523_load_engine_and_select_page(chip); |
551 | ret = lp5523_update_program_memory(chip, buf, len); | ||
556 | 552 | ||
557 | mutex_unlock(&chip->lock); | 553 | mutex_unlock(&chip->lock); |
558 | 554 | ||
559 | return lp5523_update_program_memory(chip, buf, len); | 555 | return ret; |
560 | } | 556 | } |
561 | store_load(1) | 557 | store_load(1) |
562 | store_load(2) | 558 | store_load(2) |
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 2b46bf1d7e40..4c9852d92b0a 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c | |||
@@ -421,9 +421,11 @@ out: | |||
421 | 421 | ||
422 | if (watermark <= WATERMARK_METADATA) { | 422 | if (watermark <= WATERMARK_METADATA) { |
423 | SET_GC_MARK(b, GC_MARK_METADATA); | 423 | SET_GC_MARK(b, GC_MARK_METADATA); |
424 | SET_GC_MOVE(b, 0); | ||
424 | b->prio = BTREE_PRIO; | 425 | b->prio = BTREE_PRIO; |
425 | } else { | 426 | } else { |
426 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); | 427 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); |
428 | SET_GC_MOVE(b, 0); | ||
427 | b->prio = INITIAL_PRIO; | 429 | b->prio = INITIAL_PRIO; |
428 | } | 430 | } |
429 | 431 | ||
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 4beb55a0ff30..754f43177483 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -197,7 +197,7 @@ struct bucket { | |||
197 | uint8_t disk_gen; | 197 | uint8_t disk_gen; |
198 | uint8_t last_gc; /* Most out of date gen in the btree */ | 198 | uint8_t last_gc; /* Most out of date gen in the btree */ |
199 | uint8_t gc_gen; | 199 | uint8_t gc_gen; |
200 | uint16_t gc_mark; | 200 | uint16_t gc_mark; /* Bitfield used by GC. See below for field */ |
201 | }; | 201 | }; |
202 | 202 | ||
203 | /* | 203 | /* |
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |||
209 | #define GC_MARK_RECLAIMABLE 0 | 209 | #define GC_MARK_RECLAIMABLE 0 |
210 | #define GC_MARK_DIRTY 1 | 210 | #define GC_MARK_DIRTY 1 |
211 | #define GC_MARK_METADATA 2 | 211 | #define GC_MARK_METADATA 2 |
212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); | 212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); |
213 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); | ||
213 | 214 | ||
214 | #include "journal.h" | 215 | #include "journal.h" |
215 | #include "stats.h" | 216 | #include "stats.h" |
@@ -372,14 +373,14 @@ struct cached_dev { | |||
372 | unsigned char writeback_percent; | 373 | unsigned char writeback_percent; |
373 | unsigned writeback_delay; | 374 | unsigned writeback_delay; |
374 | 375 | ||
375 | int writeback_rate_change; | ||
376 | int64_t writeback_rate_derivative; | ||
377 | uint64_t writeback_rate_target; | 376 | uint64_t writeback_rate_target; |
377 | int64_t writeback_rate_proportional; | ||
378 | int64_t writeback_rate_derivative; | ||
379 | int64_t writeback_rate_change; | ||
378 | 380 | ||
379 | unsigned writeback_rate_update_seconds; | 381 | unsigned writeback_rate_update_seconds; |
380 | unsigned writeback_rate_d_term; | 382 | unsigned writeback_rate_d_term; |
381 | unsigned writeback_rate_p_term_inverse; | 383 | unsigned writeback_rate_p_term_inverse; |
382 | unsigned writeback_rate_d_smooth; | ||
383 | }; | 384 | }; |
384 | 385 | ||
385 | enum alloc_watermarks { | 386 | enum alloc_watermarks { |
@@ -445,7 +446,6 @@ struct cache { | |||
445 | * call prio_write() to keep gens from wrapping. | 446 | * call prio_write() to keep gens from wrapping. |
446 | */ | 447 | */ |
447 | uint8_t need_save_prio; | 448 | uint8_t need_save_prio; |
448 | unsigned gc_move_threshold; | ||
449 | 449 | ||
450 | /* | 450 | /* |
451 | * If nonzero, we know we aren't going to find any buckets to invalidate | 451 | * If nonzero, we know we aren't going to find any buckets to invalidate |
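On the bcache.h hunk: gc_mark is a 16-bit field, so shrinking GC_SECTORS_USED from 14 to 13 bits is what makes room for the new GC_MOVE flag in bit 15. The layout as implied by the BITMASK() lines, annotated (a reading of the patch, not an authoritative map):

BITMASK(GC_MARK,         struct bucket, gc_mark,  0,  2);	/* bits 0-1:  reclaimable / dirty / metadata */
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark,  2, 13);	/* bits 2-14: up to 8191 sectors per bucket  */
BITMASK(GC_MOVE,         struct bucket, gc_mark, 15,  1);	/* bit 15:    bucket chosen by moving GC     */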
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..31bb53fcc67a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c) | |||
1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), | 1561 | SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), |
1562 | GC_MARK_METADATA); | 1562 | GC_MARK_METADATA); |
1563 | 1563 | ||
1564 | /* don't reclaim buckets to which writeback keys point */ | ||
1565 | rcu_read_lock(); | ||
1566 | for (i = 0; i < c->nr_uuids; i++) { | ||
1567 | struct bcache_device *d = c->devices[i]; | ||
1568 | struct cached_dev *dc; | ||
1569 | struct keybuf_key *w, *n; | ||
1570 | unsigned j; | ||
1571 | |||
1572 | if (!d || UUID_FLASH_ONLY(&c->uuids[i])) | ||
1573 | continue; | ||
1574 | dc = container_of(d, struct cached_dev, disk); | ||
1575 | |||
1576 | spin_lock(&dc->writeback_keys.lock); | ||
1577 | rbtree_postorder_for_each_entry_safe(w, n, | ||
1578 | &dc->writeback_keys.keys, node) | ||
1579 | for (j = 0; j < KEY_PTRS(&w->key); j++) | ||
1580 | SET_GC_MARK(PTR_BUCKET(c, &w->key, j), | ||
1581 | GC_MARK_DIRTY); | ||
1582 | spin_unlock(&dc->writeback_keys.lock); | ||
1583 | } | ||
1584 | rcu_read_unlock(); | ||
1585 | |||
1564 | for_each_cache(ca, c, i) { | 1586 | for_each_cache(ca, c, i) { |
1565 | uint64_t *i; | 1587 | uint64_t *i; |
1566 | 1588 | ||
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, | |||
1817 | if (KEY_START(k) > KEY_START(insert) + sectors_found) | 1839 | if (KEY_START(k) > KEY_START(insert) + sectors_found) |
1818 | goto check_failed; | 1840 | goto check_failed; |
1819 | 1841 | ||
1820 | if (KEY_PTRS(replace_key) != KEY_PTRS(k)) | 1842 | if (KEY_PTRS(k) != KEY_PTRS(replace_key) || |
1843 | KEY_DIRTY(k) != KEY_DIRTY(replace_key)) | ||
1821 | goto check_failed; | 1844 | goto check_failed; |
1822 | 1845 | ||
1823 | /* skip past gen */ | 1846 | /* skip past gen */ |
@@ -2217,7 +2240,7 @@ struct btree_insert_op { | |||
2217 | struct bkey *replace_key; | 2240 | struct bkey *replace_key; |
2218 | }; | 2241 | }; |
2219 | 2242 | ||
2220 | int btree_insert_fn(struct btree_op *b_op, struct btree *b) | 2243 | static int btree_insert_fn(struct btree_op *b_op, struct btree *b) |
2221 | { | 2244 | { |
2222 | struct btree_insert_op *op = container_of(b_op, | 2245 | struct btree_insert_op *op = container_of(b_op, |
2223 | struct btree_insert_op, op); | 2246 | struct btree_insert_op, op); |
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..f2f0998c4a91 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) | |||
25 | unsigned i; | 25 | unsigned i; |
26 | 26 | ||
27 | for (i = 0; i < KEY_PTRS(k); i++) { | 27 | for (i = 0; i < KEY_PTRS(k); i++) { |
28 | struct cache *ca = PTR_CACHE(c, k, i); | ||
29 | struct bucket *g = PTR_BUCKET(c, k, i); | 28 | struct bucket *g = PTR_BUCKET(c, k, i); |
30 | 29 | ||
31 | if (GC_SECTORS_USED(g) < ca->gc_move_threshold) | 30 | if (GC_MOVE(g)) |
32 | return true; | 31 | return true; |
33 | } | 32 | } |
34 | 33 | ||
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl) | |||
65 | 64 | ||
66 | static void read_moving_endio(struct bio *bio, int error) | 65 | static void read_moving_endio(struct bio *bio, int error) |
67 | { | 66 | { |
67 | struct bbio *b = container_of(bio, struct bbio, bio); | ||
68 | struct moving_io *io = container_of(bio->bi_private, | 68 | struct moving_io *io = container_of(bio->bi_private, |
69 | struct moving_io, cl); | 69 | struct moving_io, cl); |
70 | 70 | ||
71 | if (error) | 71 | if (error) |
72 | io->op.error = error; | 72 | io->op.error = error; |
73 | else if (!KEY_DIRTY(&b->key) && | ||
74 | ptr_stale(io->op.c, &b->key, 0)) { | ||
75 | io->op.error = -EINTR; | ||
76 | } | ||
73 | 77 | ||
74 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); | 78 | bch_bbio_endio(io->op.c, bio, error, "reading data to move"); |
75 | } | 79 | } |
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c) | |||
141 | if (!w) | 145 | if (!w) |
142 | break; | 146 | break; |
143 | 147 | ||
148 | if (ptr_stale(c, &w->key, 0)) { | ||
149 | bch_keybuf_del(&c->moving_gc_keys, w); | ||
150 | continue; | ||
151 | } | ||
152 | |||
144 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) | 153 | io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) |
145 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), | 154 | * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), |
146 | GFP_KERNEL); | 155 | GFP_KERNEL); |
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) | |||
184 | 193 | ||
185 | static unsigned bucket_heap_top(struct cache *ca) | 194 | static unsigned bucket_heap_top(struct cache *ca) |
186 | { | 195 | { |
187 | return GC_SECTORS_USED(heap_peek(&ca->heap)); | 196 | struct bucket *b; |
197 | return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; | ||
188 | } | 198 | } |
189 | 199 | ||
190 | void bch_moving_gc(struct cache_set *c) | 200 | void bch_moving_gc(struct cache_set *c) |
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c) | |||
226 | sectors_to_move -= GC_SECTORS_USED(b); | 236 | sectors_to_move -= GC_SECTORS_USED(b); |
227 | } | 237 | } |
228 | 238 | ||
229 | ca->gc_move_threshold = bucket_heap_top(ca); | 239 | while (heap_pop(&ca->heap, b, bucket_cmp)) |
230 | 240 | SET_GC_MOVE(b, 1); | |
231 | pr_debug("threshold %u", ca->gc_move_threshold); | ||
232 | } | 241 | } |
233 | 242 | ||
234 | mutex_unlock(&c->bucket_lock); | 243 | mutex_unlock(&c->bucket_lock); |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dec15cd2d797..c57bfa071a57 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -1676,7 +1676,7 @@ err: | |||
1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) | 1676 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) |
1677 | { | 1677 | { |
1678 | return ca->sb.block_size == c->sb.block_size && | 1678 | return ca->sb.block_size == c->sb.block_size && |
1679 | ca->sb.bucket_size == c->sb.block_size && | 1679 | ca->sb.bucket_size == c->sb.bucket_size && |
1680 | ca->sb.nr_in_set == c->sb.nr_in_set; | 1680 | ca->sb.nr_in_set == c->sb.nr_in_set; |
1681 | } | 1681 | } |
1682 | 1682 | ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 80d4c2bee18a..a1f85612f0b3 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate); | |||
83 | rw_attribute(writeback_rate_update_seconds); | 83 | rw_attribute(writeback_rate_update_seconds); |
84 | rw_attribute(writeback_rate_d_term); | 84 | rw_attribute(writeback_rate_d_term); |
85 | rw_attribute(writeback_rate_p_term_inverse); | 85 | rw_attribute(writeback_rate_p_term_inverse); |
86 | rw_attribute(writeback_rate_d_smooth); | ||
87 | read_attribute(writeback_rate_debug); | 86 | read_attribute(writeback_rate_debug); |
88 | 87 | ||
89 | read_attribute(stripe_size); | 88 | read_attribute(stripe_size); |
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev) | |||
129 | var_printf(writeback_running, "%i"); | 128 | var_printf(writeback_running, "%i"); |
130 | var_print(writeback_delay); | 129 | var_print(writeback_delay); |
131 | var_print(writeback_percent); | 130 | var_print(writeback_percent); |
132 | sysfs_print(writeback_rate, dc->writeback_rate.rate); | 131 | sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); |
133 | 132 | ||
134 | var_print(writeback_rate_update_seconds); | 133 | var_print(writeback_rate_update_seconds); |
135 | var_print(writeback_rate_d_term); | 134 | var_print(writeback_rate_d_term); |
136 | var_print(writeback_rate_p_term_inverse); | 135 | var_print(writeback_rate_p_term_inverse); |
137 | var_print(writeback_rate_d_smooth); | ||
138 | 136 | ||
139 | if (attr == &sysfs_writeback_rate_debug) { | 137 | if (attr == &sysfs_writeback_rate_debug) { |
138 | char rate[20]; | ||
140 | char dirty[20]; | 139 | char dirty[20]; |
141 | char derivative[20]; | ||
142 | char target[20]; | 140 | char target[20]; |
143 | bch_hprint(dirty, | 141 | char proportional[20]; |
144 | bcache_dev_sectors_dirty(&dc->disk) << 9); | 142 | char derivative[20]; |
145 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | 143 | char change[20]; |
144 | s64 next_io; | ||
145 | |||
146 | bch_hprint(rate, dc->writeback_rate.rate << 9); | ||
147 | bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); | ||
146 | bch_hprint(target, dc->writeback_rate_target << 9); | 148 | bch_hprint(target, dc->writeback_rate_target << 9); |
149 | bch_hprint(proportional,dc->writeback_rate_proportional << 9); | ||
150 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | ||
151 | bch_hprint(change, dc->writeback_rate_change << 9); | ||
152 | |||
153 | next_io = div64_s64(dc->writeback_rate.next - local_clock(), | ||
154 | NSEC_PER_MSEC); | ||
147 | 155 | ||
148 | return sprintf(buf, | 156 | return sprintf(buf, |
149 | "rate:\t\t%u\n" | 157 | "rate:\t\t%s/sec\n" |
150 | "change:\t\t%i\n" | ||
151 | "dirty:\t\t%s\n" | 158 | "dirty:\t\t%s\n" |
159 | "target:\t\t%s\n" | ||
160 | "proportional:\t%s\n" | ||
152 | "derivative:\t%s\n" | 161 | "derivative:\t%s\n" |
153 | "target:\t\t%s\n", | 162 | "change:\t\t%s/sec\n" |
154 | dc->writeback_rate.rate, | 163 | "next io:\t%llims\n", |
155 | dc->writeback_rate_change, | 164 | rate, dirty, target, proportional, |
156 | dirty, derivative, target); | 165 | derivative, change, next_io); |
157 | } | 166 | } |
158 | 167 | ||
159 | sysfs_hprint(dirty_data, | 168 | sysfs_hprint(dirty_data, |
@@ -189,6 +198,7 @@ STORE(__cached_dev) | |||
189 | struct kobj_uevent_env *env; | 198 | struct kobj_uevent_env *env; |
190 | 199 | ||
191 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) | 200 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) |
201 | #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) | ||
192 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) | 202 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) |
193 | 203 | ||
194 | sysfs_strtoul(data_csum, dc->disk.data_csum); | 204 | sysfs_strtoul(data_csum, dc->disk.data_csum); |
@@ -197,16 +207,15 @@ STORE(__cached_dev) | |||
197 | d_strtoul(writeback_metadata); | 207 | d_strtoul(writeback_metadata); |
198 | d_strtoul(writeback_running); | 208 | d_strtoul(writeback_running); |
199 | d_strtoul(writeback_delay); | 209 | d_strtoul(writeback_delay); |
200 | sysfs_strtoul_clamp(writeback_rate, | 210 | |
201 | dc->writeback_rate.rate, 1, 1000000); | ||
202 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); | 211 | sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); |
203 | 212 | ||
204 | d_strtoul(writeback_rate_update_seconds); | 213 | sysfs_strtoul_clamp(writeback_rate, |
214 | dc->writeback_rate.rate, 1, INT_MAX); | ||
215 | |||
216 | d_strtoul_nonzero(writeback_rate_update_seconds); | ||
205 | d_strtoul(writeback_rate_d_term); | 217 | d_strtoul(writeback_rate_d_term); |
206 | d_strtoul(writeback_rate_p_term_inverse); | 218 | d_strtoul_nonzero(writeback_rate_p_term_inverse); |
207 | sysfs_strtoul_clamp(writeback_rate_p_term_inverse, | ||
208 | dc->writeback_rate_p_term_inverse, 1, INT_MAX); | ||
209 | d_strtoul(writeback_rate_d_smooth); | ||
210 | 219 | ||
211 | d_strtoi_h(sequential_cutoff); | 220 | d_strtoi_h(sequential_cutoff); |
212 | d_strtoi_h(readahead); | 221 | d_strtoi_h(readahead); |
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = { | |||
313 | &sysfs_writeback_rate_update_seconds, | 322 | &sysfs_writeback_rate_update_seconds, |
314 | &sysfs_writeback_rate_d_term, | 323 | &sysfs_writeback_rate_d_term, |
315 | &sysfs_writeback_rate_p_term_inverse, | 324 | &sysfs_writeback_rate_p_term_inverse, |
316 | &sysfs_writeback_rate_d_smooth, | ||
317 | &sysfs_writeback_rate_debug, | 325 | &sysfs_writeback_rate_debug, |
318 | &sysfs_dirty_data, | 326 | &sysfs_dirty_data, |
319 | &sysfs_stripe_size, | 327 | &sysfs_stripe_size, |
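The sysfs changes drop the writeback_rate_d_smooth knob, report the writeback rate and the PD-controller terms in human-readable units, and clamp the remaining tunables so a zero update interval or p_term_inverse can never be stored (either would feed a division by zero into the controller). Below is a rough userspace sketch of that clamp-on-store behaviour; strtoul_clamp() is a stand-in for the sysfs_strtoul_clamp()/d_strtoul_nonzero() macros, not the bcache code.

/* Toy model of clamp-on-store for a sysfs tunable. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int strtoul_clamp(const char *buf, unsigned *var,
                         unsigned min, unsigned max)
{
        char *end;
        unsigned long v = strtoul(buf, &end, 10);

        if (end == buf)
                return -1;           /* not a number: reject, keep old value */
        if (v < min)
                v = min;
        if (v > max)
                v = max;
        *var = v;
        return 0;
}

int main(void)
{
        unsigned p_term_inverse = 6000;

        /* "0" would divide by zero in the PD controller, so it is
         * clamped up to 1, matching d_strtoul_nonzero(). */
        strtoul_clamp("0", &p_term_inverse, 1, INT_MAX);
        printf("p_term_inverse = %u\n", p_term_inverse);     /* 1 */
        return 0;
}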
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..bb37618e7664 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c | |||
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) | |||
209 | { | 209 | { |
210 | uint64_t now = local_clock(); | 210 | uint64_t now = local_clock(); |
211 | 211 | ||
212 | d->next += div_u64(done, d->rate); | 212 | d->next += div_u64(done * NSEC_PER_SEC, d->rate); |
213 | |||
214 | if (time_before64(now + NSEC_PER_SEC, d->next)) | ||
215 | d->next = now + NSEC_PER_SEC; | ||
216 | |||
217 | if (time_after64(now - NSEC_PER_SEC * 2, d->next)) | ||
218 | d->next = now - NSEC_PER_SEC * 2; | ||
213 | 219 | ||
214 | return time_after64(d->next, now) | 220 | return time_after64(d->next, now) |
215 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) | 221 | ? div_u64(d->next - now, NSEC_PER_SEC / HZ) |
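bch_next_delay() now treats the rate as units per second (the caller passes raw sectors instead of pre-scaling them) and keeps d->next within roughly one second ahead of and two seconds behind the clock, so an idle interval or a sudden burst cannot wind the limiter arbitrarily far off. A standalone model of that behaviour, with simplified names and a plain nanosecond return value rather than jiffies:

/* Userspace model of the reworked ratelimiter; not the bcache code. */
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
        uint64_t next;       /* ns timestamp the next unit of work is due */
        unsigned rate;       /* units (sectors) per second */
};

/* How many ns the caller should wait before doing "done" more units;
 * 0 means it is already behind schedule. */
static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
        d->next += done * NSEC_PER_SEC / d->rate;

        if (d->next > now + NSEC_PER_SEC)              /* never run >1s ahead */
                d->next = now + NSEC_PER_SEC;
        if (now >= 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
                d->next = now - 2 * NSEC_PER_SEC;      /* never lag >2s behind */

        return d->next > now ? d->next - now : 0;
}

int main(void)
{
        struct ratelimit d = { .next = 10 * NSEC_PER_SEC, .rate = 1024 };

        /* 4096 sectors at 1024 sectors/s would schedule 4s ahead,
         * but the window caps the delay at one second. */
        printf("delay: %" PRIu64 " ns\n",
               next_delay(&d, 4096, 10 * NSEC_PER_SEC));
        return 0;
}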
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 362c4b3f8b4a..1030c6020e98 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
@@ -110,7 +110,7 @@ do { \ | |||
110 | _r; \ | 110 | _r; \ |
111 | }) | 111 | }) |
112 | 112 | ||
113 | #define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) | 113 | #define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) |
114 | 114 | ||
115 | #define heap_full(h) ((h)->used == (h)->size) | 115 | #define heap_full(h) ((h)->used == (h)->size) |
116 | 116 | ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..6c44fe059c27 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc) | |||
30 | 30 | ||
31 | /* PD controller */ | 31 | /* PD controller */ |
32 | 32 | ||
33 | int change = 0; | ||
34 | int64_t error; | ||
35 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); | 33 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); |
36 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; | 34 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; |
35 | int64_t proportional = dirty - target; | ||
36 | int64_t change; | ||
37 | 37 | ||
38 | dc->disk.sectors_dirty_last = dirty; | 38 | dc->disk.sectors_dirty_last = dirty; |
39 | 39 | ||
40 | derivative *= dc->writeback_rate_d_term; | 40 | /* Scale to sectors per second */ |
41 | derivative = clamp(derivative, -dirty, dirty); | ||
42 | 41 | ||
43 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, | 42 | proportional *= dc->writeback_rate_update_seconds; |
44 | dc->writeback_rate_d_smooth, 0); | 43 | proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); |
45 | 44 | ||
46 | /* Avoid divide by zero */ | 45 | derivative = div_s64(derivative, dc->writeback_rate_update_seconds); |
47 | if (!target) | ||
48 | goto out; | ||
49 | 46 | ||
50 | error = div64_s64((dirty + derivative - target) << 8, target); | 47 | derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, |
48 | (dc->writeback_rate_d_term / | ||
49 | dc->writeback_rate_update_seconds) ?: 1, 0); | ||
50 | |||
51 | derivative *= dc->writeback_rate_d_term; | ||
52 | derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); | ||
51 | 53 | ||
52 | change = div_s64((dc->writeback_rate.rate * error) >> 8, | 54 | change = proportional + derivative; |
53 | dc->writeback_rate_p_term_inverse); | ||
54 | 55 | ||
55 | /* Don't increase writeback rate if the device isn't keeping up */ | 56 | /* Don't increase writeback rate if the device isn't keeping up */ |
56 | if (change > 0 && | 57 | if (change > 0 && |
57 | time_after64(local_clock(), | 58 | time_after64(local_clock(), |
58 | dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) | 59 | dc->writeback_rate.next + NSEC_PER_MSEC)) |
59 | change = 0; | 60 | change = 0; |
60 | 61 | ||
61 | dc->writeback_rate.rate = | 62 | dc->writeback_rate.rate = |
62 | clamp_t(int64_t, dc->writeback_rate.rate + change, | 63 | clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, |
63 | 1, NSEC_PER_MSEC); | 64 | 1, NSEC_PER_MSEC); |
64 | out: | 65 | |
66 | dc->writeback_rate_proportional = proportional; | ||
65 | dc->writeback_rate_derivative = derivative; | 67 | dc->writeback_rate_derivative = derivative; |
66 | dc->writeback_rate_change = change; | 68 | dc->writeback_rate_change = change; |
67 | dc->writeback_rate_target = target; | 69 | dc->writeback_rate_target = target; |
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work) | |||
87 | 89 | ||
88 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) | 90 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) |
89 | { | 91 | { |
90 | uint64_t ret; | ||
91 | |||
92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || | 92 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || |
93 | !dc->writeback_percent) | 93 | !dc->writeback_percent) |
94 | return 0; | 94 | return 0; |
95 | 95 | ||
96 | ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); | 96 | return bch_next_delay(&dc->writeback_rate, sectors); |
97 | |||
98 | return min_t(uint64_t, ret, HZ); | ||
99 | } | 97 | } |
100 | 98 | ||
101 | struct dirty_io { | 99 | struct dirty_io { |
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc) | |||
241 | if (KEY_START(&w->key) != dc->last_read || | 239 | if (KEY_START(&w->key) != dc->last_read || |
242 | jiffies_to_msecs(delay) > 50) | 240 | jiffies_to_msecs(delay) > 50) |
243 | while (!kthread_should_stop() && delay) | 241 | while (!kthread_should_stop() && delay) |
244 | delay = schedule_timeout_interruptible(delay); | 242 | delay = schedule_timeout_uninterruptible(delay); |
245 | 243 | ||
246 | dc->last_read = KEY_OFFSET(&w->key); | 244 | dc->last_read = KEY_OFFSET(&w->key); |
247 | 245 | ||
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg) | |||
438 | while (delay && | 436 | while (delay && |
439 | !kthread_should_stop() && | 437 | !kthread_should_stop() && |
440 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) | 438 | !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
441 | delay = schedule_timeout_interruptible(delay); | 439 | delay = schedule_timeout_uninterruptible(delay); |
442 | } | 440 | } |
443 | } | 441 | } |
444 | 442 | ||
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc) | |||
476 | 474 | ||
477 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), | 475 | bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), |
478 | sectors_dirty_init_fn, 0); | 476 | sectors_dirty_init_fn, 0); |
477 | |||
478 | dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); | ||
479 | } | 479 | } |
480 | 480 | ||
481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) | 481 | int bch_cached_dev_writeback_init(struct cached_dev *dc) |
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) | |||
490 | dc->writeback_delay = 30; | 490 | dc->writeback_delay = 30; |
491 | dc->writeback_rate.rate = 1024; | 491 | dc->writeback_rate.rate = 1024; |
492 | 492 | ||
493 | dc->writeback_rate_update_seconds = 30; | 493 | dc->writeback_rate_update_seconds = 5; |
494 | dc->writeback_rate_d_term = 16; | 494 | dc->writeback_rate_d_term = 30; |
495 | dc->writeback_rate_p_term_inverse = 64; | 495 | dc->writeback_rate_p_term_inverse = 6000; |
496 | dc->writeback_rate_d_smooth = 8; | ||
497 | 496 | ||
498 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, | 497 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, |
499 | "bcache_writeback"); | 498 | "bcache_writeback"); |
500 | if (IS_ERR(dc->writeback_thread)) | 499 | if (IS_ERR(dc->writeback_thread)) |
501 | return PTR_ERR(dc->writeback_thread); | 500 | return PTR_ERR(dc->writeback_thread); |
502 | 501 | ||
503 | set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); | ||
504 | |||
505 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); | 502 | INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); |
506 | schedule_delayed_work(&dc->writeback_rate_update, | 503 | schedule_delayed_work(&dc->writeback_rate_update, |
507 | dc->writeback_rate_update_seconds * HZ); | 504 | dc->writeback_rate_update_seconds * HZ); |
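The writeback hunks replace the old error/d_smooth arithmetic with an explicit proportional term plus a smoothed derivative term, both expressed in sectors per second, and retune the defaults (5 s update interval, d_term 30, p_term_inverse 6000). The sketch below models the shape of that calculation in plain C; the EWMA and the clamping are simplified relative to the driver, and the constants are simply the new defaults copied from the diff.

/* Standalone model of the reworked PD controller; not the driver code. */
#include <stdint.h>
#include <stdio.h>

#define UPDATE_SECONDS   5         /* new writeback_rate_update_seconds */
#define P_TERM_INVERSE   6000      /* new writeback_rate_p_term_inverse */
#define D_TERM           30        /* new writeback_rate_d_term */

static int64_t rate = 1024;        /* sectors per second */
static int64_t dirty_last;
static int64_t derivative_avg;

static void update_writeback_rate(int64_t dirty, int64_t target)
{
        int64_t proportional = dirty - target;
        int64_t derivative = (dirty - dirty_last) / UPDATE_SECONDS;
        int64_t change;

        dirty_last = dirty;

        proportional = proportional * UPDATE_SECONDS / P_TERM_INVERSE;

        /* crude stand-in for ewma_add() */
        derivative_avg += (derivative - derivative_avg) /
                          (D_TERM / UPDATE_SECONDS);
        derivative = derivative_avg * D_TERM / P_TERM_INVERSE;

        change = proportional + derivative;

        rate += change;
        if (rate < 1)              /* same bounds as the driver's clamp_t() */
                rate = 1;
        if (rate > 1000000)
                rate = 1000000;
}

int main(void)
{
        /* 512 MiB dirty against a 256 MiB target, in 512-byte sectors */
        update_writeback_rate(1 << 20, 1 << 19);
        printf("rate = %lld sectors/s\n", (long long)rate);
        return 0;
}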
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb20d104..54bdd923316f 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void) | |||
1717 | { | 1717 | { |
1718 | __u64 mem; | 1718 | __u64 mem; |
1719 | 1719 | ||
1720 | dm_bufio_allocated_kmem_cache = 0; | ||
1721 | dm_bufio_allocated_get_free_pages = 0; | ||
1722 | dm_bufio_allocated_vmalloc = 0; | ||
1723 | dm_bufio_current_allocated = 0; | ||
1724 | |||
1720 | memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); | 1725 | memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); |
1721 | memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); | 1726 | memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); |
1722 | 1727 | ||
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 416b7b752a6e..64780ad73bb0 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, | |||
730 | int r = 0; | 730 | int r = 0; |
731 | bool updated = updated_this_tick(mq, e); | 731 | bool updated = updated_this_tick(mq, e); |
732 | 732 | ||
733 | requeue_and_update_tick(mq, e); | ||
734 | |||
735 | if ((!discarded_oblock && updated) || | 733 | if ((!discarded_oblock && updated) || |
736 | !should_promote(mq, e, discarded_oblock, data_dir)) | 734 | !should_promote(mq, e, discarded_oblock, data_dir)) { |
735 | requeue_and_update_tick(mq, e); | ||
737 | result->op = POLICY_MISS; | 736 | result->op = POLICY_MISS; |
738 | else if (!can_migrate) | 737 | |
738 | } else if (!can_migrate) | ||
739 | r = -EWOULDBLOCK; | 739 | r = -EWOULDBLOCK; |
740 | else | 740 | |
741 | else { | ||
742 | requeue_and_update_tick(mq, e); | ||
741 | r = pre_cache_to_cache(mq, e, result); | 743 | r = pre_cache_to_cache(mq, e, result); |
744 | } | ||
742 | 745 | ||
743 | return r; | 746 | return r; |
744 | } | 747 | } |
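The mq-policy hunk moves requeue_and_update_tick() out of the common path: the entry's queue position and tick are only updated when the lookup actually resolves to a miss or a promotion, not when the caller is told to retry with -EWOULDBLOCK. A simplified standalone version of that control flow follows; the promote/migrate decision is collapsed into two booleans, which the real policy derives from the entry state.

/* Sketch of the reordered decision; names are simplified stand-ins. */
#include <errno.h>
#include <stdio.h>

enum op { POLICY_MISS, POLICY_NEW };

static int requeued;

static void requeue_and_update_tick(void) { requeued++; }

static int pre_cache_entry_found(int should_promote, int can_migrate,
                                 enum op *result)
{
        if (!should_promote) {
                requeue_and_update_tick();
                *result = POLICY_MISS;
                return 0;
        }

        if (!can_migrate)
                return -EWOULDBLOCK;     /* no requeue: nothing was decided */

        requeue_and_update_tick();
        *result = POLICY_NEW;            /* promote pre-cache -> cache */
        return 0;
}

int main(void)
{
        enum op result;

        printf("busy:    %d, requeued %d times\n",
               pre_cache_entry_found(1, 0, &result), requeued);
        printf("promote: %d, requeued %d times\n",
               pre_cache_entry_found(1, 1, &result), requeued);
        return 0;
}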
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9efcf1059b99..1b1469ebe5cb 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) | |||
2755 | { | 2755 | { |
2756 | int r; | 2756 | int r; |
2757 | 2757 | ||
2758 | r = dm_cache_resize(cache->cmd, cache->cache_size); | 2758 | r = dm_cache_resize(cache->cmd, new_size); |
2759 | if (r) { | 2759 | if (r) { |
2760 | DMERR("could not resize cache metadata"); | 2760 | DMERR("could not resize cache metadata"); |
2761 | return r; | 2761 | return r; |
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3646a5..2f91d6d4a2cc 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -20,6 +20,7 @@ | |||
20 | struct delay_c { | 20 | struct delay_c { |
21 | struct timer_list delay_timer; | 21 | struct timer_list delay_timer; |
22 | struct mutex timer_lock; | 22 | struct mutex timer_lock; |
23 | struct workqueue_struct *kdelayd_wq; | ||
23 | struct work_struct flush_expired_bios; | 24 | struct work_struct flush_expired_bios; |
24 | struct list_head delayed_bios; | 25 | struct list_head delayed_bios; |
25 | atomic_t may_delay; | 26 | atomic_t may_delay; |
@@ -45,14 +46,13 @@ struct dm_delay_info { | |||
45 | 46 | ||
46 | static DEFINE_MUTEX(delayed_bios_lock); | 47 | static DEFINE_MUTEX(delayed_bios_lock); |
47 | 48 | ||
48 | static struct workqueue_struct *kdelayd_wq; | ||
49 | static struct kmem_cache *delayed_cache; | 49 | static struct kmem_cache *delayed_cache; |
50 | 50 | ||
51 | static void handle_delayed_timer(unsigned long data) | 51 | static void handle_delayed_timer(unsigned long data) |
52 | { | 52 | { |
53 | struct delay_c *dc = (struct delay_c *)data; | 53 | struct delay_c *dc = (struct delay_c *)data; |
54 | 54 | ||
55 | queue_work(kdelayd_wq, &dc->flush_expired_bios); | 55 | queue_work(dc->kdelayd_wq, &dc->flush_expired_bios); |
56 | } | 56 | } |
57 | 57 | ||
58 | static void queue_timeout(struct delay_c *dc, unsigned long expires) | 58 | static void queue_timeout(struct delay_c *dc, unsigned long expires) |
@@ -191,6 +191,12 @@ out: | |||
191 | goto bad_dev_write; | 191 | goto bad_dev_write; |
192 | } | 192 | } |
193 | 193 | ||
194 | dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); | ||
195 | if (!dc->kdelayd_wq) { | ||
196 | DMERR("Couldn't start kdelayd"); | ||
197 | goto bad_queue; | ||
198 | } | ||
199 | |||
194 | setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); | 200 | setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); |
195 | 201 | ||
196 | INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); | 202 | INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); |
@@ -203,6 +209,8 @@ out: | |||
203 | ti->private = dc; | 209 | ti->private = dc; |
204 | return 0; | 210 | return 0; |
205 | 211 | ||
212 | bad_queue: | ||
213 | mempool_destroy(dc->delayed_pool); | ||
206 | bad_dev_write: | 214 | bad_dev_write: |
207 | if (dc->dev_write) | 215 | if (dc->dev_write) |
208 | dm_put_device(ti, dc->dev_write); | 216 | dm_put_device(ti, dc->dev_write); |
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti) | |||
217 | { | 225 | { |
218 | struct delay_c *dc = ti->private; | 226 | struct delay_c *dc = ti->private; |
219 | 227 | ||
220 | flush_workqueue(kdelayd_wq); | 228 | destroy_workqueue(dc->kdelayd_wq); |
221 | 229 | ||
222 | dm_put_device(ti, dc->dev_read); | 230 | dm_put_device(ti, dc->dev_read); |
223 | 231 | ||
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void) | |||
350 | { | 358 | { |
351 | int r = -ENOMEM; | 359 | int r = -ENOMEM; |
352 | 360 | ||
353 | kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); | ||
354 | if (!kdelayd_wq) { | ||
355 | DMERR("Couldn't start kdelayd"); | ||
356 | goto bad_queue; | ||
357 | } | ||
358 | |||
359 | delayed_cache = KMEM_CACHE(dm_delay_info, 0); | 361 | delayed_cache = KMEM_CACHE(dm_delay_info, 0); |
360 | if (!delayed_cache) { | 362 | if (!delayed_cache) { |
361 | DMERR("Couldn't create delayed bio cache."); | 363 | DMERR("Couldn't create delayed bio cache."); |
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void) | |||
373 | bad_register: | 375 | bad_register: |
374 | kmem_cache_destroy(delayed_cache); | 376 | kmem_cache_destroy(delayed_cache); |
375 | bad_memcache: | 377 | bad_memcache: |
376 | destroy_workqueue(kdelayd_wq); | ||
377 | bad_queue: | ||
378 | return r; | 378 | return r; |
379 | } | 379 | } |
380 | 380 | ||
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void) | |||
382 | { | 382 | { |
383 | dm_unregister_target(&delay_target); | 383 | dm_unregister_target(&delay_target); |
384 | kmem_cache_destroy(delayed_cache); | 384 | kmem_cache_destroy(delayed_cache); |
385 | destroy_workqueue(kdelayd_wq); | ||
386 | } | 385 | } |
387 | 386 | ||
388 | /* Module hooks */ | 387 | /* Module hooks */ |
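dm-delay stops sharing one module-global kdelayd workqueue between all targets; each target now allocates its own queue in the constructor and destroys it in the destructor, with a matching unwind label on the error path. A plain-C sketch of that per-instance ownership, using toy stand-ins for the workqueue and target types:

/* Per-instance resource ownership, modelled in userspace. */
#include <stdio.h>
#include <stdlib.h>

struct workqueue { const char *name; };

static struct workqueue *alloc_workqueue(const char *name)
{
        struct workqueue *wq = malloc(sizeof(*wq));

        if (wq)
                wq->name = name;
        return wq;
}

static void destroy_workqueue(struct workqueue *wq) { free(wq); }

struct delay_c {
        struct workqueue *kdelayd_wq;    /* per-instance, not module-global */
};

static int delay_ctr(struct delay_c *dc)
{
        dc->kdelayd_wq = alloc_workqueue("kdelayd");
        if (!dc->kdelayd_wq) {
                fprintf(stderr, "Couldn't start kdelayd\n");
                return -1;   /* the real ctr unwinds earlier allocations here */
        }
        return 0;
}

static void delay_dtr(struct delay_c *dc)
{
        destroy_workqueue(dc->kdelayd_wq);
}

int main(void)
{
        struct delay_c a, b;

        /* two targets, two independent queues */
        if (!delay_ctr(&a) && !delay_ctr(&b)) {
                printf("a: %s, b: %s\n", a.kdelayd_wq->name, b.kdelayd_wq->name);
                delay_dtr(&a);
                delay_dtr(&b);
        }
        return 0;
}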
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index aec57d76db5d..944690bafd93 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -66,6 +66,18 @@ struct dm_snapshot { | |||
66 | 66 | ||
67 | atomic_t pending_exceptions_count; | 67 | atomic_t pending_exceptions_count; |
68 | 68 | ||
69 | /* Protected by "lock" */ | ||
70 | sector_t exception_start_sequence; | ||
71 | |||
72 | /* Protected by kcopyd single-threaded callback */ | ||
73 | sector_t exception_complete_sequence; | ||
74 | |||
75 | /* | ||
76 | * A list of pending exceptions that completed out of order. | ||
77 | * Protected by kcopyd single-threaded callback. | ||
78 | */ | ||
79 | struct list_head out_of_order_list; | ||
80 | |||
69 | mempool_t *pending_pool; | 81 | mempool_t *pending_pool; |
70 | 82 | ||
71 | struct dm_exception_table pending; | 83 | struct dm_exception_table pending; |
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception { | |||
173 | */ | 185 | */ |
174 | int started; | 186 | int started; |
175 | 187 | ||
188 | /* There was copying error. */ | ||
189 | int copy_error; | ||
190 | |||
191 | /* A sequence number, it is used for in-order completion. */ | ||
192 | sector_t exception_sequence; | ||
193 | |||
194 | struct list_head out_of_order_entry; | ||
195 | |||
176 | /* | 196 | /* |
177 | * For writing a complete chunk, bypassing the copy. | 197 | * For writing a complete chunk, bypassing the copy. |
178 | */ | 198 | */ |
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1094 | s->valid = 1; | 1114 | s->valid = 1; |
1095 | s->active = 0; | 1115 | s->active = 0; |
1096 | atomic_set(&s->pending_exceptions_count, 0); | 1116 | atomic_set(&s->pending_exceptions_count, 0); |
1117 | s->exception_start_sequence = 0; | ||
1118 | s->exception_complete_sequence = 0; | ||
1119 | INIT_LIST_HEAD(&s->out_of_order_list); | ||
1097 | init_rwsem(&s->lock); | 1120 | init_rwsem(&s->lock); |
1098 | INIT_LIST_HEAD(&s->list); | 1121 | INIT_LIST_HEAD(&s->list); |
1099 | spin_lock_init(&s->pe_lock); | 1122 | spin_lock_init(&s->pe_lock); |
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success) | |||
1443 | pending_complete(pe, success); | 1466 | pending_complete(pe, success); |
1444 | } | 1467 | } |
1445 | 1468 | ||
1469 | static void complete_exception(struct dm_snap_pending_exception *pe) | ||
1470 | { | ||
1471 | struct dm_snapshot *s = pe->snap; | ||
1472 | |||
1473 | if (unlikely(pe->copy_error)) | ||
1474 | pending_complete(pe, 0); | ||
1475 | |||
1476 | else | ||
1477 | /* Update the metadata if we are persistent */ | ||
1478 | s->store->type->commit_exception(s->store, &pe->e, | ||
1479 | commit_callback, pe); | ||
1480 | } | ||
1481 | |||
1446 | /* | 1482 | /* |
1447 | * Called when the copy I/O has finished. kcopyd actually runs | 1483 | * Called when the copy I/O has finished. kcopyd actually runs |
1448 | * this code so don't block. | 1484 | * this code so don't block. |
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) | |||
1452 | struct dm_snap_pending_exception *pe = context; | 1488 | struct dm_snap_pending_exception *pe = context; |
1453 | struct dm_snapshot *s = pe->snap; | 1489 | struct dm_snapshot *s = pe->snap; |
1454 | 1490 | ||
1455 | if (read_err || write_err) | 1491 | pe->copy_error = read_err || write_err; |
1456 | pending_complete(pe, 0); | ||
1457 | 1492 | ||
1458 | else | 1493 | if (pe->exception_sequence == s->exception_complete_sequence) { |
1459 | /* Update the metadata if we are persistent */ | 1494 | s->exception_complete_sequence++; |
1460 | s->store->type->commit_exception(s->store, &pe->e, | 1495 | complete_exception(pe); |
1461 | commit_callback, pe); | 1496 | |
1497 | while (!list_empty(&s->out_of_order_list)) { | ||
1498 | pe = list_entry(s->out_of_order_list.next, | ||
1499 | struct dm_snap_pending_exception, out_of_order_entry); | ||
1500 | if (pe->exception_sequence != s->exception_complete_sequence) | ||
1501 | break; | ||
1502 | s->exception_complete_sequence++; | ||
1503 | list_del(&pe->out_of_order_entry); | ||
1504 | complete_exception(pe); | ||
1505 | } | ||
1506 | } else { | ||
1507 | struct list_head *lh; | ||
1508 | struct dm_snap_pending_exception *pe2; | ||
1509 | |||
1510 | list_for_each_prev(lh, &s->out_of_order_list) { | ||
1511 | pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry); | ||
1512 | if (pe2->exception_sequence < pe->exception_sequence) | ||
1513 | break; | ||
1514 | } | ||
1515 | list_add(&pe->out_of_order_entry, lh); | ||
1516 | } | ||
1462 | } | 1517 | } |
1463 | 1518 | ||
1464 | /* | 1519 | /* |
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s, | |||
1553 | return NULL; | 1608 | return NULL; |
1554 | } | 1609 | } |
1555 | 1610 | ||
1611 | pe->exception_sequence = s->exception_start_sequence++; | ||
1612 | |||
1556 | dm_insert_exception(&s->pending, &pe->e); | 1613 | dm_insert_exception(&s->pending, &pe->e); |
1557 | 1614 | ||
1558 | return pe; | 1615 | return pe; |
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = { | |||
2192 | 2249 | ||
2193 | static struct target_type snapshot_target = { | 2250 | static struct target_type snapshot_target = { |
2194 | .name = "snapshot", | 2251 | .name = "snapshot", |
2195 | .version = {1, 11, 1}, | 2252 | .version = {1, 12, 0}, |
2196 | .module = THIS_MODULE, | 2253 | .module = THIS_MODULE, |
2197 | .ctr = snapshot_ctr, | 2254 | .ctr = snapshot_ctr, |
2198 | .dtr = snapshot_dtr, | 2255 | .dtr = snapshot_dtr, |
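The snapshot changes serialise exception completion: each pending exception is numbered when it is created, kcopyd may finish the copies in any order, and completions that arrive early are parked on a sorted out_of_order_list until their predecessors have completed (hence the target version bump to 1.12.0). Below is a standalone model of that scheme; the list handling and names are simplified relative to the driver.

/* Userspace model of in-order completion with out-of-order arrivals. */
#include <stdio.h>
#include <stdlib.h>

struct pending {
        unsigned long seq;
        struct pending *next;          /* sorted out-of-order list */
};

static unsigned long complete_seq;     /* next sequence we may complete */
static struct pending *out_of_order;   /* ascending by seq */

static void complete_exception(struct pending *pe)
{
        printf("completed %lu\n", pe->seq);
        free(pe);
}

/* Called once per finished copy, in whatever order the copies finish. */
static void copy_callback(struct pending *pe)
{
        if (pe->seq == complete_seq) {
                complete_seq++;
                complete_exception(pe);

                /* drain any parked completions that are now in order */
                while (out_of_order && out_of_order->seq == complete_seq) {
                        struct pending *n = out_of_order;

                        out_of_order = n->next;
                        complete_seq++;
                        complete_exception(n);
                }
        } else {
                /* park it, keeping the list sorted by sequence number */
                struct pending **pp = &out_of_order;

                while (*pp && (*pp)->seq < pe->seq)
                        pp = &(*pp)->next;
                pe->next = *pp;
                *pp = pe;
        }
}

int main(void)
{
        unsigned long order[] = { 2, 0, 3, 1 };    /* completion order */

        for (int i = 0; i < 4; i++) {
                struct pending *pe = malloc(sizeof(*pe));

                pe->seq = order[i];
                pe->next = NULL;
                copy_callback(pe);
        }
        return 0;                                  /* prints 0, 1, 2, 3 */
}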
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 3d404c1371ed..28a90122a5a8 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, | |||
964 | 964 | ||
965 | int __init dm_statistics_init(void) | 965 | int __init dm_statistics_init(void) |
966 | { | 966 | { |
967 | shared_memory_amount = 0; | ||
967 | dm_stat_need_rcu_barrier = 0; | 968 | dm_stat_need_rcu_barrier = 0; |
968 | return 0; | 969 | return 0; |
969 | } | 970 | } |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 465f08ca62b1..3ba6a3859ce3 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode, | |||
200 | 200 | ||
201 | num_targets = dm_round_up(num_targets, KEYS_PER_NODE); | 201 | num_targets = dm_round_up(num_targets, KEYS_PER_NODE); |
202 | 202 | ||
203 | if (!num_targets) { | ||
204 | kfree(t); | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | |||
203 | if (alloc_targets(t, num_targets)) { | 208 | if (alloc_targets(t, num_targets)) { |
204 | kfree(t); | 209 | kfree(t); |
205 | return -ENOMEM; | 210 | return -ENOMEM; |
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 60bce435f4fa..8a30ad54bd46 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) | |||
1697 | up_write(&pmd->root_lock); | 1697 | up_write(&pmd->root_lock); |
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) | ||
1701 | { | ||
1702 | down_write(&pmd->root_lock); | ||
1703 | pmd->read_only = false; | ||
1704 | dm_bm_set_read_write(pmd->bm); | ||
1705 | up_write(&pmd->root_lock); | ||
1706 | } | ||
1707 | |||
1700 | int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, | 1708 | int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, |
1701 | dm_block_t threshold, | 1709 | dm_block_t threshold, |
1702 | dm_sm_threshold_fn fn, | 1710 | dm_sm_threshold_fn fn, |
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 845ebbe589a9..7bcc0e1d6238 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h | |||
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz | |||
193 | * that nothing is changing. | 193 | * that nothing is changing. |
194 | */ | 194 | */ |
195 | void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); | 195 | void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); |
196 | void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd); | ||
196 | 197 | ||
197 | int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, | 198 | int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, |
198 | dm_block_t threshold, | 199 | dm_block_t threshold, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2c0cf511ec23..ee29037ffc2e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
640 | */ | 640 | */ |
641 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); | 641 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); |
642 | if (r) { | 642 | if (r) { |
643 | DMERR_LIMIT("dm_thin_insert_block() failed"); | 643 | DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d", |
644 | dm_device_name(pool->pool_md), r); | ||
645 | set_pool_mode(pool, PM_READ_ONLY); | ||
644 | cell_error(pool, m->cell); | 646 | cell_error(pool, m->cell); |
645 | goto out; | 647 | goto out; |
646 | } | 648 | } |
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, | |||
881 | } | 883 | } |
882 | } | 884 | } |
883 | 885 | ||
884 | static int commit(struct pool *pool) | ||
885 | { | ||
886 | int r; | ||
887 | |||
888 | r = dm_pool_commit_metadata(pool->pmd); | ||
889 | if (r) | ||
890 | DMERR_LIMIT("%s: commit failed: error = %d", | ||
891 | dm_device_name(pool->pool_md), r); | ||
892 | |||
893 | return r; | ||
894 | } | ||
895 | |||
896 | /* | 886 | /* |
897 | * A non-zero return indicates read_only or fail_io mode. | 887 | * A non-zero return indicates read_only or fail_io mode. |
898 | * Many callers don't care about the return value. | 888 | * Many callers don't care about the return value. |
899 | */ | 889 | */ |
900 | static int commit_or_fallback(struct pool *pool) | 890 | static int commit(struct pool *pool) |
901 | { | 891 | { |
902 | int r; | 892 | int r; |
903 | 893 | ||
904 | if (get_pool_mode(pool) != PM_WRITE) | 894 | if (get_pool_mode(pool) != PM_WRITE) |
905 | return -EINVAL; | 895 | return -EINVAL; |
906 | 896 | ||
907 | r = commit(pool); | 897 | r = dm_pool_commit_metadata(pool->pmd); |
908 | if (r) | 898 | if (r) { |
899 | DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d", | ||
900 | dm_device_name(pool->pool_md), r); | ||
909 | set_pool_mode(pool, PM_READ_ONLY); | 901 | set_pool_mode(pool, PM_READ_ONLY); |
902 | } | ||
910 | 903 | ||
911 | return r; | 904 | return r; |
912 | } | 905 | } |
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | |||
943 | * Try to commit to see if that will free up some | 936 | * Try to commit to see if that will free up some |
944 | * more space. | 937 | * more space. |
945 | */ | 938 | */ |
946 | (void) commit_or_fallback(pool); | 939 | r = commit(pool); |
940 | if (r) | ||
941 | return r; | ||
947 | 942 | ||
948 | r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); | 943 | r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); |
949 | if (r) | 944 | if (r) |
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | |||
957 | * table reload). | 952 | * table reload). |
958 | */ | 953 | */ |
959 | if (!free_blocks) { | 954 | if (!free_blocks) { |
960 | DMWARN("%s: no free space available.", | 955 | DMWARN("%s: no free data space available.", |
961 | dm_device_name(pool->pool_md)); | 956 | dm_device_name(pool->pool_md)); |
962 | spin_lock_irqsave(&pool->lock, flags); | 957 | spin_lock_irqsave(&pool->lock, flags); |
963 | pool->no_free_space = 1; | 958 | pool->no_free_space = 1; |
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | |||
967 | } | 962 | } |
968 | 963 | ||
969 | r = dm_pool_alloc_data_block(pool->pmd, result); | 964 | r = dm_pool_alloc_data_block(pool->pmd, result); |
970 | if (r) | 965 | if (r) { |
966 | if (r == -ENOSPC && | ||
967 | !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && | ||
968 | !free_blocks) { | ||
969 | DMWARN("%s: no free metadata space available.", | ||
970 | dm_device_name(pool->pool_md)); | ||
971 | set_pool_mode(pool, PM_READ_ONLY); | ||
972 | } | ||
971 | return r; | 973 | return r; |
974 | } | ||
972 | 975 | ||
973 | return 0; | 976 | return 0; |
974 | } | 977 | } |
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool) | |||
1349 | if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) | 1352 | if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) |
1350 | return; | 1353 | return; |
1351 | 1354 | ||
1352 | if (commit_or_fallback(pool)) { | 1355 | if (commit(pool)) { |
1353 | while ((bio = bio_list_pop(&bios))) | 1356 | while ((bio = bio_list_pop(&bios))) |
1354 | bio_io_error(bio); | 1357 | bio_io_error(bio); |
1355 | return; | 1358 | return; |
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) | |||
1397 | case PM_FAIL: | 1400 | case PM_FAIL: |
1398 | DMERR("%s: switching pool to failure mode", | 1401 | DMERR("%s: switching pool to failure mode", |
1399 | dm_device_name(pool->pool_md)); | 1402 | dm_device_name(pool->pool_md)); |
1403 | dm_pool_metadata_read_only(pool->pmd); | ||
1400 | pool->process_bio = process_bio_fail; | 1404 | pool->process_bio = process_bio_fail; |
1401 | pool->process_discard = process_bio_fail; | 1405 | pool->process_discard = process_bio_fail; |
1402 | pool->process_prepared_mapping = process_prepared_mapping_fail; | 1406 | pool->process_prepared_mapping = process_prepared_mapping_fail; |
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) | |||
1421 | break; | 1425 | break; |
1422 | 1426 | ||
1423 | case PM_WRITE: | 1427 | case PM_WRITE: |
1428 | dm_pool_metadata_read_write(pool->pmd); | ||
1424 | pool->process_bio = process_bio; | 1429 | pool->process_bio = process_bio; |
1425 | pool->process_discard = process_discard; | 1430 | pool->process_discard = process_discard; |
1426 | pool->process_prepared_mapping = process_prepared_mapping; | 1431 | pool->process_prepared_mapping = process_prepared_mapping; |
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) | |||
1637 | struct pool_c *pt = ti->private; | 1642 | struct pool_c *pt = ti->private; |
1638 | 1643 | ||
1639 | /* | 1644 | /* |
1640 | * We want to make sure that degraded pools are never upgraded. | 1645 | * We want to make sure that a pool in PM_FAIL mode is never upgraded. |
1641 | */ | 1646 | */ |
1642 | enum pool_mode old_mode = pool->pf.mode; | 1647 | enum pool_mode old_mode = pool->pf.mode; |
1643 | enum pool_mode new_mode = pt->adjusted_pf.mode; | 1648 | enum pool_mode new_mode = pt->adjusted_pf.mode; |
1644 | 1649 | ||
1645 | if (old_mode > new_mode) | 1650 | /* |
1651 | * If we were in PM_FAIL mode, rollback of metadata failed. We're | ||
1652 | * not going to recover without a thin_repair. So we never let the | ||
1653 | * pool move out of the old mode. On the other hand a PM_READ_ONLY | ||
1654 | * may have been due to a lack of metadata or data space, and may | ||
1655 | * now work (ie. if the underlying devices have been resized). | ||
1656 | */ | ||
1657 | if (old_mode == PM_FAIL) | ||
1646 | new_mode = old_mode; | 1658 | new_mode = old_mode; |
1647 | 1659 | ||
1648 | pool->ti = ti; | 1660 | pool->ti = ti; |
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti) | |||
2266 | return r; | 2278 | return r; |
2267 | 2279 | ||
2268 | if (need_commit1 || need_commit2) | 2280 | if (need_commit1 || need_commit2) |
2269 | (void) commit_or_fallback(pool); | 2281 | (void) commit(pool); |
2270 | 2282 | ||
2271 | return 0; | 2283 | return 0; |
2272 | } | 2284 | } |
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti) | |||
2293 | 2305 | ||
2294 | cancel_delayed_work(&pool->waker); | 2306 | cancel_delayed_work(&pool->waker); |
2295 | flush_workqueue(pool->wq); | 2307 | flush_workqueue(pool->wq); |
2296 | (void) commit_or_fallback(pool); | 2308 | (void) commit(pool); |
2297 | } | 2309 | } |
2298 | 2310 | ||
2299 | static int check_arg_count(unsigned argc, unsigned args_required) | 2311 | static int check_arg_count(unsigned argc, unsigned args_required) |
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct | |||
2427 | if (r) | 2439 | if (r) |
2428 | return r; | 2440 | return r; |
2429 | 2441 | ||
2430 | (void) commit_or_fallback(pool); | 2442 | (void) commit(pool); |
2431 | 2443 | ||
2432 | r = dm_pool_reserve_metadata_snap(pool->pmd); | 2444 | r = dm_pool_reserve_metadata_snap(pool->pmd); |
2433 | if (r) | 2445 | if (r) |
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) | |||
2489 | DMWARN("Unrecognised thin pool target message received: %s", argv[0]); | 2501 | DMWARN("Unrecognised thin pool target message received: %s", argv[0]); |
2490 | 2502 | ||
2491 | if (!r) | 2503 | if (!r) |
2492 | (void) commit_or_fallback(pool); | 2504 | (void) commit(pool); |
2493 | 2505 | ||
2494 | return r; | 2506 | return r; |
2495 | } | 2507 | } |
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type, | |||
2544 | 2556 | ||
2545 | /* Commit to ensure statistics aren't out-of-date */ | 2557 | /* Commit to ensure statistics aren't out-of-date */ |
2546 | if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) | 2558 | if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) |
2547 | (void) commit_or_fallback(pool); | 2559 | (void) commit(pool); |
2548 | 2560 | ||
2549 | r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); | 2561 | r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); |
2550 | if (r) { | 2562 | if (r) { |
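In dm-thin, commit_or_fallback() is folded into a single commit() that refuses to run unless the pool is in write mode and that drops the pool to read-only when the metadata commit fails; the new dm_pool_metadata_read_write()/dm_bm_set_read_write() calls let a pool that was merely short of space come back once the devices are resized, while PM_FAIL remains one-way. A toy model of that state handling, with illustrative names rather than the target's API:

/* Standalone model of commit-or-degrade; not the dm-thin code. */
#include <errno.h>
#include <stdio.h>

enum pool_mode { PM_WRITE, PM_READ_ONLY, PM_FAIL };

static enum pool_mode mode = PM_WRITE;

/* stand-in for dm_pool_commit_metadata(); flip this to simulate failure */
static int commit_metadata_result = 0;

static int commit(void)
{
        int r;

        if (mode != PM_WRITE)
                return -EINVAL;          /* read-only/fail: nothing to do */

        r = commit_metadata_result;
        if (r) {
                fprintf(stderr, "metadata commit failed: %d\n", r);
                mode = PM_READ_ONLY;     /* degrade instead of writing on */
        }
        return r;
}

int main(void)
{
        printf("first commit:   %d (mode %d)\n", commit(), mode);
        commit_metadata_result = -ENOSPC;
        printf("failing commit: %d (mode %d)\n", commit(), mode);
        printf("after fallback: %d (mode %d)\n", commit(), mode);  /* -EINVAL */
        return 0;
}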
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c index af96e24ec328..1d75b1dc1e2e 100644 --- a/drivers/md/persistent-data/dm-array.c +++ b/drivers/md/persistent-data/dm-array.c | |||
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root, | |||
317 | * The shadow op will often be a noop. Only insert if it really | 317 | * The shadow op will often be a noop. Only insert if it really |
318 | * copied data. | 318 | * copied data. |
319 | */ | 319 | */ |
320 | if (dm_block_location(*block) != b) | 320 | if (dm_block_location(*block) != b) { |
321 | /* | ||
322 | * dm_tm_shadow_block will have already decremented the old | ||
323 | * block, but it is still referenced by the btree. We | ||
324 | * increment to stop the insert decrementing it below zero | ||
325 | * when overwriting the old value. | ||
326 | */ | ||
327 | dm_tm_inc(info->btree_info.tm, b); | ||
321 | r = insert_ablock(info, index, *block, root); | 328 | r = insert_ablock(info, index, *block, root); |
329 | } | ||
322 | 330 | ||
323 | return r; | 331 | return r; |
324 | } | 332 | } |
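The dm-array fix is purely a reference-count adjustment: dm_tm_shadow_block() has already dropped one reference to the old array block, but the btree entry that insert_ablock() is about to overwrite still holds one, so without the extra dm_tm_inc() the overwrite would push the count below zero. The toy arithmetic below only makes that bookkeeping visible; it is not the transaction-manager code.

/* Refcount bookkeeping for a shadowed array block, as plain integers. */
#include <stdio.h>

int main(void)
{
        int old_block_refs = 1;      /* held by the btree entry */

        old_block_refs--;            /* dm_tm_shadow_block() drops one */
        old_block_refs++;            /* the fix: dm_tm_inc() before insert */
        old_block_refs--;            /* insert overwrites the btree entry */

        printf("old block refs: %d\n", old_block_refs);   /* 0, not -1 */
        return 0;
}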
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index a7e8bf296388..064a3c271baa 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c | |||
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm) | |||
626 | } | 626 | } |
627 | EXPORT_SYMBOL_GPL(dm_bm_set_read_only); | 627 | EXPORT_SYMBOL_GPL(dm_bm_set_read_only); |
628 | 628 | ||
629 | void dm_bm_set_read_write(struct dm_block_manager *bm) | ||
630 | { | ||
631 | bm->read_only = false; | ||
632 | } | ||
633 | EXPORT_SYMBOL_GPL(dm_bm_set_read_write); | ||
634 | |||
629 | u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) | 635 | u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) |
630 | { | 636 | { |
631 | return crc32c(~(u32) 0, data, len) ^ init_xor; | 637 | return crc32c(~(u32) 0, data, len) ^ init_xor; |
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 9a82083a66b6..13cd58e1fe69 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h | |||
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b); | |||
108 | int dm_bm_flush_and_unlock(struct dm_block_manager *bm, | 108 | int dm_bm_flush_and_unlock(struct dm_block_manager *bm, |
109 | struct dm_block *superblock); | 109 | struct dm_block *superblock); |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Request data be prefetched into the cache. | 112 | * Request data is prefetched into the cache. |
113 | */ | 113 | */ |
114 | void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); | 114 | void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); |
115 | 115 | ||
116 | /* | 116 | /* |
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); | |||
125 | * be returned if you do. | 125 | * be returned if you do. |
126 | */ | 126 | */ |
127 | void dm_bm_set_read_only(struct dm_block_manager *bm); | 127 | void dm_bm_set_read_only(struct dm_block_manager *bm); |
128 | void dm_bm_set_read_write(struct dm_block_manager *bm); | ||
128 | 129 | ||
129 | u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); | 130 | u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); |
130 | 131 | ||
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 6058569fe86c..466a60bbd716 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c | |||
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, | |||
381 | } | 381 | } |
382 | 382 | ||
383 | static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, | 383 | static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, |
384 | uint32_t (*mutator)(void *context, uint32_t old), | 384 | int (*mutator)(void *context, uint32_t old, uint32_t *new), |
385 | void *context, enum allocation_event *ev) | 385 | void *context, enum allocation_event *ev) |
386 | { | 386 | { |
387 | int r; | 387 | int r; |
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, | |||
410 | 410 | ||
411 | if (old > 2) { | 411 | if (old > 2) { |
412 | r = sm_ll_lookup_big_ref_count(ll, b, &old); | 412 | r = sm_ll_lookup_big_ref_count(ll, b, &old); |
413 | if (r < 0) | 413 | if (r < 0) { |
414 | dm_tm_unlock(ll->tm, nb); | ||
414 | return r; | 415 | return r; |
416 | } | ||
415 | } | 417 | } |
416 | 418 | ||
417 | ref_count = mutator(context, old); | 419 | r = mutator(context, old, &ref_count); |
420 | if (r) { | ||
421 | dm_tm_unlock(ll->tm, nb); | ||
422 | return r; | ||
423 | } | ||
418 | 424 | ||
419 | if (ref_count <= 2) { | 425 | if (ref_count <= 2) { |
420 | sm_set_bitmap(bm_le, bit, ref_count); | 426 | sm_set_bitmap(bm_le, bit, ref_count); |
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, | |||
465 | return ll->save_ie(ll, index, &ie_disk); | 471 | return ll->save_ie(ll, index, &ie_disk); |
466 | } | 472 | } |
467 | 473 | ||
468 | static uint32_t set_ref_count(void *context, uint32_t old) | 474 | static int set_ref_count(void *context, uint32_t old, uint32_t *new) |
469 | { | 475 | { |
470 | return *((uint32_t *) context); | 476 | *new = *((uint32_t *) context); |
477 | return 0; | ||
471 | } | 478 | } |
472 | 479 | ||
473 | int sm_ll_insert(struct ll_disk *ll, dm_block_t b, | 480 | int sm_ll_insert(struct ll_disk *ll, dm_block_t b, |
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, | |||
476 | return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); | 483 | return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); |
477 | } | 484 | } |
478 | 485 | ||
479 | static uint32_t inc_ref_count(void *context, uint32_t old) | 486 | static int inc_ref_count(void *context, uint32_t old, uint32_t *new) |
480 | { | 487 | { |
481 | return old + 1; | 488 | *new = old + 1; |
489 | return 0; | ||
482 | } | 490 | } |
483 | 491 | ||
484 | int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) | 492 | int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) |
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) | |||
486 | return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); | 494 | return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); |
487 | } | 495 | } |
488 | 496 | ||
489 | static uint32_t dec_ref_count(void *context, uint32_t old) | 497 | static int dec_ref_count(void *context, uint32_t old, uint32_t *new) |
490 | { | 498 | { |
491 | return old - 1; | 499 | if (!old) { |
500 | DMERR_LIMIT("unable to decrement a reference count below 0"); | ||
501 | return -EINVAL; | ||
502 | } | ||
503 | |||
504 | *new = old - 1; | ||
505 | return 0; | ||
492 | } | 506 | } |
493 | 507 | ||
494 | int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) | 508 | int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) |
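The space-map rework changes the mutator callback from returning a bare count to returning an error with the new count passed out through a pointer, which is what lets dec_ref_count() refuse to wrap below zero and lets sm_ll_mutate() unlock and bail out cleanly. A self-contained sketch of that contract; this is a model, not the persistent-data code.

/* New mutator contract: int return for errors, count via out-param. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int inc_ref_count(void *context, uint32_t old, uint32_t *new_count)
{
        (void)context;
        *new_count = old + 1;
        return 0;
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new_count)
{
        (void)context;
        if (!old) {
                fprintf(stderr, "refusing to decrement below 0\n");
                return -EINVAL;
        }
        *new_count = old - 1;
        return 0;
}

/* stand-in for sm_ll_mutate(): apply a mutator to a stored count */
static int mutate(uint32_t *stored,
                  int (*mutator)(void *, uint32_t, uint32_t *),
                  void *context)
{
        uint32_t new_count;
        int r = mutator(context, *stored, &new_count);

        if (r)
                return r;            /* nothing written; caller sees the error */
        *stored = new_count;
        return 0;
}

int main(void)
{
        uint32_t count = 0;

        printf("inc: %d, count=%u\n", mutate(&count, inc_ref_count, NULL), count);
        printf("dec: %d, count=%u\n", mutate(&count, dec_ref_count, NULL), count);
        printf("dec: %d, count=%u\n", mutate(&count, dec_ref_count, NULL), count);
        return 0;
}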
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 1c959684caef..58fc1eef7499 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c | |||
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) | |||
384 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); | 384 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); |
385 | 385 | ||
386 | int r = sm_metadata_new_block_(sm, b); | 386 | int r = sm_metadata_new_block_(sm, b); |
387 | if (r) | 387 | if (r) { |
388 | DMERR("unable to allocate new metadata block"); | 388 | DMERR("unable to allocate new metadata block"); |
389 | return r; | ||
390 | } | ||
389 | 391 | ||
390 | r = sm_metadata_get_nr_free(sm, &count); | 392 | r = sm_metadata_get_nr_free(sm, &count); |
391 | if (r) | 393 | if (r) { |
392 | DMERR("couldn't get free block count"); | 394 | DMERR("couldn't get free block count"); |
395 | return r; | ||
396 | } | ||
393 | 397 | ||
394 | check_threshold(&smm->threshold, count); | 398 | check_threshold(&smm->threshold, count); |
395 | 399 | ||
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index d0799e323364..9c9063cd3208 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h | |||
@@ -955,7 +955,7 @@ struct sms_rx_stats { | |||
955 | u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ | 955 | u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ |
956 | s32 SNR; /* dB */ | 956 | s32 SNR; /* dB */ |
957 | u32 ber; /* Post Viterbi ber [1E-5] */ | 957 | u32 ber; /* Post Viterbi ber [1E-5] */ |
958 | u32 ber_error_count; /* Number of erronous SYNC bits. */ | 958 | u32 ber_error_count; /* Number of erroneous SYNC bits. */ |
959 | u32 ber_bit_count; /* Total number of SYNC bits. */ | 959 | u32 ber_bit_count; /* Total number of SYNC bits. */ |
960 | u32 ts_per; /* Transport stream PER, | 960 | u32 ts_per; /* Transport stream PER, |
961 | 0xFFFFFFFF indicate N/A */ | 961 | 0xFFFFFFFF indicate N/A */ |
@@ -981,7 +981,7 @@ struct sms_rx_stats_ex { | |||
981 | u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ | 981 | u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ |
982 | s32 SNR; /* dB */ | 982 | s32 SNR; /* dB */ |
983 | u32 ber; /* Post Viterbi ber [1E-5] */ | 983 | u32 ber; /* Post Viterbi ber [1E-5] */ |
984 | u32 ber_error_count; /* Number of erronous SYNC bits. */ | 984 | u32 ber_error_count; /* Number of erroneous SYNC bits. */ |
985 | u32 ber_bit_count; /* Total number of SYNC bits. */ | 985 | u32 ber_bit_count; /* Total number of SYNC bits. */ |
986 | u32 ts_per; /* Transport stream PER, | 986 | u32 ts_per; /* Transport stream PER, |
987 | 0xFFFFFFFF indicate N/A */ | 987 | 0xFFFFFFFF indicate N/A */ |
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h index 92c413ba0c79..ae36d0ae0fb1 100644 --- a/drivers/media/common/siano/smsdvb.h +++ b/drivers/media/common/siano/smsdvb.h | |||
@@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S { | |||
95 | u32 is_demod_locked; /* 0 - not locked, 1 - locked */ | 95 | u32 is_demod_locked; /* 0 - not locked, 1 - locked */ |
96 | 96 | ||
97 | u32 ber_bit_count; /* Total number of SYNC bits. */ | 97 | u32 ber_bit_count; /* Total number of SYNC bits. */ |
98 | u32 ber_error_count; /* Number of erronous SYNC bits. */ | 98 | u32 ber_error_count; /* Number of erroneous SYNC bits. */ |
99 | 99 | ||
100 | s32 MRC_SNR; /* dB */ | 100 | s32 MRC_SNR; /* dB */ |
101 | s32 mrc_in_band_pwr; /* In band power in dBM */ | 101 | s32 mrc_in_band_pwr; /* In band power in dBM */ |
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 58de4410c525..6c7ff0cdcd32 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c | |||
@@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
435 | dprintk_tscheck("TEI detected. " | 435 | dprintk_tscheck("TEI detected. " |
436 | "PID=0x%x data1=0x%x\n", | 436 | "PID=0x%x data1=0x%x\n", |
437 | pid, buf[1]); | 437 | pid, buf[1]); |
438 | /* data in this packet cant be trusted - drop it unless | 438 | /* data in this packet can't be trusted - drop it unless |
439 | * module option dvb_demux_feed_err_pkts is set */ | 439 | * module option dvb_demux_feed_err_pkts is set */ |
440 | if (!dvb_demux_feed_err_pkts) | 440 | if (!dvb_demux_feed_err_pkts) |
441 | return; | 441 | return; |
@@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, | |||
1032 | return -EINVAL; | 1032 | return -EINVAL; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | if (feed->is_filtering) | 1035 | if (feed->is_filtering) { |
1036 | /* release dvbdmx->mutex as far as it is | ||
1037 | acquired by stop_filtering() itself */ | ||
1038 | mutex_unlock(&dvbdmx->mutex); | ||
1036 | feed->stop_filtering(feed); | 1039 | feed->stop_filtering(feed); |
1040 | mutex_lock(&dvbdmx->mutex); | ||
1041 | } | ||
1037 | 1042 | ||
1038 | spin_lock_irq(&dvbdmx->lock); | 1043 | spin_lock_irq(&dvbdmx->lock); |
1039 | f = dvbdmxfeed->filter; | 1044 | f = dvbdmxfeed->filter; |
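The dvb_demux change releases dvbdmx->mutex around the stop_filtering() callback because the callback acquires the same mutex itself; holding it across the call would self-deadlock on a non-recursive lock. A small pthreads model of that pattern (function names are illustrative, not the demux API):

/* Dropping a lock around a callback that takes the lock itself. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demux_mutex = PTHREAD_MUTEX_INITIALIZER;

static void stop_filtering(void)
{
        pthread_mutex_lock(&demux_mutex);  /* callback takes the lock itself */
        puts("filter stopped");
        pthread_mutex_unlock(&demux_mutex);
}

static void release_filter(int is_filtering)
{
        pthread_mutex_lock(&demux_mutex);
        /* ... validation done under the lock ... */
        if (is_filtering) {
                pthread_mutex_unlock(&demux_mutex);  /* avoid self-deadlock */
                stop_filtering();
                pthread_mutex_lock(&demux_mutex);
        }
        /* ... continue teardown under the lock ... */
        pthread_mutex_unlock(&demux_mutex);
}

int main(void)
{
        release_filter(1);
        return 0;
}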
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index 30ee59052157..65728c25ea05 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c | |||
@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val, | |||
170 | static int af9033_wr_reg_val_tab(struct af9033_state *state, | 170 | static int af9033_wr_reg_val_tab(struct af9033_state *state, |
171 | const struct reg_val *tab, int tab_len) | 171 | const struct reg_val *tab, int tab_len) |
172 | { | 172 | { |
173 | #define MAX_TAB_LEN 212 | ||
173 | int ret, i, j; | 174 | int ret, i, j; |
174 | u8 buf[MAX_XFER_SIZE]; | 175 | u8 buf[1 + MAX_TAB_LEN]; |
176 | |||
177 | dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); | ||
175 | 178 | ||
176 | if (tab_len > sizeof(buf)) { | 179 | if (tab_len > sizeof(buf)) { |
177 | dev_warn(&state->i2c->dev, | 180 | dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n", |
178 | "%s: i2c wr len=%d is too big!\n", | 181 | KBUILD_MODNAME, tab_len); |
179 | KBUILD_MODNAME, tab_len); | ||
180 | return -EINVAL; | 182 | return -EINVAL; |
181 | } | 183 | } |
182 | 184 | ||
183 | dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); | ||
184 | |||
185 | for (i = 0, j = 0; i < tab_len; i++) { | 185 | for (i = 0, j = 0; i < tab_len; i++) { |
186 | buf[j] = tab[i].val; | 186 | buf[j] = tab[i].val; |
187 | 187 | ||
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c index 125a44041011..5c6ab4921bf1 100644 --- a/drivers/media/dvb-frontends/cxd2820r_c.c +++ b/drivers/media/dvb-frontends/cxd2820r_c.c | |||
@@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe) | |||
78 | 78 | ||
79 | num = if_freq / 1000; /* Hz => kHz */ | 79 | num = if_freq / 1000; /* Hz => kHz */ |
80 | num *= 0x4000; | 80 | num *= 0x4000; |
81 | if_ctl = cxd2820r_div_u64_round_closest(num, 41000); | 81 | if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000); |
82 | buf[0] = (if_ctl >> 8) & 0x3f; | 82 | buf[0] = (if_ctl >> 8) & 0x3f; |
83 | buf[1] = (if_ctl >> 0) & 0xff; | 83 | buf[1] = (if_ctl >> 0) & 0xff; |
84 | 84 | ||
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 90536147bf04..6dbbee453ee1 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c | |||
@@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe) | |||
3048 | dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); | 3048 | dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); |
3049 | 3049 | ||
3050 | locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */ | 3050 | locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */ |
3051 | /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */ | 3051 | /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */ |
3052 | *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); | 3052 | *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); |
3053 | *tune_state = CT_DEMOD_STEP_5; | 3053 | *tune_state = CT_DEMOD_STEP_5; |
3054 | break; | 3054 | break; |
@@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe) | |||
3115 | 3115 | ||
3116 | case CT_DEMOD_STEP_9: /* 39 */ | 3116 | case CT_DEMOD_STEP_9: /* 39 */ |
3117 | if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ | 3117 | if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ |
3118 | /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */ | 3118 | /* defines timeout for mpeg lock depending on interleaver length of longest layer */ |
3119 | for (i = 0; i < 3; i++) { | 3119 | for (i = 0; i < 3; i++) { |
3120 | if (c->layer[i].interleaving >= deeper_interleaver) { | 3120 | if (c->layer[i].interleaving >= deeper_interleaver) { |
3121 | dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); | 3121 | dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); |
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index d416c15691da..bf29a3f0e6f0 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c | |||
@@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable) | |||
1191 | goto error; | 1191 | goto error; |
1192 | 1192 | ||
1193 | if (state->m_enable_parallel == true) { | 1193 | if (state->m_enable_parallel == true) { |
1194 | /* paralel -> enable MD1 to MD7 */ | 1194 | /* parallel -> enable MD1 to MD7 */ |
1195 | status = write16(state, SIO_PDR_MD1_CFG__A, | 1195 | status = write16(state, SIO_PDR_MD1_CFG__A, |
1196 | sio_pdr_mdx_cfg); | 1196 | sio_pdr_mdx_cfg); |
1197 | if (status < 0) | 1197 | if (status < 0) |
@@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state) | |||
1428 | 1428 | ||
1429 | dprintk(1, "\n"); | 1429 | dprintk(1, "\n"); |
1430 | 1430 | ||
1431 | /* Gracefull shutdown (byte boundaries) */ | 1431 | /* Graceful shutdown (byte boundaries) */ |
1432 | status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); | 1432 | status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); |
1433 | if (status < 0) | 1433 | if (status < 0) |
1434 | goto error; | 1434 | goto error; |
@@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state, | |||
2021 | fec_oc_dto_burst_len = 204; | 2021 | fec_oc_dto_burst_len = 204; |
2022 | } | 2022 | } |
2023 | 2023 | ||
2024 | /* Check serial or parrallel output */ | 2024 | /* Check serial or parallel output */ |
2025 | fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); | 2025 | fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); |
2026 | if (state->m_enable_parallel == false) { | 2026 | if (state->m_enable_parallel == false) { |
2027 | /* MPEG data output is serial -> set ipr_mode[0] */ | 2027 | /* MPEG data output is serial -> set ipr_mode[0] */ |
@@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state) | |||
2908 | goto error; | 2908 | goto error; |
2909 | 2909 | ||
2910 | if (count == 1) { | 2910 | if (count == 1) { |
2911 | /* Try sampling on a diffrent edge */ | 2911 | /* Try sampling on a different edge */ |
2912 | u16 clk_neg = 0; | 2912 | u16 clk_neg = 0; |
2913 | 2913 | ||
2914 | status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); | 2914 | status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); |
@@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state, | |||
3306 | if (status < 0) | 3306 | if (status < 0) |
3307 | goto error; | 3307 | goto error; |
3308 | 3308 | ||
3309 | /* Retreive results parameters from SC */ | 3309 | /* Retrieve results parameters from SC */ |
3310 | switch (cmd) { | 3310 | switch (cmd) { |
3311 | /* All commands yielding 5 results */ | 3311 | /* All commands yielding 5 results */ |
3312 | /* All commands yielding 4 results */ | 3312 | /* All commands yielding 4 results */ |
@@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, | |||
3849 | break; | 3849 | break; |
3850 | } | 3850 | } |
3851 | #if 0 | 3851 | #if 0 |
3852 | /* No hierachical channels support in BDA */ | 3852 | /* No hierarchical channels support in BDA */ |
3853 | /* Priority (only for hierarchical channels) */ | 3853 | /* Priority (only for hierarchical channels) */ |
3854 | switch (channel->priority) { | 3854 | switch (channel->priority) { |
3855 | case DRX_PRIORITY_LOW: | 3855 | case DRX_PRIORITY_LOW: |
@@ -4081,7 +4081,7 @@ error: | |||
4081 | /*============================================================================*/ | 4081 | /*============================================================================*/ |
4082 | 4082 | ||
4083 | /** | 4083 | /** |
4084 | * \brief Retreive lock status . | 4084 | * \brief Retrieve lock status . |
4085 | * \param demod Pointer to demodulator instance. | 4085 | * \param demod Pointer to demodulator instance. |
4086 | * \param lockStat Pointer to lock status structure. | 4086 | * \param lockStat Pointer to lock status structure. |
4087 | * \return DRXStatus_t. | 4087 | * \return DRXStatus_t. |
@@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state) | |||
6174 | goto error; | 6174 | goto error; |
6175 | 6175 | ||
6176 | /* Stamp driver version number in SCU data RAM in BCD code | 6176 | /* Stamp driver version number in SCU data RAM in BCD code |
6177 | Done to enable field application engineers to retreive drxdriver version | 6177 | Done to enable field application engineers to retrieve drxdriver version |
6178 | via I2C from SCU RAM. | 6178 | via I2C from SCU RAM. |
6179 | Not using SCU command interface for SCU register access since no | 6179 | Not using SCU command interface for SCU register access since no |
6180 | microcode may be present. | 6180 | microcode may be present. |
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe) | |||
6399 | fe->ops.tuner_ops.get_if_frequency(fe, &IF); | 6399 | fe->ops.tuner_ops.get_if_frequency(fe, &IF); |
6400 | start(state, 0, IF); | 6400 | start(state, 0, IF); |
6401 | 6401 | ||
6402 | /* After set_frontend, stats aren't avaliable */ | 6402 | /* After set_frontend, stats aren't available */ |
6403 | p->strength.stat[0].scale = FE_SCALE_RELATIVE; | 6403 | p->strength.stat[0].scale = FE_SCALE_RELATIVE; |
6404 | p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | 6404 | p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; |
6405 | p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | 6405 | p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; |
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 7efb796c472c..50e8b63e5169 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c | |||
@@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg, | |||
710 | sizeof(priv->tuner_i2c_adapter.name)); | 710 | sizeof(priv->tuner_i2c_adapter.name)); |
711 | priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; | 711 | priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; |
712 | priv->tuner_i2c_adapter.algo_data = NULL; | 712 | priv->tuner_i2c_adapter.algo_data = NULL; |
713 | priv->tuner_i2c_adapter.dev.parent = &i2c->dev; | ||
713 | i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); | 714 | i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); |
714 | if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { | 715 | if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { |
715 | dev_err(&i2c->dev, | 716 | dev_err(&i2c->dev, |
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h index 4a5b7d211d2f..b253d400e817 100644 --- a/drivers/media/i2c/adv7183_regs.h +++ b/drivers/media/i2c/adv7183_regs.h | |||
@@ -52,9 +52,9 @@ | |||
52 | #define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ | 52 | #define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ |
53 | #define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ | 53 | #define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ |
54 | #define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ | 54 | #define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ |
55 | #define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */ | 55 | #define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */ |
56 | #define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */ | 56 | #define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */ |
57 | #define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */ | 57 | #define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */ |
58 | #define ADV7183_POLARITY 0x37 /* Polarity */ | 58 | #define ADV7183_POLARITY 0x37 /* Polarity */ |
59 | #define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ | 59 | #define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ |
60 | #define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ | 60 | #define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ |
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index fbfdd2fc2a36..a324106b9f11 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c | |||
@@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, | |||
877 | break; | 877 | break; |
878 | case ADV7604_MODE_HDMI: | 878 | case ADV7604_MODE_HDMI: |
879 | /* set default prim_mode/vid_std for HDMI | 879 | /* set default prim_mode/vid_std for HDMI |
880 | accoring to [REF_03, c. 4.2] */ | 880 | according to [REF_03, c. 4.2] */ |
881 | io_write(sd, 0x00, 0x02); /* video std */ | 881 | io_write(sd, 0x00, 0x02); /* video std */ |
882 | io_write(sd, 0x01, 0x06); /* prim mode */ | 882 | io_write(sd, 0x01, 0x06); /* prim mode */ |
883 | break; | 883 | break; |
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 22f729d66a96..b154f36740b4 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c | |||
@@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, | |||
1013 | break; | 1013 | break; |
1014 | case ADV7842_MODE_HDMI: | 1014 | case ADV7842_MODE_HDMI: |
1015 | /* set default prim_mode/vid_std for HDMI | 1015 | /* set default prim_mode/vid_std for HDMI |
1016 | accoring to [REF_03, c. 4.2] */ | 1016 | according to [REF_03, c. 4.2] */ |
1017 | io_write(sd, 0x00, 0x02); /* video std */ | 1017 | io_write(sd, 0x00, 0x02); /* video std */ |
1018 | io_write(sd, 0x01, 0x06); /* prim mode */ | 1018 | io_write(sd, 0x01, 0x06); /* prim mode */ |
1019 | break; | 1019 | break; |
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index 82bf5679da30..99ee456700f4 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c | |||
@@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
394 | 394 | ||
395 | if (!rc) { | 395 | if (!rc) { |
396 | /* | 396 | /* |
397 | * If platform_data doesn't specify rc_dev, initilize it | 397 | * If platform_data doesn't specify rc_dev, initialize it |
398 | * internally | 398 | * internally |
399 | */ | 399 | */ |
400 | rc = rc_allocate_device(); | 400 | rc = rc_allocate_device(); |
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c index f34429e452ab..a60931e66312 100644 --- a/drivers/media/i2c/m5mols/m5mols_controls.c +++ b/drivers/media/i2c/m5mols/m5mols_controls.c | |||
@@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd) | |||
544 | u16 zoom_step; | 544 | u16 zoom_step; |
545 | int ret; | 545 | int ret; |
546 | 546 | ||
547 | /* Determine the firmware dependant control range and step values */ | 547 | /* Determine the firmware dependent control range and step values */ |
548 | ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); | 548 | ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); |
549 | if (ret < 0) | 549 | if (ret < 0) |
550 | return ret; | 550 | return ret; |
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index 4734836fe5a4..1c2303d18bf4 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/i2c.h> | 19 | #include <linux/i2c.h> |
20 | #include <linux/log2.h> | 20 | #include <linux/log2.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/of.h> | ||
22 | #include <linux/of_gpio.h> | 23 | #include <linux/of_gpio.h> |
23 | #include <linux/pm.h> | 24 | #include <linux/pm.h> |
24 | #include <linux/regulator/consumer.h> | 25 | #include <linux/regulator/consumer.h> |
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 6fec9384d86e..e7f555cc827a 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c | |||
@@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd) | |||
1460 | mutex_unlock(&state->lock); | 1460 | mutex_unlock(&state->lock); |
1461 | 1461 | ||
1462 | v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", | 1462 | v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", |
1463 | __func__, ret ? "failed" : "succeded", ret); | 1463 | __func__, ret ? "failed" : "succeeded", ret); |
1464 | 1464 | ||
1465 | return ret; | 1465 | return ret; |
1466 | } | 1466 | } |
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h index 9d2c08652246..9dfa516f6944 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3.h +++ b/drivers/media/i2c/s5c73m3/s5c73m3.h | |||
@@ -393,7 +393,7 @@ struct s5c73m3 { | |||
393 | 393 | ||
394 | /* External master clock frequency */ | 394 | /* External master clock frequency */ |
395 | u32 mclk_frequency; | 395 | u32 mclk_frequency; |
396 | /* Video bus type - MIPI-CSI2/paralell */ | 396 | /* Video bus type - MIPI-CSI2/parallel */ |
397 | enum v4l2_mbus_type bus_type; | 397 | enum v4l2_mbus_type bus_type; |
398 | 398 | ||
399 | const struct s5c73m3_frame_size *sensor_pix_size[2]; | 399 | const struct s5c73m3_frame_size *sensor_pix_size[2]; |
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 637d02634527..afdbcb045cee 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c | |||
@@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state, | |||
1699 | * the analog demod. | 1699 | * the analog demod. |
1700 | * If the tuner is not found, it returns -ENODEV. | 1700 | * If the tuner is not found, it returns -ENODEV. |
1701 | * If auto-detection is disabled and the tuner doesn't match what it was | 1701 | * If auto-detection is disabled and the tuner doesn't match what it was |
1702 | * requred, it returns -EINVAL and fills 'name'. | 1702 | * required, it returns -EINVAL and fills 'name'. |
1703 | * If the chip is found, it returns the chip ID and fills 'name'. | 1703 | * If the chip is found, it returns the chip ID and fills 'name'. |
1704 | */ | 1704 | */ |
1705 | static int saa711x_detect_chip(struct i2c_client *client, | 1705 | static int saa711x_detect_chip(struct i2c_client *client, |
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c index 0a5c5d4fedd6..d2daa6a8f272 100644 --- a/drivers/media/i2c/soc_camera/ov5642.c +++ b/drivers/media/i2c/soc_camera/ov5642.c | |||
@@ -642,7 +642,7 @@ static const struct ov5642_datafmt | |||
642 | static int reg_read(struct i2c_client *client, u16 reg, u8 *val) | 642 | static int reg_read(struct i2c_client *client, u16 reg, u8 *val) |
643 | { | 643 | { |
644 | int ret; | 644 | int ret; |
645 | /* We have 16-bit i2c addresses - care for endianess */ | 645 | /* We have 16-bit i2c addresses - care for endianness */ |
646 | unsigned char data[2] = { reg >> 8, reg & 0xff }; | 646 | unsigned char data[2] = { reg >> 8, reg & 0xff }; |
647 | 647 | ||
648 | ret = i2c_master_send(client, data, 2); | 648 | ret = i2c_master_send(client, data, 2); |
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c index 42276d93624c..ed9ae8875348 100644 --- a/drivers/media/i2c/ths7303.c +++ b/drivers/media/i2c/ths7303.c | |||
@@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | /* following function is used to set ths7303 */ | 85 | /* following function is used to set ths7303 */ |
86 | int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode) | 86 | static int ths7303_setval(struct v4l2_subdev *sd, |
87 | enum ths7303_filter_mode mode) | ||
87 | { | 88 | { |
88 | struct i2c_client *client = v4l2_get_subdevdata(sd); | 89 | struct i2c_client *client = v4l2_get_subdevdata(sd); |
89 | struct ths7303_state *state = to_state(sd); | 90 | struct ths7303_state *state = to_state(sd); |
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c index 3f584a7d0781..bee7946faa7c 100644 --- a/drivers/media/i2c/wm8775.c +++ b/drivers/media/i2c/wm8775.c | |||
@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd, | |||
130 | return -EINVAL; | 130 | return -EINVAL; |
131 | } | 131 | } |
132 | state->input = input; | 132 | state->input = input; |
133 | if (!v4l2_ctrl_g_ctrl(state->mute)) | 133 | if (v4l2_ctrl_g_ctrl(state->mute)) |
134 | return 0; | 134 | return 0; |
135 | if (!v4l2_ctrl_g_ctrl(state->vol)) | 135 | if (!v4l2_ctrl_g_ctrl(state->vol)) |
136 | return 0; | 136 | return 0; |
137 | if (!v4l2_ctrl_g_ctrl(state->bal)) | ||
138 | return 0; | ||
139 | wm8775_set_audio(sd, 1); | 137 | wm8775_set_audio(sd, 1); |
140 | return 0; | 138 | return 0; |
141 | } | 139 | } |
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index a3b1ee9c00d7..92a06fd85865 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c | |||
@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) | |||
4182 | } | 4182 | } |
4183 | btv->std = V4L2_STD_PAL; | 4183 | btv->std = V4L2_STD_PAL; |
4184 | init_irqreg(btv); | 4184 | init_irqreg(btv); |
4185 | v4l2_ctrl_handler_setup(hdl); | 4185 | if (!bttv_tvcards[btv->c.type].no_video) |
4186 | v4l2_ctrl_handler_setup(hdl); | ||
4186 | if (hdl->error) { | 4187 | if (hdl->error) { |
4187 | result = hdl->error; | 4188 | result = hdl->error; |
4188 | goto fail2; | 4189 | goto fail2; |
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index 2767c64df0c8..57f4688ea55b 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h | |||
@@ -262,7 +262,7 @@ struct cx18_options { | |||
262 | }; | 262 | }; |
263 | 263 | ||
264 | /* per-mdl bit flags */ | 264 | /* per-mdl bit flags */ |
265 | #define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */ | 265 | #define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */ |
266 | 266 | ||
267 | /* per-stream, s_flags */ | 267 | /* per-stream, s_flags */ |
268 | #define CX18_F_S_CLAIMED 3 /* this stream is claimed */ | 268 | #define CX18_F_S_CLAIMED 3 /* this stream is claimed */ |
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index e3fc2c71808a..95666eee7b27 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c | |||
@@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value) | |||
427 | cx_write(MC417_RWD, regval); | 427 | cx_write(MC417_RWD, regval); |
428 | 428 | ||
429 | /* Transition RD to effect read transaction across bus. | 429 | /* Transition RD to effect read transaction across bus. |
430 | * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? | 430 | * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? |
431 | * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its | 431 | * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its |
432 | * input only...) | 432 | * input only...) |
433 | */ | 433 | */ |
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 8164d74b46a4..655d6854a8d7 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c | |||
@@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto) | |||
401 | /* set automatic LED control by FPGA */ | 401 | /* set automatic LED control by FPGA */ |
402 | pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); | 402 | pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); |
403 | 403 | ||
404 | /* set data endianess */ | 404 | /* set data endianness */ |
405 | #ifdef __LITTLE_ENDIAN | 405 | #ifdef __LITTLE_ENDIAN |
406 | pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); | 406 | pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); |
407 | #else | 407 | #else |
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 57ef5456f1e8..1bf06970ca3e 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c | |||
@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev, | |||
1354 | if (fw_debug) { | 1354 | if (fw_debug) { |
1355 | dev->kthread = kthread_run(saa7164_thread_function, dev, | 1355 | dev->kthread = kthread_run(saa7164_thread_function, dev, |
1356 | "saa7164 debug"); | 1356 | "saa7164 debug"); |
1357 | if (!dev->kthread) | 1357 | if (IS_ERR(dev->kthread)) { |
1358 | dev->kthread = NULL; | ||
1358 | printk(KERN_ERR "%s() Failed to create " | 1359 | printk(KERN_ERR "%s() Failed to create " |
1359 | "debug kernel thread\n", __func__); | 1360 | "debug kernel thread\n", __func__); |
1361 | } | ||
1360 | } | 1362 | } |
1361 | 1363 | ||
1362 | } /* != BOARD_UNKNOWN */ | 1364 | } /* != BOARD_UNKNOWN */ |
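Note on the saa7164 hunk above: kthread_run() never returns NULL on failure, it returns an ERR_PTR()-encoded error, so the old "if (!dev->kthread)" test could never trigger. A minimal sketch of the corrected pattern, using hypothetical my_* names rather than the real driver structures:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/printk.h>
	#include <linux/sched.h>

	struct my_dev {
		struct task_struct *kthread;
	};

	static int my_thread_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int my_start_debug_thread(struct my_dev *dev)
	{
		dev->kthread = kthread_run(my_thread_fn, dev, "my-debug");
		if (IS_ERR(dev->kthread)) {
			int err = PTR_ERR(dev->kthread);

			dev->kthread = NULL;	/* never keep an ERR_PTR around */
			pr_err("failed to create debug thread: %d\n", err);
			return err;
		}
		return 0;
	}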
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index bd72fb97fea5..61f3dbcc259f 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c | |||
@@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb) | |||
1434 | if (q_data->fourcc == V4L2_PIX_FMT_H264 && | 1434 | if (q_data->fourcc == V4L2_PIX_FMT_H264 && |
1435 | vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { | 1435 | vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { |
1436 | /* | 1436 | /* |
1437 | * For backwards compatiblity, queuing an empty buffer marks | 1437 | * For backwards compatibility, queuing an empty buffer marks |
1438 | * the stream end | 1438 | * the stream end |
1439 | */ | 1439 | */ |
1440 | if (vb2_get_plane_payload(vb, 0) == 0) | 1440 | if (vb2_get_plane_payload(vb, 0) == 0) |
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index 3d66d88ea3a1..f7915695c907 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c | |||
@@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev) | |||
1039 | 1039 | ||
1040 | dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); | 1040 | dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); |
1041 | 1041 | ||
1042 | /* Enable clocks and perform basic initalization */ | 1042 | /* Enable clocks and perform basic initialization */ |
1043 | clk_enable(fimc->clock[CLK_GATE]); | 1043 | clk_enable(fimc->clock[CLK_GATE]); |
1044 | fimc_hw_reset(fimc); | 1044 | fimc_hw_reset(fimc); |
1045 | 1045 | ||
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 7a4ee4c0449d..c1bce170df6f 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c | |||
@@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd, | |||
759 | goto dev_unlock; | 759 | goto dev_unlock; |
760 | 760 | ||
761 | drvdata = dev_get_drvdata(dev); | 761 | drvdata = dev_get_drvdata(dev); |
762 | /* Some subdev didn't probe succesfully id drvdata is NULL */ | 762 | /* Some subdev didn't probe successfully id drvdata is NULL */ |
763 | if (drvdata) { | 763 | if (drvdata) { |
764 | switch (plat_entity) { | 764 | switch (plat_entity) { |
765 | case IDX_FIMC: | 765 | case IDX_FIMC: |
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 3458fa0e2fd5..054507f16734 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c | |||
@@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam) | |||
142 | struct mmp_camera *cam = mcam_to_cam(mcam); | 142 | struct mmp_camera *cam = mcam_to_cam(mcam); |
143 | struct mmp_camera_platform_data *pdata; | 143 | struct mmp_camera_platform_data *pdata; |
144 | 144 | ||
145 | if (mcam->bus_type == V4L2_MBUS_CSI2) { | ||
146 | cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); | ||
147 | if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) | ||
148 | return PTR_ERR(cam->mipi_clk); | ||
149 | } | ||
150 | |||
151 | /* | 145 | /* |
152 | * Turn on power and clocks to the controller. | 146 | * Turn on power and clocks to the controller. |
153 | */ | 147 | */ |
@@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam) | |||
186 | gpio_set_value(pdata->sensor_power_gpio, 0); | 180 | gpio_set_value(pdata->sensor_power_gpio, 0); |
187 | gpio_set_value(pdata->sensor_reset_gpio, 0); | 181 | gpio_set_value(pdata->sensor_reset_gpio, 0); |
188 | 182 | ||
189 | if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) { | ||
190 | if (cam->mipi_clk) | ||
191 | devm_clk_put(mcam->dev, cam->mipi_clk); | ||
192 | cam->mipi_clk = NULL; | ||
193 | } | ||
194 | |||
195 | mcam_clk_disable(mcam); | 183 | mcam_clk_disable(mcam); |
196 | } | 184 | } |
197 | 185 | ||
@@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam) | |||
292 | return; | 280 | return; |
293 | 281 | ||
294 | /* get the escape clk, this is hard coded */ | 282 | /* get the escape clk, this is hard coded */ |
283 | clk_prepare_enable(cam->mipi_clk); | ||
295 | tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; | 284 | tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; |
296 | 285 | clk_disable_unprepare(cam->mipi_clk); | |
297 | /* | 286 | /* |
298 | * dphy[2] - CSI2_DPHY6: | 287 | * dphy[2] - CSI2_DPHY6: |
299 | * bit 0 ~ bit 7: CK Term Enable | 288 | * bit 0 ~ bit 7: CK Term Enable |
@@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data) | |||
325 | return IRQ_RETVAL(handled); | 314 | return IRQ_RETVAL(handled); |
326 | } | 315 | } |
327 | 316 | ||
328 | static void mcam_deinit_clk(struct mcam_camera *mcam) | ||
329 | { | ||
330 | unsigned int i; | ||
331 | |||
332 | for (i = 0; i < NR_MCAM_CLK; i++) { | ||
333 | if (!IS_ERR(mcam->clk[i])) { | ||
334 | if (mcam->clk[i]) | ||
335 | devm_clk_put(mcam->dev, mcam->clk[i]); | ||
336 | } | ||
337 | mcam->clk[i] = NULL; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static void mcam_init_clk(struct mcam_camera *mcam) | 317 | static void mcam_init_clk(struct mcam_camera *mcam) |
342 | { | 318 | { |
343 | unsigned int i; | 319 | unsigned int i; |
@@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev) | |||
371 | if (cam == NULL) | 347 | if (cam == NULL) |
372 | return -ENOMEM; | 348 | return -ENOMEM; |
373 | cam->pdev = pdev; | 349 | cam->pdev = pdev; |
374 | cam->mipi_clk = NULL; | ||
375 | INIT_LIST_HEAD(&cam->devlist); | 350 | INIT_LIST_HEAD(&cam->devlist); |
376 | 351 | ||
377 | mcam = &cam->mcam; | 352 | mcam = &cam->mcam; |
@@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev) | |||
387 | mcam->mclk_div = pdata->mclk_div; | 362 | mcam->mclk_div = pdata->mclk_div; |
388 | mcam->bus_type = pdata->bus_type; | 363 | mcam->bus_type = pdata->bus_type; |
389 | mcam->dphy = pdata->dphy; | 364 | mcam->dphy = pdata->dphy; |
365 | if (mcam->bus_type == V4L2_MBUS_CSI2) { | ||
366 | cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); | ||
367 | if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) | ||
368 | return PTR_ERR(cam->mipi_clk); | ||
369 | } | ||
390 | mcam->mipi_enabled = false; | 370 | mcam->mipi_enabled = false; |
391 | mcam->lane = pdata->lane; | 371 | mcam->lane = pdata->lane; |
392 | mcam->chip_id = MCAM_ARMADA610; | 372 | mcam->chip_id = MCAM_ARMADA610; |
@@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev) | |||
444 | */ | 424 | */ |
445 | ret = mmpcam_power_up(mcam); | 425 | ret = mmpcam_power_up(mcam); |
446 | if (ret) | 426 | if (ret) |
447 | goto out_deinit_clk; | 427 | return ret; |
448 | ret = mccic_register(mcam); | 428 | ret = mccic_register(mcam); |
449 | if (ret) | 429 | if (ret) |
450 | goto out_power_down; | 430 | goto out_power_down; |
@@ -469,8 +449,6 @@ out_unregister: | |||
469 | mccic_shutdown(mcam); | 449 | mccic_shutdown(mcam); |
470 | out_power_down: | 450 | out_power_down: |
471 | mmpcam_power_down(mcam); | 451 | mmpcam_power_down(mcam); |
472 | out_deinit_clk: | ||
473 | mcam_deinit_clk(mcam); | ||
474 | return ret; | 452 | return ret; |
475 | } | 453 | } |
476 | 454 | ||
@@ -478,18 +456,10 @@ out_deinit_clk: | |||
478 | static int mmpcam_remove(struct mmp_camera *cam) | 456 | static int mmpcam_remove(struct mmp_camera *cam) |
479 | { | 457 | { |
480 | struct mcam_camera *mcam = &cam->mcam; | 458 | struct mcam_camera *mcam = &cam->mcam; |
481 | struct mmp_camera_platform_data *pdata; | ||
482 | 459 | ||
483 | mmpcam_remove_device(cam); | 460 | mmpcam_remove_device(cam); |
484 | mccic_shutdown(mcam); | 461 | mccic_shutdown(mcam); |
485 | mmpcam_power_down(mcam); | 462 | mmpcam_power_down(mcam); |
486 | pdata = cam->pdev->dev.platform_data; | ||
487 | gpio_free(pdata->sensor_reset_gpio); | ||
488 | gpio_free(pdata->sensor_power_gpio); | ||
489 | mcam_deinit_clk(mcam); | ||
490 | iounmap(cam->power_regs); | ||
491 | iounmap(mcam->regs); | ||
492 | kfree(cam); | ||
493 | return 0; | 463 | return 0; |
494 | } | 464 | } |
495 | 465 | ||
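Note on the marvell-ccic hunks above: the "mipi" clock lookup moves into probe() and the hand-rolled devm_clk_put() calls go away, because devm_-managed resources are released automatically when the device is unbound; the power-down and remove paths do not need to put them. A minimal sketch of the managed-clock pattern, with hypothetical names:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct my_cam {
		struct clk *mipi_clk;
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_cam *cam;

		cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
		if (!cam)
			return -ENOMEM;

		/* Managed lookup: the reference is dropped automatically on
		 * unbind, so remove() needs no devm_clk_put(). */
		cam->mipi_clk = devm_clk_get(&pdev->dev, "mipi");
		if (IS_ERR(cam->mipi_clk))
			return PTR_ERR(cam->mipi_clk);

		platform_set_drvdata(pdev, cam);
		return 0;
	}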
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1c3608039663..561bce8ffb1b 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c | |||
@@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp) | |||
1673 | * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in | 1673 | * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in |
1674 | * resume(), and the the pipelines are restarted in complete(). | 1674 | * resume(), and the the pipelines are restarted in complete(). |
1675 | * | 1675 | * |
1676 | * TODO: PM dependencies between the ISP and sensors are not modeled explicitly | 1676 | * TODO: PM dependencies between the ISP and sensors are not modelled explicitly |
1677 | * yet. | 1677 | * yet. |
1678 | */ | 1678 | */ |
1679 | static int isp_pm_prepare(struct device *dev) | 1679 | static int isp_pm_prepare(struct device *dev) |
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index a908d006f527..f6304bb074f5 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c | |||
@@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) | |||
339 | if (subdev == NULL) | 339 | if (subdev == NULL) |
340 | return -EINVAL; | 340 | return -EINVAL; |
341 | 341 | ||
342 | mutex_lock(&video->mutex); | ||
343 | |||
344 | fmt.pad = pad; | 342 | fmt.pad = pad; |
345 | fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; | 343 | fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; |
346 | ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); | ||
347 | if (ret == -ENOIOCTLCMD) | ||
348 | ret = -EINVAL; | ||
349 | 344 | ||
345 | mutex_lock(&video->mutex); | ||
346 | ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); | ||
350 | mutex_unlock(&video->mutex); | 347 | mutex_unlock(&video->mutex); |
351 | 348 | ||
352 | if (ret) | 349 | if (ret) |
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h index 9319e93599ae..6ccc3f8c122a 100644 --- a/drivers/media/platform/s5p-mfc/regs-mfc.h +++ b/drivers/media/platform/s5p-mfc/regs-mfc.h | |||
@@ -382,7 +382,7 @@ | |||
382 | #define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 | 382 | #define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 |
383 | #define S5P_FIMV_R2H_CMD_ERR_RET 32 | 383 | #define S5P_FIMV_R2H_CMD_ERR_RET 32 |
384 | 384 | ||
385 | /* Dummy definition for MFCv6 compatibilty */ | 385 | /* Dummy definition for MFCv6 compatibility */ |
386 | #define S5P_FIMV_CODEC_H264_MVC_DEC -1 | 386 | #define S5P_FIMV_CODEC_H264_MVC_DEC -1 |
387 | #define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 | 387 | #define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 |
388 | #define S5P_FIMV_MFC_RESET -1 | 388 | #define S5P_FIMV_MFC_RESET -1 |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 5f2c4ad6c2cb..e46067a57853 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c | |||
@@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) | |||
239 | frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); | 239 | frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); |
240 | 240 | ||
241 | /* Copy timestamp / timecode from decoded src to dst and set | 241 | /* Copy timestamp / timecode from decoded src to dst and set |
242 | appropraite flags */ | 242 | appropriate flags */ |
243 | src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); | 243 | src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); |
244 | list_for_each_entry(dst_buf, &ctx->dst_queue, list) { | 244 | list_for_each_entry(dst_buf, &ctx->dst_queue, list) { |
245 | if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { | 245 | if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { |
@@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, | |||
428 | case MFCINST_FINISHING: | 428 | case MFCINST_FINISHING: |
429 | case MFCINST_FINISHED: | 429 | case MFCINST_FINISHED: |
430 | case MFCINST_RUNNING: | 430 | case MFCINST_RUNNING: |
431 | /* It is higly probable that an error occured | 431 | /* It is highly probable that an error occurred |
432 | * while decoding a frame */ | 432 | * while decoding a frame */ |
433 | clear_work_bit(ctx); | 433 | clear_work_bit(ctx); |
434 | ctx->state = MFCINST_ERROR; | 434 | ctx->state = MFCINST_ERROR; |
@@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) | |||
611 | mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); | 611 | mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); |
612 | switch (reason) { | 612 | switch (reason) { |
613 | case S5P_MFC_R2H_CMD_ERR_RET: | 613 | case S5P_MFC_R2H_CMD_ERR_RET: |
614 | /* An error has occured */ | 614 | /* An error has occurred */ |
615 | if (ctx->state == MFCINST_RUNNING && | 615 | if (ctx->state == MFCINST_RUNNING && |
616 | s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= | 616 | s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= |
617 | dev->warn_start) | 617 | dev->warn_start) |
@@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file) | |||
840 | mutex_unlock(&dev->mfc_mutex); | 840 | mutex_unlock(&dev->mfc_mutex); |
841 | mfc_debug_leave(); | 841 | mfc_debug_leave(); |
842 | return ret; | 842 | return ret; |
843 | /* Deinit when failure occured */ | 843 | /* Deinit when failure occurred */ |
844 | err_queue_init: | 844 | err_queue_init: |
845 | if (dev->num_inst == 1) | 845 | if (dev->num_inst == 1) |
846 | s5p_mfc_deinit_hw(dev); | 846 | s5p_mfc_deinit_hw(dev); |
@@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file) | |||
881 | /* Mark context as idle */ | 881 | /* Mark context as idle */ |
882 | clear_work_bit_irqsave(ctx); | 882 | clear_work_bit_irqsave(ctx); |
883 | /* If instance was initialised then | 883 | /* If instance was initialised then |
884 | * return instance and free reosurces */ | 884 | * return instance and free resources */ |
885 | if (ctx->inst_no != MFC_NO_INSTANCE_SET) { | 885 | if (ctx->inst_no != MFC_NO_INSTANCE_SET) { |
886 | mfc_debug(2, "Has to free instance\n"); | 886 | mfc_debug(2, "Has to free instance\n"); |
887 | ctx->state = MFCINST_RETURN_INST; | 887 | ctx->state = MFCINST_RETURN_INST; |
888 | set_work_bit_irqsave(ctx); | 888 | set_work_bit_irqsave(ctx); |
889 | s5p_mfc_clean_ctx_int_flags(ctx); | 889 | s5p_mfc_clean_ctx_int_flags(ctx); |
890 | s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); | 890 | s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); |
891 | /* Wait until instance is returned or timeout occured */ | 891 | /* Wait until instance is returned or timeout occurred */ |
892 | if (s5p_mfc_wait_for_done_ctx | 892 | if (s5p_mfc_wait_for_done_ctx |
893 | (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { | 893 | (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { |
894 | s5p_mfc_clock_off(); | 894 | s5p_mfc_clock_off(); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 7cab6849fb5b..2475a3c9a0a6 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c | |||
@@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) | |||
69 | 69 | ||
70 | } else { | 70 | } else { |
71 | /* In this case bank2 can point to the same address as bank1. | 71 | /* In this case bank2 can point to the same address as bank1. |
72 | * Firmware will always occupy the beggining of this area so it is | 72 | * Firmware will always occupy the beginning of this area so it is |
73 | * impossible having a video frame buffer with zero address. */ | 73 | * impossible having a video frame buffer with zero address. */ |
74 | dev->bank2 = dev->bank1; | 74 | dev->bank2 = dev->bank1; |
75 | } | 75 | } |
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h index 04e6490a45be..fb2acc53112a 100644 --- a/drivers/media/platform/s5p-tv/mixer.h +++ b/drivers/media/platform/s5p-tv/mixer.h | |||
@@ -65,7 +65,7 @@ struct mxr_format { | |||
65 | int num_subframes; | 65 | int num_subframes; |
66 | /** specifies to which subframe belong given plane */ | 66 | /** specifies to which subframe belong given plane */ |
67 | int plane2subframe[MXR_MAX_PLANES]; | 67 | int plane2subframe[MXR_MAX_PLANES]; |
68 | /** internal code, driver dependant */ | 68 | /** internal code, driver dependent */ |
69 | unsigned long cookie; | 69 | unsigned long cookie; |
70 | }; | 70 | }; |
71 | 71 | ||
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c index 641b1f071e06..81b97db111d8 100644 --- a/drivers/media/platform/s5p-tv/mixer_video.c +++ b/drivers/media/platform/s5p-tv/mixer_video.c | |||
@@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh, | |||
528 | mutex_lock(&mdev->mutex); | 528 | mutex_lock(&mdev->mutex); |
529 | 529 | ||
530 | /* timings change cannot be done while there is an entity | 530 | /* timings change cannot be done while there is an entity |
531 | * dependant on output configuration | 531 | * dependent on output configuration |
532 | */ | 532 | */ |
533 | if (mdev->n_output > 0) { | 533 | if (mdev->n_output > 0) { |
534 | mutex_unlock(&mdev->mutex); | 534 | mutex_unlock(&mdev->mutex); |
@@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm) | |||
585 | mutex_lock(&mdev->mutex); | 585 | mutex_lock(&mdev->mutex); |
586 | 586 | ||
587 | /* standard change cannot be done while there is an entity | 587 | /* standard change cannot be done while there is an entity |
588 | * dependant on output configuration | 588 | * dependent on output configuration |
589 | */ | 589 | */ |
590 | if (mdev->n_output > 0) { | 590 | if (mdev->n_output > 0) { |
591 | mutex_unlock(&mdev->mutex); | 591 | mutex_unlock(&mdev->mutex); |
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index 6769193c7c7b..74ce8b6b79fa 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c | |||
@@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd) | |||
1495 | if (ctrlclock & LCLK_EN) | 1495 | if (ctrlclock & LCLK_EN) |
1496 | CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); | 1496 | CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); |
1497 | 1497 | ||
1498 | /* select bus endianess */ | 1498 | /* select bus endianness */ |
1499 | xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); | 1499 | xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); |
1500 | fmt = xlate->host_fmt; | 1500 | fmt = xlate->host_fmt; |
1501 | 1501 | ||
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c index 1d3f11965196..2d4e73b45c5e 100644 --- a/drivers/media/platform/vivi.c +++ b/drivers/media/platform/vivi.c | |||
@@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i) | |||
1108 | return 0; | 1108 | return 0; |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | /* timeperframe is arbitrary and continous */ | 1111 | /* timeperframe is arbitrary and continuous */ |
1112 | static int vidioc_enum_frameintervals(struct file *file, void *priv, | 1112 | static int vidioc_enum_frameintervals(struct file *file, void *priv, |
1113 | struct v4l2_frmivalenum *fival) | 1113 | struct v4l2_frmivalenum *fival) |
1114 | { | 1114 | { |
@@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv, | |||
1125 | 1125 | ||
1126 | fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; | 1126 | fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; |
1127 | 1127 | ||
1128 | /* fill in stepwise (step=1.0 is requred by V4L2 spec) */ | 1128 | /* fill in stepwise (step=1.0 is required by V4L2 spec) */ |
1129 | fival->stepwise.min = tpf_min; | 1129 | fival->stepwise.min = tpf_min; |
1130 | fival->stepwise.max = tpf_max; | 1130 | fival->stepwise.max = tpf_max; |
1131 | fival->stepwise.step = (struct v4l2_fract) {1, 1}; | 1131 | fival->stepwise.step = (struct v4l2_fract) {1, 1}; |
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 1c9e771aa15c..d16bf0f41e24 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c | |||
@@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1) | |||
323 | * Increment the VSP1 reference count and initialize the device if the first | 323 | * Increment the VSP1 reference count and initialize the device if the first |
324 | * reference is taken. | 324 | * reference is taken. |
325 | * | 325 | * |
326 | * Return a pointer to the VSP1 device or NULL if an error occured. | 326 | * Return a pointer to the VSP1 device or NULL if an error occurred. |
327 | */ | 327 | */ |
328 | struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) | 328 | struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) |
329 | { | 329 | { |
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 714c53ef6c11..4b0ac07af662 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c | |||
@@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) | |||
1026 | 1026 | ||
1027 | /* ... and the buffers queue... */ | 1027 | /* ... and the buffers queue... */ |
1028 | video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); | 1028 | video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); |
1029 | if (IS_ERR(video->alloc_ctx)) | 1029 | if (IS_ERR(video->alloc_ctx)) { |
1030 | ret = PTR_ERR(video->alloc_ctx); | ||
1030 | goto error; | 1031 | goto error; |
1032 | } | ||
1031 | 1033 | ||
1032 | video->queue.type = video->type; | 1034 | video->queue.type = video->type; |
1033 | video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; | 1035 | video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; |
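Note on the vsp1_video hunk above: it makes the error path report the real failure; without the added assignment the goto would propagate whatever stale value ret happened to hold. A small sketch of the ERR_PTR propagation pattern, using stand-in my_* helpers rather than the vb2 API:

	#include <linux/err.h>
	#include <linux/errno.h>

	struct my_video {
		void *alloc_ctx;
	};

	static void *my_alloc_ctx(void)
	{
		return ERR_PTR(-ENOMEM);	/* stand-in that always fails */
	}

	static void my_cleanup(struct my_video *video)
	{
	}

	static int my_init(struct my_video *video)
	{
		int ret;

		video->alloc_ctx = my_alloc_ctx();
		if (IS_ERR(video->alloc_ctx)) {
			/* capture the real error before jumping, otherwise
			 * the caller sees an uninitialized ret */
			ret = PTR_ERR(video->alloc_ctx);
			goto error;
		}

		return 0;

	error:
		my_cleanup(video);
		return ret;
	}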
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 3db8a8cfe1a8..050b3bb96fec 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c | |||
@@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark) | |||
271 | cancel_work_sync(&shark->led_work); | 271 | cancel_work_sync(&shark->led_work); |
272 | } | 272 | } |
273 | 273 | ||
274 | #ifdef CONFIG_PM | 274 | static inline void shark_resume_leds(struct shark_device *shark) |
275 | static void shark_resume_leds(struct shark_device *shark) | ||
276 | { | 275 | { |
277 | if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) | 276 | if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) |
278 | set_bit(BLUE_PULSE_LED, &shark->brightness_new); | 277 | set_bit(BLUE_PULSE_LED, &shark->brightness_new); |
@@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark) | |||
281 | set_bit(RED_LED, &shark->brightness_new); | 280 | set_bit(RED_LED, &shark->brightness_new); |
282 | schedule_work(&shark->led_work); | 281 | schedule_work(&shark->led_work); |
283 | } | 282 | } |
284 | #endif | ||
285 | #else | 283 | #else |
286 | static int shark_register_leds(struct shark_device *shark, struct device *dev) | 284 | static int shark_register_leds(struct shark_device *shark, struct device *dev) |
287 | { | 285 | { |
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index d86d90dab8bf..8654e0dc5c95 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c | |||
@@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark) | |||
237 | cancel_work_sync(&shark->led_work); | 237 | cancel_work_sync(&shark->led_work); |
238 | } | 238 | } |
239 | 239 | ||
240 | #ifdef CONFIG_PM | 240 | static inline void shark_resume_leds(struct shark_device *shark) |
241 | static void shark_resume_leds(struct shark_device *shark) | ||
242 | { | 241 | { |
243 | int i; | 242 | int i; |
244 | 243 | ||
@@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark) | |||
247 | 246 | ||
248 | schedule_work(&shark->led_work); | 247 | schedule_work(&shark->led_work); |
249 | } | 248 | } |
250 | #endif | ||
251 | #else | 249 | #else |
252 | static int shark_register_leds(struct shark_device *shark, struct device *dev) | 250 | static int shark_register_leds(struct shark_device *shark, struct device *dev) |
253 | { | 251 | { |
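Note on both radio-shark hunks above: the #ifdef CONFIG_PM guard around the helper is dropped by making it static inline, since an unused static inline function is silently discarded by the compiler; no preprocessor guard is needed even when its only caller is conditionally compiled. An illustrative sketch with hypothetical names:

	#include <linux/workqueue.h>

	struct my_radio {
		struct work_struct led_work;
	};

	/* No #ifdef CONFIG_PM needed: if the sole caller sits inside
	 * conditionally compiled resume code, this unused static inline
	 * is dropped without a "defined but not used" warning. */
	static inline void my_resume_leds(struct my_radio *radio)
	{
		schedule_work(&radio->led_work);
	}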
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9c9084cb99f7..2fd9009f8663 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c | |||
@@ -268,8 +268,8 @@ struct si476x_radio; | |||
268 | * | 268 | * |
269 | * @tune_freq: Tune chip to a specific frequency | 269 | * @tune_freq: Tune chip to a specific frequency |
270 | * @seek_start: Star station seeking | 270 | * @seek_start: Star station seeking |
271 | * @rsq_status: Get Recieved Signal Quality(RSQ) status | 271 | * @rsq_status: Get Received Signal Quality(RSQ) status |
272 | * @rds_blckcnt: Get recived RDS blocks count | 272 | * @rds_blckcnt: Get received RDS blocks count |
273 | * @phase_diversity: Change phase diversity mode of the tuner | 273 | * @phase_diversity: Change phase diversity mode of the tuner |
274 | * @phase_div_status: Get phase diversity mode status | 274 | * @phase_div_status: Get phase diversity mode status |
275 | * @acf_status: Get the status of Automatically Controlled | 275 | * @acf_status: Get the status of Automatically Controlled |
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 036e2f54f4db..3ed1f5669f79 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c | |||
@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv, | |||
356 | So we keep it as-is. */ | 356 | So we keep it as-is. */ |
357 | return -EINVAL; | 357 | return -EINVAL; |
358 | } | 358 | } |
359 | clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); | 359 | freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); |
360 | tea5764_power_up(radio); | 360 | tea5764_power_up(radio); |
361 | tea5764_tune(radio, (freq * 125) / 2); | 361 | tea5764_tune(radio, (freq * 125) / 2); |
362 | return 0; | 362 | return 0; |
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 69e3245a58a0..a9319a24c7ef 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c | |||
@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen | |||
112 | if (f->tuner != 0) | 112 | if (f->tuner != 0) |
113 | return -EINVAL; | 113 | return -EINVAL; |
114 | 114 | ||
115 | clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); | 115 | freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); |
116 | pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; | 116 | pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; |
117 | i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; | 117 | i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; |
118 | i2cmsg[1] = (pll >> 8) & 0xff; | 118 | i2cmsg[1] = (pll >> 8) & 0xff; |
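Note on the tea5764 and tef6862 hunks above: both fix the same misuse. clamp() is an expression macro that returns the bounded value and leaves its argument untouched, so the result must be assigned back. A small sketch, with made-up frequency limits:

	#include <linux/kernel.h>	/* clamp() */

	#define MY_FREQ_MIN	76000000u
	#define MY_FREQ_MAX	108000000u

	static unsigned int my_clamp_freq(unsigned int freq)
	{
		/* A bare "clamp(freq, ...);" statement discards the result. */
		freq = clamp(freq, MY_FREQ_MIN, MY_FREQ_MAX);
		return freq;
	}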
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 72e3fa652481..f329485c6629 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c | |||
@@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) | |||
1370 | * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates | 1370 | * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates |
1371 | * 0x688301b7 and the right one 0x688481b7. All other keys generate | 1371 | * 0x688301b7 and the right one 0x688481b7. All other keys generate |
1372 | * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with | 1372 | * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with |
1373 | * reversed endianess. Extract direction from buffer, rotate endianess, | 1373 | * reversed endianness. Extract direction from buffer, rotate endianness, |
1374 | * adjust sign and feed the values into stabilize(). The resulting codes | 1374 | * adjust sign and feed the values into stabilize(). The resulting codes |
1375 | * will be 0x01008000, 0x01007F00, which match the newer devices. | 1375 | * will be 0x01008000, 0x01007F00, which match the newer devices. |
1376 | */ | 1376 | */ |
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 094484fac94c..a5d4f883d053 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c | |||
@@ -118,7 +118,7 @@ static int debug; | |||
118 | #define RR3_IR_IO_LENGTH_FUZZ 0x04 | 118 | #define RR3_IR_IO_LENGTH_FUZZ 0x04 |
119 | /* Timeout for end of signal detection */ | 119 | /* Timeout for end of signal detection */ |
120 | #define RR3_IR_IO_SIG_TIMEOUT 0x05 | 120 | #define RR3_IR_IO_SIG_TIMEOUT 0x05 |
121 | /* Minumum value for pause recognition. */ | 121 | /* Minimum value for pause recognition. */ |
122 | #define RR3_IR_IO_MIN_PAUSE 0x06 | 122 | #define RR3_IR_IO_MIN_PAUSE 0x06 |
123 | 123 | ||
124 | /* Clock freq. of EZ-USB chip */ | 124 | /* Clock freq. of EZ-USB chip */ |
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c index 2e1a02e360ff..20cca405bf45 100644 --- a/drivers/media/tuners/mt2063.c +++ b/drivers/media/tuners/mt2063.c | |||
@@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state, | |||
1195 | * DNC Output is selected, the other is always off) | 1195 | * DNC Output is selected, the other is always off) |
1196 | * | 1196 | * |
1197 | * @state: ptr to mt2063_state structure | 1197 | * @state: ptr to mt2063_state structure |
1198 | * @Mode: desired reciever delivery system | 1198 | * @Mode: desired receiver delivery system |
1199 | * | 1199 | * |
1200 | * Note: Register cache must be valid for it to work | 1200 | * Note: Register cache must be valid for it to work |
1201 | */ | 1201 | */ |
@@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe, | |||
2119 | 2119 | ||
2120 | /* | 2120 | /* |
2121 | * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. | 2121 | * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. |
2122 | * So, the amount of the needed bandwith is given by: | 2122 | * So, the amount of the needed bandwidth is given by: |
2123 | * Bw = Symbol_rate * (1 + 0.15) | 2123 | * Bw = Symbol_rate * (1 + 0.15) |
2124 | * As such, the maximum symbol rate supported by 6 MHz is given by: | 2124 | * As such, the maximum symbol rate supported by 6 MHz is given by: |
2125 | * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds | 2125 | * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds |
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h index 74dc46a71f64..7e4798783db7 100644 --- a/drivers/media/tuners/tuner-xc2028-types.h +++ b/drivers/media/tuners/tuner-xc2028-types.h | |||
@@ -119,7 +119,7 @@ | |||
119 | #define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) | 119 | #define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) |
120 | #define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) | 120 | #define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) |
121 | 121 | ||
122 | /* To preserve backward compatibilty, | 122 | /* To preserve backward compatibility, |
123 | (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported | 123 | (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported |
124 | */ | 124 | */ |
125 | 125 | ||
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e9d017bea377..528cce958a82 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c | |||
@@ -1412,8 +1412,8 @@ err_v4l2: | |||
1412 | usb_set_intfdata(interface, NULL); | 1412 | usb_set_intfdata(interface, NULL); |
1413 | err_if: | 1413 | err_if: |
1414 | usb_put_dev(udev); | 1414 | usb_put_dev(udev); |
1415 | kfree(dev); | ||
1416 | clear_bit(dev->devno, &cx231xx_devused); | 1415 | clear_bit(dev->devno, &cx231xx_devused); |
1416 | kfree(dev); | ||
1417 | return retval; | 1417 | return retval; |
1418 | } | 1418 | } |
1419 | 1419 | ||
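Note on the cx231xx hunk above: it is an ordering fix on the probe error path; clear_bit() was reading dev->devno after kfree(dev) had already released the structure, a use-after-free. Any field still needed must be consumed before the object is freed. A minimal illustration, with hypothetical my_* names:

	#include <linux/bitops.h>
	#include <linux/slab.h>

	static unsigned long my_devs_used;	/* bitmap of device slots in use */

	struct my_dev {
		int devno;
	};

	static void my_teardown(struct my_dev *dev)
	{
		/* Consume every field of *dev before freeing it. */
		clear_bit(dev->devno, &my_devs_used);
		kfree(dev);
	}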
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index c8fcd78425bd..8f9b2cea88f0 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c | |||
@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) | |||
131 | { | 131 | { |
132 | u8 wbuf[MAX_XFER_SIZE]; | 132 | u8 wbuf[MAX_XFER_SIZE]; |
133 | u8 mbox = (reg >> 16) & 0xff; | 133 | u8 mbox = (reg >> 16) & 0xff; |
134 | struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; | 134 | struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL }; |
135 | 135 | ||
136 | if (6 + len > sizeof(wbuf)) { | 136 | if (6 + len > sizeof(wbuf)) { |
137 | dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", | 137 | dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", |
@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
238 | } else { | 238 | } else { |
239 | /* I2C */ | 239 | /* I2C */ |
240 | u8 buf[MAX_XFER_SIZE]; | 240 | u8 buf[MAX_XFER_SIZE]; |
241 | struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), | 241 | struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len, |
242 | buf, msg[1].len, msg[1].buf }; | 242 | buf, msg[1].len, msg[1].buf }; |
243 | 243 | ||
244 | if (5 + msg[0].len > sizeof(buf)) { | 244 | if (5 + msg[0].len > sizeof(buf)) { |
245 | dev_warn(&d->udev->dev, | 245 | dev_warn(&d->udev->dev, |
246 | "%s: i2c xfer: len=%d is too big!\n", | 246 | "%s: i2c xfer: len=%d is too big!\n", |
247 | KBUILD_MODNAME, msg[0].len); | 247 | KBUILD_MODNAME, msg[0].len); |
248 | return -EOPNOTSUPP; | 248 | ret = -EOPNOTSUPP; |
249 | goto unlock; | ||
249 | } | 250 | } |
250 | req.mbox |= ((msg[0].addr & 0x80) >> 3); | 251 | req.mbox |= ((msg[0].addr & 0x80) >> 3); |
251 | buf[0] = msg[1].len; | 252 | buf[0] = msg[1].len; |
@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
274 | } else { | 275 | } else { |
275 | /* I2C */ | 276 | /* I2C */ |
276 | u8 buf[MAX_XFER_SIZE]; | 277 | u8 buf[MAX_XFER_SIZE]; |
277 | struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, | 278 | struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len, |
278 | 0, NULL }; | 279 | buf, 0, NULL }; |
279 | 280 | ||
280 | if (5 + msg[0].len > sizeof(buf)) { | 281 | if (5 + msg[0].len > sizeof(buf)) { |
281 | dev_warn(&d->udev->dev, | 282 | dev_warn(&d->udev->dev, |
282 | "%s: i2c xfer: len=%d is too big!\n", | 283 | "%s: i2c xfer: len=%d is too big!\n", |
283 | KBUILD_MODNAME, msg[0].len); | 284 | KBUILD_MODNAME, msg[0].len); |
284 | return -EOPNOTSUPP; | 285 | ret = -EOPNOTSUPP; |
286 | goto unlock; | ||
285 | } | 287 | } |
286 | req.mbox |= ((msg[0].addr & 0x80) >> 3); | 288 | req.mbox |= ((msg[0].addr & 0x80) >> 3); |
287 | buf[0] = msg[0].len; | 289 | buf[0] = msg[0].len; |
@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
319 | ret = -EOPNOTSUPP; | 321 | ret = -EOPNOTSUPP; |
320 | } | 322 | } |
321 | 323 | ||
324 | unlock: | ||
322 | mutex_unlock(&d->i2c_mutex); | 325 | mutex_unlock(&d->i2c_mutex); |
323 | 326 | ||
324 | if (ret < 0) | 327 | if (ret < 0) |
@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = { | |||
1534 | /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ | 1537 | /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ |
1535 | { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, | 1538 | { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, |
1536 | &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, | 1539 | &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, |
1540 | { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, | ||
1541 | &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, | ||
1537 | { } | 1542 | { } |
1538 | }; | 1543 | }; |
1539 | MODULE_DEVICE_TABLE(usb, af9035_id_table); | 1544 | MODULE_DEVICE_TABLE(usb, af9035_id_table); |
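Note on the af9035 hunk above: it does two things. The usb_req write length now reflects the actual payload (6 + len, or 5 + msg[0].len) instead of the full stack buffer, and the too-big error paths now go through an unlock label rather than returning with d->i2c_mutex still held. A minimal sketch of that unlock-via-goto pattern, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	struct my_adapter {
		struct mutex lock;
	};

	/* Every exit from the locked region funnels through the unlock
	 * label, so the mutex can never be leaked on an error path. */
	static int my_xfer(struct my_adapter *adap, size_t len, size_t max)
	{
		int ret = 0;

		if (mutex_lock_interruptible(&adap->lock))
			return -EAGAIN;

		if (len > max) {
			ret = -EOPNOTSUPP;
			goto unlock;
		}

		/* ... perform the transfer ... */

	unlock:
		mutex_unlock(&adap->lock);
		return ret;
	}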
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 2627553f7de1..08240e498451 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c | |||
@@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe) | |||
266 | struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; | 266 | struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; |
267 | int err; | 267 | int err; |
268 | 268 | ||
269 | /* exit if we didnt initialize the driver yet */ | 269 | /* exit if we didn't initialize the driver yet */ |
270 | if (!state->chip_id) { | 270 | if (!state->chip_id) { |
271 | mxl_debug("driver not yet initialized, exit."); | 271 | mxl_debug("driver not yet initialized, exit."); |
272 | goto fail; | 272 | goto fail; |
@@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe) | |||
322 | struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; | 322 | struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; |
323 | int err; | 323 | int err; |
324 | 324 | ||
325 | /* exit if we didnt initialize the driver yet */ | 325 | /* exit if we didn't initialize the driver yet */ |
326 | if (!state->chip_id) { | 326 | if (!state->chip_id) { |
327 | mxl_debug("driver not yet initialized, exit."); | 327 | mxl_debug("driver not yet initialized, exit."); |
328 | goto fail; | 328 | goto fail; |
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 40832a1aef6c..98d24aefb640 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c | |||
@@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, | |||
102 | if (rxlen > 62) { | 102 | if (rxlen > 62) { |
103 | err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", | 103 | err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", |
104 | device_addr); | 104 | device_addr); |
105 | txlen = 62; | 105 | rxlen = 62; |
106 | } | 106 | } |
107 | 107 | ||
108 | b[0] = I2C_SPEED_100KHZ_BIT; | 108 | b[0] = I2C_SPEED_100KHZ_BIT; |
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index fc5d60efd4ab..dd19c9ff76e0 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c | |||
@@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp) | |||
1664 | 1664 | ||
1665 | em28xx_videodbg("users=%d\n", dev->users); | 1665 | em28xx_videodbg("users=%d\n", dev->users); |
1666 | 1666 | ||
1667 | mutex_lock(&dev->lock); | ||
1668 | vb2_fop_release(filp); | 1667 | vb2_fop_release(filp); |
1668 | mutex_lock(&dev->lock); | ||
1669 | 1669 | ||
1670 | if (dev->users == 1) { | 1670 | if (dev->users == 1) { |
1671 | /* the device is already disconnect, | 1671 | /* the device is already disconnect, |
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c index cb1e64ca59c9..cea8d7f51c3c 100644 --- a/drivers/media/usb/gspca/gl860/gl860.c +++ b/drivers/media/usb/gspca/gl860/gl860.c | |||
@@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, | |||
438 | s32 nToSkip = | 438 | s32 nToSkip = |
439 | sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); | 439 | sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); |
440 | 440 | ||
441 | /* Test only against 0202h, so endianess does not matter */ | 441 | /* Test only against 0202h, so endianness does not matter */ |
442 | switch (*(s16 *) data) { | 442 | switch (*(s16 *) data) { |
443 | case 0x0202: /* End of frame, start a new one */ | 443 | case 0x0202: /* End of frame, start a new one */ |
444 | gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); | 444 | gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); |
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c index cd79c180f67b..07529e5a0c56 100644 --- a/drivers/media/usb/gspca/pac207.c +++ b/drivers/media/usb/gspca/pac207.c | |||
@@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, | |||
416 | #if IS_ENABLED(CONFIG_INPUT) | 416 | #if IS_ENABLED(CONFIG_INPUT) |
417 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, | 417 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, |
418 | u8 *data, /* interrupt packet data */ | 418 | u8 *data, /* interrupt packet data */ |
419 | int len) /* interrput packet length */ | 419 | int len) /* interrupt packet length */ |
420 | { | 420 | { |
421 | int ret = -EINVAL; | 421 | int ret = -EINVAL; |
422 | 422 | ||
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c index a91509643563..2fd1c5e31a0f 100644 --- a/drivers/media/usb/gspca/pac7302.c +++ b/drivers/media/usb/gspca/pac7302.c | |||
@@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev, | |||
874 | #if IS_ENABLED(CONFIG_INPUT) | 874 | #if IS_ENABLED(CONFIG_INPUT) |
875 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, | 875 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, |
876 | u8 *data, /* interrupt packet data */ | 876 | u8 *data, /* interrupt packet data */ |
877 | int len) /* interrput packet length */ | 877 | int len) /* interrupt packet length */ |
878 | { | 878 | { |
879 | int ret = -EINVAL; | 879 | int ret = -EINVAL; |
880 | u8 data0, data1; | 880 | u8 data0, data1; |
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c index 1fc80af2a189..48234c9a8b6c 100644 --- a/drivers/media/usb/gspca/stk1135.c +++ b/drivers/media/usb/gspca/stk1135.c | |||
@@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev) | |||
361 | 361 | ||
362 | /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ | 362 | /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ |
363 | reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); | 363 | reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); |
364 | |||
365 | /* wait a while for sensor to catch up */ | ||
366 | udelay(1000); | ||
364 | } | 367 | } |
365 | 368 | ||
366 | static void stk1135_camera_disable(struct gspca_dev *gspca_dev) | 369 | static void stk1135_camera_disable(struct gspca_dev *gspca_dev) |
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c index 9c0827631b9c..7f94ec74282e 100644 --- a/drivers/media/usb/gspca/stv0680.c +++ b/drivers/media/usb/gspca/stv0680.c | |||
@@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
139 | struct sd *sd = (struct sd *) gspca_dev; | 139 | struct sd *sd = (struct sd *) gspca_dev; |
140 | struct cam *cam = &gspca_dev->cam; | 140 | struct cam *cam = &gspca_dev->cam; |
141 | 141 | ||
142 | /* Give the camera some time to settle, otherwise initalization will | 142 | /* Give the camera some time to settle, otherwise initialization will |
143 | fail on hotplug, and yes it really needs a full second. */ | 143 | fail on hotplug, and yes it really needs a full second. */ |
144 | msleep(1000); | 144 | msleep(1000); |
145 | 145 | ||
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index a517d185febe..46c9f2229a18 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c | |||
@@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = { | |||
1027 | {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, | 1027 | {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, |
1028 | {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, | 1028 | {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, |
1029 | {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, | 1029 | {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, |
1030 | {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)}, | ||
1030 | {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, | 1031 | {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, |
1031 | {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, | 1032 | {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, |
1032 | {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, | 1033 | {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, |
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index 7b95d8e88a20..d3e1b6d8bf49 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c | |||
@@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, | |||
6905 | #if IS_ENABLED(CONFIG_INPUT) | 6905 | #if IS_ENABLED(CONFIG_INPUT) |
6906 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, | 6906 | static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, |
6907 | u8 *data, /* interrupt packet data */ | 6907 | u8 *data, /* interrupt packet data */ |
6908 | int len) /* interrput packet length */ | 6908 | int len) /* interrupt packet length */ |
6909 | { | 6909 | { |
6910 | if (len == 8 && data[4] == 1) { | 6910 | if (len == 8 && data[4] == 1) { |
6911 | input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); | 6911 | input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); |
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 77bbf7889659..78c9bc8e7f56 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c | |||
@@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id | |||
1039 | /* Set the leds off */ | 1039 | /* Set the leds off */ |
1040 | pwc_set_leds(pdev, 0, 0); | 1040 | pwc_set_leds(pdev, 0, 0); |
1041 | 1041 | ||
1042 | /* Setup intial videomode */ | 1042 | /* Setup initial videomode */ |
1043 | rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, | 1043 | rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, |
1044 | V4L2_PIX_FMT_YUV420, 30, &compression, 1); | 1044 | V4L2_PIX_FMT_YUV420, 30, &compression, 1); |
1045 | if (rc) | 1045 | if (rc) |
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index 8a505a90d318..6222a4ab1e00 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c | |||
@@ -50,13 +50,8 @@ | |||
50 | #define USBTV_ISOC_TRANSFERS 16 | 50 | #define USBTV_ISOC_TRANSFERS 16 |
51 | #define USBTV_ISOC_PACKETS 8 | 51 | #define USBTV_ISOC_PACKETS 8 |
52 | 52 | ||
53 | #define USBTV_WIDTH 720 | ||
54 | #define USBTV_HEIGHT 480 | ||
55 | |||
56 | #define USBTV_CHUNK_SIZE 256 | 53 | #define USBTV_CHUNK_SIZE 256 |
57 | #define USBTV_CHUNK 240 | 54 | #define USBTV_CHUNK 240 |
58 | #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ | ||
59 | / 4 / USBTV_CHUNK) | ||
60 | 55 | ||
61 | /* Chunk header. */ | 56 | /* Chunk header. */ |
62 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ | 57 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ |
@@ -65,6 +60,27 @@ | |||
65 | #define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) | 60 | #define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) |
66 | #define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) | 61 | #define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) |
67 | 62 | ||
63 | #define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL) | ||
64 | |||
65 | /* parameters for supported TV norms */ | ||
66 | struct usbtv_norm_params { | ||
67 | v4l2_std_id norm; | ||
68 | int cap_width, cap_height; | ||
69 | }; | ||
70 | |||
71 | static struct usbtv_norm_params norm_params[] = { | ||
72 | { | ||
73 | .norm = V4L2_STD_525_60, | ||
74 | .cap_width = 720, | ||
75 | .cap_height = 480, | ||
76 | }, | ||
77 | { | ||
78 | .norm = V4L2_STD_PAL, | ||
79 | .cap_width = 720, | ||
80 | .cap_height = 576, | ||
81 | } | ||
82 | }; | ||
83 | |||
68 | /* A single videobuf2 frame buffer. */ | 84 | /* A single videobuf2 frame buffer. */ |
69 | struct usbtv_buf { | 85 | struct usbtv_buf { |
70 | struct vb2_buffer vb; | 86 | struct vb2_buffer vb; |
@@ -94,11 +110,38 @@ struct usbtv { | |||
94 | USBTV_COMPOSITE_INPUT, | 110 | USBTV_COMPOSITE_INPUT, |
95 | USBTV_SVIDEO_INPUT, | 111 | USBTV_SVIDEO_INPUT, |
96 | } input; | 112 | } input; |
113 | v4l2_std_id norm; | ||
114 | int width, height; | ||
115 | int n_chunks; | ||
97 | int iso_size; | 116 | int iso_size; |
98 | unsigned int sequence; | 117 | unsigned int sequence; |
99 | struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; | 118 | struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; |
100 | }; | 119 | }; |
101 | 120 | ||
121 | static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm) | ||
122 | { | ||
123 | int i, ret = 0; | ||
124 | struct usbtv_norm_params *params = NULL; | ||
125 | |||
126 | for (i = 0; i < ARRAY_SIZE(norm_params); i++) { | ||
127 | if (norm_params[i].norm & norm) { | ||
128 | params = &norm_params[i]; | ||
129 | break; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | if (params) { | ||
134 | usbtv->width = params->cap_width; | ||
135 | usbtv->height = params->cap_height; | ||
136 | usbtv->n_chunks = usbtv->width * usbtv->height | ||
137 | / 4 / USBTV_CHUNK; | ||
138 | usbtv->norm = params->norm; | ||
139 | } else | ||
140 | ret = -EINVAL; | ||
141 | |||
142 | return ret; | ||
143 | } | ||
144 | |||
102 | static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) | 145 | static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) |
103 | { | 146 | { |
104 | int ret; | 147 | int ret; |
@@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input) | |||
158 | return ret; | 201 | return ret; |
159 | } | 202 | } |
160 | 203 | ||
204 | static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm) | ||
205 | { | ||
206 | int ret; | ||
207 | static const u16 pal[][2] = { | ||
208 | { USBTV_BASE + 0x001a, 0x0068 }, | ||
209 | { USBTV_BASE + 0x010e, 0x0072 }, | ||
210 | { USBTV_BASE + 0x010f, 0x00a2 }, | ||
211 | { USBTV_BASE + 0x0112, 0x00b0 }, | ||
212 | { USBTV_BASE + 0x0117, 0x0001 }, | ||
213 | { USBTV_BASE + 0x0118, 0x002c }, | ||
214 | { USBTV_BASE + 0x012d, 0x0010 }, | ||
215 | { USBTV_BASE + 0x012f, 0x0020 }, | ||
216 | { USBTV_BASE + 0x024f, 0x0002 }, | ||
217 | { USBTV_BASE + 0x0254, 0x0059 }, | ||
218 | { USBTV_BASE + 0x025a, 0x0016 }, | ||
219 | { USBTV_BASE + 0x025b, 0x0035 }, | ||
220 | { USBTV_BASE + 0x0263, 0x0017 }, | ||
221 | { USBTV_BASE + 0x0266, 0x0016 }, | ||
222 | { USBTV_BASE + 0x0267, 0x0036 } | ||
223 | }; | ||
224 | |||
225 | static const u16 ntsc[][2] = { | ||
226 | { USBTV_BASE + 0x001a, 0x0079 }, | ||
227 | { USBTV_BASE + 0x010e, 0x0068 }, | ||
228 | { USBTV_BASE + 0x010f, 0x009c }, | ||
229 | { USBTV_BASE + 0x0112, 0x00f0 }, | ||
230 | { USBTV_BASE + 0x0117, 0x0000 }, | ||
231 | { USBTV_BASE + 0x0118, 0x00fc }, | ||
232 | { USBTV_BASE + 0x012d, 0x0004 }, | ||
233 | { USBTV_BASE + 0x012f, 0x0008 }, | ||
234 | { USBTV_BASE + 0x024f, 0x0001 }, | ||
235 | { USBTV_BASE + 0x0254, 0x005f }, | ||
236 | { USBTV_BASE + 0x025a, 0x0012 }, | ||
237 | { USBTV_BASE + 0x025b, 0x0001 }, | ||
238 | { USBTV_BASE + 0x0263, 0x001c }, | ||
239 | { USBTV_BASE + 0x0266, 0x0011 }, | ||
240 | { USBTV_BASE + 0x0267, 0x0005 } | ||
241 | }; | ||
242 | |||
243 | ret = usbtv_configure_for_norm(usbtv, norm); | ||
244 | |||
245 | if (!ret) { | ||
246 | if (norm & V4L2_STD_525_60) | ||
247 | ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc)); | ||
248 | else if (norm & V4L2_STD_PAL) | ||
249 | ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal)); | ||
250 | } | ||
251 | |||
252 | return ret; | ||
253 | } | ||
254 | |||
161 | static int usbtv_setup_capture(struct usbtv *usbtv) | 255 | static int usbtv_setup_capture(struct usbtv *usbtv) |
162 | { | 256 | { |
163 | int ret; | 257 | int ret; |
@@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv) | |||
225 | 319 | ||
226 | { USBTV_BASE + 0x0284, 0x0088 }, | 320 | { USBTV_BASE + 0x0284, 0x0088 }, |
227 | { USBTV_BASE + 0x0003, 0x0004 }, | 321 | { USBTV_BASE + 0x0003, 0x0004 }, |
228 | { USBTV_BASE + 0x001a, 0x0079 }, | ||
229 | { USBTV_BASE + 0x0100, 0x00d3 }, | 322 | { USBTV_BASE + 0x0100, 0x00d3 }, |
230 | { USBTV_BASE + 0x010e, 0x0068 }, | ||
231 | { USBTV_BASE + 0x010f, 0x009c }, | ||
232 | { USBTV_BASE + 0x0112, 0x00f0 }, | ||
233 | { USBTV_BASE + 0x0115, 0x0015 }, | 323 | { USBTV_BASE + 0x0115, 0x0015 }, |
234 | { USBTV_BASE + 0x0117, 0x0000 }, | ||
235 | { USBTV_BASE + 0x0118, 0x00fc }, | ||
236 | { USBTV_BASE + 0x012d, 0x0004 }, | ||
237 | { USBTV_BASE + 0x012f, 0x0008 }, | ||
238 | { USBTV_BASE + 0x0220, 0x002e }, | 324 | { USBTV_BASE + 0x0220, 0x002e }, |
239 | { USBTV_BASE + 0x0225, 0x0008 }, | 325 | { USBTV_BASE + 0x0225, 0x0008 }, |
240 | { USBTV_BASE + 0x024e, 0x0002 }, | 326 | { USBTV_BASE + 0x024e, 0x0002 }, |
241 | { USBTV_BASE + 0x024f, 0x0001 }, | ||
242 | { USBTV_BASE + 0x0254, 0x005f }, | ||
243 | { USBTV_BASE + 0x025a, 0x0012 }, | ||
244 | { USBTV_BASE + 0x025b, 0x0001 }, | ||
245 | { USBTV_BASE + 0x0263, 0x001c }, | ||
246 | { USBTV_BASE + 0x0266, 0x0011 }, | ||
247 | { USBTV_BASE + 0x0267, 0x0005 }, | ||
248 | { USBTV_BASE + 0x024e, 0x0002 }, | 327 | { USBTV_BASE + 0x024e, 0x0002 }, |
249 | { USBTV_BASE + 0x024f, 0x0002 }, | 328 | { USBTV_BASE + 0x024f, 0x0002 }, |
250 | }; | 329 | }; |
@@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv) | |||
253 | if (ret) | 332 | if (ret) |
254 | return ret; | 333 | return ret; |
255 | 334 | ||
335 | ret = usbtv_select_norm(usbtv, usbtv->norm); | ||
336 | if (ret) | ||
337 | return ret; | ||
338 | |||
256 | ret = usbtv_select_input(usbtv, usbtv->input); | 339 | ret = usbtv_select_input(usbtv, usbtv->input); |
257 | if (ret) | 340 | if (ret) |
258 | return ret; | 341 | return ret; |
@@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
296 | frame_id = USBTV_FRAME_ID(chunk); | 379 | frame_id = USBTV_FRAME_ID(chunk); |
297 | odd = USBTV_ODD(chunk); | 380 | odd = USBTV_ODD(chunk); |
298 | chunk_no = USBTV_CHUNK_NO(chunk); | 381 | chunk_no = USBTV_CHUNK_NO(chunk); |
299 | if (chunk_no >= USBTV_CHUNKS) | 382 | if (chunk_no >= usbtv->n_chunks) |
300 | return; | 383 | return; |
301 | 384 | ||
302 | /* Beginning of a frame. */ | 385 | /* Beginning of a frame. */ |
@@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
324 | usbtv->chunks_done++; | 407 | usbtv->chunks_done++; |
325 | 408 | ||
326 | /* Last chunk in a frame, signalling an end */ | 409 | /* Last chunk in a frame, signalling an end */ |
327 | if (odd && chunk_no == USBTV_CHUNKS-1) { | 410 | if (odd && chunk_no == usbtv->n_chunks-1) { |
328 | int size = vb2_plane_size(&buf->vb, 0); | 411 | int size = vb2_plane_size(&buf->vb, 0); |
329 | enum vb2_buffer_state state = usbtv->chunks_done == | 412 | enum vb2_buffer_state state = usbtv->chunks_done == |
330 | USBTV_CHUNKS ? | 413 | usbtv->n_chunks ? |
331 | VB2_BUF_STATE_DONE : | 414 | VB2_BUF_STATE_DONE : |
332 | VB2_BUF_STATE_ERROR; | 415 | VB2_BUF_STATE_ERROR; |
333 | 416 | ||
@@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv, | |||
500 | static int usbtv_enum_input(struct file *file, void *priv, | 583 | static int usbtv_enum_input(struct file *file, void *priv, |
501 | struct v4l2_input *i) | 584 | struct v4l2_input *i) |
502 | { | 585 | { |
586 | struct usbtv *dev = video_drvdata(file); | ||
587 | |||
503 | switch (i->index) { | 588 | switch (i->index) { |
504 | case USBTV_COMPOSITE_INPUT: | 589 | case USBTV_COMPOSITE_INPUT: |
505 | strlcpy(i->name, "Composite", sizeof(i->name)); | 590 | strlcpy(i->name, "Composite", sizeof(i->name)); |
@@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv, | |||
512 | } | 597 | } |
513 | 598 | ||
514 | i->type = V4L2_INPUT_TYPE_CAMERA; | 599 | i->type = V4L2_INPUT_TYPE_CAMERA; |
515 | i->std = V4L2_STD_525_60; | 600 | i->std = dev->vdev.tvnorms; |
516 | return 0; | 601 | return 0; |
517 | } | 602 | } |
518 | 603 | ||
@@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, | |||
531 | static int usbtv_fmt_vid_cap(struct file *file, void *priv, | 616 | static int usbtv_fmt_vid_cap(struct file *file, void *priv, |
532 | struct v4l2_format *f) | 617 | struct v4l2_format *f) |
533 | { | 618 | { |
534 | f->fmt.pix.width = USBTV_WIDTH; | 619 | struct usbtv *usbtv = video_drvdata(file); |
535 | f->fmt.pix.height = USBTV_HEIGHT; | 620 | |
621 | f->fmt.pix.width = usbtv->width; | ||
622 | f->fmt.pix.height = usbtv->height; | ||
536 | f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; | 623 | f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; |
537 | f->fmt.pix.field = V4L2_FIELD_INTERLACED; | 624 | f->fmt.pix.field = V4L2_FIELD_INTERLACED; |
538 | f->fmt.pix.bytesperline = USBTV_WIDTH * 2; | 625 | f->fmt.pix.bytesperline = usbtv->width * 2; |
539 | f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); | 626 | f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); |
540 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; | 627 | f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; |
541 | f->fmt.pix.priv = 0; | 628 | |
542 | return 0; | 629 | return 0; |
543 | } | 630 | } |
544 | 631 | ||
545 | static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) | 632 | static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) |
546 | { | 633 | { |
547 | *norm = V4L2_STD_525_60; | 634 | struct usbtv *usbtv = video_drvdata(file); |
635 | *norm = usbtv->norm; | ||
548 | return 0; | 636 | return 0; |
549 | } | 637 | } |
550 | 638 | ||
639 | static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) | ||
640 | { | ||
641 | int ret = -EINVAL; | ||
642 | struct usbtv *usbtv = video_drvdata(file); | ||
643 | |||
644 | if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL)) | ||
645 | ret = usbtv_select_norm(usbtv, norm); | ||
646 | |||
647 | return ret; | ||
648 | } | ||
649 | |||
551 | static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) | 650 | static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) |
552 | { | 651 | { |
553 | struct usbtv *usbtv = video_drvdata(file); | 652 | struct usbtv *usbtv = video_drvdata(file); |
@@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i) | |||
561 | return usbtv_select_input(usbtv, i); | 660 | return usbtv_select_input(usbtv, i); |
562 | } | 661 | } |
563 | 662 | ||
564 | static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) | ||
565 | { | ||
566 | if (norm & V4L2_STD_525_60) | ||
567 | return 0; | ||
568 | return -EINVAL; | ||
569 | } | ||
570 | |||
571 | struct v4l2_ioctl_ops usbtv_ioctl_ops = { | 663 | struct v4l2_ioctl_ops usbtv_ioctl_ops = { |
572 | .vidioc_querycap = usbtv_querycap, | 664 | .vidioc_querycap = usbtv_querycap, |
573 | .vidioc_enum_input = usbtv_enum_input, | 665 | .vidioc_enum_input = usbtv_enum_input, |
@@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq, | |||
604 | const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, | 696 | const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, |
605 | unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) | 697 | unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) |
606 | { | 698 | { |
699 | struct usbtv *usbtv = vb2_get_drv_priv(vq); | ||
700 | |||
607 | if (*nbuffers < 2) | 701 | if (*nbuffers < 2) |
608 | *nbuffers = 2; | 702 | *nbuffers = 2; |
609 | *nplanes = 1; | 703 | *nplanes = 1; |
610 | sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); | 704 | sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32); |
611 | 705 | ||
612 | return 0; | 706 | return 0; |
613 | } | 707 | } |
@@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf, | |||
690 | return -ENOMEM; | 784 | return -ENOMEM; |
691 | usbtv->dev = dev; | 785 | usbtv->dev = dev; |
692 | usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); | 786 | usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); |
787 | |||
693 | usbtv->iso_size = size; | 788 | usbtv->iso_size = size; |
789 | |||
790 | (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60); | ||
791 | |||
694 | spin_lock_init(&usbtv->buflock); | 792 | spin_lock_init(&usbtv->buflock); |
695 | mutex_init(&usbtv->v4l2_lock); | 793 | mutex_init(&usbtv->v4l2_lock); |
696 | mutex_init(&usbtv->vb2q_lock); | 794 | mutex_init(&usbtv->vb2q_lock); |
@@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf, | |||
727 | usbtv->vdev.release = video_device_release_empty; | 825 | usbtv->vdev.release = video_device_release_empty; |
728 | usbtv->vdev.fops = &usbtv_fops; | 826 | usbtv->vdev.fops = &usbtv_fops; |
729 | usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; | 827 | usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; |
730 | usbtv->vdev.tvnorms = V4L2_STD_525_60; | 828 | usbtv->vdev.tvnorms = USBTV_TV_STD; |
731 | usbtv->vdev.queue = &usbtv->vb2q; | 829 | usbtv->vdev.queue = &usbtv->vb2q; |
732 | usbtv->vdev.lock = &usbtv->v4l2_lock; | 830 | usbtv->vdev.lock = &usbtv->v4l2_lock; |
733 | set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); | 831 | set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); |
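The per-norm sizing introduced in the usbtv hunk above can be checked by hand. With USBTV_CHUNK = 240 32-bit words per chunk, n_chunks = width * height / 4 / USBTV_CHUNK gives 720 * 480 / 4 / 240 = 360 chunks for 525/60 and 720 * 576 / 4 / 240 = 432 chunks for PAL. The queue_setup size USBTV_CHUNK * n_chunks * 2 * sizeof(u32) then comes to 691200 and 829440 bytes respectively, which is exactly width * height * 2, i.e. one interlaced YUYV frame for each norm.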
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 899cb6d1c4a4..898c208889cd 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c | |||
@@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample) | |||
556 | * | 556 | * |
557 | * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) | 557 | * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) |
558 | * | 558 | * |
559 | * to avoid loosing precision in the division. Similarly, the host timestamp is | 559 | * to avoid losing precision in the division. Similarly, the host timestamp is |
560 | * computed with | 560 | * computed with |
561 | * | 561 | * |
562 | * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) | 562 | * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) |
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 60dcc0f3b32e..fb46790d0eca 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c | |||
@@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id) | |||
420 | "Advanced Simple", | 420 | "Advanced Simple", |
421 | "Core", | 421 | "Core", |
422 | "Simple Scalable", | 422 | "Simple Scalable", |
423 | "Advanced Coding Efficency", | 423 | "Advanced Coding Efficiency", |
424 | NULL, | 424 | NULL, |
425 | }; | 425 | }; |
426 | 426 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index b19b306c8f7f..0edc165f418d 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) | |||
145 | } | 145 | } |
146 | 146 | ||
147 | /** | 147 | /** |
148 | * __setup_lengths() - setup initial lengths for every plane in | ||
149 | * every buffer on the queue | ||
150 | */ | ||
151 | static void __setup_lengths(struct vb2_queue *q, unsigned int n) | ||
152 | { | ||
153 | unsigned int buffer, plane; | ||
154 | struct vb2_buffer *vb; | ||
155 | |||
156 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { | ||
157 | vb = q->bufs[buffer]; | ||
158 | if (!vb) | ||
159 | continue; | ||
160 | |||
161 | for (plane = 0; plane < vb->num_planes; ++plane) | ||
162 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /** | ||
148 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in | 167 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in |
149 | * every buffer on the queue | 168 | * every buffer on the queue |
150 | */ | 169 | */ |
@@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) | |||
169 | continue; | 188 | continue; |
170 | 189 | ||
171 | for (plane = 0; plane < vb->num_planes; ++plane) { | 190 | for (plane = 0; plane < vb->num_planes; ++plane) { |
172 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | ||
173 | vb->v4l2_planes[plane].m.mem_offset = off; | 191 | vb->v4l2_planes[plane].m.mem_offset = off; |
174 | 192 | ||
175 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", | 193 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", |
@@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, | |||
241 | q->bufs[q->num_buffers + buffer] = vb; | 259 | q->bufs[q->num_buffers + buffer] = vb; |
242 | } | 260 | } |
243 | 261 | ||
262 | __setup_lengths(q, buffer); | ||
244 | if (memory == V4L2_MEMORY_MMAP) | 263 | if (memory == V4L2_MEMORY_MMAP) |
245 | __setup_offsets(q, buffer); | 264 | __setup_offsets(q, buffer); |
246 | 265 | ||
@@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) | |||
1824 | return -EINVAL; | 1843 | return -EINVAL; |
1825 | } | 1844 | } |
1826 | 1845 | ||
1827 | if (eb->flags & ~O_CLOEXEC) { | 1846 | if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) { |
1828 | dprintk(1, "Queue does support only O_CLOEXEC flag\n"); | 1847 | dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n"); |
1829 | return -EINVAL; | 1848 | return -EINVAL; |
1830 | } | 1849 | } |
1831 | 1850 | ||
@@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) | |||
1848 | 1867 | ||
1849 | vb_plane = &vb->planes[eb->plane]; | 1868 | vb_plane = &vb->planes[eb->plane]; |
1850 | 1869 | ||
1851 | dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); | 1870 | dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE); |
1852 | if (IS_ERR_OR_NULL(dbuf)) { | 1871 | if (IS_ERR_OR_NULL(dbuf)) { |
1853 | dprintk(1, "Failed to export buffer %d, plane %d\n", | 1872 | dprintk(1, "Failed to export buffer %d, plane %d\n", |
1854 | eb->index, eb->plane); | 1873 | eb->index, eb->plane); |
1855 | return -EINVAL; | 1874 | return -EINVAL; |
1856 | } | 1875 | } |
1857 | 1876 | ||
1858 | ret = dma_buf_fd(dbuf, eb->flags); | 1877 | ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE); |
1859 | if (ret < 0) { | 1878 | if (ret < 0) { |
1860 | dprintk(3, "buffer %d, plane %d failed to export (%d)\n", | 1879 | dprintk(3, "buffer %d, plane %d failed to export (%d)\n", |
1861 | eb->index, eb->plane, ret); | 1880 | eb->index, eb->plane, ret); |
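The vb2_expbuf hunk above both widens the accepted flag set and splits it: the O_ACCMODE bits are passed to the memop that creates the dma-buf, while the remaining bits (in practice O_CLOEXEC) go to dma_buf_fd() when the descriptor is installed. A rough sketch of that validate-and-split step, with demo_* names as placeholders:

#include <linux/errno.h>
#include <linux/fcntl.h>

/*
 * Hypothetical helper: accept only O_CLOEXEC plus an access mode, hand the
 * access mode to the exporter and the remaining bits to the fd installation.
 */
static int demo_split_export_flags(unsigned int flags,
				   unsigned int *export_flags,
				   unsigned int *fd_flags)
{
	if (flags & ~(O_CLOEXEC | O_ACCMODE))
		return -EINVAL;

	*export_flags = flags & O_ACCMODE;	/* O_RDONLY, O_WRONLY or O_RDWR */
	*fd_flags = flags & ~O_ACCMODE;		/* typically just O_CLOEXEC */
	return 0;
}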
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 646f08f4f504..33d3871d1e13 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) | |||
393 | return sgt; | 393 | return sgt; |
394 | } | 394 | } |
395 | 395 | ||
396 | static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) | 396 | static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags) |
397 | { | 397 | { |
398 | struct vb2_dc_buf *buf = buf_priv; | 398 | struct vb2_dc_buf *buf = buf_priv; |
399 | struct dma_buf *dbuf; | 399 | struct dma_buf *dbuf; |
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) | |||
404 | if (WARN_ON(!buf->sgt_base)) | 404 | if (WARN_ON(!buf->sgt_base)) |
405 | return NULL; | 405 | return NULL; |
406 | 406 | ||
407 | dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); | 407 | dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); |
408 | if (IS_ERR(dbuf)) | 408 | if (IS_ERR(dbuf)) |
409 | return NULL; | 409 | return NULL; |
410 | 410 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 2f860543912c..0d3a8ffe47a3 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
@@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
178 | buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), | 178 | buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), |
179 | GFP_KERNEL); | 179 | GFP_KERNEL); |
180 | if (!buf->pages) | 180 | if (!buf->pages) |
181 | return NULL; | 181 | goto userptr_fail_alloc_pages; |
182 | 182 | ||
183 | num_pages_from_user = get_user_pages(current, current->mm, | 183 | num_pages_from_user = get_user_pages(current, current->mm, |
184 | vaddr & PAGE_MASK, | 184 | vaddr & PAGE_MASK, |
@@ -204,6 +204,7 @@ userptr_fail_get_user_pages: | |||
204 | while (--num_pages_from_user >= 0) | 204 | while (--num_pages_from_user >= 0) |
205 | put_page(buf->pages[num_pages_from_user]); | 205 | put_page(buf->pages[num_pages_from_user]); |
206 | kfree(buf->pages); | 206 | kfree(buf->pages); |
207 | userptr_fail_alloc_pages: | ||
207 | kfree(buf); | 208 | kfree(buf); |
208 | return NULL; | 209 | return NULL; |
209 | } | 210 | } |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 62a60caa5d1f..dd671582c9a1 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -32,7 +32,7 @@ config MFD_AS3722 | |||
32 | select MFD_CORE | 32 | select MFD_CORE |
33 | select REGMAP_I2C | 33 | select REGMAP_I2C |
34 | select REGMAP_IRQ | 34 | select REGMAP_IRQ |
35 | depends on I2C && OF | 35 | depends on I2C=y && OF |
36 | help | 36 | help |
37 | The ams AS3722 is a compact system PMU suitable for mobile phones, | 37 | The ams AS3722 is a compact system PMU suitable for mobile phones, |
38 | tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down | 38 | tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down |
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index da1c6566d93d..37edf9e989b0 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c | |||
@@ -506,7 +506,7 @@ static struct lpc_ich_info lpc_chipset_info[] = { | |||
506 | .iTCO_version = 2, | 506 | .iTCO_version = 2, |
507 | }, | 507 | }, |
508 | [LPC_WPT_LP] = { | 508 | [LPC_WPT_LP] = { |
509 | .name = "Lynx Point_LP", | 509 | .name = "Wildcat Point_LP", |
510 | .iTCO_version = 2, | 510 | .iTCO_version = 2, |
511 | }, | 511 | }, |
512 | }; | 512 | }; |
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 11e20afbdcac..705698fd2c7e 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c | |||
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) | |||
1228 | 1228 | ||
1229 | pcr->remove_pci = true; | 1229 | pcr->remove_pci = true; |
1230 | 1230 | ||
1231 | cancel_delayed_work(&pcr->carddet_work); | 1231 | /* Disable interrupts at the pcr level */ |
1232 | cancel_delayed_work(&pcr->idle_work); | 1232 | spin_lock_irq(&pcr->lock); |
1233 | rtsx_pci_writel(pcr, RTSX_BIER, 0); | ||
1234 | pcr->bier = 0; | ||
1235 | spin_unlock_irq(&pcr->lock); | ||
1236 | |||
1237 | cancel_delayed_work_sync(&pcr->carddet_work); | ||
1238 | cancel_delayed_work_sync(&pcr->idle_work); | ||
1233 | 1239 | ||
1234 | mfd_remove_devices(&pcidev->dev); | 1240 | mfd_remove_devices(&pcidev->dev); |
1235 | 1241 | ||
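The rtsx_pci_remove hunk above follows a common teardown order: first mask the interrupt enable register under the spinlock so the hardware can no longer schedule new work, then switch to the _sync cancel variants so card-detect and idle work that is already running finishes before the MFD children are removed. A minimal sketch of that ordering, with demo_priv as a hypothetical device:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_priv {
	spinlock_t lock;
	bool irq_enabled;
	struct delayed_work detect_work;
};

static void demo_remove(struct demo_priv *priv)
{
	/* Stop the interrupt path from scheduling any new work. */
	spin_lock_irq(&priv->lock);
	priv->irq_enabled = false;
	spin_unlock_irq(&priv->lock);

	/* Wait for work that is already queued or running to finish. */
	cancel_delayed_work_sync(&priv->detect_work);
}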
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index 34c18fb8c089..54cc25546592 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c | |||
@@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = { | |||
81 | 81 | ||
82 | int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) | 82 | int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) |
83 | { | 83 | { |
84 | return regmap_read(sec_pmic->regmap, reg, dest); | 84 | return regmap_read(sec_pmic->regmap_pmic, reg, dest); |
85 | } | 85 | } |
86 | EXPORT_SYMBOL_GPL(sec_reg_read); | 86 | EXPORT_SYMBOL_GPL(sec_reg_read); |
87 | 87 | ||
88 | int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) | 88 | int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) |
89 | { | 89 | { |
90 | return regmap_bulk_read(sec_pmic->regmap, reg, buf, count); | 90 | return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count); |
91 | } | 91 | } |
92 | EXPORT_SYMBOL_GPL(sec_bulk_read); | 92 | EXPORT_SYMBOL_GPL(sec_bulk_read); |
93 | 93 | ||
94 | int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) | 94 | int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) |
95 | { | 95 | { |
96 | return regmap_write(sec_pmic->regmap, reg, value); | 96 | return regmap_write(sec_pmic->regmap_pmic, reg, value); |
97 | } | 97 | } |
98 | EXPORT_SYMBOL_GPL(sec_reg_write); | 98 | EXPORT_SYMBOL_GPL(sec_reg_write); |
99 | 99 | ||
100 | int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) | 100 | int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) |
101 | { | 101 | { |
102 | return regmap_raw_write(sec_pmic->regmap, reg, buf, count); | 102 | return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count); |
103 | } | 103 | } |
104 | EXPORT_SYMBOL_GPL(sec_bulk_write); | 104 | EXPORT_SYMBOL_GPL(sec_bulk_write); |
105 | 105 | ||
106 | int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) | 106 | int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) |
107 | { | 107 | { |
108 | return regmap_update_bits(sec_pmic->regmap, reg, mask, val); | 108 | return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val); |
109 | } | 109 | } |
110 | EXPORT_SYMBOL_GPL(sec_reg_update); | 110 | EXPORT_SYMBOL_GPL(sec_reg_update); |
111 | 111 | ||
@@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = { | |||
166 | .cache_type = REGCACHE_FLAT, | 166 | .cache_type = REGCACHE_FLAT, |
167 | }; | 167 | }; |
168 | 168 | ||
169 | static const struct regmap_config sec_rtc_regmap_config = { | ||
170 | .reg_bits = 8, | ||
171 | .val_bits = 8, | ||
172 | }; | ||
173 | |||
169 | #ifdef CONFIG_OF | 174 | #ifdef CONFIG_OF |
170 | /* | 175 | /* |
171 | * Only the common platform data elements for s5m8767 are parsed here from the | 176 | * Only the common platform data elements for s5m8767 are parsed here from the |
@@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c, | |||
266 | break; | 271 | break; |
267 | } | 272 | } |
268 | 273 | ||
269 | sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap); | 274 | sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap); |
270 | if (IS_ERR(sec_pmic->regmap)) { | 275 | if (IS_ERR(sec_pmic->regmap_pmic)) { |
271 | ret = PTR_ERR(sec_pmic->regmap); | 276 | ret = PTR_ERR(sec_pmic->regmap_pmic); |
272 | dev_err(&i2c->dev, "Failed to allocate register map: %d\n", | 277 | dev_err(&i2c->dev, "Failed to allocate register map: %d\n", |
273 | ret); | 278 | ret); |
274 | return ret; | 279 | return ret; |
@@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c, | |||
277 | sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); | 282 | sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); |
278 | i2c_set_clientdata(sec_pmic->rtc, sec_pmic); | 283 | i2c_set_clientdata(sec_pmic->rtc, sec_pmic); |
279 | 284 | ||
285 | sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, | ||
286 | &sec_rtc_regmap_config); | ||
287 | if (IS_ERR(sec_pmic->regmap_rtc)) { | ||
288 | ret = PTR_ERR(sec_pmic->regmap_rtc); | ||
289 | dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n", | ||
290 | ret); | ||
291 | return ret; | ||
292 | } | ||
293 | |||
280 | if (pdata && pdata->cfg_pmic_irq) | 294 | if (pdata && pdata->cfg_pmic_irq) |
281 | pdata->cfg_pmic_irq(); | 295 | pdata->cfg_pmic_irq(); |
282 | 296 | ||
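The sec-core hunk above separates the PMIC and RTC register maps: the existing map is renamed regmap_pmic, and a second regmap with a plain 8-bit register/8-bit value layout is created on the dummy I2C client that answers at the RTC address. A sketch of that second-regmap setup under the same assumptions (the demo_* names and the 0x0c address are placeholders):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_rtc_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int demo_attach_rtc(struct i2c_client *pmic, struct regmap **rtc_map)
{
	struct i2c_client *rtc;

	/* Second address on the same adapter, served by a dummy client. */
	rtc = i2c_new_dummy(pmic->adapter, 0x0c);
	if (!rtc)
		return -ENODEV;

	*rtc_map = devm_regmap_init_i2c(rtc, &demo_rtc_regmap_config);
	return PTR_ERR_OR_ZERO(*rtc_map);
}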
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c index 0dd84e99081e..b441b1be27cb 100644 --- a/drivers/mfd/sec-irq.c +++ b/drivers/mfd/sec-irq.c | |||
@@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) | |||
280 | 280 | ||
281 | switch (type) { | 281 | switch (type) { |
282 | case S5M8763X: | 282 | case S5M8763X: |
283 | ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, | 283 | ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, |
284 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 284 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
285 | sec_pmic->irq_base, &s5m8763_irq_chip, | 285 | sec_pmic->irq_base, &s5m8763_irq_chip, |
286 | &sec_pmic->irq_data); | 286 | &sec_pmic->irq_data); |
287 | break; | 287 | break; |
288 | case S5M8767X: | 288 | case S5M8767X: |
289 | ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, | 289 | ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, |
290 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 290 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
291 | sec_pmic->irq_base, &s5m8767_irq_chip, | 291 | sec_pmic->irq_base, &s5m8767_irq_chip, |
292 | &sec_pmic->irq_data); | 292 | &sec_pmic->irq_data); |
293 | break; | 293 | break; |
294 | case S2MPS11X: | 294 | case S2MPS11X: |
295 | ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, | 295 | ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, |
296 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 296 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
297 | sec_pmic->irq_base, &s2mps11_irq_chip, | 297 | sec_pmic->irq_base, &s2mps11_irq_chip, |
298 | &sec_pmic->irq_data); | 298 | &sec_pmic->irq_data); |
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c index 71e3e0c5bf73..a5424579679c 100644 --- a/drivers/mfd/ti-ssp.c +++ b/drivers/mfd/ti-ssp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/sched.h> | ||
35 | #include <linux/mfd/core.h> | 36 | #include <linux/mfd/core.h> |
36 | #include <linux/mfd/ti_ssp.h> | 37 | #include <linux/mfd/ti_ssp.h> |
37 | 38 | ||
@@ -409,7 +410,6 @@ static int ti_ssp_probe(struct platform_device *pdev) | |||
409 | cells[id].id = id; | 410 | cells[id].id = id; |
410 | cells[id].name = data->dev_name; | 411 | cells[id].name = data->dev_name; |
411 | cells[id].platform_data = data->pdata; | 412 | cells[id].platform_data = data->pdata; |
412 | cells[id].data_size = data->pdata_size; | ||
413 | } | 413 | } |
414 | 414 | ||
415 | error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); | 415 | error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6c0fde55270d..66f411a6e8ea 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -109,9 +109,12 @@ | |||
109 | #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ | 109 | #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ |
110 | #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ | 110 | #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ |
111 | 111 | ||
112 | #define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ | 112 | #define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */ |
113 | #define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ | 113 | #define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ |
114 | #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ | 114 | #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ |
115 | #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ | ||
116 | |||
117 | #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ | ||
115 | /* | 118 | /* |
116 | * MEI HW Section | 119 | * MEI HW Section |
117 | */ | 120 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b96205aece0c..2cab3c0a6805 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { | |||
76 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, | 76 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, |
77 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, | 77 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, |
78 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, | 78 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, |
79 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, | 79 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)}, |
80 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, | 80 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, |
81 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, | 81 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, |
82 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)}, | ||
83 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)}, | ||
82 | 84 | ||
83 | /* required last entry */ | 85 | /* required last entry */ |
84 | {0, } | 86 | {0, } |
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c index 8aa42e738acc..653799b96bfa 100644 --- a/drivers/misc/mic/card/mic_virtio.c +++ b/drivers/misc/mic/card/mic_virtio.c | |||
@@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev) | |||
154 | { | 154 | { |
155 | struct mic_vdev *mvdev = to_micvdev(vdev); | 155 | struct mic_vdev *mvdev = to_micvdev(vdev); |
156 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | 156 | struct mic_device_ctrl __iomem *dc = mvdev->dc; |
157 | int retry = 100, i; | 157 | int retry; |
158 | 158 | ||
159 | iowrite8(0, &dc->host_ack); | 159 | iowrite8(0, &dc->host_ack); |
160 | iowrite8(1, &dc->vdev_reset); | 160 | iowrite8(1, &dc->vdev_reset); |
161 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 161 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
162 | 162 | ||
163 | /* Wait till host completes all card accesses and acks the reset */ | 163 | /* Wait till host completes all card accesses and acks the reset */ |
164 | for (i = retry; i--;) { | 164 | for (retry = 100; retry--;) { |
165 | if (ioread8(&dc->host_ack)) | 165 | if (ioread8(&dc->host_ack)) |
166 | break; | 166 | break; |
167 | msleep(100); | 167 | msleep(100); |
@@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev) | |||
187 | /* | 187 | /* |
188 | * The virtio_ring code calls this API when it wants to notify the Host. | 188 | * The virtio_ring code calls this API when it wants to notify the Host. |
189 | */ | 189 | */ |
190 | static void mic_notify(struct virtqueue *vq) | 190 | static bool mic_notify(struct virtqueue *vq) |
191 | { | 191 | { |
192 | struct mic_vdev *mvdev = vq->priv; | 192 | struct mic_vdev *mvdev = vq->priv; |
193 | 193 | ||
194 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 194 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
195 | return true; | ||
195 | } | 196 | } |
196 | 197 | ||
197 | static void mic_del_vq(struct virtqueue *vq, int n) | 198 | static void mic_del_vq(struct virtqueue *vq, int n) |
@@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | |||
247 | /* First assign the vring's allocated in host memory */ | 248 | /* First assign the vring's allocated in host memory */ |
248 | vqconfig = mic_vq_config(mvdev->desc) + index; | 249 | vqconfig = mic_vq_config(mvdev->desc) + index; |
249 | memcpy_fromio(&config, vqconfig, sizeof(config)); | 250 | memcpy_fromio(&config, vqconfig, sizeof(config)); |
250 | _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN); | 251 | _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); |
251 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); | 252 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); |
252 | va = mic_card_map(mvdev->mdev, config.address, vr_size); | 253 | va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); |
253 | if (!va) | 254 | if (!va) |
254 | return ERR_PTR(-ENOMEM); | 255 | return ERR_PTR(-ENOMEM); |
255 | mvdev->vr[index] = va; | 256 | mvdev->vr[index] = va; |
256 | memset_io(va, 0x0, _vr_size); | 257 | memset_io(va, 0x0, _vr_size); |
257 | vq = vring_new_virtqueue(index, | 258 | vq = vring_new_virtqueue(index, le16_to_cpu(config.num), |
258 | config.num, MIC_VIRTIO_RING_ALIGN, vdev, | 259 | MIC_VIRTIO_RING_ALIGN, vdev, false, |
259 | false, | 260 | (void __force *)va, mic_notify, callback, |
260 | va, mic_notify, callback, name); | 261 | name); |
261 | if (!vq) { | 262 | if (!vq) { |
262 | err = -ENOMEM; | 263 | err = -ENOMEM; |
263 | goto unmap; | 264 | goto unmap; |
@@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | |||
272 | 273 | ||
273 | /* Allocate and reassign used ring now */ | 274 | /* Allocate and reassign used ring now */ |
274 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | 275 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + |
275 | sizeof(struct vring_used_elem) * config.num); | 276 | sizeof(struct vring_used_elem) * |
277 | le16_to_cpu(config.num)); | ||
276 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | 278 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
277 | get_order(mvdev->used_size[index])); | 279 | get_order(mvdev->used_size[index])); |
278 | if (!used) { | 280 | if (!used) { |
@@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
309 | { | 311 | { |
310 | struct mic_vdev *mvdev = to_micvdev(vdev); | 312 | struct mic_vdev *mvdev = to_micvdev(vdev); |
311 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | 313 | struct mic_device_ctrl __iomem *dc = mvdev->dc; |
312 | int i, err, retry = 100; | 314 | int i, err, retry; |
313 | 315 | ||
314 | /* We must have this many virtqueues. */ | 316 | /* We must have this many virtqueues. */ |
315 | if (nvqs > ioread8(&mvdev->desc->num_vq)) | 317 | if (nvqs > ioread8(&mvdev->desc->num_vq)) |
@@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
331 | * rings have been re-assigned. | 333 | * rings have been re-assigned. |
332 | */ | 334 | */ |
333 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 335 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
334 | for (i = retry; i--;) { | 336 | for (retry = 100; retry--;) { |
335 | if (!ioread8(&dc->used_address_updated)) | 337 | if (!ioread8(&dc->used_address_updated)) |
336 | break; | 338 | break; |
337 | msleep(100); | 339 | msleep(100); |
@@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | |||
519 | struct device *dev; | 521 | struct device *dev; |
520 | int ret; | 522 | int ret; |
521 | 523 | ||
522 | for (i = mic_aligned_size(struct mic_bootparam); | 524 | for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; |
523 | i < MIC_DP_SIZE; i += mic_total_desc_size(d)) { | 525 | i += mic_total_desc_size(d)) { |
524 | d = mdrv->dp + i; | 526 | d = mdrv->dp + i; |
525 | dc = (void __iomem *)d + mic_aligned_desc_size(d); | 527 | dc = (void __iomem *)d + mic_aligned_desc_size(d); |
526 | /* | 528 | /* |
@@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | |||
539 | continue; | 541 | continue; |
540 | 542 | ||
541 | /* device already exists */ | 543 | /* device already exists */ |
542 | dev = device_find_child(mdrv->dev, d, mic_match_desc); | 544 | dev = device_find_child(mdrv->dev, (void __force *)d, |
545 | mic_match_desc); | ||
543 | if (dev) { | 546 | if (dev) { |
544 | if (remove) | 547 | if (remove) |
545 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, | 548 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, |
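Several of the mic hunks above make the shared vring configuration endian-explicit: the descriptor fields are little-endian on the wire, so reads go through le16_to_cpu()/le64_to_cpu() and writes use the cpu_to_le*() counterparts. A small sketch of reading such fields, with demo_vqconfig standing in for the real descriptor layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_vqconfig {
	__le64 address;		/* ring address, little-endian on the wire */
	__le16 num;		/* number of ring entries, little-endian */
};

static void demo_read_vqconfig(const struct demo_vqconfig *cfg,
			       u64 *address, u16 *num)
{
	/* Convert the on-the-wire little-endian fields to host byte order. */
	*address = le64_to_cpu(cfg->address);
	*num = le16_to_cpu(cfg->num);
}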
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h index 2c5c22c93ba8..d0407ba53bb7 100644 --- a/drivers/misc/mic/card/mic_virtio.h +++ b/drivers/misc/mic/card/mic_virtio.h | |||
@@ -42,8 +42,8 @@ | |||
42 | 42 | ||
43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) | 43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) |
44 | { | 44 | { |
45 | return mic_aligned_size(*desc) | 45 | return sizeof(*desc) |
46 | + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig) | 46 | + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) |
47 | + ioread8(&desc->feature_len) * 2 | 47 | + ioread8(&desc->feature_len) * 2 |
48 | + ioread8(&desc->config_len); | 48 | + ioread8(&desc->config_len); |
49 | } | 49 | } |
@@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc) | |||
67 | } | 67 | } |
68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) | 68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) |
69 | { | 69 | { |
70 | return mic_aligned_desc_size(desc) + | 70 | return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); |
71 | mic_aligned_size(struct mic_device_ctrl); | ||
72 | } | 71 | } |
73 | 72 | ||
74 | int mic_devices_init(struct mic_driver *mdrv); | 73 | int mic_devices_init(struct mic_driver *mdrv); |
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 7558d9186438..b75c6b5cc20f 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
@@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev) | |||
62 | { | 62 | { |
63 | struct mic_bootparam *bootparam = mdev->dp; | 63 | struct mic_bootparam *bootparam = mdev->dp; |
64 | 64 | ||
65 | bootparam->magic = MIC_MAGIC; | 65 | bootparam->magic = cpu_to_le32(MIC_MAGIC); |
66 | bootparam->c2h_shutdown_db = mdev->shutdown_db; | 66 | bootparam->c2h_shutdown_db = mdev->shutdown_db; |
67 | bootparam->h2c_shutdown_db = -1; | 67 | bootparam->h2c_shutdown_db = -1; |
68 | bootparam->h2c_config_db = -1; | 68 | bootparam->h2c_config_db = -1; |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 5b8494bd1e00..e04bb4fe6823 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c | |||
@@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, | |||
41 | * We are copying from IO below an should ideally use something | 41 | * We are copying from IO below an should ideally use something |
42 | * like copy_to_user_fromio(..) if it existed. | 42 | * like copy_to_user_fromio(..) if it existed. |
43 | */ | 43 | */ |
44 | if (copy_to_user(ubuf, dbuf, len)) { | 44 | if (copy_to_user(ubuf, (void __force *)dbuf, len)) { |
45 | err = -EFAULT; | 45 | err = -EFAULT; |
46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
47 | __func__, __LINE__, err); | 47 | __func__, __LINE__, err); |
@@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, | |||
66 | * We are copying to IO below and should ideally use something | 66 | * We are copying to IO below and should ideally use something |
67 | * like copy_from_user_toio(..) if it existed. | 67 | * like copy_from_user_toio(..) if it existed. |
68 | */ | 68 | */ |
69 | if (copy_from_user(dbuf, ubuf, len)) { | 69 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { |
70 | err = -EFAULT; | 70 | err = -EFAULT; |
71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
72 | __func__, __LINE__, err); | 72 | __func__, __LINE__, err); |
@@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev) | |||
293 | continue; | 293 | continue; |
294 | } | 294 | } |
295 | mvdev->mvr[i].vrh.vring.used = | 295 | mvdev->mvr[i].vrh.vring.used = |
296 | mvdev->mdev->aper.va + | 296 | (void __force *)mvdev->mdev->aper.va + |
297 | le64_to_cpu(vqconfig[i].used_address); | 297 | le64_to_cpu(vqconfig[i].used_address); |
298 | } | 298 | } |
299 | 299 | ||
@@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, | |||
378 | void __user *argp) | 378 | void __user *argp) |
379 | { | 379 | { |
380 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | 380 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); |
381 | int ret = 0, retry = 100, i; | 381 | int ret = 0, retry, i; |
382 | struct mic_bootparam *bootparam = mvdev->mdev->dp; | 382 | struct mic_bootparam *bootparam = mvdev->mdev->dp; |
383 | s8 db = bootparam->h2c_config_db; | 383 | s8 db = bootparam->h2c_config_db; |
384 | 384 | ||
@@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, | |||
401 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | 401 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; |
402 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | 402 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); |
403 | 403 | ||
404 | for (i = retry; i--;) { | 404 | for (retry = 100; retry--;) { |
405 | ret = wait_event_timeout(wake, | 405 | ret = wait_event_timeout(wake, |
406 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | 406 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); |
407 | if (ret) | 407 | if (ret) |
@@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev, | |||
467 | } | 467 | } |
468 | 468 | ||
469 | /* Find the first free device page entry */ | 469 | /* Find the first free device page entry */ |
470 | for (i = mic_aligned_size(struct mic_bootparam); | 470 | for (i = sizeof(struct mic_bootparam); |
471 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); | 471 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); |
472 | i += mic_total_desc_size(devp)) { | 472 | i += mic_total_desc_size(devp)) { |
473 | devp = mdev->dp + i; | 473 | devp = mdev->dp + i; |
@@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, | |||
525 | char irqname[10]; | 525 | char irqname[10]; |
526 | struct mic_bootparam *bootparam = mdev->dp; | 526 | struct mic_bootparam *bootparam = mdev->dp; |
527 | u16 num; | 527 | u16 num; |
528 | dma_addr_t vr_addr; | ||
528 | 529 | ||
529 | mutex_lock(&mdev->mic_mutex); | 530 | mutex_lock(&mdev->mic_mutex); |
530 | 531 | ||
@@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, | |||
559 | } | 560 | } |
560 | vr->len = vr_size; | 561 | vr->len = vr_size; |
561 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | 562 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); |
562 | vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i; | 563 | vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); |
563 | vqconfig[i].address = mic_map_single(mdev, | 564 | vr_addr = mic_map_single(mdev, vr->va, vr_size); |
564 | vr->va, vr_size); | 565 | if (mic_map_error(vr_addr)) { |
565 | if (mic_map_error(vqconfig[i].address)) { | ||
566 | free_pages((unsigned long)vr->va, get_order(vr_size)); | 566 | free_pages((unsigned long)vr->va, get_order(vr_size)); |
567 | ret = -ENOMEM; | 567 | ret = -ENOMEM; |
568 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 568 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
569 | __func__, __LINE__, ret); | 569 | __func__, __LINE__, ret); |
570 | goto err; | 570 | goto err; |
571 | } | 571 | } |
572 | vqconfig[i].address = cpu_to_le64(vqconfig[i].address); | 572 | vqconfig[i].address = cpu_to_le64(vr_addr); |
573 | 573 | ||
574 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); | 574 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); |
575 | ret = vringh_init_kern(&mvr->vrh, | 575 | ret = vringh_init_kern(&mvr->vrh, |
@@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) | |||
639 | struct mic_vdev *tmp_mvdev; | 639 | struct mic_vdev *tmp_mvdev; |
640 | struct mic_device *mdev = mvdev->mdev; | 640 | struct mic_device *mdev = mvdev->mdev; |
641 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | 641 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); |
642 | int i, ret, retry = 100; | 642 | int i, ret, retry; |
643 | struct mic_vqconfig *vqconfig; | 643 | struct mic_vqconfig *vqconfig; |
644 | struct mic_bootparam *bootparam = mdev->dp; | 644 | struct mic_bootparam *bootparam = mdev->dp; |
645 | s8 db; | 645 | s8 db; |
@@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) | |||
652 | "Requesting hot remove id %d\n", mvdev->virtio_id); | 652 | "Requesting hot remove id %d\n", mvdev->virtio_id); |
653 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | 653 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; |
654 | mdev->ops->send_intr(mdev, db); | 654 | mdev->ops->send_intr(mdev, db); |
655 | for (i = retry; i--;) { | 655 | for (retry = 100; retry--;) { |
656 | ret = wait_event_timeout(wake, | 656 | ret = wait_event_timeout(wake, |
657 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | 657 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); |
658 | if (ret) | 658 | if (ret) |
659 | break; | 659 | break; |
660 | } | 660 | } |
661 | dev_dbg(mdev->sdev->parent, | 661 | dev_dbg(mdev->sdev->parent, |
662 | "Device id %d config_change %d guest_ack %d\n", | 662 | "Device id %d config_change %d guest_ack %d retry %d\n", |
663 | mvdev->virtio_id, mvdev->dc->config_change, | 663 | mvdev->virtio_id, mvdev->dc->config_change, |
664 | mvdev->dc->guest_ack); | 664 | mvdev->dc->guest_ack, retry); |
665 | mvdev->dc->config_change = 0; | 665 | mvdev->dc->config_change = 0; |
666 | mvdev->dc->guest_ack = 0; | 666 | mvdev->dc->guest_ack = 0; |
667 | skip_hot_remove: | 667 | skip_hot_remove: |
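The two wait loops above now count down through the same variable that the trailing dev_dbg() prints, so the log shows how many of the 100 attempts were left when the guest acked (or -1 if it never did). A minimal user-space sketch of the pattern, with a hypothetical poll_ready() standing in for wait_event_timeout():

#include <stdio.h>
#include <stdbool.h>

static bool poll_ready(void)            /* stand-in for wait_event_timeout() */
{
        static int calls;
        return ++calls == 3;            /* pretend the ack arrives on try 3 */
}

int main(void)
{
        int retry;

        for (retry = 100; retry--;)     /* counts 99, 98, ... down to -1 */
                if (poll_ready())
                        break;

        printf("guest_ack with retry=%d\n", retry); /* 97 here, -1 on timeout */
        return 0;
}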
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 81e9541b784c..0dfa8a81436e 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c | |||
@@ -397,8 +397,8 @@ mic_x100_load_ramdisk(struct mic_device *mdev) | |||
397 | * so copy over the ramdisk @ 128M. | 397 | * so copy over the ramdisk @ 128M. |
398 | */ | 398 | */ |
399 | memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); | 399 | memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); |
400 | iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image); | 400 | iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image); |
401 | iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size); | 401 | iowrite32(fw->size, &bp->hdr.ramdisk_size); |
402 | release_firmware(fw); | 402 | release_firmware(fw); |
403 | error: | 403 | error: |
404 | return rc; | 404 | return rc; |
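The ramdisk header writes above drop the cpu_to_le32() wrappers because iowrite32() already stores its argument in little-endian order; combining the two would swap the bytes twice on a big-endian host and put the wrong value on the device. A small stand-alone illustration of the double swap (swab32() reimplemented locally for the demo):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
        uint32_t bootaddr = 0x12345678;

        /* what a big-endian CPU's swapping accessor puts on the bus: */
        printf("one swap (correct LE):   %08x\n", swab32(bootaddr));
        /* cpu_to_le32() followed by a swapping accessor undoes the swap: */
        printf("two swaps (wrong order): %08x\n", swab32(swab32(bootaddr)));
        return 0;
}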
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index d210d131fef2..0f55589a56b8 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c | |||
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) | |||
73 | return -ENOMEM; | 73 | return -ENOMEM; |
74 | } | 74 | } |
75 | info->map.cached = | 75 | info->map.cached = |
76 | ioremap_cached(info->map.phys, info->map.size); | 76 | ioremap_cache(info->map.phys, info->map.size); |
77 | if (!info->map.cached) | 77 | if (!info->map.cached) |
78 | printk(KERN_WARNING "Failed to ioremap cached %s\n", | 78 | printk(KERN_WARNING "Failed to ioremap cached %s\n", |
79 | info->map.name); | 79 | info->map.name); |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 4cabdc9fda90..4b3aaa898a8b 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
962 | static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) | 962 | static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) |
963 | { | 963 | { |
964 | struct platform_device *pdev = info->pdev; | 964 | struct platform_device *pdev = info->pdev; |
965 | if (use_dma) { | 965 | if (info->use_dma) { |
966 | pxa_free_dma(info->data_dma_ch); | 966 | pxa_free_dma(info->data_dma_ch); |
967 | dma_free_coherent(&pdev->dev, info->buf_size, | 967 | dma_free_coherent(&pdev->dev, info->buf_size, |
968 | info->data_buff, info->data_buff_phys); | 968 | info->data_buff, info->data_buff_phys); |
@@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = { | |||
1259 | .compatible = "marvell,pxa3xx-nand", | 1259 | .compatible = "marvell,pxa3xx-nand", |
1260 | .data = (void *)PXA3XX_NAND_VARIANT_PXA, | 1260 | .data = (void *)PXA3XX_NAND_VARIANT_PXA, |
1261 | }, | 1261 | }, |
1262 | { | ||
1263 | .compatible = "marvell,armada370-nand", | ||
1264 | .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, | ||
1265 | }, | ||
1266 | {} | 1262 | {} |
1267 | }; | 1263 | }; |
1268 | MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); | 1264 | MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 187b1b7772ef..4ced59436558 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) | |||
2201 | 2201 | ||
2202 | port = &(SLAVE_AD_INFO(slave).port); | 2202 | port = &(SLAVE_AD_INFO(slave).port); |
2203 | 2203 | ||
2204 | // if slave is null, the whole port is not initialized | 2204 | /* if slave is null, the whole port is not initialized */ |
2205 | if (!port->slave) { | 2205 | if (!port->slave) { |
2206 | pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", | 2206 | pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", |
2207 | slave->bond->dev->name, slave->dev->name); | 2207 | slave->bond->dev->name, slave->dev->name); |
2208 | return; | 2208 | return; |
2209 | } | 2209 | } |
2210 | 2210 | ||
2211 | __get_state_machine_lock(port); | ||
2212 | |||
2211 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; | 2213 | port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; |
2212 | port->actor_oper_port_key = port->actor_admin_port_key |= | 2214 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2213 | (__get_link_speed(port) << 1); | 2215 | (__get_link_speed(port) << 1); |
2214 | pr_debug("Port %d changed speed\n", port->actor_port_number); | 2216 | pr_debug("Port %d changed speed\n", port->actor_port_number); |
2215 | // there is no need to reselect a new aggregator, just signal the | 2217 | /* there is no need to reselect a new aggregator, just signal the |
2216 | // state machines to reinitialize | 2218 | * state machines to reinitialize |
2219 | */ | ||
2217 | port->sm_vars |= AD_PORT_BEGIN; | 2220 | port->sm_vars |= AD_PORT_BEGIN; |
2221 | |||
2222 | __release_state_machine_lock(port); | ||
2218 | } | 2223 | } |
2219 | 2224 | ||
2220 | /** | 2225 | /** |
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) | |||
2229 | 2234 | ||
2230 | port = &(SLAVE_AD_INFO(slave).port); | 2235 | port = &(SLAVE_AD_INFO(slave).port); |
2231 | 2236 | ||
2232 | // if slave is null, the whole port is not initialized | 2237 | /* if slave is null, the whole port is not initialized */ |
2233 | if (!port->slave) { | 2238 | if (!port->slave) { |
2234 | pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", | 2239 | pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", |
2235 | slave->bond->dev->name, slave->dev->name); | 2240 | slave->bond->dev->name, slave->dev->name); |
2236 | return; | 2241 | return; |
2237 | } | 2242 | } |
2238 | 2243 | ||
2244 | __get_state_machine_lock(port); | ||
2245 | |||
2239 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2246 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
2240 | port->actor_oper_port_key = port->actor_admin_port_key |= | 2247 | port->actor_oper_port_key = port->actor_admin_port_key |= |
2241 | __get_duplex(port); | 2248 | __get_duplex(port); |
2242 | pr_debug("Port %d changed duplex\n", port->actor_port_number); | 2249 | pr_debug("Port %d changed duplex\n", port->actor_port_number); |
2243 | // there is no need to reselect a new aggregator, just signal the | 2250 | /* there is no need to reselect a new aggregator, just signal the |
2244 | // state machines to reinitialize | 2251 | * state machines to reinitialize |
2252 | */ | ||
2245 | port->sm_vars |= AD_PORT_BEGIN; | 2253 | port->sm_vars |= AD_PORT_BEGIN; |
2254 | |||
2255 | __release_state_machine_lock(port); | ||
2246 | } | 2256 | } |
2247 | 2257 | ||
2248 | /** | 2258 | /** |
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) | |||
2258 | 2268 | ||
2259 | port = &(SLAVE_AD_INFO(slave).port); | 2269 | port = &(SLAVE_AD_INFO(slave).port); |
2260 | 2270 | ||
2261 | // if slave is null, the whole port is not initialized | 2271 | /* if slave is null, the whole port is not initialized */ |
2262 | if (!port->slave) { | 2272 | if (!port->slave) { |
2263 | pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", | 2273 | pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", |
2264 | slave->bond->dev->name, slave->dev->name); | 2274 | slave->bond->dev->name, slave->dev->name); |
2265 | return; | 2275 | return; |
2266 | } | 2276 | } |
2267 | 2277 | ||
2268 | // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) | 2278 | __get_state_machine_lock(port); |
2269 | // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report | 2279 | /* on link down we are zeroing duplex and speed since |
2280 | * some of the adaptors(ce1000.lan) report full duplex/speed | ||
2281 | * instead of N/A(duplex) / 0(speed). | ||
2282 | * | ||
2283 | * on link up we are forcing recheck on the duplex and speed since | ||
2284 | * some of the adaptors(ce1000.lan) report stale values. | ||
2285 | */ | ||
2270 | if (link == BOND_LINK_UP) { | 2286 | if (link == BOND_LINK_UP) { |
2271 | port->is_enabled = true; | 2287 | port->is_enabled = true; |
2272 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; | 2288 | port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; |
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) | |||
2282 | port->actor_oper_port_key = (port->actor_admin_port_key &= | 2298 | port->actor_oper_port_key = (port->actor_admin_port_key &= |
2283 | ~AD_SPEED_KEY_BITS); | 2299 | ~AD_SPEED_KEY_BITS); |
2284 | } | 2300 | } |
2285 | //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); | 2301 | pr_debug("Port %d changed link status to %s", |
2286 | // there is no need to reselect a new aggregator, just signal the | 2302 | port->actor_port_number, |
2287 | // state machines to reinitialize | 2303 | (link == BOND_LINK_UP) ? "UP" : "DOWN"); |
2304 | /* there is no need to reselect a new aggregator, just signal the | ||
2305 | * state machines to reinitialize | ||
2306 | */ | ||
2288 | port->sm_vars |= AD_PORT_BEGIN; | 2307 | port->sm_vars |= AD_PORT_BEGIN; |
2308 | |||
2309 | __release_state_machine_lock(port); | ||
2289 | } | 2310 | } |
2290 | 2311 | ||
2291 | /* | 2312 | /* |
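The three handlers above now take the per-port state machine lock around the key updates and the AD_PORT_BEGIN request, since the periodic state-machine handler reads and modifies the same fields; without the lock a concurrently running state machine could observe a half-updated key. A rough user-space analogue of the guarded read-modify-write (field names and bit values are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sm_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int actor_port_key;
static unsigned int sm_vars;

#define SPEED_KEY_BITS  0x3e            /* illustrative */
#define PORT_BEGIN      0x01            /* illustrative AD_PORT_BEGIN */

static void speed_changed(unsigned int speed_bits)
{
        pthread_mutex_lock(&sm_lock);
        actor_port_key &= ~SPEED_KEY_BITS;
        actor_port_key |= speed_bits << 1;
        sm_vars |= PORT_BEGIN;          /* ask the state machine to restart */
        pthread_mutex_unlock(&sm_lock);
}

int main(void)
{
        speed_changed(5);
        printf("key=%#x vars=%#x\n", actor_port_key, sm_vars);
        return 0;
}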
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 36eab0c4fb33..4b8c58b0ec24 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
3732 | } | 3732 | } |
3733 | 3733 | ||
3734 | 3734 | ||
3735 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | 3735 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, |
3736 | void *accel_priv) | ||
3736 | { | 3737 | { |
3737 | /* | 3738 | /* |
3738 | * This helper function exists to help dev_pick_tx get the correct | 3739 | * This helper function exists to help dev_pick_tx get the correct |
@@ -4199,9 +4200,9 @@ static int bond_check_params(struct bond_params *params) | |||
4199 | (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { | 4200 | (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { |
4200 | /* not complete check, but should be good enough to | 4201 | /* not complete check, but should be good enough to |
4201 | catch mistakes */ | 4202 | catch mistakes */ |
4202 | __be32 ip = in_aton(arp_ip_target[i]); | 4203 | __be32 ip; |
4203 | if (!isdigit(arp_ip_target[i][0]) || ip == 0 || | 4204 | if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || |
4204 | ip == htonl(INADDR_BROADCAST)) { | 4205 | IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { |
4205 | pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", | 4206 | pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", |
4206 | arp_ip_target[i]); | 4207 | arp_ip_target[i]); |
4207 | arp_interval = 0; | 4208 | arp_interval = 0; |
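The module-parameter check above switches from in_aton(), which converts malformed strings into garbage addresses without complaint, to in4_pton(), which fails on anything that is not a well-formed dotted quad, and then rejects unusable targets. A user-space analogue using inet_pton() shows the intent; IS_IP_TARGET_UNUSABLE_ADDRESS() is approximated here by an explicit zero/broadcast test:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int valid_arp_target(const char *s)
{
        struct in_addr a;

        if (inet_pton(AF_INET, s, &a) != 1)     /* malformed string */
                return 0;
        return a.s_addr != 0 && a.s_addr != htonl(INADDR_BROADCAST);
}

int main(void)
{
        printf("%d\n", valid_arp_target("10.0.0.1"));           /* 1 */
        printf("%d\n", valid_arp_target("not-an-ip"));          /* 0 */
        printf("%d\n", valid_arp_target("255.255.255.255"));    /* 0 */
        return 0;
}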
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index abf5e106edc5..0ae580bbc5db 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -1635,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d, | |||
1635 | char *buf) | 1635 | char *buf) |
1636 | { | 1636 | { |
1637 | struct bonding *bond = to_bond(d); | 1637 | struct bonding *bond = to_bond(d); |
1638 | int packets_per_slave = bond->params.packets_per_slave; | 1638 | unsigned int packets_per_slave = bond->params.packets_per_slave; |
1639 | 1639 | ||
1640 | if (packets_per_slave > 1) | 1640 | if (packets_per_slave > 1) |
1641 | packets_per_slave = reciprocal_value(packets_per_slave); | 1641 | packets_per_slave = reciprocal_value(packets_per_slave); |
1642 | 1642 | ||
1643 | return sprintf(buf, "%d\n", packets_per_slave); | 1643 | return sprintf(buf, "%u\n", packets_per_slave); |
1644 | } | 1644 | } |
1645 | 1645 | ||
1646 | static ssize_t bonding_store_packets_per_slave(struct device *d, | 1646 | static ssize_t bonding_store_packets_per_slave(struct device *d, |
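When the option is larger than 1, the bonding code keeps packets_per_slave internally as a reciprocal_value() multiplier, and that multiplier can exceed INT_MAX. Read back into a signed int, the "> 1" test can fail and %d would print the raw multiplier as a negative number, hence the switch to unsigned int and %u above. The arithmetic, assuming the classic ceil(2^32 / k) style reciprocal (a sketch, not the kernel's exact helper):

#include <stdint.h>
#include <stdio.h>

/* roughly what the old reciprocal_value() computed: ceil(2^32 / k) */
static uint32_t reciprocal_value(uint32_t k)
{
        return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
}

int main(void)
{
        uint32_t stored = reciprocal_value(2);  /* 0x80000000 */
        int as_int = (int)stored;               /* negative on two's complement */
        unsigned int as_uint = stored;

        printf("signed:   > 1 is %d, %%d prints %d\n", as_int > 1, as_int);
        printf("unsigned: > 1 is %d, %%u prints %u\n", as_uint > 1, as_uint);
        return 0;
}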
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5f9a7ad9b964..8aeec0b4601a 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev) | |||
625 | usb_unanchor_urb(urb); | 625 | usb_unanchor_urb(urb); |
626 | usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, | 626 | usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, |
627 | urb->transfer_dma); | 627 | urb->transfer_dma); |
628 | usb_free_urb(urb); | ||
628 | break; | 629 | break; |
629 | } | 630 | } |
630 | 631 | ||
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne | |||
798 | * allowed (MAX_TX_URBS). | 799 | * allowed (MAX_TX_URBS). |
799 | */ | 800 | */ |
800 | if (!context) { | 801 | if (!context) { |
801 | usb_unanchor_urb(urb); | ||
802 | usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); | 802 | usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); |
803 | usb_free_urb(urb); | ||
803 | 804 | ||
804 | netdev_warn(netdev, "couldn't find free context\n"); | 805 | netdev_warn(netdev, "couldn't find free context\n"); |
805 | 806 | ||
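Both hunks above plug urb leaks on error paths: usb_alloc_urb() hands back a reference that neither a failed usb_submit_urb() nor a bail-out before submission releases, so every error exit needs a matching usb_free_urb(). A condensed sketch of the rx-setup error handling after the fix (field names follow the driver, but this is illustrative rather than the actual function):

static int ems_usb_setup_rx_urb(struct ems_usb *dev)
{
        struct urb *urb;
        u8 *buf;
        int err;

        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb)
                return -ENOMEM;

        buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
                                 &urb->transfer_dma);
        if (!buf) {
                usb_free_urb(urb);
                return -ENOMEM;
        }

        usb_anchor_urb(urb, &dev->rx_submitted);
        err = usb_submit_urb(urb, GFP_KERNEL);
        if (err) {
                usb_unanchor_urb(urb);          /* drop the anchor's reference */
                usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
                                  urb->transfer_dma);
                usb_free_urb(urb);              /* drop the allocation reference */
                return err;
        }

        usb_free_urb(urb);      /* anchor and USB core keep their own references */
        return 0;
}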
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 8ee9d1556e6e..263dd921edc4 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) | |||
927 | /* set LED in default state (end of init phase) */ | 927 | /* set LED in default state (end of init phase) */ |
928 | pcan_usb_pro_set_led(dev, 0, 1); | 928 | pcan_usb_pro_set_led(dev, 0, 1); |
929 | 929 | ||
930 | kfree(bi); | ||
931 | kfree(fi); | ||
932 | |||
930 | return 0; | 933 | return 0; |
931 | 934 | ||
932 | err_out: | 935 | err_out: |

diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 50b853a79d77..46dfb1378c17 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c | |||
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev) | |||
717 | if (netif_msg_ifup(db)) | 717 | if (netif_msg_ifup(db)) |
718 | dev_dbg(db->dev, "enabling %s\n", dev->name); | 718 | dev_dbg(db->dev, "enabling %s\n", dev->name); |
719 | 719 | ||
720 | if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, | 720 | if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev)) |
721 | 0, dev->name, dev)) | ||
722 | return -EAGAIN; | 721 | return -EAGAIN; |
723 | 722 | ||
724 | /* Initialize EMAC board */ | 723 | /* Initialize EMAC board */ |
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev) | |||
774 | 773 | ||
775 | emac_shutdown(ndev); | 774 | emac_shutdown(ndev); |
776 | 775 | ||
776 | free_irq(ndev->irq, ndev); | ||
777 | |||
777 | return 0; | 778 | return 0; |
778 | } | 779 | } |
779 | 780 | ||
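Switching from devm_request_irq() to plain request_irq() matters because the handler is requested in ndo_open: a devres-managed IRQ is only released when the driver is unbound, so it cannot be balanced against a release in ndo_stop, and repeated open/close cycles end up requesting the same line again. The open/stop pairing the patch establishes, in outline (simplified; the real functions do more hardware setup):

static int emac_open(struct net_device *ndev)
{
        if (request_irq(ndev->irq, emac_interrupt, 0, ndev->name, ndev))
                return -EAGAIN;

        /* ... PHY and DMA initialisation elided ... */
        netif_start_queue(ndev);
        return 0;
}

static int emac_stop(struct net_device *ndev)
{
        netif_stop_queue(ndev);
        /* ... controller shutdown elided ... */
        free_irq(ndev->irq, ndev);      /* balances the request in open() */
        return 0;
}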
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b2ffad1304d2..248baf6273fb 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
565 | /* Make sure pointer to data buffer is set */ | 565 | /* Make sure pointer to data buffer is set */ |
566 | wmb(); | 566 | wmb(); |
567 | 567 | ||
568 | skb_tx_timestamp(skb); | ||
569 | |||
568 | *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); | 570 | *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); |
569 | 571 | ||
570 | /* Increment index to point to the next BD */ | 572 | /* Increment index to point to the next BD */ |
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
579 | 581 | ||
580 | arc_reg_set(priv, R_STATUS, TXPL_MASK); | 582 | arc_reg_set(priv, R_STATUS, TXPL_MASK); |
581 | 583 | ||
582 | skb_tx_timestamp(skb); | ||
583 | |||
584 | return NETDEV_TX_OK; | 584 | return NETDEV_TX_OK; |
585 | } | 585 | } |
586 | 586 | ||
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a36a760ada28..29801750f239 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) | |||
145 | * Mask some pcie error bits | 145 | * Mask some pcie error bits |
146 | */ | 146 | */ |
147 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | 147 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
148 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); | 148 | if (pos) { |
149 | data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); | 149 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); |
150 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); | 150 | data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); |
151 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); | ||
152 | } | ||
151 | /* clear error status */ | 153 | /* clear error status */ |
152 | pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, | 154 | pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, |
153 | PCI_EXP_DEVSTA_NFED | | 155 | PCI_EXP_DEVSTA_NFED | |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a1f66e2c9a86..ec6119089b82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -520,10 +520,12 @@ struct bnx2x_fastpath { | |||
520 | #define BNX2X_FP_STATE_IDLE 0 | 520 | #define BNX2X_FP_STATE_IDLE 0 |
521 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ | 521 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ |
522 | #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ | 522 | #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ |
523 | #define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ | 523 | #define BNX2X_FP_STATE_DISABLED (1 << 2) |
524 | #define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ | 524 | #define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ |
525 | #define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ | ||
526 | #define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) | ||
525 | #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) | 527 | #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) |
526 | #define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) | 528 | #define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) |
527 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) | 529 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) |
528 | /* protect state */ | 530 | /* protect state */ |
529 | spinlock_t lock; | 531 | spinlock_t lock; |
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) | |||
613 | { | 615 | { |
614 | bool rc = true; | 616 | bool rc = true; |
615 | 617 | ||
616 | spin_lock(&fp->lock); | 618 | spin_lock_bh(&fp->lock); |
617 | if (fp->state & BNX2X_FP_LOCKED) { | 619 | if (fp->state & BNX2X_FP_LOCKED) { |
618 | WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); | 620 | WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); |
619 | fp->state |= BNX2X_FP_STATE_NAPI_YIELD; | 621 | fp->state |= BNX2X_FP_STATE_NAPI_YIELD; |
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) | |||
622 | /* we don't care if someone yielded */ | 624 | /* we don't care if someone yielded */ |
623 | fp->state = BNX2X_FP_STATE_NAPI; | 625 | fp->state = BNX2X_FP_STATE_NAPI; |
624 | } | 626 | } |
625 | spin_unlock(&fp->lock); | 627 | spin_unlock_bh(&fp->lock); |
626 | return rc; | 628 | return rc; |
627 | } | 629 | } |
628 | 630 | ||
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) | |||
631 | { | 633 | { |
632 | bool rc = false; | 634 | bool rc = false; |
633 | 635 | ||
634 | spin_lock(&fp->lock); | 636 | spin_lock_bh(&fp->lock); |
635 | WARN_ON(fp->state & | 637 | WARN_ON(fp->state & |
636 | (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); | 638 | (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); |
637 | 639 | ||
638 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) | 640 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
639 | rc = true; | 641 | rc = true; |
640 | fp->state = BNX2X_FP_STATE_IDLE; | 642 | |
641 | spin_unlock(&fp->lock); | 643 | /* state ==> idle, unless currently disabled */ |
644 | fp->state &= BNX2X_FP_STATE_DISABLED; | ||
645 | spin_unlock_bh(&fp->lock); | ||
642 | return rc; | 646 | return rc; |
643 | } | 647 | } |
644 | 648 | ||
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) | |||
669 | 673 | ||
670 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) | 674 | if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
671 | rc = true; | 675 | rc = true; |
672 | fp->state = BNX2X_FP_STATE_IDLE; | 676 | |
677 | /* state ==> idle, unless currently disabled */ | ||
678 | fp->state &= BNX2X_FP_STATE_DISABLED; | ||
673 | spin_unlock_bh(&fp->lock); | 679 | spin_unlock_bh(&fp->lock); |
674 | return rc; | 680 | return rc; |
675 | } | 681 | } |
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) | |||
677 | /* true if a socket is polling, even if it did not get the lock */ | 683 | /* true if a socket is polling, even if it did not get the lock */ |
678 | static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) | 684 | static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) |
679 | { | 685 | { |
680 | WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); | 686 | WARN_ON(!(fp->state & BNX2X_FP_OWNED)); |
681 | return fp->state & BNX2X_FP_USER_PEND; | 687 | return fp->state & BNX2X_FP_USER_PEND; |
682 | } | 688 | } |
689 | |||
690 | /* false if fp is currently owned */ | ||
691 | static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) | ||
692 | { | ||
693 | int rc = true; | ||
694 | |||
695 | spin_lock_bh(&fp->lock); | ||
696 | if (fp->state & BNX2X_FP_OWNED) | ||
697 | rc = false; | ||
698 | fp->state |= BNX2X_FP_STATE_DISABLED; | ||
699 | spin_unlock_bh(&fp->lock); | ||
700 | |||
701 | return rc; | ||
702 | } | ||
683 | #else | 703 | #else |
684 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) | 704 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) |
685 | { | 705 | { |
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) | |||
709 | { | 729 | { |
710 | return false; | 730 | return false; |
711 | } | 731 | } |
732 | static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) | ||
733 | { | ||
734 | return true; | ||
735 | } | ||
712 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | 736 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
713 | 737 | ||
714 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 738 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath { | |||
1250 | * Therefore, if they would have been defined in the same union, | 1274 | * Therefore, if they would have been defined in the same union, |
1251 | * data can get corrupted. | 1275 | * data can get corrupted. |
1252 | */ | 1276 | */ |
1253 | struct afex_vif_list_ramrod_data func_afex_rdata; | 1277 | union { |
1278 | struct afex_vif_list_ramrod_data viflist_data; | ||
1279 | struct function_update_data func_update; | ||
1280 | } func_afex_rdata; | ||
1254 | 1281 | ||
1255 | /* used by dmae command executer */ | 1282 | /* used by dmae command executer */ |
1256 | struct dmae_command dmae[MAX_DMAE_C]; | 1283 | struct dmae_command dmae[MAX_DMAE_C]; |
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); | |||
2499 | #define MCPR_SCRATCH_BASE(bp) \ | 2526 | #define MCPR_SCRATCH_BASE(bp) \ |
2500 | (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) | 2527 | (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) |
2501 | 2528 | ||
2529 | #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) | ||
2530 | |||
2502 | #endif /* bnx2x.h */ | 2531 | #endif /* bnx2x.h */ |
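The new BNX2X_FP_STATE_DISABLED bit changes what the unlock helpers do: instead of forcing the state back to IDLE they mask it with the DISABLED bit, so a disable request issued while NAPI or busy-poll owned the ring survives the unlock. The masking on its own, using the bit layout from the hunk above:

#include <stdio.h>

#define FP_STATE_IDLE           0
#define FP_STATE_NAPI           (1 << 0)
#define FP_STATE_POLL           (1 << 1)
#define FP_STATE_DISABLED       (1 << 2)

int main(void)
{
        unsigned int state = FP_STATE_NAPI | FP_STATE_DISABLED;

        /* "state ==> idle, unless currently disabled" */
        state &= FP_STATE_DISABLED;
        printf("state=%#x\n", state);   /* 0x4: the disable request is kept */
        return 0;
}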
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ec96130533cc..bf811565ee24 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
160 | struct sk_buff *skb = tx_buf->skb; | 160 | struct sk_buff *skb = tx_buf->skb; |
161 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | 161 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
162 | int nbd; | 162 | int nbd; |
163 | u16 split_bd_len = 0; | ||
163 | 164 | ||
164 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | 165 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ |
165 | prefetch(&skb->end); | 166 | prefetch(&skb->end); |
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
167 | DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", | 168 | DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", |
168 | txdata->txq_index, idx, tx_buf, skb); | 169 | txdata->txq_index, idx, tx_buf, skb); |
169 | 170 | ||
170 | /* unmap first bd */ | ||
171 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; | 171 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; |
172 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
173 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); | ||
174 | 172 | ||
175 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | 173 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
176 | #ifdef BNX2X_STOP_ON_ERROR | 174 | #ifdef BNX2X_STOP_ON_ERROR |
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
188 | --nbd; | 186 | --nbd; |
189 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 187 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
190 | 188 | ||
191 | /* ...and the TSO split header bd since they have no mapping */ | 189 | /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ |
192 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | 190 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { |
191 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; | ||
192 | split_bd_len = BD_UNMAP_LEN(tx_data_bd); | ||
193 | --nbd; | 193 | --nbd; |
194 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 194 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
195 | } | 195 | } |
196 | 196 | ||
197 | /* unmap first bd */ | ||
198 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
199 | BD_UNMAP_LEN(tx_start_bd) + split_bd_len, | ||
200 | DMA_TO_DEVICE); | ||
201 | |||
197 | /* now free frags */ | 202 | /* now free frags */ |
198 | while (nbd > 0) { | 203 | while (nbd > 0) { |
199 | 204 | ||
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) | |||
1790 | { | 1795 | { |
1791 | int i; | 1796 | int i; |
1792 | 1797 | ||
1793 | local_bh_disable(); | ||
1794 | for_each_rx_queue_cnic(bp, i) { | 1798 | for_each_rx_queue_cnic(bp, i) { |
1795 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1799 | napi_disable(&bnx2x_fp(bp, i, napi)); |
1796 | while (!bnx2x_fp_lock_napi(&bp->fp[i])) | 1800 | while (!bnx2x_fp_ll_disable(&bp->fp[i])) |
1797 | mdelay(1); | 1801 | usleep_range(1000, 2000); |
1798 | } | 1802 | } |
1799 | local_bh_enable(); | ||
1800 | } | 1803 | } |
1801 | 1804 | ||
1802 | static void bnx2x_napi_disable(struct bnx2x *bp) | 1805 | static void bnx2x_napi_disable(struct bnx2x *bp) |
1803 | { | 1806 | { |
1804 | int i; | 1807 | int i; |
1805 | 1808 | ||
1806 | local_bh_disable(); | ||
1807 | for_each_eth_queue(bp, i) { | 1809 | for_each_eth_queue(bp, i) { |
1808 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1810 | napi_disable(&bnx2x_fp(bp, i, napi)); |
1809 | while (!bnx2x_fp_lock_napi(&bp->fp[i])) | 1811 | while (!bnx2x_fp_ll_disable(&bp->fp[i])) |
1810 | mdelay(1); | 1812 | usleep_range(1000, 2000); |
1811 | } | 1813 | } |
1812 | local_bh_enable(); | ||
1813 | } | 1814 | } |
1814 | 1815 | ||
1815 | void bnx2x_netif_start(struct bnx2x *bp) | 1816 | void bnx2x_netif_start(struct bnx2x *bp) |
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1832 | bnx2x_napi_disable_cnic(bp); | 1833 | bnx2x_napi_disable_cnic(bp); |
1833 | } | 1834 | } |
1834 | 1835 | ||
1835 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1836 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
1837 | void *accel_priv) | ||
1836 | { | 1838 | { |
1837 | struct bnx2x *bp = netdev_priv(dev); | 1839 | struct bnx2x *bp = netdev_priv(dev); |
1838 | 1840 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa74495..41f3ca5ad972 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); | |||
524 | int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); | 524 | int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); |
525 | 525 | ||
526 | /* select_queue callback */ | 526 | /* select_queue callback */ |
527 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); | 527 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
528 | void *accel_priv); | ||
528 | 529 | ||
529 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | 530 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, |
530 | struct bnx2x_fastpath *fp, | 531 | struct bnx2x_fastpath *fp, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 20dcc02431ca..11fc79585491 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3865 | 3865 | ||
3866 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); | 3866 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); |
3867 | } else { | 3867 | } else { |
3868 | /* Enable Auto-Detect to support 1G over CL37 as well */ | ||
3869 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3870 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); | ||
3871 | |||
3872 | /* Force cl48 sync_status LOW to avoid getting stuck in CL73 | ||
3873 | * parallel-detect loop when CL73 and CL37 are enabled. | ||
3874 | */ | ||
3875 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | ||
3876 | MDIO_AER_BLOCK_AER_REG, 0); | ||
3877 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3878 | MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); | ||
3879 | bnx2x_set_aer_mmd(params, phy); | ||
3880 | |||
3868 | bnx2x_disable_kr2(params, vars, phy); | 3881 | bnx2x_disable_kr2(params, vars, phy); |
3869 | } | 3882 | } |
3870 | 3883 | ||
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8120 | *edc_mode = EDC_MODE_ACTIVE_DAC; | 8133 | *edc_mode = EDC_MODE_ACTIVE_DAC; |
8121 | else | 8134 | else |
8122 | check_limiting_mode = 1; | 8135 | check_limiting_mode = 1; |
8123 | } else if (copper_module_type & | 8136 | } else { |
8124 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | 8137 | *edc_mode = EDC_MODE_PASSIVE_DAC; |
8138 | /* Even in case PASSIVE_DAC indication is not set, | ||
8139 | * treat it as a passive DAC cable, since some cables | ||
8140 | * don't have this indication. | ||
8141 | */ | ||
8142 | if (copper_module_type & | ||
8143 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | ||
8125 | DP(NETIF_MSG_LINK, | 8144 | DP(NETIF_MSG_LINK, |
8126 | "Passive Copper cable detected\n"); | 8145 | "Passive Copper cable detected\n"); |
8127 | *edc_mode = | 8146 | } else { |
8128 | EDC_MODE_PASSIVE_DAC; | 8147 | DP(NETIF_MSG_LINK, |
8129 | } else { | 8148 | "Unknown copper-cable-type\n"); |
8130 | DP(NETIF_MSG_LINK, | 8149 | } |
8131 | "Unknown copper-cable-type 0x%x !!!\n", | ||
8132 | copper_module_type); | ||
8133 | return -EINVAL; | ||
8134 | } | 8150 | } |
8135 | break; | 8151 | break; |
8136 | } | 8152 | } |
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, | |||
10825 | (1<<11)); | 10841 | (1<<11)); |
10826 | 10842 | ||
10827 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 10843 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && |
10828 | (phy->speed_cap_mask & | 10844 | (phy->speed_cap_mask & |
10829 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || | 10845 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || |
10830 | (phy->req_line_speed == SPEED_1000)) { | 10846 | (phy->req_line_speed == SPEED_1000)) { |
10831 | an_1000_val |= (1<<8); | 10847 | an_1000_val |= (1<<8); |
10832 | autoneg_val |= (1<<9 | 1<<12); | 10848 | autoneg_val |= (1<<9 | 1<<12); |
10833 | if (phy->req_duplex == DUPLEX_FULL) | 10849 | if (phy->req_duplex == DUPLEX_FULL) |
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, | |||
10843 | 0x09, | 10859 | 0x09, |
10844 | &an_1000_val); | 10860 | &an_1000_val); |
10845 | 10861 | ||
10846 | /* Set 100 speed advertisement */ | 10862 | /* Advertise 10/100 link speed */ |
10847 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 10863 | if (phy->req_line_speed == SPEED_AUTO_NEG) { |
10848 | (phy->speed_cap_mask & | 10864 | if (phy->speed_cap_mask & |
10849 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | 10865 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { |
10850 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { | 10866 | an_10_100_val |= (1<<5); |
10851 | an_10_100_val |= (1<<7); | 10867 | autoneg_val |= (1<<9 | 1<<12); |
10852 | /* Enable autoneg and restart autoneg for legacy speeds */ | 10868 | DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); |
10853 | autoneg_val |= (1<<9 | 1<<12); | 10869 | } |
10854 | 10870 | if (phy->speed_cap_mask & | |
10855 | if (phy->req_duplex == DUPLEX_FULL) | 10871 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { |
10856 | an_10_100_val |= (1<<8); | ||
10857 | DP(NETIF_MSG_LINK, "Advertising 100M\n"); | ||
10858 | } | ||
10859 | |||
10860 | /* Set 10 speed advertisement */ | ||
10861 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | ||
10862 | (phy->speed_cap_mask & | ||
10863 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | ||
10864 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { | ||
10865 | an_10_100_val |= (1<<5); | ||
10866 | autoneg_val |= (1<<9 | 1<<12); | ||
10867 | if (phy->req_duplex == DUPLEX_FULL) | ||
10868 | an_10_100_val |= (1<<6); | 10872 | an_10_100_val |= (1<<6); |
10869 | DP(NETIF_MSG_LINK, "Advertising 10M\n"); | 10873 | autoneg_val |= (1<<9 | 1<<12); |
10874 | DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); | ||
10875 | } | ||
10876 | if (phy->speed_cap_mask & | ||
10877 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { | ||
10878 | an_10_100_val |= (1<<7); | ||
10879 | autoneg_val |= (1<<9 | 1<<12); | ||
10880 | DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); | ||
10881 | } | ||
10882 | if (phy->speed_cap_mask & | ||
10883 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { | ||
10884 | an_10_100_val |= (1<<8); | ||
10885 | autoneg_val |= (1<<9 | 1<<12); | ||
10886 | DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); | ||
10887 | } | ||
10870 | } | 10888 | } |
10871 | 10889 | ||
10872 | /* Only 10/100 are allowed to work in FORCE mode */ | 10890 | /* Only 10/100 are allowed to work in FORCE mode */ |
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, | |||
13342 | DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, | 13360 | DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, |
13343 | old_status, status); | 13361 | old_status, status); |
13344 | 13362 | ||
13363 | /* Do not touch the link in case physical link down */ | ||
13364 | if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) | ||
13365 | return 1; | ||
13366 | |||
13345 | /* a. Update shmem->link_status accordingly | 13367 | /* a. Update shmem->link_status accordingly |
13346 | * b. Update link_vars->link_up | 13368 | * b. Update link_vars->link_up |
13347 | */ | 13369 | */ |
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13550 | */ | 13572 | */ |
13551 | not_kr2_device = (((base_page & 0x8000) == 0) || | 13573 | not_kr2_device = (((base_page & 0x8000) == 0) || |
13552 | (((base_page & 0x8000) && | 13574 | (((base_page & 0x8000) && |
13553 | ((next_page & 0xe0) == 0x2)))); | 13575 | ((next_page & 0xe0) == 0x20)))); |
13554 | 13576 | ||
13555 | /* In case KR2 is already disabled, check if we need to re-enable it */ | 13577 | /* In case KR2 is already disabled, check if we need to re-enable it */ |
13556 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13578 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
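The one-character change in the KR2 workaround fixes a sub-test that could never succeed: next_page is masked with 0xe0, which keeps only bits 5-7, so the masked value is always a multiple of 0x20 and can never equal 0x2. A tiny demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int next_page = 0x25;  /* any value with bit 5 set */

        /* 0xe0 keeps only bits 5-7, so the result can never equal 0x2 */
        printf("%d\n", (next_page & 0xe0) == 0x2);      /* always 0 */
        printf("%d\n", (next_page & 0xe0) == 0x20);     /* 1 for this value */
        return 0;
}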
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 814d0eca9b33..8b3107b2fcc1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) | |||
11447 | } | 11447 | } |
11448 | } | 11448 | } |
11449 | 11449 | ||
11450 | /* adjust igu_sb_cnt to MF for E1x */ | 11450 | /* adjust igu_sb_cnt to MF for E1H */ |
11451 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) | 11451 | if (CHIP_IS_E1H(bp) && IS_MF(bp)) |
11452 | bp->igu_sb_cnt /= E1HVN_MAX; | 11452 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); |
11453 | 11453 | ||
11454 | /* port info */ | 11454 | /* port info */ |
11455 | bnx2x_get_port_hwinfo(bp); | 11455 | bnx2x_get_port_hwinfo(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 3efbb35267c8..14ffb6e56e59 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | |||
@@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero*/ | |||
7179 | #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca | 7179 | #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca |
7180 | #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da | 7180 | #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da |
7181 | #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea | 7181 | #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea |
7182 | #define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa | ||
7182 | #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 | 7183 | #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 |
7183 | #define MDIO_WC_REG_XGXS_STATUS3 0x8129 | 7184 | #define MDIO_WC_REG_XGXS_STATUS3 0x8129 |
7184 | #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 | 7185 | #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32c92abf5094..18438a504d57 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | |||
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
2038 | struct bnx2x_vlan_mac_ramrod_params p; | 2038 | struct bnx2x_vlan_mac_ramrod_params p; |
2039 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | 2039 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; |
2040 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; | 2040 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; |
2041 | unsigned long flags; | ||
2041 | int read_lock; | 2042 | int read_lock; |
2042 | int rc = 0; | 2043 | int rc = 0; |
2043 | 2044 | ||
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
2046 | spin_lock_bh(&exeq->lock); | 2047 | spin_lock_bh(&exeq->lock); |
2047 | 2048 | ||
2048 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { | 2049 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { |
2049 | if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == | 2050 | flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; |
2050 | *vlan_mac_flags) { | 2051 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == |
2052 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { | ||
2051 | rc = exeq->remove(bp, exeq->owner, exeq_pos); | 2053 | rc = exeq->remove(bp, exeq->owner, exeq_pos); |
2052 | if (rc) { | 2054 | if (rc) { |
2053 | BNX2X_ERR("Failed to remove command\n"); | 2055 | BNX2X_ERR("Failed to remove command\n"); |
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | |||
2080 | return read_lock; | 2082 | return read_lock; |
2081 | 2083 | ||
2082 | list_for_each_entry(pos, &o->head, link) { | 2084 | list_for_each_entry(pos, &o->head, link) { |
2083 | if (pos->vlan_mac_flags == *vlan_mac_flags) { | 2085 | flags = pos->vlan_mac_flags; |
2086 | if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == | ||
2087 | BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { | ||
2084 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; | 2088 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; |
2085 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); | 2089 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); |
2086 | rc = bnx2x_config_vlan_mac(bp, &p); | 2090 | rc = bnx2x_config_vlan_mac(bp, &p); |
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp, | |||
4382 | struct bnx2x_raw_obj *r = &o->raw; | 4386 | struct bnx2x_raw_obj *r = &o->raw; |
4383 | 4387 | ||
4384 | /* Do nothing if only driver cleanup was requested */ | 4388 | /* Do nothing if only driver cleanup was requested */ |
4385 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) | 4389 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { |
4390 | DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", | ||
4391 | p->ramrod_flags); | ||
4386 | return 0; | 4392 | return 0; |
4393 | } | ||
4387 | 4394 | ||
4388 | r->set_pending(r); | 4395 | r->set_pending(r); |
4389 | 4396 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf9..6a53c15c85a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | |||
@@ -266,6 +266,13 @@ enum { | |||
266 | BNX2X_DONT_CONSUME_CAM_CREDIT, | 266 | BNX2X_DONT_CONSUME_CAM_CREDIT, |
267 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | 267 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, |
268 | }; | 268 | }; |
269 | /* When looking for matching filters, some flags are not interesting */ | ||
270 | #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ | ||
271 | 1 << BNX2X_ETH_MAC | \ | ||
272 | 1 << BNX2X_ISCSI_ETH_MAC | \ | ||
273 | 1 << BNX2X_NETQ_ETH_MAC) | ||
274 | #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ | ||
275 | ((flags) & BNX2X_VLAN_MAC_CMP_MASK) | ||
269 | 276 | ||
270 | struct bnx2x_vlan_mac_ramrod_params { | 277 | struct bnx2x_vlan_mac_ramrod_params { |
271 | /* Object to run the command from */ | 278 | /* Object to run the command from */ |
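BNX2X_VLAN_MAC_CMP_FLAGS() lets bnx2x_vlan_mac_del_all() match entries by classification type while ignoring bookkeeping bits such as BNX2X_DONT_CONSUME_CAM_CREDIT, which may differ between the stored entry and the caller's request. The effect in isolation (enum positions here are illustrative, not the header's exact values):

#include <stdio.h>

enum {
        UC_LIST_MAC,            /* illustrative positions */
        ETH_MAC,
        ISCSI_ETH_MAC,
        NETQ_ETH_MAC,
        DONT_CONSUME_CAM_CREDIT,
};

#define CMP_MASK        ((1 << UC_LIST_MAC) | (1 << ETH_MAC) | \
                         (1 << ISCSI_ETH_MAC) | (1 << NETQ_ETH_MAC))
#define CMP_FLAGS(f)    ((f) & CMP_MASK)

int main(void)
{
        unsigned long wanted = 1 << ETH_MAC;
        unsigned long stored = (1 << ETH_MAC) | (1 << DONT_CONSUME_CAM_CREDIT);

        printf("raw compare:    %d\n", wanted == stored);                 /* 0 */
        printf("masked compare: %d\n", CMP_FLAGS(wanted) == CMP_FLAGS(stored)); /* 1 */
        return 0;
}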
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 0216d592d0ce..e7845e5be1c7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
1209 | /* next state */ | 1209 | /* next state */ |
1210 | vfop->state = BNX2X_VFOP_RXMODE_DONE; | 1210 | vfop->state = BNX2X_VFOP_RXMODE_DONE; |
1211 | 1211 | ||
1212 | /* record the accept flags in vfdb so the hypervisor can modify them | ||
1213 | * if necessary | ||
1214 | */ | ||
1215 | bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = | ||
1216 | ramrod->rx_accept_flags; | ||
1212 | vfop->rc = bnx2x_config_rx_mode(bp, ramrod); | 1217 | vfop->rc = bnx2x_config_rx_mode(bp, ramrod); |
1213 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 1218 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
1214 | op_err: | 1219 | op_err: |
@@ -1224,39 +1229,43 @@ op_pending: | |||
1224 | return; | 1229 | return; |
1225 | } | 1230 | } |
1226 | 1231 | ||
1232 | static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, | ||
1233 | struct bnx2x_rx_mode_ramrod_params *ramrod, | ||
1234 | struct bnx2x_virtf *vf, | ||
1235 | unsigned long accept_flags) | ||
1236 | { | ||
1237 | struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); | ||
1238 | |||
1239 | memset(ramrod, 0, sizeof(*ramrod)); | ||
1240 | ramrod->cid = vfq->cid; | ||
1241 | ramrod->cl_id = vfq_cl_id(vf, vfq); | ||
1242 | ramrod->rx_mode_obj = &bp->rx_mode_obj; | ||
1243 | ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); | ||
1244 | ramrod->rx_accept_flags = accept_flags; | ||
1245 | ramrod->tx_accept_flags = accept_flags; | ||
1246 | ramrod->pstate = &vf->filter_state; | ||
1247 | ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; | ||
1248 | |||
1249 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); | ||
1250 | set_bit(RAMROD_RX, &ramrod->ramrod_flags); | ||
1251 | set_bit(RAMROD_TX, &ramrod->ramrod_flags); | ||
1252 | |||
1253 | ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); | ||
1254 | ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); | ||
1255 | } | ||
1256 | |||
1227 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, | 1257 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, |
1228 | struct bnx2x_virtf *vf, | 1258 | struct bnx2x_virtf *vf, |
1229 | struct bnx2x_vfop_cmd *cmd, | 1259 | struct bnx2x_vfop_cmd *cmd, |
1230 | int qid, unsigned long accept_flags) | 1260 | int qid, unsigned long accept_flags) |
1231 | { | 1261 | { |
1232 | struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); | ||
1233 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 1262 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); |
1234 | 1263 | ||
1235 | if (vfop) { | 1264 | if (vfop) { |
1236 | struct bnx2x_rx_mode_ramrod_params *ramrod = | 1265 | struct bnx2x_rx_mode_ramrod_params *ramrod = |
1237 | &vf->op_params.rx_mode; | 1266 | &vf->op_params.rx_mode; |
1238 | 1267 | ||
1239 | memset(ramrod, 0, sizeof(*ramrod)); | 1268 | bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); |
1240 | |||
1241 | /* Prepare ramrod parameters */ | ||
1242 | ramrod->cid = vfq->cid; | ||
1243 | ramrod->cl_id = vfq_cl_id(vf, vfq); | ||
1244 | ramrod->rx_mode_obj = &bp->rx_mode_obj; | ||
1245 | ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); | ||
1246 | |||
1247 | ramrod->rx_accept_flags = accept_flags; | ||
1248 | ramrod->tx_accept_flags = accept_flags; | ||
1249 | ramrod->pstate = &vf->filter_state; | ||
1250 | ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; | ||
1251 | |||
1252 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); | ||
1253 | set_bit(RAMROD_RX, &ramrod->ramrod_flags); | ||
1254 | set_bit(RAMROD_TX, &ramrod->ramrod_flags); | ||
1255 | |||
1256 | ramrod->rdata = | ||
1257 | bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); | ||
1258 | ramrod->rdata_mapping = | ||
1259 | bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); | ||
1260 | 1269 | ||
1261 | bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, | 1270 | bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, |
1262 | bnx2x_vfop_rxmode, cmd->done); | 1271 | bnx2x_vfop_rxmode, cmd->done); |
@@ -3114,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) | |||
3114 | { | 3123 | { |
3115 | struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); | 3124 | struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); |
3116 | 3125 | ||
3126 | if (!IS_SRIOV(bp)) { | ||
3127 | BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); | ||
3128 | return -EINVAL; | ||
3129 | } | ||
3130 | |||
3117 | DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", | 3131 | DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", |
3118 | num_vfs_param, BNX2X_NR_VIRTFN(bp)); | 3132 | num_vfs_param, BNX2X_NR_VIRTFN(bp)); |
3119 | 3133 | ||
@@ -3197,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
3197 | bnx2x_iov_static_resc(bp, vf); | 3211 | bnx2x_iov_static_resc(bp, vf); |
3198 | } | 3212 | } |
3199 | 3213 | ||
3200 | /* prepare msix vectors in VF configuration space */ | 3214 | /* prepare msix vectors in VF configuration space - the value in the |
3215 | * PCI configuration space should be the index of the last entry, | ||
3216 | * namely one less than the actual size of the table | ||
3217 | */ | ||
3201 | for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { | 3218 | for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { |
3202 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); | 3219 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); |
3203 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, | 3220 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, |
3204 | num_vf_queues); | 3221 | num_vf_queues - 1); |
3205 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", | 3222 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", |
3206 | vf_idx, num_vf_queues); | 3223 | vf_idx, num_vf_queues - 1); |
3207 | } | 3224 | } |
3208 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 3225 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
3209 | 3226 | ||
@@ -3431,10 +3448,18 @@ out: | |||
3431 | 3448 | ||
3432 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | 3449 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) |
3433 | { | 3450 | { |
3451 | struct bnx2x_queue_state_params q_params = {NULL}; | ||
3452 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; | ||
3453 | struct bnx2x_queue_update_params *update_params; | ||
3454 | struct pf_vf_bulletin_content *bulletin = NULL; | ||
3455 | struct bnx2x_rx_mode_ramrod_params rx_ramrod; | ||
3434 | struct bnx2x *bp = netdev_priv(dev); | 3456 | struct bnx2x *bp = netdev_priv(dev); |
3435 | int rc, q_logical_state; | 3457 | struct bnx2x_vlan_mac_obj *vlan_obj; |
3458 | unsigned long vlan_mac_flags = 0; | ||
3459 | unsigned long ramrod_flags = 0; | ||
3436 | struct bnx2x_virtf *vf = NULL; | 3460 | struct bnx2x_virtf *vf = NULL; |
3437 | struct pf_vf_bulletin_content *bulletin = NULL; | 3461 | unsigned long accept_flags; |
3462 | int rc; | ||
3438 | 3463 | ||
3439 | /* sanity and init */ | 3464 | /* sanity and init */ |
3440 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); | 3465 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
@@ -3452,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
3452 | /* update PF's copy of the VF's bulletin. No point in posting the vlan | 3477 | /* update PF's copy of the VF's bulletin. No point in posting the vlan |
3454 | * to the VF since it doesn't have anything to do with it. But it is useful | 3478 | * to the VF since it doesn't have anything to do with it. But it is useful |
3454 | * to store it here in case the VF is not up yet and we can only | 3479 | * to store it here in case the VF is not up yet and we can only |
3455 | * configure the vlan later when it does. | 3480 | * configure the vlan later when it does. Treat vlan id 0 as removing the |
3481 | * Host tag. | ||
3456 | */ | 3482 | */ |
3457 | bulletin->valid_bitmap |= 1 << VLAN_VALID; | 3483 | if (vlan > 0) |
3484 | bulletin->valid_bitmap |= 1 << VLAN_VALID; | ||
3485 | else | ||
3486 | bulletin->valid_bitmap &= ~(1 << VLAN_VALID); | ||
3458 | bulletin->vlan = vlan; | 3487 | bulletin->vlan = vlan; |
3459 | 3488 | ||
3460 | /* is vf initialized and queue set up? */ | 3489 | /* is vf initialized and queue set up? */ |
3461 | q_logical_state = | 3490 | if (vf->state != VF_ENABLED || |
3462 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); | 3491 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != |
3463 | if (vf->state == VF_ENABLED && | 3492 | BNX2X_Q_LOGICAL_STATE_ACTIVE) |
3464 | q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { | 3493 | return rc; |
3465 | /* configure the vlan in device on this vf's queue */ | ||
3466 | unsigned long ramrod_flags = 0; | ||
3467 | unsigned long vlan_mac_flags = 0; | ||
3468 | struct bnx2x_vlan_mac_obj *vlan_obj = | ||
3469 | &bnx2x_leading_vfq(vf, vlan_obj); | ||
3470 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; | ||
3471 | struct bnx2x_queue_state_params q_params = {NULL}; | ||
3472 | struct bnx2x_queue_update_params *update_params; | ||
3473 | 3494 | ||
3474 | rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); | 3495 | /* configure the vlan in device on this vf's queue */ |
3475 | if (rc) | 3496 | vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); |
3476 | return rc; | 3497 | rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); |
3477 | memset(&ramrod_param, 0, sizeof(ramrod_param)); | 3498 | if (rc) |
3499 | return rc; | ||
3478 | 3500 | ||
3479 | /* must lock vfpf channel to protect against vf flows */ | 3501 | /* must lock vfpf channel to protect against vf flows */ |
3480 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); | 3502 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); |
3481 | 3503 | ||
3482 | /* remove existing vlans */ | 3504 | /* remove existing vlans */ |
3483 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | 3505 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
3484 | rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, | 3506 | rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, |
3485 | &ramrod_flags); | 3507 | &ramrod_flags); |
3486 | if (rc) { | 3508 | if (rc) { |
3487 | BNX2X_ERR("failed to delete vlans\n"); | 3509 | BNX2X_ERR("failed to delete vlans\n"); |
3488 | rc = -EINVAL; | 3510 | rc = -EINVAL; |
3489 | goto out; | 3511 | goto out; |
3490 | } | 3512 | } |
3513 | |||
3514 | /* need to remove/add the VF's accept_any_vlan bit */ | ||
3515 | accept_flags = bnx2x_leading_vfq(vf, accept_flags); | ||
3516 | if (vlan) | ||
3517 | clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
3518 | else | ||
3519 | set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
3520 | |||
3521 | bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, | ||
3522 | accept_flags); | ||
3523 | bnx2x_leading_vfq(vf, accept_flags) = accept_flags; | ||
3524 | bnx2x_config_rx_mode(bp, &rx_ramrod); | ||
3525 | |||
3526 | /* configure the new vlan to device */ | ||
3527 | memset(&ramrod_param, 0, sizeof(ramrod_param)); | ||
3528 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
3529 | ramrod_param.vlan_mac_obj = vlan_obj; | ||
3530 | ramrod_param.ramrod_flags = ramrod_flags; | ||
3531 | set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
3532 | &ramrod_param.user_req.vlan_mac_flags); | ||
3533 | ramrod_param.user_req.u.vlan.vlan = vlan; | ||
3534 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
3535 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); | ||
3536 | if (rc) { | ||
3537 | BNX2X_ERR("failed to configure vlan\n"); | ||
3538 | rc = -EINVAL; | ||
3539 | goto out; | ||
3540 | } | ||
3491 | 3541 | ||
3492 | /* send queue update ramrod to configure default vlan and silent | 3542 | /* send queue update ramrod to configure default vlan and silent |
3493 | * vlan removal | 3543 | * vlan removal |
3544 | */ | ||
3545 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | ||
3546 | q_params.cmd = BNX2X_Q_CMD_UPDATE; | ||
3547 | q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); | ||
3548 | update_params = &q_params.params.update; | ||
3549 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
3550 | &update_params->update_flags); | ||
3551 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
3552 | &update_params->update_flags); | ||
3553 | if (vlan == 0) { | ||
3554 | /* if vlan is 0 then we want to leave the VF traffic | ||
3555 | * untagged, and leave the incoming traffic untouched | ||
3556 | * (i.e. do not remove any vlan tags). | ||
3557 | */ | ||
3558 | __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
3559 | &update_params->update_flags); | ||
3560 | __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, | ||
3561 | &update_params->update_flags); | ||
3562 | } else { | ||
3563 | /* configure default vlan to vf queue and set silent | ||
3564 | * vlan removal (the vf remains unaware of this vlan). | ||
3494 | */ | 3565 | */ |
3495 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | 3566 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, |
3496 | q_params.cmd = BNX2X_Q_CMD_UPDATE; | ||
3497 | q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); | ||
3498 | update_params = &q_params.params.update; | ||
3499 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
3500 | &update_params->update_flags); | 3567 | &update_params->update_flags); |
3501 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | 3568 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, |
3502 | &update_params->update_flags); | 3569 | &update_params->update_flags); |
3570 | update_params->def_vlan = vlan; | ||
3571 | update_params->silent_removal_value = | ||
3572 | vlan & VLAN_VID_MASK; | ||
3573 | update_params->silent_removal_mask = VLAN_VID_MASK; | ||
3574 | } | ||
3503 | 3575 | ||
3504 | if (vlan == 0) { | 3576 | /* Update the Queue state */ |
3505 | /* if vlan is 0 then we want to leave the VF traffic | 3577 | rc = bnx2x_queue_state_change(bp, &q_params); |
3506 | * untagged, and leave the incoming traffic untouched | 3578 | if (rc) { |
3507 | * (i.e. do not remove any vlan tags). | 3579 | BNX2X_ERR("Failed to configure default VLAN\n"); |
3508 | */ | 3580 | goto out; |
3509 | __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | 3581 | } |
3510 | &update_params->update_flags); | ||
3511 | __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, | ||
3512 | &update_params->update_flags); | ||
3513 | } else { | ||
3514 | /* configure the new vlan to device */ | ||
3515 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
3516 | ramrod_param.vlan_mac_obj = vlan_obj; | ||
3517 | ramrod_param.ramrod_flags = ramrod_flags; | ||
3518 | ramrod_param.user_req.u.vlan.vlan = vlan; | ||
3519 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
3520 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); | ||
3521 | if (rc) { | ||
3522 | BNX2X_ERR("failed to configure vlan\n"); | ||
3523 | rc = -EINVAL; | ||
3524 | goto out; | ||
3525 | } | ||
3526 | |||
3527 | /* configure default vlan to vf queue and set silent | ||
3528 | * vlan removal (the vf remains unaware of this vlan). | ||
3529 | */ | ||
3530 | update_params = &q_params.params.update; | ||
3531 | __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
3532 | &update_params->update_flags); | ||
3533 | __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, | ||
3534 | &update_params->update_flags); | ||
3535 | update_params->def_vlan = vlan; | ||
3536 | } | ||
3537 | 3582 | ||
3538 | /* Update the Queue state */ | ||
3539 | rc = bnx2x_queue_state_change(bp, &q_params); | ||
3540 | if (rc) { | ||
3541 | BNX2X_ERR("Failed to configure default VLAN\n"); | ||
3542 | goto out; | ||
3543 | } | ||
3544 | 3583 | ||
3545 | /* clear the flag indicating that this VF needs its vlan | 3584 | /* clear the flag indicating that this VF needs its vlan |
3546 | * (will only be set if the HV configured the Vlan before vf was | 3585 | * (will only be set if the HV configured the Vlan before vf was |
3547 | * up and we were called because the VF came up later | 3586 | * up and we were called because the VF came up later |
3548 | */ | 3587 | */ |
3549 | out: | 3588 | out: |
3550 | vf->cfg_flags &= ~VF_CFG_VLAN; | 3589 | vf->cfg_flags &= ~VF_CFG_VLAN; |
3551 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); | 3590 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); |
3552 | } | 3591 | |
3553 | return rc; | 3592 | return rc; |
3554 | } | 3593 | } |
3555 | 3594 | ||
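
The reworked bnx2x_set_vf_vlan() hunk above treats VLAN id 0 as "remove the host tag": it clears VLAN_VALID in the PF's copy of the bulletin, re-enables BNX2X_ACCEPT_ANY_VLAN, and skips silent VLAN removal; a non-zero id publishes the tag and programs it on the leading queue. A minimal user-space model of just the bulletin bookkeeping (the VLAN_VALID bit position and trimmed struct below are illustrative stand-ins, not the driver's definitions):

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_VALID 1            /* illustrative bit index, not the driver's */

    struct pf_vf_bulletin {         /* trimmed stand-in for pf_vf_bulletin_content */
        uint64_t valid_bitmap;
        uint16_t vlan;
    };

    /* Mirror of the hunk's logic: vlan > 0 publishes a host tag, vlan == 0 removes it. */
    static void set_vf_vlan(struct pf_vf_bulletin *b, uint16_t vlan)
    {
        if (vlan > 0)
            b->valid_bitmap |= 1ULL << VLAN_VALID;
        else
            b->valid_bitmap &= ~(1ULL << VLAN_VALID);
        b->vlan = vlan;
    }

    int main(void)
    {
        struct pf_vf_bulletin b = { 0 };

        set_vf_vlan(&b, 100);
        printf("vlan=100 -> valid=%d\n", !!(b.valid_bitmap & (1ULL << VLAN_VALID)));
        set_vf_vlan(&b, 0);
        printf("vlan=0   -> valid=%d\n", !!(b.valid_bitmap & (1ULL << VLAN_VALID)));
        return 0;
    }
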
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 1ff6a9366629..8c213fa52174 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue { | |||
74 | /* VLANs object */ | 74 | /* VLANs object */ |
75 | struct bnx2x_vlan_mac_obj vlan_obj; | 75 | struct bnx2x_vlan_mac_obj vlan_obj; |
76 | atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ | 76 | atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ |
77 | unsigned long accept_flags; /* last accept flags configured */ | ||
77 | 78 | ||
78 | /* Queue Slow-path State object */ | 79 | /* Queue Slow-path State object */ |
79 | struct bnx2x_queue_sp_obj sp_obj; | 80 | struct bnx2x_queue_sp_obj sp_obj; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index efa8a151d789..0756d7dabdd5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) | |||
208 | return -EINVAL; | 208 | return -EINVAL; |
209 | } | 209 | } |
210 | 210 | ||
211 | BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); | 211 | DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); |
212 | 212 | ||
213 | *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; | 213 | *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; |
214 | 214 | ||
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
1598 | 1598 | ||
1599 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { | 1599 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { |
1600 | unsigned long accept = 0; | 1600 | unsigned long accept = 0; |
1601 | struct pf_vf_bulletin_content *bulletin = | ||
1602 | BP_VF_BULLETIN(bp, vf->index); | ||
1601 | 1603 | ||
1602 | /* convert VF-PF if mask to bnx2x accept flags | 1604 | /* convert VF-PF if mask to bnx2x accept flags |
1603 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) | 1605 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) |
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
1617 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); | 1619 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); |
1618 | 1620 | ||
1619 | /* A packet arriving at the vf's mac should be accepted | 1621 | /* A packet arriving at the vf's mac should be accepted |
1620 | * with any vlan | 1622 | * with any vlan, unless a vlan has already been |
1623 | * configured. | ||
1621 | */ | 1624 | */ |
1622 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); | 1625 | if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) |
1626 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); | ||
1623 | 1627 | ||
1624 | /* set rx-mode */ | 1628 | /* set rx-mode */ |
1625 | rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, | 1629 | rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, |
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, | |||
1710 | goto response; | 1714 | goto response; |
1711 | } | 1715 | } |
1712 | } | 1716 | } |
1717 | /* if vlan was set by hypervisor we don't allow guest to config vlan */ | ||
1718 | if (bulletin->valid_bitmap & 1 << VLAN_VALID) { | ||
1719 | int i; | ||
1720 | |||
1721 | /* search for vlan filters */ | ||
1722 | for (i = 0; i < filters->n_mac_vlan_filters; i++) { | ||
1723 | if (filters->filters[i].flags & | ||
1724 | VFPF_Q_FILTER_VLAN_TAG_VALID) { | ||
1725 | BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", | ||
1726 | vf->abs_vfid); | ||
1727 | vf->op_rc = -EPERM; | ||
1728 | goto response; | ||
1729 | } | ||
1730 | } | ||
1731 | } | ||
1713 | 1732 | ||
1714 | /* verify vf_qid */ | 1733 | /* verify vf_qid */ |
1715 | if (filters->vf_qid > vf_rxq_count(vf)) | 1734 | if (filters->vf_qid > vf_rxq_count(vf)) |
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1805 | vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; | 1824 | vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; |
1806 | 1825 | ||
1807 | /* flags handled individually for backward/forward compatibility */ | 1826 | /* flags handled individually for backward/forward compatibility */ |
1827 | vf_op_params->rss_flags = 0; | ||
1828 | vf_op_params->ramrod_flags = 0; | ||
1829 | |||
1808 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) | 1830 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) |
1809 | __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); | 1831 | __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); |
1810 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) | 1832 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) |
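
The bnx2x_vfpf.c hunks above make the PF authoritative once it has published a VLAN: BNX2X_ACCEPT_ANY_VLAN is only set when no bulletin VLAN exists, and a VF mailbox request that carries its own VLAN filter is rejected with -EPERM. A self-contained sketch of that policy check (the structure names and filter-flag value are simplified stand-ins for the driver's types):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define VLAN_VALID               1       /* illustrative bit index */
    #define Q_FILTER_VLAN_TAG_VALID  0x1     /* illustrative flag value */

    struct vf_filter  { uint32_t flags; uint16_t vlan; };
    struct vf_request { int n_filters; struct vf_filter filters[4]; };

    /* Reject the request if the hypervisor already pinned a VLAN in the bulletin. */
    static int check_vf_filters(uint64_t bulletin_valid_bitmap,
                                const struct vf_request *req)
    {
        int i;

        if (!(bulletin_valid_bitmap & (1ULL << VLAN_VALID)))
            return 0;                   /* no host VLAN: the VF may configure one */

        for (i = 0; i < req->n_filters; i++)
            if (req->filters[i].flags & Q_FILTER_VLAN_TAG_VALID)
                return -EPERM;          /* host VLAN wins, abort the request */
        return 0;
    }

    int main(void)
    {
        struct vf_request req = { 1, { { Q_FILTER_VLAN_TAG_VALID, 100 } } };

        printf("no host vlan:  %d\n", check_vf_filters(0, &req));
        printf("host vlan set: %d\n", check_vf_filters(1ULL << VLAN_VALID, &req));
        return 0;
    }
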
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 369b736dde05..15a66e4b1f57 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | |||
7622 | { | 7622 | { |
7623 | u32 base = (u32) mapping & 0xffffffff; | 7623 | u32 base = (u32) mapping & 0xffffffff; |
7624 | 7624 | ||
7625 | return (base > 0xffffdcc0) && (base + len + 8 < base); | 7625 | return base + len + 8 < base; |
7626 | } | 7626 | } |
7627 | 7627 | ||
7628 | /* Test for TSO DMA buffers that cross into regions which are within MSS bytes | 7628 | /* Test for TSO DMA buffers that cross into regions which are within MSS bytes |
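
The tg3_4g_overflow_test() change above drops the magic lower bound and keeps only the unsigned-wrap check: in 32-bit arithmetic, base + len + 8 < base is true exactly when the DMA buffer (plus 8 bytes of slack) crosses a 4 GB boundary. A standalone demonstration of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Same predicate as the patched helper, applied to the low 32 bits of the mapping. */
    static int tg3_4g_overflow_test(uint64_t mapping, int len)
    {
        uint32_t base = (uint32_t)mapping;

        return (uint32_t)(base + len + 8) < base;   /* wraps iff it crosses 4 GB */
    }

    int main(void)
    {
        /* Ends well below the 4 GB mark: no wrap, prints 0. */
        printf("%d\n", tg3_4g_overflow_test(0x00001000, 256));
        /* Starts at 0xfffffff0 and extends past 4 GB: wraps, prints 1. */
        printf("%d\n", tg3_4g_overflow_test(0xfffffff0, 256));
        return 0;
    }
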
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
8932 | void (*write_op)(struct tg3 *, u32, u32); | 8932 | void (*write_op)(struct tg3 *, u32, u32); |
8933 | int i, err; | 8933 | int i, err; |
8934 | 8934 | ||
8935 | if (!pci_device_is_present(tp->pdev)) | ||
8936 | return -ENODEV; | ||
8937 | |||
8935 | tg3_nvram_lock(tp); | 8938 | tg3_nvram_lock(tp); |
8936 | 8939 | ||
8937 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); | 8940 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); |
@@ -11581,10 +11584,11 @@ static int tg3_close(struct net_device *dev) | |||
11581 | memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); | 11584 | memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); |
11582 | memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); | 11585 | memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); |
11583 | 11586 | ||
11584 | tg3_power_down_prepare(tp); | 11587 | if (pci_device_is_present(tp->pdev)) { |
11585 | 11588 | tg3_power_down_prepare(tp); | |
11586 | tg3_carrier_off(tp); | ||
11587 | 11589 | ||
11590 | tg3_carrier_off(tp); | ||
11591 | } | ||
11588 | return 0; | 11592 | return 0; |
11589 | } | 11593 | } |
11590 | 11594 | ||
@@ -16499,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) | |||
16499 | /* Clear this out for sanity. */ | 16503 | /* Clear this out for sanity. */ |
16500 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 16504 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
16501 | 16505 | ||
16506 | /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ | ||
16507 | tw32(TG3PCI_REG_BASE_ADDR, 0); | ||
16508 | |||
16502 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 16509 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
16503 | &pci_state_reg); | 16510 | &pci_state_reg); |
16504 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && | 16511 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && |
@@ -17726,10 +17733,12 @@ static int tg3_suspend(struct device *device) | |||
17726 | struct pci_dev *pdev = to_pci_dev(device); | 17733 | struct pci_dev *pdev = to_pci_dev(device); |
17727 | struct net_device *dev = pci_get_drvdata(pdev); | 17734 | struct net_device *dev = pci_get_drvdata(pdev); |
17728 | struct tg3 *tp = netdev_priv(dev); | 17735 | struct tg3 *tp = netdev_priv(dev); |
17729 | int err; | 17736 | int err = 0; |
17737 | |||
17738 | rtnl_lock(); | ||
17730 | 17739 | ||
17731 | if (!netif_running(dev)) | 17740 | if (!netif_running(dev)) |
17732 | return 0; | 17741 | goto unlock; |
17733 | 17742 | ||
17734 | tg3_reset_task_cancel(tp); | 17743 | tg3_reset_task_cancel(tp); |
17735 | tg3_phy_stop(tp); | 17744 | tg3_phy_stop(tp); |
@@ -17771,6 +17780,8 @@ out: | |||
17771 | tg3_phy_start(tp); | 17780 | tg3_phy_start(tp); |
17772 | } | 17781 | } |
17773 | 17782 | ||
17783 | unlock: | ||
17784 | rtnl_unlock(); | ||
17774 | return err; | 17785 | return err; |
17775 | } | 17786 | } |
17776 | 17787 | ||
@@ -17779,10 +17790,12 @@ static int tg3_resume(struct device *device) | |||
17779 | struct pci_dev *pdev = to_pci_dev(device); | 17790 | struct pci_dev *pdev = to_pci_dev(device); |
17780 | struct net_device *dev = pci_get_drvdata(pdev); | 17791 | struct net_device *dev = pci_get_drvdata(pdev); |
17781 | struct tg3 *tp = netdev_priv(dev); | 17792 | struct tg3 *tp = netdev_priv(dev); |
17782 | int err; | 17793 | int err = 0; |
17794 | |||
17795 | rtnl_lock(); | ||
17783 | 17796 | ||
17784 | if (!netif_running(dev)) | 17797 | if (!netif_running(dev)) |
17785 | return 0; | 17798 | goto unlock; |
17786 | 17799 | ||
17787 | netif_device_attach(dev); | 17800 | netif_device_attach(dev); |
17788 | 17801 | ||
@@ -17806,6 +17819,8 @@ out: | |||
17806 | if (!err) | 17819 | if (!err) |
17807 | tg3_phy_start(tp); | 17820 | tg3_phy_start(tp); |
17808 | 17821 | ||
17822 | unlock: | ||
17823 | rtnl_unlock(); | ||
17809 | return err; | 17824 | return err; |
17810 | } | 17825 | } |
17811 | #endif /* CONFIG_PM_SLEEP */ | 17826 | #endif /* CONFIG_PM_SLEEP */ |
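
The tg3 suspend/resume hunks above convert the early "return 0" into "goto unlock" so that the newly added rtnl_lock()/rtnl_unlock() pair always balances, even when the interface is not running. The same "single unlock exit" shape, modeled with a pthread mutex standing in for the RTNL (purely illustrative; this is not the driver code):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for rtnl_lock() */

    static int device_suspend(int netif_running)
    {
        int err = 0;

        pthread_mutex_lock(&rtnl);

        if (!netif_running)
            goto unlock;            /* early exit still releases the lock */

        /* ... stop queues, cancel reset work, program wake-up state ... */

    unlock:
        pthread_mutex_unlock(&rtnl);
        return err;
    }

    int main(void)
    {
        printf("not running: %d\n", device_suspend(0));
        printf("running:     %d\n", device_suspend(1));
        return 0;
    }
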
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecd2fb3ef695..56e0415f8cdf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -49,13 +49,15 @@ | |||
49 | #include <asm/io.h> | 49 | #include <asm/io.h> |
50 | #include "cxgb4_uld.h" | 50 | #include "cxgb4_uld.h" |
51 | 51 | ||
52 | #define FW_VERSION_MAJOR 1 | 52 | #define T4FW_VERSION_MAJOR 0x01 |
53 | #define FW_VERSION_MINOR 4 | 53 | #define T4FW_VERSION_MINOR 0x06 |
54 | #define FW_VERSION_MICRO 0 | 54 | #define T4FW_VERSION_MICRO 0x18 |
55 | #define T4FW_VERSION_BUILD 0x00 | ||
55 | 56 | ||
56 | #define FW_VERSION_MAJOR_T5 0 | 57 | #define T5FW_VERSION_MAJOR 0x01 |
57 | #define FW_VERSION_MINOR_T5 0 | 58 | #define T5FW_VERSION_MINOR 0x08 |
58 | #define FW_VERSION_MICRO_T5 0 | 59 | #define T5FW_VERSION_MICRO 0x1C |
60 | #define T5FW_VERSION_BUILD 0x00 | ||
59 | 61 | ||
60 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) | 62 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) |
61 | 63 | ||
@@ -226,6 +228,25 @@ struct tp_params { | |||
226 | 228 | ||
227 | uint32_t dack_re; /* DACK timer resolution */ | 229 | uint32_t dack_re; /* DACK timer resolution */ |
228 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ | 230 | unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ |
231 | |||
232 | u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ | ||
233 | u32 ingress_config; /* cached TP_INGRESS_CONFIG */ | ||
234 | |||
235 | /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a | ||
236 | * subset of the set of fields which may be present in the Compressed | ||
237 | * Filter Tuple portion of filters and TCP TCB connections. The | ||
238 | * fields which are present are controlled by the TP_VLAN_PRI_MAP. | ||
239 | * Since a variable number of fields may or may not be present, their | ||
240 | * shifted field positions within the Compressed Filter Tuple may | ||
241 | * vary, or not even be present if the field isn't selected in | ||
242 | * TP_VLAN_PRI_MAP. Since some of these fields are needed in various | ||
243 | * places we store their offsets here, or a -1 if the field isn't | ||
244 | * present. | ||
245 | */ | ||
246 | int vlan_shift; | ||
247 | int vnic_shift; | ||
248 | int port_shift; | ||
249 | int protocol_shift; | ||
229 | }; | 250 | }; |
230 | 251 | ||
231 | struct vpd_params { | 252 | struct vpd_params { |
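
The new tp_params fields above cache where each optional field lands inside the Compressed Filter Tuple: only the fields selected in TP_VLAN_PRI_MAP are present, so a selected field's shift is the sum of the widths of the selected fields below it, and an unselected field gets -1. A small model of that computation (the field order and widths here are illustrative only; the real ones come from the T4/T5 register documentation):

    #include <stdio.h>

    /* Illustrative selection bits and field widths, lowest bit = least-significant field. */
    enum { F_PROTOCOL = 1 << 0, F_VNIC_ID = 1 << 1, F_PORT = 1 << 2, F_VLAN = 1 << 3 };

    static const struct { int sel; int width; const char *name; } fields[4] = {
        { F_PROTOCOL, 8,  "protocol" },
        { F_VNIC_ID,  17, "vnic"     },
        { F_PORT,     3,  "port"     },
        { F_VLAN,     17, "vlan"     },
    };

    /* Shift of one field = total width of the selected fields below it, or -1 if absent. */
    static int filter_field_shift(unsigned int vlan_pri_map, int sel)
    {
        int i, shift = 0;

        if (!(vlan_pri_map & sel))
            return -1;
        for (i = 0; i < 4 && !(fields[i].sel & sel); i++)
            if (vlan_pri_map & fields[i].sel)
                shift += fields[i].width;
        return shift;
    }

    int main(void)
    {
        unsigned int map = F_PROTOCOL | F_PORT | F_VLAN;   /* vnic not selected */
        int i;

        for (i = 0; i < 4; i++)
            printf("%-8s shift = %d\n", fields[i].name,
                   filter_field_shift(map, fields[i].sel));
        return 0;
    }
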
@@ -240,6 +261,26 @@ struct pci_params { | |||
240 | unsigned char width; | 261 | unsigned char width; |
241 | }; | 262 | }; |
242 | 263 | ||
264 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | ||
265 | #define CHELSIO_CHIP_FPGA 0x100 | ||
266 | #define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) | ||
267 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | ||
268 | |||
269 | #define CHELSIO_T4 0x4 | ||
270 | #define CHELSIO_T5 0x5 | ||
271 | |||
272 | enum chip_type { | ||
273 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | ||
274 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
275 | T4_FIRST_REV = T4_A1, | ||
276 | T4_LAST_REV = T4_A2, | ||
277 | |||
278 | T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | ||
279 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), | ||
280 | T5_FIRST_REV = T5_A0, | ||
281 | T5_LAST_REV = T5_A1, | ||
282 | }; | ||
283 | |||
243 | struct adapter_params { | 284 | struct adapter_params { |
244 | struct tp_params tp; | 285 | struct tp_params tp; |
245 | struct vpd_params vpd; | 286 | struct vpd_params vpd; |
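
The relocated chip-code macros above pack the silicon generation in the high nibble and the revision in the low nibble; CHELSIO_CHIP_VERSION()/CHELSIO_CHIP_RELEASE() unpack them, which is what lets is_t4()/is_t5() compare generations instead of revision ranges further down. A quick standalone check of the encoding, using the macros exactly as defined in the hunk:

    #include <stdio.h>

    #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
    #define CHELSIO_CHIP_VERSION(code)           (((code) >> 4) & 0xf)
    #define CHELSIO_CHIP_RELEASE(code)           ((code) & 0xf)

    #define CHELSIO_T4 0x4
    #define CHELSIO_T5 0x5

    int main(void)
    {
        int t4_a2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2);   /* 0x42 */
        int t5_a0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);   /* 0x50 */

        printf("T4_A2 = 0x%02x -> version %d rev %d\n", t4_a2,
               CHELSIO_CHIP_VERSION(t4_a2), CHELSIO_CHIP_RELEASE(t4_a2));
        printf("T5_A0 = 0x%02x -> version %d rev %d\n", t5_a0,
               CHELSIO_CHIP_VERSION(t5_a0), CHELSIO_CHIP_RELEASE(t5_a0));
        printf("is_t5(T5_A0) = %d\n", CHELSIO_CHIP_VERSION(t5_a0) == CHELSIO_T5);
        return 0;
    }
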
@@ -259,7 +300,7 @@ struct adapter_params { | |||
259 | 300 | ||
260 | unsigned char nports; /* # of ethernet ports */ | 301 | unsigned char nports; /* # of ethernet ports */ |
261 | unsigned char portvec; | 302 | unsigned char portvec; |
262 | unsigned char rev; /* chip revision */ | 303 | enum chip_type chip; /* chip code */ |
263 | unsigned char offload; | 304 | unsigned char offload; |
264 | 305 | ||
265 | unsigned char bypass; | 306 | unsigned char bypass; |
@@ -267,6 +308,23 @@ struct adapter_params { | |||
267 | unsigned int ofldq_wr_cred; | 308 | unsigned int ofldq_wr_cred; |
268 | }; | 309 | }; |
269 | 310 | ||
311 | #include "t4fw_api.h" | ||
312 | |||
313 | #define FW_VERSION(chip) ( \ | ||
314 | FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ | ||
315 | FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ | ||
316 | FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ | ||
317 | FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) | ||
318 | #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) | ||
319 | |||
320 | struct fw_info { | ||
321 | u8 chip; | ||
322 | char *fs_name; | ||
323 | char *fw_mod_name; | ||
324 | struct fw_hdr fw_hdr; | ||
325 | }; | ||
326 | |||
327 | |||
270 | struct trace_params { | 328 | struct trace_params { |
271 | u32 data[TRACE_LEN / 4]; | 329 | u32 data[TRACE_LEN / 4]; |
272 | u32 mask[TRACE_LEN / 4]; | 330 | u32 mask[TRACE_LEN / 4]; |
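
The FW_VERSION(chip) macro above relies on token pasting: FW_VERSION(T4) expands to the four T4FW_VERSION_* constants packed into one 32-bit version word, so the same macro text serves both chip generations. A compilable illustration with stand-in packing helpers (the real FW_HDR_FW_VER_*_GET macros live in t4fw_api.h; the one-byte-per-component layout below is an assumption made for the example):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the t4fw_api.h helpers: pack one byte per version component. */
    #define FW_HDR_FW_VER_MAJOR_GET(x) ((uint32_t)(x) << 24)
    #define FW_HDR_FW_VER_MINOR_GET(x) ((uint32_t)(x) << 16)
    #define FW_HDR_FW_VER_MICRO_GET(x) ((uint32_t)(x) << 8)
    #define FW_HDR_FW_VER_BUILD_GET(x) ((uint32_t)(x) << 0)

    #define T4FW_VERSION_MAJOR 0x01
    #define T4FW_VERSION_MINOR 0x06
    #define T4FW_VERSION_MICRO 0x18
    #define T4FW_VERSION_BUILD 0x00

    /* chip##FW_VERSION_MAJOR pastes to T4FW_VERSION_MAJOR when invoked as FW_VERSION(T4). */
    #define FW_VERSION(chip) ( \
        FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
        FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
        FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
        FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))

    int main(void)
    {
        printf("FW_VERSION(T4) = 0x%08x\n", FW_VERSION(T4));   /* 0x01061800 here */
        return 0;
    }
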
@@ -512,25 +570,6 @@ struct sge { | |||
512 | 570 | ||
513 | struct l2t_data; | 571 | struct l2t_data; |
514 | 572 | ||
515 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | ||
516 | #define CHELSIO_CHIP_VERSION(code) ((code) >> 4) | ||
517 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | ||
518 | |||
519 | #define CHELSIO_T4 0x4 | ||
520 | #define CHELSIO_T5 0x5 | ||
521 | |||
522 | enum chip_type { | ||
523 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), | ||
524 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | ||
525 | T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
526 | T4_FIRST_REV = T4_A1, | ||
527 | T4_LAST_REV = T4_A3, | ||
528 | |||
529 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | ||
530 | T5_FIRST_REV = T5_A1, | ||
531 | T5_LAST_REV = T5_A1, | ||
532 | }; | ||
533 | |||
534 | #ifdef CONFIG_PCI_IOV | 573 | #ifdef CONFIG_PCI_IOV |
535 | 574 | ||
536 | /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial | 575 | /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial |
@@ -715,12 +754,12 @@ enum { | |||
715 | 754 | ||
716 | static inline int is_t5(enum chip_type chip) | 755 | static inline int is_t5(enum chip_type chip) |
717 | { | 756 | { |
718 | return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); | 757 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; |
719 | } | 758 | } |
720 | 759 | ||
721 | static inline int is_t4(enum chip_type chip) | 760 | static inline int is_t4(enum chip_type chip) |
722 | { | 761 | { |
723 | return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); | 762 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; |
724 | } | 763 | } |
725 | 764 | ||
726 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) | 765 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) |
@@ -900,8 +939,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p); | |||
900 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); | 939 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); |
901 | unsigned int t4_flash_cfg_addr(struct adapter *adapter); | 940 | unsigned int t4_flash_cfg_addr(struct adapter *adapter); |
902 | int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); | 941 | int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); |
903 | int t4_check_fw_version(struct adapter *adapter); | 942 | int t4_get_fw_version(struct adapter *adapter, u32 *vers); |
943 | int t4_get_tp_version(struct adapter *adapter, u32 *vers); | ||
944 | int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | ||
945 | const u8 *fw_data, unsigned int fw_size, | ||
946 | struct fw_hdr *card_fw, enum dev_state state, int *reset); | ||
904 | int t4_prep_adapter(struct adapter *adapter); | 947 | int t4_prep_adapter(struct adapter *adapter); |
948 | int t4_init_tp_params(struct adapter *adap); | ||
949 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); | ||
905 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); | 950 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); |
906 | void t4_fatal_err(struct adapter *adapter); | 951 | void t4_fatal_err(struct adapter *adapter); |
907 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | 952 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8b929eeecd2d..fff02ed1295e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { | |||
276 | { 0, } | 276 | { 0, } |
277 | }; | 277 | }; |
278 | 278 | ||
279 | #define FW_FNAME "cxgb4/t4fw.bin" | 279 | #define FW4_FNAME "cxgb4/t4fw.bin" |
280 | #define FW5_FNAME "cxgb4/t5fw.bin" | 280 | #define FW5_FNAME "cxgb4/t5fw.bin" |
281 | #define FW_CFNAME "cxgb4/t4-config.txt" | 281 | #define FW4_CFNAME "cxgb4/t4-config.txt" |
282 | #define FW5_CFNAME "cxgb4/t5-config.txt" | 282 | #define FW5_CFNAME "cxgb4/t5-config.txt" |
283 | 283 | ||
284 | MODULE_DESCRIPTION(DRV_DESC); | 284 | MODULE_DESCRIPTION(DRV_DESC); |
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications"); | |||
286 | MODULE_LICENSE("Dual BSD/GPL"); | 286 | MODULE_LICENSE("Dual BSD/GPL"); |
287 | MODULE_VERSION(DRV_VERSION); | 287 | MODULE_VERSION(DRV_VERSION); |
288 | MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); | 288 | MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); |
289 | MODULE_FIRMWARE(FW_FNAME); | 289 | MODULE_FIRMWARE(FW4_FNAME); |
290 | MODULE_FIRMWARE(FW5_FNAME); | 290 | MODULE_FIRMWARE(FW5_FNAME); |
291 | 291 | ||
292 | /* | 292 | /* |
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap); | |||
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | /* | 1073 | /* |
1074 | * Returns 0 if new FW was successfully loaded, a positive errno if a load was | ||
1075 | * started but failed, and a negative errno if flash load couldn't start. | ||
1076 | */ | ||
1077 | static int upgrade_fw(struct adapter *adap) | ||
1078 | { | ||
1079 | int ret; | ||
1080 | u32 vers, exp_major; | ||
1081 | const struct fw_hdr *hdr; | ||
1082 | const struct firmware *fw; | ||
1083 | struct device *dev = adap->pdev_dev; | ||
1084 | char *fw_file_name; | ||
1085 | |||
1086 | switch (CHELSIO_CHIP_VERSION(adap->chip)) { | ||
1087 | case CHELSIO_T4: | ||
1088 | fw_file_name = FW_FNAME; | ||
1089 | exp_major = FW_VERSION_MAJOR; | ||
1090 | break; | ||
1091 | case CHELSIO_T5: | ||
1092 | fw_file_name = FW5_FNAME; | ||
1093 | exp_major = FW_VERSION_MAJOR_T5; | ||
1094 | break; | ||
1095 | default: | ||
1096 | dev_err(dev, "Unsupported chip type, %x\n", adap->chip); | ||
1097 | return -EINVAL; | ||
1098 | } | ||
1099 | |||
1100 | ret = request_firmware(&fw, fw_file_name, dev); | ||
1101 | if (ret < 0) { | ||
1102 | dev_err(dev, "unable to load firmware image %s, error %d\n", | ||
1103 | fw_file_name, ret); | ||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1107 | hdr = (const struct fw_hdr *)fw->data; | ||
1108 | vers = ntohl(hdr->fw_ver); | ||
1109 | if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { | ||
1110 | ret = -EINVAL; /* wrong major version, won't do */ | ||
1111 | goto out; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * If the flash FW is unusable or we found something newer, load it. | ||
1116 | */ | ||
1117 | if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || | ||
1118 | vers > adap->params.fw_vers) { | ||
1119 | dev_info(dev, "upgrading firmware ...\n"); | ||
1120 | ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, | ||
1121 | /*force=*/false); | ||
1122 | if (!ret) | ||
1123 | dev_info(dev, | ||
1124 | "firmware upgraded to version %pI4 from %s\n", | ||
1125 | &hdr->fw_ver, fw_file_name); | ||
1126 | else | ||
1127 | dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); | ||
1128 | } else { | ||
1129 | /* | ||
1130 | * Tell our caller that we didn't upgrade the firmware. | ||
1131 | */ | ||
1132 | ret = -EINVAL; | ||
1133 | } | ||
1134 | |||
1135 | out: release_firmware(fw); | ||
1136 | return ret; | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. | 1074 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. |
1141 | * The allocated memory is cleared. | 1075 | * The allocated memory is cleared. |
1142 | */ | 1076 | */ |
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset) | |||
1415 | static int get_regs_len(struct net_device *dev) | 1349 | static int get_regs_len(struct net_device *dev) |
1416 | { | 1350 | { |
1417 | struct adapter *adap = netdev2adap(dev); | 1351 | struct adapter *adap = netdev2adap(dev); |
1418 | if (is_t4(adap->chip)) | 1352 | if (is_t4(adap->params.chip)) |
1419 | return T4_REGMAP_SIZE; | 1353 | return T4_REGMAP_SIZE; |
1420 | else | 1354 | else |
1421 | return T5_REGMAP_SIZE; | 1355 | return T5_REGMAP_SIZE; |
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
1499 | data += sizeof(struct port_stats) / sizeof(u64); | 1433 | data += sizeof(struct port_stats) / sizeof(u64); |
1500 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); | 1434 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); |
1501 | data += sizeof(struct queue_port_stats) / sizeof(u64); | 1435 | data += sizeof(struct queue_port_stats) / sizeof(u64); |
1502 | if (!is_t4(adapter->chip)) { | 1436 | if (!is_t4(adapter->params.chip)) { |
1503 | t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); | 1437 | t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); |
1504 | val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); | 1438 | val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); |
1505 | val2 = t4_read_reg(adapter, SGE_STAT_MATCH); | 1439 | val2 = t4_read_reg(adapter, SGE_STAT_MATCH); |
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
1521 | */ | 1455 | */ |
1522 | static inline unsigned int mk_adap_vers(const struct adapter *ap) | 1456 | static inline unsigned int mk_adap_vers(const struct adapter *ap) |
1523 | { | 1457 | { |
1524 | return CHELSIO_CHIP_VERSION(ap->chip) | | 1458 | return CHELSIO_CHIP_VERSION(ap->params.chip) | |
1525 | (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); | 1459 | (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); |
1526 | } | 1460 | } |
1527 | 1461 | ||
1528 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, | 1462 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, |
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
2189 | static const unsigned int *reg_ranges; | 2123 | static const unsigned int *reg_ranges; |
2190 | int arr_size = 0, buf_size = 0; | 2124 | int arr_size = 0, buf_size = 0; |
2191 | 2125 | ||
2192 | if (is_t4(ap->chip)) { | 2126 | if (is_t4(ap->params.chip)) { |
2193 | reg_ranges = &t4_reg_ranges[0]; | 2127 | reg_ranges = &t4_reg_ranges[0]; |
2194 | arr_size = ARRAY_SIZE(t4_reg_ranges); | 2128 | arr_size = ARRAY_SIZE(t4_reg_ranges); |
2195 | buf_size = T4_REGMAP_SIZE; | 2129 | buf_size = T4_REGMAP_SIZE; |
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap) | |||
2967 | size = t4_read_reg(adap, MA_EDRAM1_BAR); | 2901 | size = t4_read_reg(adap, MA_EDRAM1_BAR); |
2968 | add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); | 2902 | add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); |
2969 | } | 2903 | } |
2970 | if (is_t4(adap->chip)) { | 2904 | if (is_t4(adap->params.chip)) { |
2971 | size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); | 2905 | size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); |
2972 | if (i & EXT_MEM_ENABLE) | 2906 | if (i & EXT_MEM_ENABLE) |
2973 | add_debugfs_mem(adap, "mc", MEM_MC, | 2907 | add_debugfs_mem(adap, "mc", MEM_MC, |
@@ -3052,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) | |||
3052 | if (stid >= 0) { | 2986 | if (stid >= 0) { |
3053 | t->stid_tab[stid].data = data; | 2987 | t->stid_tab[stid].data = data; |
3054 | stid += t->stid_base; | 2988 | stid += t->stid_base; |
3055 | t->stids_in_use++; | 2989 | /* IPv6 requires max of 520 bits or 16 cells in TCAM |
2990 | * This is equivalent to 4 TIDs. With CLIP enabled it | ||
2991 | * needs 2 TIDs. | ||
2992 | */ | ||
2993 | if (family == PF_INET) | ||
2994 | t->stids_in_use++; | ||
2995 | else | ||
2996 | t->stids_in_use += 4; | ||
3056 | } | 2997 | } |
3057 | spin_unlock_bh(&t->stid_lock); | 2998 | spin_unlock_bh(&t->stid_lock); |
3058 | return stid; | 2999 | return stid; |
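
The cxgb4_alloc_stid() change above accounts for the TCAM cost of an IPv6 server: a full IPv6 tuple needs up to 520 match bits, i.e. 16 TCAM cells, which corresponds to 4 TIDs (2 with CLIP enabled), so an IPv6 allocation bumps stids_in_use by 4 while IPv4 stays at 1; the matching decrement appears in cxgb4_free_stid() below. A small model of the bookkeeping (no CLIP case):

    #include <stdio.h>

    #define PF_INET  2
    #define PF_INET6 10

    struct tid_info { int stids_in_use; };

    /* IPv4 server: 1 TID; IPv6 server: 4 TIDs worth of TCAM cells. */
    static void account_alloc(struct tid_info *t, int family)
    {
        t->stids_in_use += (family == PF_INET) ? 1 : 4;
    }

    static void account_free(struct tid_info *t, int family)
    {
        t->stids_in_use -= (family == PF_INET) ? 1 : 4;
    }

    int main(void)
    {
        struct tid_info t = { 0 };

        account_alloc(&t, PF_INET);               /* +1 */
        account_alloc(&t, PF_INET6);              /* +4 */
        printf("in use: %d\n", t.stids_in_use);   /* 5  */
        account_free(&t, PF_INET6);
        printf("in use: %d\n", t.stids_in_use);   /* 1  */
        return 0;
    }
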
@@ -3078,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) | |||
3078 | } | 3019 | } |
3079 | if (stid >= 0) { | 3020 | if (stid >= 0) { |
3080 | t->stid_tab[stid].data = data; | 3021 | t->stid_tab[stid].data = data; |
3081 | stid += t->stid_base; | 3022 | stid -= t->nstids; |
3023 | stid += t->sftid_base; | ||
3082 | t->stids_in_use++; | 3024 | t->stids_in_use++; |
3083 | } | 3025 | } |
3084 | spin_unlock_bh(&t->stid_lock); | 3026 | spin_unlock_bh(&t->stid_lock); |
@@ -3090,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid); | |||
3090 | */ | 3032 | */ |
3091 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) | 3033 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) |
3092 | { | 3034 | { |
3093 | stid -= t->stid_base; | 3035 | /* Is it a server filter TID? */ |
3036 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
3037 | stid -= t->sftid_base; | ||
3038 | stid += t->nstids; | ||
3039 | } else { | ||
3040 | stid -= t->stid_base; | ||
3041 | } | ||
3042 | |||
3094 | spin_lock_bh(&t->stid_lock); | 3043 | spin_lock_bh(&t->stid_lock); |
3095 | if (family == PF_INET) | 3044 | if (family == PF_INET) |
3096 | __clear_bit(stid, t->stid_bmap); | 3045 | __clear_bit(stid, t->stid_bmap); |
3097 | else | 3046 | else |
3098 | bitmap_release_region(t->stid_bmap, stid, 2); | 3047 | bitmap_release_region(t->stid_bmap, stid, 2); |
3099 | t->stid_tab[stid].data = NULL; | 3048 | t->stid_tab[stid].data = NULL; |
3100 | t->stids_in_use--; | 3049 | if (family == PF_INET) |
3050 | t->stids_in_use--; | ||
3051 | else | ||
3052 | t->stids_in_use -= 4; | ||
3101 | spin_unlock_bh(&t->stid_lock); | 3053 | spin_unlock_bh(&t->stid_lock); |
3102 | } | 3054 | } |
3103 | EXPORT_SYMBOL(cxgb4_free_stid); | 3055 | EXPORT_SYMBOL(cxgb4_free_stid); |
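
cxgb4_free_stid() above (and lookup_stid() further down) now distinguish ordinary server TIDs from server filter TIDs: a TID at or above sftid_base is translated to table index nstids + (stid - sftid_base), while an ordinary server TID is simply offset by stid_base. A standalone version of that index translation (the base values are made-up examples, not real adapter parameters):

    #include <stdio.h>

    struct tid_info {
        unsigned int stid_base, nstids;    /* ordinary server TIDs */
        unsigned int sftid_base, nsftids;  /* server filter TIDs   */
    };

    /* Map a hardware stid to its slot in the shared stid_tab[] / stid_bmap. */
    static unsigned int stid_index(const struct tid_info *t, unsigned int stid)
    {
        if (t->nsftids && stid >= t->sftid_base)
            return t->nstids + (stid - t->sftid_base);
        return stid - t->stid_base;
    }

    int main(void)
    {
        /* Illustrative layout: stids 1024..1087, sftids 1088..1091. */
        struct tid_info t = { .stid_base = 1024, .nstids = 64,
                              .sftid_base = 1088, .nsftids = 4 };

        printf("stid 1024 -> index %u\n", stid_index(&t, 1024));   /* 0  */
        printf("stid 1089 -> index %u\n", stid_index(&t, 1089));   /* 65 */
        return 0;
    }
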
@@ -3200,6 +3152,7 @@ static int tid_init(struct tid_info *t) | |||
3200 | size_t size; | 3152 | size_t size; |
3201 | unsigned int stid_bmap_size; | 3153 | unsigned int stid_bmap_size; |
3202 | unsigned int natids = t->natids; | 3154 | unsigned int natids = t->natids; |
3155 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
3203 | 3156 | ||
3204 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); | 3157 | stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); |
3205 | size = t->ntids * sizeof(*t->tid_tab) + | 3158 | size = t->ntids * sizeof(*t->tid_tab) + |
@@ -3233,6 +3186,11 @@ static int tid_init(struct tid_info *t) | |||
3233 | t->afree = t->atid_tab; | 3186 | t->afree = t->atid_tab; |
3234 | } | 3187 | } |
3235 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); | 3188 | bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); |
3189 | /* Reserve stid 0 for T4/T5 adapters */ | ||
3190 | if (!t->stid_base && | ||
3191 | (is_t4(adap->params.chip) || is_t5(adap->params.chip))) | ||
3192 | __set_bit(0, t->stid_bmap); | ||
3193 | |||
3236 | return 0; | 3194 | return 0; |
3237 | } | 3195 | } |
3238 | 3196 | ||
@@ -3419,7 +3377,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) | |||
3419 | 3377 | ||
3420 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | 3378 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); |
3421 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); | 3379 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); |
3422 | if (is_t4(adap->chip)) { | 3380 | if (is_t4(adap->params.chip)) { |
3423 | lp_count = G_LP_COUNT(v1); | 3381 | lp_count = G_LP_COUNT(v1); |
3424 | hp_count = G_HP_COUNT(v1); | 3382 | hp_count = G_HP_COUNT(v1); |
3425 | } else { | 3383 | } else { |
@@ -3588,7 +3546,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs) | |||
3588 | do { | 3546 | do { |
3589 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | 3547 | v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); |
3590 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); | 3548 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); |
3591 | if (is_t4(adap->chip)) { | 3549 | if (is_t4(adap->params.chip)) { |
3592 | lp_count = G_LP_COUNT(v1); | 3550 | lp_count = G_LP_COUNT(v1); |
3593 | hp_count = G_HP_COUNT(v1); | 3551 | hp_count = G_HP_COUNT(v1); |
3594 | } else { | 3552 | } else { |
@@ -3708,7 +3666,7 @@ static void process_db_drop(struct work_struct *work) | |||
3708 | 3666 | ||
3709 | adap = container_of(work, struct adapter, db_drop_task); | 3667 | adap = container_of(work, struct adapter, db_drop_task); |
3710 | 3668 | ||
3711 | if (is_t4(adap->chip)) { | 3669 | if (is_t4(adap->params.chip)) { |
3712 | disable_dbs(adap); | 3670 | disable_dbs(adap); |
3713 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); | 3671 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); |
3714 | drain_db_fifo(adap, 1); | 3672 | drain_db_fifo(adap, 1); |
@@ -3753,7 +3711,7 @@ static void process_db_drop(struct work_struct *work) | |||
3753 | 3711 | ||
3754 | void t4_db_full(struct adapter *adap) | 3712 | void t4_db_full(struct adapter *adap) |
3755 | { | 3713 | { |
3756 | if (is_t4(adap->chip)) { | 3714 | if (is_t4(adap->params.chip)) { |
3757 | t4_set_reg_field(adap, SGE_INT_ENABLE3, | 3715 | t4_set_reg_field(adap, SGE_INT_ENABLE3, |
3758 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); | 3716 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); |
3759 | queue_work(workq, &adap->db_full_task); | 3717 | queue_work(workq, &adap->db_full_task); |
@@ -3762,7 +3720,7 @@ void t4_db_full(struct adapter *adap) | |||
3762 | 3720 | ||
3763 | void t4_db_dropped(struct adapter *adap) | 3721 | void t4_db_dropped(struct adapter *adap) |
3764 | { | 3722 | { |
3765 | if (is_t4(adap->chip)) | 3723 | if (is_t4(adap->params.chip)) |
3766 | queue_work(workq, &adap->db_drop_task); | 3724 | queue_work(workq, &adap->db_drop_task); |
3767 | } | 3725 | } |
3768 | 3726 | ||
@@ -3789,7 +3747,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
3789 | lli.nchan = adap->params.nports; | 3747 | lli.nchan = adap->params.nports; |
3790 | lli.nports = adap->params.nports; | 3748 | lli.nports = adap->params.nports; |
3791 | lli.wr_cred = adap->params.ofldq_wr_cred; | 3749 | lli.wr_cred = adap->params.ofldq_wr_cred; |
3792 | lli.adapter_type = adap->params.rev; | 3750 | lli.adapter_type = adap->params.chip; |
3793 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); | 3751 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); |
3794 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( | 3752 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( |
3795 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> | 3753 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> |
@@ -3797,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
3797 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( | 3755 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( |
3798 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> | 3756 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> |
3799 | (adap->fn * 4)); | 3757 | (adap->fn * 4)); |
3800 | lli.filt_mode = adap->filter_mode; | 3758 | lli.filt_mode = adap->params.tp.vlan_pri_map; |
3801 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ | 3759 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ |
3802 | for (i = 0; i < NCHAN; i++) | 3760 | for (i = 0; i < NCHAN; i++) |
3803 | lli.tx_modq[i] = i; | 3761 | lli.tx_modq[i] = i; |
@@ -4245,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
4245 | adap = netdev2adap(dev); | 4203 | adap = netdev2adap(dev); |
4246 | 4204 | ||
4247 | /* Adjust stid to correct filter index */ | 4205 | /* Adjust stid to correct filter index */ |
4248 | stid -= adap->tids.nstids; | 4206 | stid -= adap->tids.sftid_base; |
4249 | stid += adap->tids.nftids; | 4207 | stid += adap->tids.nftids; |
4250 | 4208 | ||
4251 | /* Check to make sure the filter requested is writable ... | 4209 | /* Check to make sure the filter requested is writable ... |
@@ -4271,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, | |||
4271 | f->fs.val.lip[i] = val[i]; | 4229 | f->fs.val.lip[i] = val[i]; |
4272 | f->fs.mask.lip[i] = ~0; | 4230 | f->fs.mask.lip[i] = ~0; |
4273 | } | 4231 | } |
4274 | if (adap->filter_mode & F_PORT) { | 4232 | if (adap->params.tp.vlan_pri_map & F_PORT) { |
4275 | f->fs.val.iport = port; | 4233 | f->fs.val.iport = port; |
4276 | f->fs.mask.iport = mask; | 4234 | f->fs.mask.iport = mask; |
4277 | } | 4235 | } |
4278 | } | 4236 | } |
4279 | 4237 | ||
4238 | if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { | ||
4239 | f->fs.val.proto = IPPROTO_TCP; | ||
4240 | f->fs.mask.proto = ~0; | ||
4241 | } | ||
4242 | |||
4280 | f->fs.dirsteer = 1; | 4243 | f->fs.dirsteer = 1; |
4281 | f->fs.iq = queue; | 4244 | f->fs.iq = queue; |
4282 | /* Mark filter as locked */ | 4245 | /* Mark filter as locked */ |
@@ -4303,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, | |||
4303 | adap = netdev2adap(dev); | 4266 | adap = netdev2adap(dev); |
4304 | 4267 | ||
4305 | /* Adjust stid to correct filter index */ | 4268 | /* Adjust stid to correct filter index */ |
4306 | stid -= adap->tids.nstids; | 4269 | stid -= adap->tids.sftid_base; |
4307 | stid += adap->tids.nftids; | 4270 | stid += adap->tids.nftids; |
4308 | 4271 | ||
4309 | f = &adap->tids.ftid_tab[stid]; | 4272 | f = &adap->tids.ftid_tab[stid]; |
@@ -4483,7 +4446,7 @@ static void setup_memwin(struct adapter *adap) | |||
4483 | u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; | 4446 | u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; |
4484 | 4447 | ||
4485 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ | 4448 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ |
4486 | if (is_t4(adap->chip)) { | 4449 | if (is_t4(adap->params.chip)) { |
4487 | mem_win0_base = bar0 + MEMWIN0_BASE; | 4450 | mem_win0_base = bar0 + MEMWIN0_BASE; |
4488 | mem_win1_base = bar0 + MEMWIN1_BASE; | 4451 | mem_win1_base = bar0 + MEMWIN1_BASE; |
4489 | mem_win2_base = bar0 + MEMWIN2_BASE; | 4452 | mem_win2_base = bar0 + MEMWIN2_BASE; |
@@ -4668,8 +4631,10 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4668 | const struct firmware *cf; | 4631 | const struct firmware *cf; |
4669 | unsigned long mtype = 0, maddr = 0; | 4632 | unsigned long mtype = 0, maddr = 0; |
4670 | u32 finiver, finicsum, cfcsum; | 4633 | u32 finiver, finicsum, cfcsum; |
4671 | int ret, using_flash; | 4634 | int ret; |
4635 | int config_issued = 0; | ||
4672 | char *fw_config_file, fw_config_file_path[256]; | 4636 | char *fw_config_file, fw_config_file_path[256]; |
4637 | char *config_name = NULL; | ||
4673 | 4638 | ||
4674 | /* | 4639 | /* |
4675 | * Reset device if necessary. | 4640 | * Reset device if necessary. |
@@ -4686,9 +4651,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4686 | * then use that. Otherwise, use the configuration file stored | 4651 | * then use that. Otherwise, use the configuration file stored |
4687 | * in the adapter flash ... | 4652 | * in the adapter flash ... |
4688 | */ | 4653 | */ |
4689 | switch (CHELSIO_CHIP_VERSION(adapter->chip)) { | 4654 | switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { |
4690 | case CHELSIO_T4: | 4655 | case CHELSIO_T4: |
4691 | fw_config_file = FW_CFNAME; | 4656 | fw_config_file = FW4_CFNAME; |
4692 | break; | 4657 | break; |
4693 | case CHELSIO_T5: | 4658 | case CHELSIO_T5: |
4694 | fw_config_file = FW5_CFNAME; | 4659 | fw_config_file = FW5_CFNAME; |
@@ -4702,13 +4667,16 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4702 | 4667 | ||
4703 | ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); | 4668 | ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); |
4704 | if (ret < 0) { | 4669 | if (ret < 0) { |
4705 | using_flash = 1; | 4670 | config_name = "On FLASH"; |
4706 | mtype = FW_MEMTYPE_CF_FLASH; | 4671 | mtype = FW_MEMTYPE_CF_FLASH; |
4707 | maddr = t4_flash_cfg_addr(adapter); | 4672 | maddr = t4_flash_cfg_addr(adapter); |
4708 | } else { | 4673 | } else { |
4709 | u32 params[7], val[7]; | 4674 | u32 params[7], val[7]; |
4710 | 4675 | ||
4711 | using_flash = 0; | 4676 | sprintf(fw_config_file_path, |
4677 | "/lib/firmware/%s", fw_config_file); | ||
4678 | config_name = fw_config_file_path; | ||
4679 | |||
4712 | if (cf->size >= FLASH_CFG_MAX_SIZE) | 4680 | if (cf->size >= FLASH_CFG_MAX_SIZE) |
4713 | ret = -ENOMEM; | 4681 | ret = -ENOMEM; |
4714 | else { | 4682 | else { |
@@ -4776,6 +4744,26 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4776 | FW_LEN16(caps_cmd)); | 4744 | FW_LEN16(caps_cmd)); |
4777 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), | 4745 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), |
4778 | &caps_cmd); | 4746 | &caps_cmd); |
4747 | |||
4748 | /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware | ||
4749 | * Configuration File in FLASH), our last gasp effort is to use the | ||
4750 | * Firmware Configuration File which is embedded in the firmware. A | ||
4751 | * very few early versions of the firmware didn't have one embedded | ||
4752 | * but we can ignore those. | ||
4753 | */ | ||
4754 | if (ret == -ENOENT) { | ||
4755 | memset(&caps_cmd, 0, sizeof(caps_cmd)); | ||
4756 | caps_cmd.op_to_write = | ||
4757 | htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
4758 | FW_CMD_REQUEST | | ||
4759 | FW_CMD_READ); | ||
4760 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); | ||
4761 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, | ||
4762 | sizeof(caps_cmd), &caps_cmd); | ||
4763 | config_name = "Firmware Default"; | ||
4764 | } | ||
4765 | |||
4766 | config_issued = 1; | ||
4779 | if (ret < 0) | 4767 | if (ret < 0) |
4780 | goto bye; | 4768 | goto bye; |
4781 | 4769 | ||
@@ -4816,7 +4804,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4816 | if (ret < 0) | 4804 | if (ret < 0) |
4817 | goto bye; | 4805 | goto bye; |
4818 | 4806 | ||
4819 | sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); | ||
4820 | /* | 4807 | /* |
4821 | * Return successfully and note that we're operating with parameters | 4808 | * Return successfully and note that we're operating with parameters |
4822 | * not supplied by the driver, rather than from hard-wired | 4809 | * not supplied by the driver, rather than from hard-wired |
@@ -4824,11 +4811,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4824 | */ | 4811 | */ |
4825 | adapter->flags |= USING_SOFT_PARAMS; | 4812 | adapter->flags |= USING_SOFT_PARAMS; |
4826 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ | 4813 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ |
4827 | "Configuration File %s, version %#x, computed checksum %#x\n", | 4814 | "Configuration File \"%s\", version %#x, computed checksum %#x\n", |
4828 | (using_flash | 4815 | config_name, finiver, cfcsum); |
4829 | ? "in device FLASH" | ||
4830 | : fw_config_file_path), | ||
4831 | finiver, cfcsum); | ||
4832 | return 0; | 4816 | return 0; |
4833 | 4817 | ||
4834 | /* | 4818 | /* |
@@ -4837,9 +4821,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) | |||
4837 | * want to issue a warning since this is fairly common.) | 4821 | * want to issue a warning since this is fairly common.) |
4838 | */ | 4822 | */ |
4839 | bye: | 4823 | bye: |
4840 | if (ret != -ENOENT) | 4824 | if (config_issued && ret != -ENOENT) |
4841 | dev_warn(adapter->pdev_dev, "Configuration file error %d\n", | 4825 | dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", |
4842 | -ret); | 4826 | config_name, -ret); |
4843 | return ret; | 4827 | return ret; |
4844 | } | 4828 | } |
4845 | 4829 | ||
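
adap_init0_config() above now tries the Firmware Configuration File from three places in order — /lib/firmware/<t4|t5>-config.txt, the copy in adapter FLASH, and finally the file embedded in the firmware image when the FLASH copy yields -ENOENT — and reports which one it used by name. A compact model of that fallback ladder (the probe functions are trivial stubs standing in for request_firmware() and the CAPS_CONFIG mailbox calls):

    #include <stdio.h>

    /* Stub probes: return 0 on success, a negative errno-style value otherwise. */
    static int try_lib_firmware(void)     { return -1; }   /* e.g. file missing       */
    static int try_flash_copy(void)       { return -2; }   /* e.g. firmware says ENOENT */
    static int try_firmware_default(void) { return 0;  }   /* embedded default        */

    static const char *pick_config(void)
    {
        if (try_lib_firmware() == 0)
            return "/lib/firmware/cxgb4/t4-config.txt";
        if (try_flash_copy() == 0)
            return "On FLASH";
        if (try_firmware_default() == 0)
            return "Firmware Default";
        return NULL;
    }

    int main(void)
    {
        const char *name = pick_config();

        printf("configured using: %s\n", name ? name : "none (fatal)");
        return 0;
    }
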
@@ -5086,6 +5070,47 @@ bye: | |||
5086 | return ret; | 5070 | return ret; |
5087 | } | 5071 | } |
5088 | 5072 | ||
5073 | static struct fw_info fw_info_array[] = { | ||
5074 | { | ||
5075 | .chip = CHELSIO_T4, | ||
5076 | .fs_name = FW4_CFNAME, | ||
5077 | .fw_mod_name = FW4_FNAME, | ||
5078 | .fw_hdr = { | ||
5079 | .chip = FW_HDR_CHIP_T4, | ||
5080 | .fw_ver = __cpu_to_be32(FW_VERSION(T4)), | ||
5081 | .intfver_nic = FW_INTFVER(T4, NIC), | ||
5082 | .intfver_vnic = FW_INTFVER(T4, VNIC), | ||
5083 | .intfver_ri = FW_INTFVER(T4, RI), | ||
5084 | .intfver_iscsi = FW_INTFVER(T4, ISCSI), | ||
5085 | .intfver_fcoe = FW_INTFVER(T4, FCOE), | ||
5086 | }, | ||
5087 | }, { | ||
5088 | .chip = CHELSIO_T5, | ||
5089 | .fs_name = FW5_CFNAME, | ||
5090 | .fw_mod_name = FW5_FNAME, | ||
5091 | .fw_hdr = { | ||
5092 | .chip = FW_HDR_CHIP_T5, | ||
5093 | .fw_ver = __cpu_to_be32(FW_VERSION(T5)), | ||
5094 | .intfver_nic = FW_INTFVER(T5, NIC), | ||
5095 | .intfver_vnic = FW_INTFVER(T5, VNIC), | ||
5096 | .intfver_ri = FW_INTFVER(T5, RI), | ||
5097 | .intfver_iscsi = FW_INTFVER(T5, ISCSI), | ||
5098 | .intfver_fcoe = FW_INTFVER(T5, FCOE), | ||
5099 | }, | ||
5100 | } | ||
5101 | }; | ||
5102 | |||
5103 | static struct fw_info *find_fw_info(int chip) | ||
5104 | { | ||
5105 | int i; | ||
5106 | |||
5107 | for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { | ||
5108 | if (fw_info_array[i].chip == chip) | ||
5109 | return &fw_info_array[i]; | ||
5110 | } | ||
5111 | return NULL; | ||
5112 | } | ||
5113 | |||
5089 | /* | 5114 | /* |
5090 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | 5115 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. |
5091 | */ | 5116 | */ |
@@ -5096,7 +5121,7 @@ static int adap_init0(struct adapter *adap) | |||
5096 | enum dev_state state; | 5121 | enum dev_state state; |
5097 | u32 params[7], val[7]; | 5122 | u32 params[7], val[7]; |
5098 | struct fw_caps_config_cmd caps_cmd; | 5123 | struct fw_caps_config_cmd caps_cmd; |
5099 | int reset = 1, j; | 5124 | int reset = 1; |
5100 | 5125 | ||
5101 | /* | 5126 | /* |
5102 | * Contact FW, advertising Master capability (and potentially forcing | 5127 | * Contact FW, advertising Master capability (and potentially forcing |
@@ -5123,44 +5148,54 @@ static int adap_init0(struct adapter *adap) | |||
5123 | * later reporting and B. to warn if the currently loaded firmware | 5148 | * later reporting and B. to warn if the currently loaded firmware |
5124 | * is excessively mismatched relative to the driver.) | 5149 | * is excessively mismatched relative to the driver.) |
5125 | */ | 5150 | */ |
5126 | ret = t4_check_fw_version(adap); | 5151 | t4_get_fw_version(adap, &adap->params.fw_vers); |
5127 | 5152 | t4_get_tp_version(adap, &adap->params.tp_vers); | |
5128 | /* The error code -EFAULT is returned by t4_check_fw_version() if | ||
5129 | * firmware on adapter < supported firmware. If firmware on adapter | ||
5130 | * is too old (not supported by driver) and we're the MASTER_PF set | ||
5131 | * adapter state to DEV_STATE_UNINIT to force firmware upgrade | ||
5132 | * and reinitialization. | ||
5133 | */ | ||
5134 | if ((adap->flags & MASTER_PF) && ret == -EFAULT) | ||
5135 | state = DEV_STATE_UNINIT; | ||
5136 | if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { | 5153 | if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { |
5137 | if (ret == -EINVAL || ret == -EFAULT || ret > 0) { | 5154 | struct fw_info *fw_info; |
5138 | if (upgrade_fw(adap) >= 0) { | 5155 | struct fw_hdr *card_fw; |
5139 | /* | 5156 | const struct firmware *fw; |
5140 | * Note that the chip was reset as part of the | 5157 | const u8 *fw_data = NULL; |
5141 | * firmware upgrade so we don't reset it again | 5158 | unsigned int fw_size = 0; |
5142 | * below and grab the new firmware version. | 5159 | |
5143 | */ | 5160 | /* This is the firmware whose headers the driver was compiled |
5144 | reset = 0; | 5161 | * against |
5145 | ret = t4_check_fw_version(adap); | 5162 | */ |
5146 | } else | 5163 | fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); |
5147 | if (ret == -EFAULT) { | 5164 | if (fw_info == NULL) { |
5148 | /* | 5165 | dev_err(adap->pdev_dev, |
5149 | * Firmware is old but still might | 5166 | "unable to get firmware info for chip %d.\n", |
5150 | * work if we force reinitialization | 5167 | CHELSIO_CHIP_VERSION(adap->params.chip)); |
5151 | * of the adapter. Ignoring FW upgrade | 5168 | return -EINVAL; |
5152 | * failure. | ||
5153 | */ | ||
5154 | dev_warn(adap->pdev_dev, | ||
5155 | "Ignoring firmware upgrade " | ||
5156 | "failure, and forcing driver " | ||
5157 | "to reinitialize the " | ||
5158 | "adapter.\n"); | ||
5159 | ret = 0; | ||
5160 | } | ||
5161 | } | 5169 | } |
5170 | |||
5171 | /* allocate memory to read the header of the firmware on the | ||
5172 | * card | ||
5173 | */ | ||
5174 | card_fw = t4_alloc_mem(sizeof(*card_fw)); | ||
5175 | |||
5176 | /* Get FW from /lib/firmware/ */ | ||
5177 | ret = request_firmware(&fw, fw_info->fw_mod_name, | ||
5178 | adap->pdev_dev); | ||
5179 | if (ret < 0) { | ||
5180 | dev_err(adap->pdev_dev, | ||
5181 | "unable to load firmware image %s, error %d\n", | ||
5182 | fw_info->fw_mod_name, ret); | ||
5183 | } else { | ||
5184 | fw_data = fw->data; | ||
5185 | fw_size = fw->size; | ||
5186 | } | ||
5187 | |||
5188 | /* upgrade FW logic */ | ||
5189 | ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, | ||
5190 | state, &reset); | ||
5191 | |||
5192 | /* Cleaning up */ | ||
5193 | if (fw != NULL) | ||
5194 | release_firmware(fw); | ||
5195 | t4_free_mem(card_fw); | ||
5196 | |||
5162 | if (ret < 0) | 5197 | if (ret < 0) |
5163 | return ret; | 5198 | goto bye; |
5164 | } | 5199 | } |
5165 | 5200 | ||
5166 | /* | 5201 | /* |
@@ -5245,7 +5280,7 @@ static int adap_init0(struct adapter *adap) | |||
5245 | if (ret == -ENOENT) { | 5280 | if (ret == -ENOENT) { |
5246 | dev_info(adap->pdev_dev, | 5281 | dev_info(adap->pdev_dev, |
5247 | "No Configuration File present " | 5282 | "No Configuration File present " |
5248 | "on adapter. Using hard-wired " | 5283 | "on adapter. Using hard-wired " |
5249 | "configuration parameters.\n"); | 5284 | "configuration parameters.\n"); |
5250 | ret = adap_init0_no_config(adap, reset); | 5285 | ret = adap_init0_no_config(adap, reset); |
5251 | } | 5286 | } |
@@ -5428,21 +5463,11 @@ static int adap_init0(struct adapter *adap) | |||
5428 | /* | 5463 | /* |
5429 | * These are finalized by FW initialization, load their values now. | 5464 | * These are finalized by FW initialization, load their values now. |
5430 | */ | 5465 | */ |
5431 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
5432 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
5433 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
5434 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); | 5466 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); |
5435 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | 5467 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, |
5436 | adap->params.b_wnd); | 5468 | adap->params.b_wnd); |
5437 | 5469 | ||
5438 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | 5470 | t4_init_tp_params(adap); |
5439 | for (j = 0; j < NCHAN; j++) | ||
5440 | adap->params.tp.tx_modq[j] = j; | ||
5441 | |||
5442 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
5443 | &adap->filter_mode, 1, | ||
5444 | TP_VLAN_PRI_MAP); | ||
5445 | |||
5446 | adap->flags |= FW_OK; | 5471 | adap->flags |= FW_OK; |
5447 | return 0; | 5472 | return 0; |
5448 | 5473 | ||
@@ -5787,7 +5812,7 @@ static void print_port_info(const struct net_device *dev) | |||
5787 | 5812 | ||
5788 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", | 5813 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", |
5789 | adap->params.vpd.id, | 5814 | adap->params.vpd.id, |
5790 | CHELSIO_CHIP_RELEASE(adap->params.rev), buf, | 5815 | CHELSIO_CHIP_RELEASE(adap->params.chip), buf, |
5791 | is_offload(adap) ? "R" : "", adap->params.pci.width, spd, | 5816 | is_offload(adap) ? "R" : "", adap->params.pci.width, spd, |
5792 | (adap->flags & USING_MSIX) ? " MSI-X" : | 5817 | (adap->flags & USING_MSIX) ? " MSI-X" : |
5793 | (adap->flags & USING_MSI) ? " MSI" : ""); | 5818 | (adap->flags & USING_MSI) ? " MSI" : ""); |
@@ -5910,7 +5935,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5910 | if (err) | 5935 | if (err) |
5911 | goto out_unmap_bar0; | 5936 | goto out_unmap_bar0; |
5912 | 5937 | ||
5913 | if (!is_t4(adapter->chip)) { | 5938 | if (!is_t4(adapter->params.chip)) { |
5914 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; | 5939 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; |
5915 | qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, | 5940 | qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, |
5916 | SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); | 5941 | SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); |
@@ -6064,7 +6089,7 @@ sriov: | |||
6064 | out_free_dev: | 6089 | out_free_dev: |
6065 | free_some_resources(adapter); | 6090 | free_some_resources(adapter); |
6066 | out_unmap_bar: | 6091 | out_unmap_bar: |
6067 | if (!is_t4(adapter->chip)) | 6092 | if (!is_t4(adapter->params.chip)) |
6068 | iounmap(adapter->bar2); | 6093 | iounmap(adapter->bar2); |
6069 | out_unmap_bar0: | 6094 | out_unmap_bar0: |
6070 | iounmap(adapter->regs); | 6095 | iounmap(adapter->regs); |
@@ -6116,7 +6141,7 @@ static void remove_one(struct pci_dev *pdev) | |||
6116 | 6141 | ||
6117 | free_some_resources(adapter); | 6142 | free_some_resources(adapter); |
6118 | iounmap(adapter->regs); | 6143 | iounmap(adapter->regs); |
6119 | if (!is_t4(adapter->chip)) | 6144 | if (!is_t4(adapter->params.chip)) |
6120 | iounmap(adapter->bar2); | 6145 | iounmap(adapter->bar2); |
6121 | kfree(adapter); | 6146 | kfree(adapter); |
6122 | pci_disable_pcie_error_reporting(pdev); | 6147 | pci_disable_pcie_error_reporting(pdev); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6f21f2451c30..4dd0a82533e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) | |||
131 | 131 | ||
132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) | 132 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) |
133 | { | 133 | { |
134 | stid -= t->stid_base; | 134 | /* Is it a server filter TID? */ |
135 | if (t->nsftids && (stid >= t->sftid_base)) { | ||
136 | stid -= t->sftid_base; | ||
137 | stid += t->nstids; | ||
138 | } else { | ||
139 | stid -= t->stid_base; | ||
140 | } | ||
141 | |||
135 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; | 142 | return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; |
136 | } | 143 | } |
137 | 144 | ||
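As a side note on the lookup_stid() change above: server filter TIDs live in their own range (sftid_base) but are folded into the tail of stid_tab[] behind the regular server TIDs. The following standalone userspace sketch is illustrative only (hypothetical sizes and bases, not part of the patch) and mirrors that index folding:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical TID ranges; real values come from the adapter. */
		unsigned int stid_base = 128, nstids = 64;	/* server TIDs        */
		unsigned int sftid_base = 1024, nsftids = 8;	/* server filter TIDs */
		unsigned int stid, idx;

		/* A regular server TID indexes stid_tab[0 .. nstids-1]. */
		stid = stid_base + 5;
		idx = stid - stid_base;
		printf("stid %u -> stid_tab[%u]\n", stid, idx);		/* 133 -> 5 */

		/* A server filter TID lands in stid_tab[nstids .. nstids+nsftids-1],
		 * matching the new branch in lookup_stid().
		 */
		stid = sftid_base + 2;
		idx = (stid - sftid_base) + nstids;
		printf("sftid %u -> stid_tab[%u]\n", stid, idx);	/* 1026 -> 66 */
		(void)nsftids;
		return 0;
	}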
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 29878098101e..cb05be905def 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "l2t.h" | 45 | #include "l2t.h" |
46 | #include "t4_msg.h" | 46 | #include "t4_msg.h" |
47 | #include "t4fw_api.h" | 47 | #include "t4fw_api.h" |
48 | #include "t4_regs.h" | ||
48 | 49 | ||
49 | #define VLAN_NONE 0xfff | 50 | #define VLAN_NONE 0xfff |
50 | 51 | ||
@@ -411,6 +412,40 @@ done: | |||
411 | } | 412 | } |
412 | EXPORT_SYMBOL(cxgb4_l2t_get); | 413 | EXPORT_SYMBOL(cxgb4_l2t_get); |
413 | 414 | ||
415 | u64 cxgb4_select_ntuple(struct net_device *dev, | ||
416 | const struct l2t_entry *l2t) | ||
417 | { | ||
418 | struct adapter *adap = netdev2adap(dev); | ||
419 | struct tp_params *tp = &adap->params.tp; | ||
420 | u64 ntuple = 0; | ||
421 | |||
422 | /* Initialize each of the fields we care about that are present | ||
423 | * in the Compressed Filter Tuple. | ||
424 | */ | ||
425 | if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) | ||
426 | ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; | ||
427 | |||
428 | if (tp->port_shift >= 0) | ||
429 | ntuple |= (u64)l2t->lport << tp->port_shift; | ||
430 | |||
431 | if (tp->protocol_shift >= 0) | ||
432 | ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; | ||
433 | |||
434 | if (tp->vnic_shift >= 0) { | ||
435 | u32 viid = cxgb4_port_viid(dev); | ||
436 | u32 vf = FW_VIID_VIN_GET(viid); | ||
437 | u32 pf = FW_VIID_PFN_GET(viid); | ||
438 | u32 vld = FW_VIID_VIVLD_GET(viid); | ||
439 | |||
440 | ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | | ||
441 | V_FT_VNID_ID_PF(pf) | | ||
442 | V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; | ||
443 | } | ||
444 | |||
445 | return ntuple; | ||
446 | } | ||
447 | EXPORT_SYMBOL(cxgb4_select_ntuple); | ||
448 | |||
414 | /* | 449 | /* |
415 | * Called when address resolution fails for an L2T entry to handle packets | 450 | * Called when address resolution fails for an L2T entry to handle packets |
416 | * on the arpq head. If a packet specifies a failure handler it is invoked, | 451 | * on the arpq head. If a packet specifies a failure handler it is invoked, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 108c0f1fce1c..85eb5c71358d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h | |||
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | |||
98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | 98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, |
99 | const struct net_device *physdev, | 99 | const struct net_device *physdev, |
100 | unsigned int priority); | 100 | unsigned int priority); |
101 | 101 | u64 cxgb4_select_ntuple(struct net_device *dev, | |
102 | const struct l2t_entry *l2t); | ||
102 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); | 103 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); |
103 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); | 104 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); |
104 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | 105 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ac311f5f3eb9..cc3511a5cd0c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) | |||
509 | u32 val; | 509 | u32 val; |
510 | if (q->pend_cred >= 8) { | 510 | if (q->pend_cred >= 8) { |
511 | val = PIDX(q->pend_cred / 8); | 511 | val = PIDX(q->pend_cred / 8); |
512 | if (!is_t4(adap->chip)) | 512 | if (!is_t4(adap->params.chip)) |
513 | val |= DBTYPE(1); | 513 | val |= DBTYPE(1); |
514 | wmb(); | 514 | wmb(); |
515 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | | 515 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | |||
847 | wmb(); /* write descriptors before telling HW */ | 847 | wmb(); /* write descriptors before telling HW */ |
848 | spin_lock(&q->db_lock); | 848 | spin_lock(&q->db_lock); |
849 | if (!q->db_disabled) { | 849 | if (!q->db_disabled) { |
850 | if (is_t4(adap->chip)) { | 850 | if (is_t4(adap->params.chip)) { |
851 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | 851 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), |
852 | QID(q->cntxt_id) | PIDX(n)); | 852 | QID(q->cntxt_id) | PIDX(n)); |
853 | } else { | 853 | } else { |
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap, | |||
1596 | return 0; | 1596 | return 0; |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | if (is_t4(adap->chip)) | 1599 | if (is_t4(adap->params.chip)) |
1600 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); | 1600 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); |
1601 | else | 1601 | else |
1602 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); | 1602 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); |
@@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | |||
1661 | const struct cpl_rx_pkt *pkt; | 1661 | const struct cpl_rx_pkt *pkt; |
1662 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | 1662 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); |
1663 | struct sge *s = &q->adap->sge; | 1663 | struct sge *s = &q->adap->sge; |
1664 | int cpl_trace_pkt = is_t4(q->adap->chip) ? | 1664 | int cpl_trace_pkt = is_t4(q->adap->params.chip) ? |
1665 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; | 1665 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; |
1666 | 1666 | ||
1667 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) | 1667 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) |
@@ -2182,7 +2182,7 @@ err: | |||
2182 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | 2182 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) |
2183 | { | 2183 | { |
2184 | q->cntxt_id = id; | 2184 | q->cntxt_id = id; |
2185 | if (!is_t4(adap->chip)) { | 2185 | if (!is_t4(adap->params.chip)) { |
2186 | unsigned int s_qpp; | 2186 | unsigned int s_qpp; |
2187 | unsigned short udb_density; | 2187 | unsigned short udb_density; |
2188 | unsigned long qpshift; | 2188 | unsigned long qpshift; |
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap) | |||
2581 | #undef READ_FL_BUF | 2581 | #undef READ_FL_BUF |
2582 | 2582 | ||
2583 | if (fl_small_pg != PAGE_SIZE || | 2583 | if (fl_small_pg != PAGE_SIZE || |
2584 | (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || | 2584 | (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || |
2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { | 2585 | (fl_large_pg & (fl_large_pg-1)) != 0))) { |
2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", | 2586 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", |
2587 | fl_small_pg, fl_large_pg); | 2587 | fl_small_pg, fl_large_pg); |
@@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap) | |||
2641 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | 2641 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows |
2642 | * and generate an interrupt when this occurs so we can recover. | 2642 | * and generate an interrupt when this occurs so we can recover. |
2643 | */ | 2643 | */ |
2644 | if (is_t4(adap->chip)) { | 2644 | if (is_t4(adap->params.chip)) { |
2645 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, | 2645 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, |
2646 | V_HP_INT_THRESH(M_HP_INT_THRESH) | | 2646 | V_HP_INT_THRESH(M_HP_INT_THRESH) | |
2647 | V_LP_INT_THRESH(M_LP_INT_THRESH), | 2647 | V_LP_INT_THRESH(M_LP_INT_THRESH), |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4cbb2f9850be..e1413eacdbd2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
296 | u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; | 296 | u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; |
297 | u32 mc_bist_status_rdata, mc_bist_data_pattern; | 297 | u32 mc_bist_status_rdata, mc_bist_data_pattern; |
298 | 298 | ||
299 | if (is_t4(adap->chip)) { | 299 | if (is_t4(adap->params.chip)) { |
300 | mc_bist_cmd = MC_BIST_CMD; | 300 | mc_bist_cmd = MC_BIST_CMD; |
301 | mc_bist_cmd_addr = MC_BIST_CMD_ADDR; | 301 | mc_bist_cmd_addr = MC_BIST_CMD_ADDR; |
302 | mc_bist_cmd_len = MC_BIST_CMD_LEN; | 302 | mc_bist_cmd_len = MC_BIST_CMD_LEN; |
@@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
349 | u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; | 349 | u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; |
350 | u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; | 350 | u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; |
351 | 351 | ||
352 | if (is_t4(adap->chip)) { | 352 | if (is_t4(adap->params.chip)) { |
353 | edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); | 353 | edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); |
354 | edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); | 354 | edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); |
355 | edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); | 355 | edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); |
@@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
402 | static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) | 402 | static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) |
403 | { | 403 | { |
404 | int i; | 404 | int i; |
405 | u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); | 405 | u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); |
406 | 406 | ||
407 | /* | 407 | /* |
408 | * Setup offset into PCIE memory window. Address must be a | 408 | * Setup offset into PCIE memory window. Address must be a |
@@ -863,104 +863,169 @@ unlock: | |||
863 | } | 863 | } |
864 | 864 | ||
865 | /** | 865 | /** |
866 | * get_fw_version - read the firmware version | 866 | * t4_get_fw_version - read the firmware version |
867 | * @adapter: the adapter | 867 | * @adapter: the adapter |
868 | * @vers: where to place the version | 868 | * @vers: where to place the version |
869 | * | 869 | * |
870 | * Reads the FW version from flash. | 870 | * Reads the FW version from flash. |
871 | */ | 871 | */ |
872 | static int get_fw_version(struct adapter *adapter, u32 *vers) | 872 | int t4_get_fw_version(struct adapter *adapter, u32 *vers) |
873 | { | 873 | { |
874 | return t4_read_flash(adapter, adapter->params.sf_fw_start + | 874 | return t4_read_flash(adapter, FLASH_FW_START + |
875 | offsetof(struct fw_hdr, fw_ver), 1, vers, 0); | 875 | offsetof(struct fw_hdr, fw_ver), 1, |
876 | vers, 0); | ||
876 | } | 877 | } |
877 | 878 | ||
878 | /** | 879 | /** |
879 | * get_tp_version - read the TP microcode version | 880 | * t4_get_tp_version - read the TP microcode version |
880 | * @adapter: the adapter | 881 | * @adapter: the adapter |
881 | * @vers: where to place the version | 882 | * @vers: where to place the version |
882 | * | 883 | * |
883 | * Reads the TP microcode version from flash. | 884 | * Reads the TP microcode version from flash. |
884 | */ | 885 | */ |
885 | static int get_tp_version(struct adapter *adapter, u32 *vers) | 886 | int t4_get_tp_version(struct adapter *adapter, u32 *vers) |
886 | { | 887 | { |
887 | return t4_read_flash(adapter, adapter->params.sf_fw_start + | 888 | return t4_read_flash(adapter, FLASH_FW_START + |
888 | offsetof(struct fw_hdr, tp_microcode_ver), | 889 | offsetof(struct fw_hdr, tp_microcode_ver), |
889 | 1, vers, 0); | 890 | 1, vers, 0); |
890 | } | 891 | } |
891 | 892 | ||
892 | /** | 893 | /* Is the given firmware API compatible with the one the driver was compiled |
893 | * t4_check_fw_version - check if the FW is compatible with this driver | 894 | * with? |
894 | * @adapter: the adapter | ||
895 | * | ||
896 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
897 | * if there's exact match, a negative error if the version could not be | ||
898 | * read or there's a major version mismatch, and a positive value if the | ||
899 | * expected major version is found but there's a minor version mismatch. | ||
900 | */ | 895 | */ |
901 | int t4_check_fw_version(struct adapter *adapter) | 896 | static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) |
902 | { | 897 | { |
903 | u32 api_vers[2]; | ||
904 | int ret, major, minor, micro; | ||
905 | int exp_major, exp_minor, exp_micro; | ||
906 | 898 | ||
907 | ret = get_fw_version(adapter, &adapter->params.fw_vers); | 899 | /* short circuit if it's the exact same firmware version */ |
908 | if (!ret) | 900 | if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) |
909 | ret = get_tp_version(adapter, &adapter->params.tp_vers); | 901 | return 1; |
910 | if (!ret) | ||
911 | ret = t4_read_flash(adapter, adapter->params.sf_fw_start + | ||
912 | offsetof(struct fw_hdr, intfver_nic), | ||
913 | 2, api_vers, 1); | ||
914 | if (ret) | ||
915 | return ret; | ||
916 | 902 | ||
917 | major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); | 903 | #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) |
918 | minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); | 904 | if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && |
919 | micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); | 905 | SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) |
906 | return 1; | ||
907 | #undef SAME_INTF | ||
920 | 908 | ||
921 | switch (CHELSIO_CHIP_VERSION(adapter->chip)) { | 909 | return 0; |
922 | case CHELSIO_T4: | 910 | } |
923 | exp_major = FW_VERSION_MAJOR; | ||
924 | exp_minor = FW_VERSION_MINOR; | ||
925 | exp_micro = FW_VERSION_MICRO; | ||
926 | break; | ||
927 | case CHELSIO_T5: | ||
928 | exp_major = FW_VERSION_MAJOR_T5; | ||
929 | exp_minor = FW_VERSION_MINOR_T5; | ||
930 | exp_micro = FW_VERSION_MICRO_T5; | ||
931 | break; | ||
932 | default: | ||
933 | dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", | ||
934 | adapter->chip); | ||
935 | return -EINVAL; | ||
936 | } | ||
937 | 911 | ||
938 | memcpy(adapter->params.api_vers, api_vers, | 912 | /* The firmware in the filesystem is usable, but should it be installed? |
939 | sizeof(adapter->params.api_vers)); | 913 | * This routine explains itself in detail if it indicates the filesystem |
914 | * firmware should be installed. | ||
915 | */ | ||
916 | static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, | ||
917 | int k, int c) | ||
918 | { | ||
919 | const char *reason; | ||
940 | 920 | ||
941 | if (major < exp_major || (major == exp_major && minor < exp_minor) || | 921 | if (!card_fw_usable) { |
942 | (major == exp_major && minor == exp_minor && micro < exp_micro)) { | 922 | reason = "incompatible or unusable"; |
943 | dev_err(adapter->pdev_dev, | 923 | goto install; |
944 | "Card has firmware version %u.%u.%u, minimum " | ||
945 | "supported firmware is %u.%u.%u.\n", major, minor, | ||
946 | micro, exp_major, exp_minor, exp_micro); | ||
947 | return -EFAULT; | ||
948 | } | 924 | } |
949 | 925 | ||
950 | if (major != exp_major) { /* major mismatch - fail */ | 926 | if (k > c) { |
951 | dev_err(adapter->pdev_dev, | 927 | reason = "older than the version supported with this driver"; |
952 | "card FW has major version %u, driver wants %u\n", | 928 | goto install; |
953 | major, exp_major); | ||
954 | return -EINVAL; | ||
955 | } | 929 | } |
956 | 930 | ||
957 | if (minor == exp_minor && micro == exp_micro) | 931 | return 0; |
958 | return 0; /* perfect match */ | 932 | |
933 | install: | ||
934 | dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " | ||
935 | "installing firmware %u.%u.%u.%u on card.\n", | ||
936 | FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), | ||
937 | FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, | ||
938 | FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), | ||
939 | FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); | ||
959 | 940 | ||
960 | /* Minor/micro version mismatch. Report it but often it's OK. */ | ||
961 | return 1; | 941 | return 1; |
962 | } | 942 | } |
963 | 943 | ||
944 | int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | ||
945 | const u8 *fw_data, unsigned int fw_size, | ||
946 | struct fw_hdr *card_fw, enum dev_state state, | ||
947 | int *reset) | ||
948 | { | ||
949 | int ret, card_fw_usable, fs_fw_usable; | ||
950 | const struct fw_hdr *fs_fw; | ||
951 | const struct fw_hdr *drv_fw; | ||
952 | |||
953 | drv_fw = &fw_info->fw_hdr; | ||
954 | |||
955 | /* Read the header of the firmware on the card */ | ||
956 | ret = -t4_read_flash(adap, FLASH_FW_START, | ||
957 | sizeof(*card_fw) / sizeof(uint32_t), | ||
958 | (uint32_t *)card_fw, 1); | ||
959 | if (ret == 0) { | ||
960 | card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); | ||
961 | } else { | ||
962 | dev_err(adap->pdev_dev, | ||
963 | "Unable to read card's firmware header: %d\n", ret); | ||
964 | card_fw_usable = 0; | ||
965 | } | ||
966 | |||
967 | if (fw_data != NULL) { | ||
968 | fs_fw = (const void *)fw_data; | ||
969 | fs_fw_usable = fw_compatible(drv_fw, fs_fw); | ||
970 | } else { | ||
971 | fs_fw = NULL; | ||
972 | fs_fw_usable = 0; | ||
973 | } | ||
974 | |||
975 | if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && | ||
976 | (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { | ||
977 | /* Common case: the firmware on the card is an exact match and | ||
978 | * the filesystem one is an exact match too, or the filesystem | ||
979 | * one is absent/incompatible. | ||
980 | */ | ||
981 | } else if (fs_fw_usable && state == DEV_STATE_UNINIT && | ||
982 | should_install_fs_fw(adap, card_fw_usable, | ||
983 | be32_to_cpu(fs_fw->fw_ver), | ||
984 | be32_to_cpu(card_fw->fw_ver))) { | ||
985 | ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, | ||
986 | fw_size, 0); | ||
987 | if (ret != 0) { | ||
988 | dev_err(adap->pdev_dev, | ||
989 | "failed to install firmware: %d\n", ret); | ||
990 | goto bye; | ||
991 | } | ||
992 | |||
993 | /* Installed successfully, update the cached header too. */ | ||
994 | memcpy(card_fw, fs_fw, sizeof(*card_fw)); | ||
995 | card_fw_usable = 1; | ||
996 | *reset = 0; /* already reset as part of load_fw */ | ||
997 | } | ||
998 | |||
999 | if (!card_fw_usable) { | ||
1000 | uint32_t d, c, k; | ||
1001 | |||
1002 | d = be32_to_cpu(drv_fw->fw_ver); | ||
1003 | c = be32_to_cpu(card_fw->fw_ver); | ||
1004 | k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; | ||
1005 | |||
1006 | dev_err(adap->pdev_dev, "Cannot find a usable firmware: " | ||
1007 | "chip state %d, " | ||
1008 | "driver compiled with %d.%d.%d.%d, " | ||
1009 | "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", | ||
1010 | state, | ||
1011 | FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), | ||
1012 | FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), | ||
1013 | FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), | ||
1014 | FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), | ||
1015 | FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), | ||
1016 | FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); | ||
1017 | ret = EINVAL; | ||
1018 | goto bye; | ||
1019 | } | ||
1020 | |||
1021 | /* We're using whatever's on the card and it's known to be good. */ | ||
1022 | adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); | ||
1023 | adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); | ||
1024 | |||
1025 | bye: | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
964 | /** | 1029 | /** |
965 | * t4_flash_erase_sectors - erase a range of flash sectors | 1030 | * t4_flash_erase_sectors - erase a range of flash sectors |
966 | * @adapter: the adapter | 1031 | * @adapter: the adapter |
@@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter) | |||
1368 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | 1433 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, |
1369 | pcie_port_intr_info) + | 1434 | pcie_port_intr_info) + |
1370 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | 1435 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, |
1371 | is_t4(adapter->chip) ? | 1436 | is_t4(adapter->params.chip) ? |
1372 | pcie_intr_info : t5_pcie_intr_info); | 1437 | pcie_intr_info : t5_pcie_intr_info); |
1373 | 1438 | ||
1374 | if (fat) | 1439 | if (fat) |
@@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port) | |||
1782 | { | 1847 | { |
1783 | u32 v, int_cause_reg; | 1848 | u32 v, int_cause_reg; |
1784 | 1849 | ||
1785 | if (is_t4(adap->chip)) | 1850 | if (is_t4(adap->params.chip)) |
1786 | int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); | 1851 | int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); |
1787 | else | 1852 | else |
1788 | int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); | 1853 | int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); |
@@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) | |||
2250 | 2315 | ||
2251 | #define GET_STAT(name) \ | 2316 | #define GET_STAT(name) \ |
2252 | t4_read_reg64(adap, \ | 2317 | t4_read_reg64(adap, \ |
2253 | (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ | 2318 | (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ |
2254 | T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) | 2319 | T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) |
2255 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | 2320 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) |
2256 | 2321 | ||
@@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, | |||
2332 | { | 2397 | { |
2333 | u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; | 2398 | u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; |
2334 | 2399 | ||
2335 | if (is_t4(adap->chip)) { | 2400 | if (is_t4(adap->params.chip)) { |
2336 | mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); | 2401 | mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); |
2337 | mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); | 2402 | mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); |
2338 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); | 2403 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); |
@@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |||
2374 | int i; | 2439 | int i; |
2375 | u32 port_cfg_reg; | 2440 | u32 port_cfg_reg; |
2376 | 2441 | ||
2377 | if (is_t4(adap->chip)) | 2442 | if (is_t4(adap->params.chip)) |
2378 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); | 2443 | port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); |
2379 | else | 2444 | else |
2380 | port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); | 2445 | port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); |
@@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |||
2387 | return -EINVAL; | 2452 | return -EINVAL; |
2388 | 2453 | ||
2389 | #define EPIO_REG(name) \ | 2454 | #define EPIO_REG(name) \ |
2390 | (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ | 2455 | (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ |
2391 | T5_PORT_REG(port, MAC_PORT_EPIO_##name)) | 2456 | T5_PORT_REG(port, MAC_PORT_EPIO_##name)) |
2392 | 2457 | ||
2393 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); | 2458 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); |
@@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, | |||
2474 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) | 2539 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) |
2475 | { | 2540 | { |
2476 | int i, off; | 2541 | int i, off; |
2477 | u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); | 2542 | u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); |
2478 | 2543 | ||
2479 | /* Align on a 2KB boundary. | 2544 | /* Align on a 2KB boundary. |
2480 | */ | 2545 | */ |
@@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, | |||
3306 | int i, ret; | 3371 | int i, ret; |
3307 | struct fw_vi_mac_cmd c; | 3372 | struct fw_vi_mac_cmd c; |
3308 | struct fw_vi_mac_exact *p; | 3373 | struct fw_vi_mac_exact *p; |
3309 | unsigned int max_naddr = is_t4(adap->chip) ? | 3374 | unsigned int max_naddr = is_t4(adap->params.chip) ? |
3310 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 3375 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
3311 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 3376 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
3312 | 3377 | ||
@@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | |||
3368 | int ret, mode; | 3433 | int ret, mode; |
3369 | struct fw_vi_mac_cmd c; | 3434 | struct fw_vi_mac_cmd c; |
3370 | struct fw_vi_mac_exact *p = c.u.exact; | 3435 | struct fw_vi_mac_exact *p = c.u.exact; |
3371 | unsigned int max_mac_addr = is_t4(adap->chip) ? | 3436 | unsigned int max_mac_addr = is_t4(adap->params.chip) ? |
3372 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 3437 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
3373 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 3438 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
3374 | 3439 | ||
@@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3699 | { | 3764 | { |
3700 | int ret, ver; | 3765 | int ret, ver; |
3701 | uint16_t device_id; | 3766 | uint16_t device_id; |
3767 | u32 pl_rev; | ||
3702 | 3768 | ||
3703 | ret = t4_wait_dev_ready(adapter); | 3769 | ret = t4_wait_dev_ready(adapter); |
3704 | if (ret < 0) | 3770 | if (ret < 0) |
3705 | return ret; | 3771 | return ret; |
3706 | 3772 | ||
3707 | get_pci_mode(adapter, &adapter->params.pci); | 3773 | get_pci_mode(adapter, &adapter->params.pci); |
3708 | adapter->params.rev = t4_read_reg(adapter, PL_REV); | 3774 | pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); |
3709 | 3775 | ||
3710 | ret = get_flash_params(adapter); | 3776 | ret = get_flash_params(adapter); |
3711 | if (ret < 0) { | 3777 | if (ret < 0) { |
@@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3717 | */ | 3783 | */ |
3718 | pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); | 3784 | pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); |
3719 | ver = device_id >> 12; | 3785 | ver = device_id >> 12; |
3786 | adapter->params.chip = 0; | ||
3720 | switch (ver) { | 3787 | switch (ver) { |
3721 | case CHELSIO_T4: | 3788 | case CHELSIO_T4: |
3722 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, | 3789 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); |
3723 | adapter->params.rev); | ||
3724 | break; | 3790 | break; |
3725 | case CHELSIO_T5: | 3791 | case CHELSIO_T5: |
3726 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, | 3792 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); |
3727 | adapter->params.rev); | ||
3728 | break; | 3793 | break; |
3729 | default: | 3794 | default: |
3730 | dev_err(adapter->pdev_dev, "Device %d is not supported\n", | 3795 | dev_err(adapter->pdev_dev, "Device %d is not supported\n", |
@@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3732 | return -EINVAL; | 3797 | return -EINVAL; |
3733 | } | 3798 | } |
3734 | 3799 | ||
3735 | /* Reassign the updated revision field */ | ||
3736 | adapter->params.rev = adapter->chip; | ||
3737 | |||
3738 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); | 3800 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); |
3739 | 3801 | ||
3740 | /* | 3802 | /* |
@@ -3746,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3746 | return 0; | 3808 | return 0; |
3747 | } | 3809 | } |
3748 | 3810 | ||
3811 | /** | ||
3812 | * t4_init_tp_params - initialize adap->params.tp | ||
3813 | * @adap: the adapter | ||
3814 | * | ||
3815 | * Initialize various fields of the adapter's TP Parameters structure. | ||
3816 | */ | ||
3817 | int t4_init_tp_params(struct adapter *adap) | ||
3818 | { | ||
3819 | int chan; | ||
3820 | u32 v; | ||
3821 | |||
3822 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
3823 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
3824 | adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); | ||
3825 | |||
3826 | /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ | ||
3827 | for (chan = 0; chan < NCHAN; chan++) | ||
3828 | adap->params.tp.tx_modq[chan] = chan; | ||
3829 | |||
3830 | /* Cache the adapter's Compressed Filter Mode and global Ingress | ||
3831 | * Configuration. | ||
3832 | */ | ||
3833 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
3834 | &adap->params.tp.vlan_pri_map, 1, | ||
3835 | TP_VLAN_PRI_MAP); | ||
3836 | t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, | ||
3837 | &adap->params.tp.ingress_config, 1, | ||
3838 | TP_INGRESS_CONFIG); | ||
3839 | |||
3840 | /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field | ||
3841 | * shift positions of several elements of the Compressed Filter Tuple | ||
3842 | * for this adapter which we need frequently ... | ||
3843 | */ | ||
3844 | adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); | ||
3845 | adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); | ||
3846 | adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); | ||
3847 | adap->params.tp.protocol_shift = t4_filter_field_shift(adap, | ||
3848 | F_PROTOCOL); | ||
3849 | |||
3850 | /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID | ||
3851 | * represents the presence of an Outer VLAN instead of a VNIC ID. | ||
3852 | */ | ||
3853 | if ((adap->params.tp.ingress_config & F_VNIC) == 0) | ||
3854 | adap->params.tp.vnic_shift = -1; | ||
3855 | |||
3856 | return 0; | ||
3857 | } | ||
3858 | |||
3859 | /** | ||
3860 | * t4_filter_field_shift - calculate filter field shift | ||
3861 | * @adap: the adapter | ||
3862 | * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) | ||
3863 | * | ||
3864 | * Return the shift position of a filter field within the Compressed | ||
3865 | * Filter Tuple. The filter field is specified via its selection bit | ||
3866 | * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN. | ||
3867 | */ | ||
3868 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel) | ||
3869 | { | ||
3870 | unsigned int filter_mode = adap->params.tp.vlan_pri_map; | ||
3871 | unsigned int sel; | ||
3872 | int field_shift; | ||
3873 | |||
3874 | if ((filter_mode & filter_sel) == 0) | ||
3875 | return -1; | ||
3876 | |||
3877 | for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { | ||
3878 | switch (filter_mode & sel) { | ||
3879 | case F_FCOE: | ||
3880 | field_shift += W_FT_FCOE; | ||
3881 | break; | ||
3882 | case F_PORT: | ||
3883 | field_shift += W_FT_PORT; | ||
3884 | break; | ||
3885 | case F_VNIC_ID: | ||
3886 | field_shift += W_FT_VNIC_ID; | ||
3887 | break; | ||
3888 | case F_VLAN: | ||
3889 | field_shift += W_FT_VLAN; | ||
3890 | break; | ||
3891 | case F_TOS: | ||
3892 | field_shift += W_FT_TOS; | ||
3893 | break; | ||
3894 | case F_PROTOCOL: | ||
3895 | field_shift += W_FT_PROTOCOL; | ||
3896 | break; | ||
3897 | case F_ETHERTYPE: | ||
3898 | field_shift += W_FT_ETHERTYPE; | ||
3899 | break; | ||
3900 | case F_MACMATCH: | ||
3901 | field_shift += W_FT_MACMATCH; | ||
3902 | break; | ||
3903 | case F_MPSHITTYPE: | ||
3904 | field_shift += W_FT_MPSHITTYPE; | ||
3905 | break; | ||
3906 | case F_FRAGMENTATION: | ||
3907 | field_shift += W_FT_FRAGMENTATION; | ||
3908 | break; | ||
3909 | } | ||
3910 | } | ||
3911 | return field_shift; | ||
3912 | } | ||
3913 | |||
3749 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) | 3914 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) |
3750 | { | 3915 | { |
3751 | u8 addr[6]; | 3916 | u8 addr[6]; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index ef146c0ba481..4082522d8140 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -1092,6 +1092,11 @@ | |||
1092 | 1092 | ||
1093 | #define PL_REV 0x1943c | 1093 | #define PL_REV 0x1943c |
1094 | 1094 | ||
1095 | #define S_REV 0 | ||
1096 | #define M_REV 0xfU | ||
1097 | #define V_REV(x) ((x) << S_REV) | ||
1098 | #define G_REV(x) (((x) >> S_REV) & M_REV) | ||
1099 | |||
1095 | #define LE_DB_CONFIG 0x19c04 | 1100 | #define LE_DB_CONFIG 0x19c04 |
1096 | #define HASHEN 0x00100000U | 1101 | #define HASHEN 0x00100000U |
1097 | 1102 | ||
@@ -1166,10 +1171,50 @@ | |||
1166 | 1171 | ||
1167 | #define A_TP_TX_SCHED_PCMD 0x25 | 1172 | #define A_TP_TX_SCHED_PCMD 0x25 |
1168 | 1173 | ||
1174 | #define S_VNIC 11 | ||
1175 | #define V_VNIC(x) ((x) << S_VNIC) | ||
1176 | #define F_VNIC V_VNIC(1U) | ||
1177 | |||
1178 | #define S_FRAGMENTATION 9 | ||
1179 | #define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) | ||
1180 | #define F_FRAGMENTATION V_FRAGMENTATION(1U) | ||
1181 | |||
1182 | #define S_MPSHITTYPE 8 | ||
1183 | #define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) | ||
1184 | #define F_MPSHITTYPE V_MPSHITTYPE(1U) | ||
1185 | |||
1186 | #define S_MACMATCH 7 | ||
1187 | #define V_MACMATCH(x) ((x) << S_MACMATCH) | ||
1188 | #define F_MACMATCH V_MACMATCH(1U) | ||
1189 | |||
1190 | #define S_ETHERTYPE 6 | ||
1191 | #define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) | ||
1192 | #define F_ETHERTYPE V_ETHERTYPE(1U) | ||
1193 | |||
1194 | #define S_PROTOCOL 5 | ||
1195 | #define V_PROTOCOL(x) ((x) << S_PROTOCOL) | ||
1196 | #define F_PROTOCOL V_PROTOCOL(1U) | ||
1197 | |||
1198 | #define S_TOS 4 | ||
1199 | #define V_TOS(x) ((x) << S_TOS) | ||
1200 | #define F_TOS V_TOS(1U) | ||
1201 | |||
1202 | #define S_VLAN 3 | ||
1203 | #define V_VLAN(x) ((x) << S_VLAN) | ||
1204 | #define F_VLAN V_VLAN(1U) | ||
1205 | |||
1206 | #define S_VNIC_ID 2 | ||
1207 | #define V_VNIC_ID(x) ((x) << S_VNIC_ID) | ||
1208 | #define F_VNIC_ID V_VNIC_ID(1U) | ||
1209 | |||
1169 | #define S_PORT 1 | 1210 | #define S_PORT 1 |
1170 | #define V_PORT(x) ((x) << S_PORT) | 1211 | #define V_PORT(x) ((x) << S_PORT) |
1171 | #define F_PORT V_PORT(1U) | 1212 | #define F_PORT V_PORT(1U) |
1172 | 1213 | ||
1214 | #define S_FCOE 0 | ||
1215 | #define V_FCOE(x) ((x) << S_FCOE) | ||
1216 | #define F_FCOE V_FCOE(1U) | ||
1217 | |||
1173 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 | 1218 | #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 |
1174 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 | 1219 | #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 |
1175 | 1220 | ||
@@ -1199,4 +1244,46 @@ | |||
1199 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) | 1244 | #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) |
1200 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) | 1245 | #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) |
1201 | 1246 | ||
1247 | #define A_PL_VF_REV 0x4 | ||
1248 | #define A_PL_VF_WHOAMI 0x0 | ||
1249 | #define A_PL_VF_REVISION 0x8 | ||
1250 | |||
1251 | #define S_CHIPID 4 | ||
1252 | #define M_CHIPID 0xfU | ||
1253 | #define V_CHIPID(x) ((x) << S_CHIPID) | ||
1254 | #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) | ||
1255 | |||
1256 | /* TP_VLAN_PRI_MAP controls which subset of fields will be present in the | ||
1257 | * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP | ||
1258 | * selects for a particular field being present. These fields, when present | ||
1259 | * in the Compressed Filter Tuple, have the following widths in bits. | ||
1260 | */ | ||
1261 | #define W_FT_FCOE 1 | ||
1262 | #define W_FT_PORT 3 | ||
1263 | #define W_FT_VNIC_ID 17 | ||
1264 | #define W_FT_VLAN 17 | ||
1265 | #define W_FT_TOS 8 | ||
1266 | #define W_FT_PROTOCOL 8 | ||
1267 | #define W_FT_ETHERTYPE 16 | ||
1268 | #define W_FT_MACMATCH 9 | ||
1269 | #define W_FT_MPSHITTYPE 3 | ||
1270 | #define W_FT_FRAGMENTATION 1 | ||
1271 | |||
1272 | /* Some of the Compressed Filter Tuple fields have internal structure. These | ||
1273 | * bit shifts/masks describe those structures. All shifts are relative to the | ||
1274 | * base position of the fields within the Compressed Filter Tuple | ||
1275 | */ | ||
1276 | #define S_FT_VLAN_VLD 16 | ||
1277 | #define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) | ||
1278 | #define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) | ||
1279 | |||
1280 | #define S_FT_VNID_ID_VF 0 | ||
1281 | #define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) | ||
1282 | |||
1283 | #define S_FT_VNID_ID_PF 7 | ||
1284 | #define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) | ||
1285 | |||
1286 | #define S_FT_VNID_ID_VLD 16 | ||
1287 | #define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) | ||
1288 | |||
1202 | #endif /* __T4_REGS_H */ | 1289 | #endif /* __T4_REGS_H */ |
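The W_FT_* widths above, together with the selection bits in TP_VLAN_PRI_MAP, determine where each selected field lands in the Compressed Filter Tuple: a field's shift is the sum of the widths of all selected fields with lower selection bits, which is what t4_filter_field_shift() computes in the t4_hw.c hunk earlier in this patch. Below is a standalone userspace sketch of that accumulation, illustrative only, using a hypothetical filter mode; it is not part of the patch or the driver:

	#include <stdio.h>

	struct ft_field {
		unsigned int sel_bit;	/* selector bit in TP_VLAN_PRI_MAP */
		unsigned int width;	/* field width in the tuple, in bits */
		const char *name;
	};

	/* Same order and widths as the W_FT_* constants above. */
	static const struct ft_field ft_fields[] = {
		{ 0, 1,  "FCOE" },
		{ 1, 3,  "PORT" },
		{ 2, 17, "VNIC_ID" },
		{ 3, 17, "VLAN" },
		{ 4, 8,  "TOS" },
		{ 5, 8,  "PROTOCOL" },
		{ 6, 16, "ETHERTYPE" },
		{ 7, 9,  "MACMATCH" },
		{ 8, 3,  "MPSHITTYPE" },
		{ 9, 1,  "FRAGMENTATION" },
	};

	/* Bit position of the field selected by @sel_bit within the tuple, or -1
	 * if the field is not selected: lower selector bits occupy lower tuple
	 * bits, exactly as in t4_filter_field_shift().
	 */
	static int ft_field_shift(unsigned int filter_mode, unsigned int sel_bit)
	{
		unsigned int i;
		int shift = 0;

		if (!(filter_mode & (1u << sel_bit)))
			return -1;

		for (i = 0; i < sizeof(ft_fields) / sizeof(ft_fields[0]) &&
			    ft_fields[i].sel_bit < sel_bit; i++)
			if (filter_mode & (1u << ft_fields[i].sel_bit))
				shift += ft_fields[i].width;
		return shift;
	}

	int main(void)
	{
		/* Hypothetical filter mode: PORT, VLAN and PROTOCOL selected. */
		unsigned int mode = (1u << 1) | (1u << 3) | (1u << 5);

		printf("PORT shift     = %d\n", ft_field_shift(mode, 1));	/* 0  */
		printf("VLAN shift     = %d\n", ft_field_shift(mode, 3));	/* 3  */
		printf("PROTOCOL shift = %d\n", ft_field_shift(mode, 5));	/* 20 */
		return 0;
	}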
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6f77ac487743..74fea74ce0aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd { | |||
2157 | 2157 | ||
2158 | struct fw_hdr { | 2158 | struct fw_hdr { |
2159 | u8 ver; | 2159 | u8 ver; |
2160 | u8 reserved1; | 2160 | u8 chip; /* terminator chip type */ |
2161 | __be16 len512; /* bin length in units of 512-bytes */ | 2161 | __be16 len512; /* bin length in units of 512-bytes */ |
2162 | __be32 fw_ver; /* firmware version */ | 2162 | __be32 fw_ver; /* firmware version */ |
2163 | __be32 tp_microcode_ver; | 2163 | __be32 tp_microcode_ver; |
@@ -2176,6 +2176,11 @@ struct fw_hdr { | |||
2176 | __be32 reserved6[23]; | 2176 | __be32 reserved6[23]; |
2177 | }; | 2177 | }; |
2178 | 2178 | ||
2179 | enum fw_hdr_chip { | ||
2180 | FW_HDR_CHIP_T4, | ||
2181 | FW_HDR_CHIP_T5 | ||
2182 | }; | ||
2183 | |||
2179 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) | 2184 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) |
2180 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | 2185 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) |
2181 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | 2186 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) |
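For reference, the fw_ver word in struct fw_hdr packs major/minor/micro/build as one byte each, which the FW_HDR_FW_VER_*_GET() accessors above unpack; t4_prep_fw() also uses a build accessor whose definition is not shown in this hunk and is assumed here to take the low byte. A minimal userspace sketch with a made-up version value (illustrative only, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
	#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
	#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
	#define FW_HDR_FW_VER_BUILD_GET(x) ((x) & 0xff)	/* assumed: low byte */

	int main(void)
	{
		uint32_t fw_ver = 0x01060400;	/* hypothetical: 1.6.4.0 */

		printf("%u.%u.%u.%u\n",
		       FW_HDR_FW_VER_MAJOR_GET(fw_ver),
		       FW_HDR_FW_VER_MINOR_GET(fw_ver),
		       FW_HDR_FW_VER_MICRO_GET(fw_ver),
		       FW_HDR_FW_VER_BUILD_GET(fw_ver));
		return 0;
	}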
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index be5c7ef6ca93..68eaa9c88c7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | |||
@@ -344,7 +344,6 @@ struct adapter { | |||
344 | unsigned long registered_device_map; | 344 | unsigned long registered_device_map; |
345 | unsigned long open_device_map; | 345 | unsigned long open_device_map; |
346 | unsigned long flags; | 346 | unsigned long flags; |
347 | enum chip_type chip; | ||
348 | struct adapter_params params; | 347 | struct adapter_params params; |
349 | 348 | ||
350 | /* queue and interrupt resources */ | 349 | /* queue and interrupt resources */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5f90ec5f7519..0899c0983594 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) | |||
1064 | /* | 1064 | /* |
1065 | * Chip version 4, revision 0x3f (cxgb4vf). | 1065 | * Chip version 4, revision 0x3f (cxgb4vf). |
1066 | */ | 1066 | */ |
1067 | return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); | 1067 | return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | /* | 1070 | /* |
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev, | |||
1551 | reg_block_dump(adapter, regbuf, | 1551 | reg_block_dump(adapter, regbuf, |
1552 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, | 1552 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, |
1553 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); | 1553 | T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); |
1554 | |||
1555 | /* T5 adds new registers in the PL Register map. | ||
1556 | */ | ||
1554 | reg_block_dump(adapter, regbuf, | 1557 | reg_block_dump(adapter, regbuf, |
1555 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, | 1558 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, |
1556 | T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); | 1559 | T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) |
1560 | ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); | ||
1557 | reg_block_dump(adapter, regbuf, | 1561 | reg_block_dump(adapter, regbuf, |
1558 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, | 1562 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, |
1559 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); | 1563 | T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); |
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter) | |||
2087 | unsigned int ethqsets; | 2091 | unsigned int ethqsets; |
2088 | int err; | 2092 | int err; |
2089 | u32 param, val = 0; | 2093 | u32 param, val = 0; |
2094 | unsigned int chipid; | ||
2090 | 2095 | ||
2091 | /* | 2096 | /* |
2092 | * Wait for the device to become ready before proceeding ... | 2097 | * Wait for the device to become ready before proceeding ... |
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter) | |||
2114 | return err; | 2119 | return err; |
2115 | } | 2120 | } |
2116 | 2121 | ||
2122 | adapter->params.chip = 0; | ||
2117 | switch (adapter->pdev->device >> 12) { | 2123 | switch (adapter->pdev->device >> 12) { |
2118 | case CHELSIO_T4: | 2124 | case CHELSIO_T4: |
2119 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); | 2125 | adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); |
2120 | break; | 2126 | break; |
2121 | case CHELSIO_T5: | 2127 | case CHELSIO_T5: |
2122 | adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); | 2128 | chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); |
2129 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); | ||
2123 | break; | 2130 | break; |
2124 | } | 2131 | } |
2125 | 2132 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8475c4cda9e4..0a89963c48ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) | |||
537 | */ | 537 | */ |
538 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { | 538 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { |
539 | val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); | 539 | val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); |
540 | if (!is_t4(adapter->chip)) | 540 | if (!is_t4(adapter->params.chip)) |
541 | val |= DBTYPE(1); | 541 | val |= DBTYPE(1); |
542 | wmb(); | 542 | wmb(); |
543 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, | 543 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 53cbfed21d0b..61362450d05b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -39,21 +39,28 @@ | |||
39 | #include "../cxgb4/t4fw_api.h" | 39 | #include "../cxgb4/t4fw_api.h" |
40 | 40 | ||
41 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) | 41 | #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) |
42 | #define CHELSIO_CHIP_VERSION(code) ((code) >> 4) | 42 | #define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) |
43 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) | 43 | #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) |
44 | 44 | ||
45 | /* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: | ||
46 | * | ||
47 | * V = "4" for T4; "5" for T5, etc. or | ||
48 | * = "a" for T4 FPGA; "b" for T5 FPGA, etc. | ||
49 | * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs | ||
50 | * PP = adapter product designation | ||
51 | */ | ||
45 | #define CHELSIO_T4 0x4 | 52 | #define CHELSIO_T4 0x4 |
46 | #define CHELSIO_T5 0x5 | 53 | #define CHELSIO_T5 0x5 |
47 | 54 | ||
48 | enum chip_type { | 55 | enum chip_type { |
49 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), | 56 | T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), |
50 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), | 57 | T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), |
51 | T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), | ||
52 | T4_FIRST_REV = T4_A1, | 58 | T4_FIRST_REV = T4_A1, |
53 | T4_LAST_REV = T4_A3, | 59 | T4_LAST_REV = T4_A2, |
54 | 60 | ||
55 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), | 61 | T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), |
56 | T5_FIRST_REV = T5_A1, | 62 | T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), |
63 | T5_FIRST_REV = T5_A0, | ||
57 | T5_LAST_REV = T5_A1, | 64 | T5_LAST_REV = T5_A1, |
58 | }; | 65 | }; |
59 | 66 | ||
@@ -203,6 +210,7 @@ struct adapter_params { | |||
203 | struct vpd_params vpd; /* Vital Product Data */ | 210 | struct vpd_params vpd; /* Vital Product Data */ |
204 | struct rss_params rss; /* Receive Side Scaling */ | 211 | struct rss_params rss; /* Receive Side Scaling */ |
205 | struct vf_resources vfres; /* Virtual Function Resource limits */ | 212 | struct vf_resources vfres; /* Virtual Function Resource limits */ |
213 | enum chip_type chip; /* chip code */ | ||
206 | u8 nports; /* # of Ethernet "ports" */ | 214 | u8 nports; /* # of Ethernet "ports" */ |
207 | }; | 215 | }; |
208 | 216 | ||
@@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, | |||
253 | 261 | ||
254 | static inline int is_t4(enum chip_type chip) | 262 | static inline int is_t4(enum chip_type chip) |
255 | { | 263 | { |
256 | return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); | 264 | return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; |
257 | } | 265 | } |
258 | 266 | ||
259 | int t4vf_wait_dev_ready(struct adapter *); | 267 | int t4vf_wait_dev_ready(struct adapter *); |
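The comment and macros above describe two related encodings: the PCI device ID nibbles (0xVFPP) and the driver's internal chip code, which packs a version nibble and a revision nibble. The following standalone sketch shows how they fit together, using a hypothetical device ID and revision; it is illustrative only and not part of the patch:

	#include <stdio.h>

	#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
	#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
	#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

	#define CHELSIO_T4 0x4
	#define CHELSIO_T5 0x5

	int main(void)
	{
		unsigned int device_id = 0x5801;	/* hypothetical T5 VF: V=5, F=8, PP=01 */
		unsigned int version = device_id >> 12;	/* "V" nibble -> 0x5 */
		unsigned int pl_rev = 1;		/* pretend PL_VF_REV reported rev 1 */
		unsigned int chip = CHELSIO_CHIP_CODE(version, pl_rev);

		/* is_t4() in the patch reduces to a version compare on the chip code. */
		printf("version %u, release %u, is_t4=%d\n",
		       CHELSIO_CHIP_VERSION(chip), CHELSIO_CHIP_RELEASE(chip),
		       CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
		return 0;
	}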
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9f96dc3bb112..d958c44341b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, | |||
1027 | unsigned nfilters = 0; | 1027 | unsigned nfilters = 0; |
1028 | unsigned int rem = naddr; | 1028 | unsigned int rem = naddr; |
1029 | struct fw_vi_mac_cmd cmd, rpl; | 1029 | struct fw_vi_mac_cmd cmd, rpl; |
1030 | unsigned int max_naddr = is_t4(adapter->chip) ? | 1030 | unsigned int max_naddr = is_t4(adapter->params.chip) ? |
1031 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 1031 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
1032 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 1032 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
1033 | 1033 | ||
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, | |||
1121 | struct fw_vi_mac_exact *p = &cmd.u.exact[0]; | 1121 | struct fw_vi_mac_exact *p = &cmd.u.exact[0]; |
1122 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | 1122 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, |
1123 | u.exact[1]), 16); | 1123 | u.exact[1]), 16); |
1124 | unsigned int max_naddr = is_t4(adapter->chip) ? | 1124 | unsigned int max_naddr = is_t4(adapter->params.chip) ? |
1125 | NUM_MPS_CLS_SRAM_L_INSTANCES : | 1125 | NUM_MPS_CLS_SRAM_L_INSTANCES : |
1126 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | 1126 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; |
1127 | 1127 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 5878df619b53..4ccaf9af6fc9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
104 | #define BE3_MAX_RSS_QS 16 | 104 | #define BE3_MAX_RSS_QS 16 |
105 | #define BE3_MAX_TX_QS 16 | 105 | #define BE3_MAX_TX_QS 16 |
106 | #define BE3_MAX_EVT_QS 16 | 106 | #define BE3_MAX_EVT_QS 16 |
107 | #define BE3_SRIOV_MAX_EVT_QS 8 | ||
107 | 108 | ||
108 | #define MAX_RX_QS 32 | 109 | #define MAX_RX_QS 32 |
109 | #define MAX_EVT_QS 32 | 110 | #define MAX_EVT_QS 32 |
@@ -480,7 +481,7 @@ struct be_adapter { | |||
480 | struct list_head entry; | 481 | struct list_head entry; |
481 | 482 | ||
482 | u32 flash_status; | 483 | u32 flash_status; |
483 | struct completion flash_compl; | 484 | struct completion et_cmd_compl; |
484 | 485 | ||
485 | struct be_resources res; /* resources available for the func */ | 486 | struct be_resources res; /* resources available for the func */ |
486 | u16 num_vfs; /* Number of VFs provisioned by PF */ | 487 | u16 num_vfs; /* Number of VFs provisioned by PF */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e0e8bc1ef14c..94c35c8d799d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
141 | subsystem = resp_hdr->subsystem; | 141 | subsystem = resp_hdr->subsystem; |
142 | } | 142 | } |
143 | 143 | ||
144 | if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && | ||
145 | subsystem == CMD_SUBSYSTEM_LOWLEVEL) { | ||
146 | complete(&adapter->et_cmd_compl); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
144 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || | 150 | if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || |
145 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && | 151 | (opcode == OPCODE_COMMON_WRITE_OBJECT)) && |
146 | (subsystem == CMD_SUBSYSTEM_COMMON)) { | 152 | (subsystem == CMD_SUBSYSTEM_COMMON)) { |
147 | adapter->flash_status = compl_status; | 153 | adapter->flash_status = compl_status; |
148 | complete(&adapter->flash_compl); | 154 | complete(&adapter->et_cmd_compl); |
149 | } | 155 | } |
150 | 156 | ||
151 | if (compl_status == MCC_STATUS_SUCCESS) { | 157 | if (compl_status == MCC_STATUS_SUCCESS) { |
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, | |||
2017 | 0x3ea83c02, 0x4a110304}; | 2023 | 0x3ea83c02, 0x4a110304}; |
2018 | int status; | 2024 | int status; |
2019 | 2025 | ||
2026 | if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) | ||
2027 | return 0; | ||
2028 | |||
2020 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | 2029 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
2021 | return -1; | 2030 | return -1; |
2022 | 2031 | ||
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
2160 | be_mcc_notify(adapter); | 2169 | be_mcc_notify(adapter); |
2161 | spin_unlock_bh(&adapter->mcc_lock); | 2170 | spin_unlock_bh(&adapter->mcc_lock); |
2162 | 2171 | ||
2163 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2172 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
2164 | msecs_to_jiffies(60000))) | 2173 | msecs_to_jiffies(60000))) |
2165 | status = -1; | 2174 | status = -1; |
2166 | else | 2175 | else |
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
2255 | be_mcc_notify(adapter); | 2264 | be_mcc_notify(adapter); |
2256 | spin_unlock_bh(&adapter->mcc_lock); | 2265 | spin_unlock_bh(&adapter->mcc_lock); |
2257 | 2266 | ||
2258 | if (!wait_for_completion_timeout(&adapter->flash_compl, | 2267 | if (!wait_for_completion_timeout(&adapter->et_cmd_compl, |
2259 | msecs_to_jiffies(40000))) | 2268 | msecs_to_jiffies(40000))) |
2260 | status = -1; | 2269 | status = -1; |
2261 | else | 2270 | else |
2262 | status = adapter->flash_status; | 2271 | status = adapter->flash_status; |
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2367 | { | 2376 | { |
2368 | struct be_mcc_wrb *wrb; | 2377 | struct be_mcc_wrb *wrb; |
2369 | struct be_cmd_req_loopback_test *req; | 2378 | struct be_cmd_req_loopback_test *req; |
2379 | struct be_cmd_resp_loopback_test *resp; | ||
2370 | int status; | 2380 | int status; |
2371 | 2381 | ||
2372 | spin_lock_bh(&adapter->mcc_lock); | 2382 | spin_lock_bh(&adapter->mcc_lock); |
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2381 | 2391 | ||
2382 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | 2392 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, |
2383 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); | 2393 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); |
2384 | req->hdr.timeout = cpu_to_le32(4); | ||
2385 | 2394 | ||
2395 | req->hdr.timeout = cpu_to_le32(15); | ||
2386 | req->pattern = cpu_to_le64(pattern); | 2396 | req->pattern = cpu_to_le64(pattern); |
2387 | req->src_port = cpu_to_le32(port_num); | 2397 | req->src_port = cpu_to_le32(port_num); |
2388 | req->dest_port = cpu_to_le32(port_num); | 2398 | req->dest_port = cpu_to_le32(port_num); |
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
2390 | req->num_pkts = cpu_to_le32(num_pkts); | 2400 | req->num_pkts = cpu_to_le32(num_pkts); |
2391 | req->loopback_type = cpu_to_le32(loopback_type); | 2401 | req->loopback_type = cpu_to_le32(loopback_type); |
2392 | 2402 | ||
2393 | status = be_mcc_notify_wait(adapter); | 2403 | be_mcc_notify(adapter); |
2394 | if (!status) { | 2404 | |
2395 | struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); | 2405 | spin_unlock_bh(&adapter->mcc_lock); |
2396 | status = le32_to_cpu(resp->status); | ||
2397 | } | ||
2398 | 2406 | ||
2407 | wait_for_completion(&adapter->et_cmd_compl); | ||
2408 | resp = embedded_payload(wrb); | ||
2409 | status = le32_to_cpu(resp->status); | ||
2410 | |||
2411 | return status; | ||
2399 | err: | 2412 | err: |
2400 | spin_unlock_bh(&adapter->mcc_lock); | 2413 | spin_unlock_bh(&adapter->mcc_lock); |
2401 | return status; | 2414 | return status; |
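
The loopback-test hunk above replaces the synchronous be_mcc_notify_wait() with be_mcc_notify() followed by a wait on et_cmd_compl, the same completion the flash paths were switched to. A minimal sketch of that pattern, assuming the MCC event handler calls complete() on et_cmd_compl when the firmware response arrives; the demo_ names are illustrative, not driver code:

#include <linux/completion.h>

struct demo_adapter {
	struct completion et_cmd_compl;
};

/* done once at setup, mirroring init_completion() in be_ctrl_init() */
static void demo_setup(struct demo_adapter *adapter)
{
	init_completion(&adapter->et_cmd_compl);
}

/* issuing side: post the command, then block until the event handler
 * calls complete(&adapter->et_cmd_compl) for the response
 */
static int demo_issue_and_wait(struct demo_adapter *adapter)
{
	/* ... build the WRB, be_mcc_notify(), drop the mcc_lock ... */

	wait_for_completion(&adapter->et_cmd_compl);

	/* the flash paths instead keep a bounded wait:
	 * wait_for_completion_timeout(&adapter->et_cmd_compl,
	 *			       msecs_to_jiffies(60000));
	 */
	return 0;
}
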
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e2162121601..dc88782185f2 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h | |||
@@ -64,6 +64,9 @@ | |||
64 | #define SLIPORT_ERROR_NO_RESOURCE1 0x2 | 64 | #define SLIPORT_ERROR_NO_RESOURCE1 0x2 |
65 | #define SLIPORT_ERROR_NO_RESOURCE2 0x9 | 65 | #define SLIPORT_ERROR_NO_RESOURCE2 0x9 |
66 | 66 | ||
67 | #define SLIPORT_ERROR_FW_RESET1 0x2 | ||
68 | #define SLIPORT_ERROR_FW_RESET2 0x0 | ||
69 | |||
67 | /********* Memory BAR register ************/ | 70 | /********* Memory BAR register ************/ |
68 | #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc | 71 | #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc |
69 | /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt | 72 | /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fee64bf10446..bf40fdaecfa3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter) | |||
2464 | */ | 2464 | */ |
2465 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { | 2465 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { |
2466 | adapter->hw_error = true; | 2466 | adapter->hw_error = true; |
2467 | dev_err(&adapter->pdev->dev, | 2467 | /* Do not log error messages if it's a FW reset */ |
2468 | "Error detected in the card\n"); | 2468 | if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && |
2469 | sliport_err2 == SLIPORT_ERROR_FW_RESET2) { | ||
2470 | dev_info(&adapter->pdev->dev, | ||
2471 | "Firmware update in progress\n"); | ||
2472 | return; | ||
2473 | } else { | ||
2474 | dev_err(&adapter->pdev->dev, | ||
2475 | "Error detected in the card\n"); | ||
2476 | } | ||
2469 | } | 2477 | } |
2470 | 2478 | ||
2471 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { | 2479 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { |
@@ -2736,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter) | |||
2736 | if (!BEx_chip(adapter)) | 2744 | if (!BEx_chip(adapter)) |
2737 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | | 2745 | adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | |
2738 | RSS_ENABLE_UDP_IPV6; | 2746 | RSS_ENABLE_UDP_IPV6; |
2747 | } else { | ||
2748 | /* Disable RSS, if only default RX Q is created */ | ||
2749 | adapter->rss_flags = RSS_ENABLE_NONE; | ||
2750 | } | ||
2739 | 2751 | ||
2740 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, | 2752 | rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, |
2741 | 128); | 2753 | 128); |
2742 | if (rc) { | 2754 | if (rc) { |
2743 | adapter->rss_flags = 0; | 2755 | adapter->rss_flags = RSS_ENABLE_NONE; |
2744 | return rc; | 2756 | return rc; |
2745 | } | ||
2746 | } | 2757 | } |
2747 | 2758 | ||
2748 | /* First time posting */ | 2759 | /* First time posting */ |
@@ -2932,28 +2943,35 @@ static void be_cancel_worker(struct be_adapter *adapter) | |||
2932 | } | 2943 | } |
2933 | } | 2944 | } |
2934 | 2945 | ||
2935 | static int be_clear(struct be_adapter *adapter) | 2946 | static void be_mac_clear(struct be_adapter *adapter) |
2936 | { | 2947 | { |
2937 | int i; | 2948 | int i; |
2938 | 2949 | ||
2950 | if (adapter->pmac_id) { | ||
2951 | for (i = 0; i < (adapter->uc_macs + 1); i++) | ||
2952 | be_cmd_pmac_del(adapter, adapter->if_handle, | ||
2953 | adapter->pmac_id[i], 0); | ||
2954 | adapter->uc_macs = 0; | ||
2955 | |||
2956 | kfree(adapter->pmac_id); | ||
2957 | adapter->pmac_id = NULL; | ||
2958 | } | ||
2959 | } | ||
2960 | |||
2961 | static int be_clear(struct be_adapter *adapter) | ||
2962 | { | ||
2939 | be_cancel_worker(adapter); | 2963 | be_cancel_worker(adapter); |
2940 | 2964 | ||
2941 | if (sriov_enabled(adapter)) | 2965 | if (sriov_enabled(adapter)) |
2942 | be_vf_clear(adapter); | 2966 | be_vf_clear(adapter); |
2943 | 2967 | ||
2944 | /* delete the primary mac along with the uc-mac list */ | 2968 | /* delete the primary mac along with the uc-mac list */ |
2945 | for (i = 0; i < (adapter->uc_macs + 1); i++) | 2969 | be_mac_clear(adapter); |
2946 | be_cmd_pmac_del(adapter, adapter->if_handle, | ||
2947 | adapter->pmac_id[i], 0); | ||
2948 | adapter->uc_macs = 0; | ||
2949 | 2970 | ||
2950 | be_cmd_if_destroy(adapter, adapter->if_handle, 0); | 2971 | be_cmd_if_destroy(adapter, adapter->if_handle, 0); |
2951 | 2972 | ||
2952 | be_clear_queues(adapter); | 2973 | be_clear_queues(adapter); |
2953 | 2974 | ||
2954 | kfree(adapter->pmac_id); | ||
2955 | adapter->pmac_id = NULL; | ||
2956 | |||
2957 | be_msix_disable(adapter); | 2975 | be_msix_disable(adapter); |
2958 | return 0; | 2976 | return 0; |
2959 | } | 2977 | } |
@@ -3109,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
3109 | { | 3127 | { |
3110 | struct pci_dev *pdev = adapter->pdev; | 3128 | struct pci_dev *pdev = adapter->pdev; |
3111 | bool use_sriov = false; | 3129 | bool use_sriov = false; |
3130 | int max_vfs; | ||
3112 | 3131 | ||
3113 | if (BE3_chip(adapter) && sriov_want(adapter)) { | 3132 | max_vfs = pci_sriov_get_totalvfs(pdev); |
3114 | int max_vfs; | ||
3115 | 3133 | ||
3116 | max_vfs = pci_sriov_get_totalvfs(pdev); | 3134 | if (BE3_chip(adapter) && sriov_want(adapter)) { |
3117 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; | 3135 | res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; |
3118 | use_sriov = res->max_vfs; | 3136 | use_sriov = res->max_vfs; |
3119 | } | 3137 | } |
@@ -3144,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
3144 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; | 3162 | BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; |
3145 | res->max_rx_qs = res->max_rss_qs + 1; | 3163 | res->max_rx_qs = res->max_rss_qs + 1; |
3146 | 3164 | ||
3147 | res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; | 3165 | if (be_physfn(adapter)) |
3166 | res->max_evt_qs = (max_vfs > 0) ? | ||
3167 | BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; | ||
3168 | else | ||
3169 | res->max_evt_qs = 1; | ||
3148 | 3170 | ||
3149 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; | 3171 | res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; |
3150 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) | 3172 | if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) |
@@ -3812,6 +3834,8 @@ static int lancer_fw_download(struct be_adapter *adapter, | |||
3812 | } | 3834 | } |
3813 | 3835 | ||
3814 | if (change_status == LANCER_FW_RESET_NEEDED) { | 3836 | if (change_status == LANCER_FW_RESET_NEEDED) { |
3837 | dev_info(&adapter->pdev->dev, | ||
3838 | "Resetting adapter to activate new FW\n"); | ||
3815 | status = lancer_physdev_ctrl(adapter, | 3839 | status = lancer_physdev_ctrl(adapter, |
3816 | PHYSDEV_CONTROL_FW_RESET_MASK); | 3840 | PHYSDEV_CONTROL_FW_RESET_MASK); |
3817 | if (status) { | 3841 | if (status) { |
@@ -4188,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter) | |||
4188 | spin_lock_init(&adapter->mcc_lock); | 4212 | spin_lock_init(&adapter->mcc_lock); |
4189 | spin_lock_init(&adapter->mcc_cq_lock); | 4213 | spin_lock_init(&adapter->mcc_cq_lock); |
4190 | 4214 | ||
4191 | init_completion(&adapter->flash_compl); | 4215 | init_completion(&adapter->et_cmd_compl); |
4192 | pci_save_state(adapter->pdev); | 4216 | pci_save_state(adapter->pdev); |
4193 | return 0; | 4217 | return 0; |
4194 | 4218 | ||
@@ -4363,13 +4387,13 @@ static int lancer_recover_func(struct be_adapter *adapter) | |||
4363 | goto err; | 4387 | goto err; |
4364 | } | 4388 | } |
4365 | 4389 | ||
4366 | dev_err(dev, "Error recovery successful\n"); | 4390 | dev_err(dev, "Adapter recovery successful\n"); |
4367 | return 0; | 4391 | return 0; |
4368 | err: | 4392 | err: |
4369 | if (status == -EAGAIN) | 4393 | if (status == -EAGAIN) |
4370 | dev_err(dev, "Waiting for resource provisioning\n"); | 4394 | dev_err(dev, "Waiting for resource provisioning\n"); |
4371 | else | 4395 | else |
4372 | dev_err(dev, "Error recovery failed\n"); | 4396 | dev_err(dev, "Adapter recovery failed\n"); |
4373 | 4397 | ||
4374 | return status; | 4398 | return status; |
4375 | } | 4399 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4cbebf3d80eb..50bb71c663e2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev); | |||
98 | * detected as not set during a prior frame transmission, then the | 98 | * detected as not set during a prior frame transmission, then the |
99 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | 99 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs |
100 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | 100 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in |
101 | * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously | ||
102 | * detected as not set during a prior frame transmission, then the | ||
103 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | ||
104 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | ||
105 | * frames not being transmitted until there is a 0-to-1 transition on | 101 | * frames not being transmitted until there is a 0-to-1 transition on |
106 | * ENET_TDAR[TDAR]. | 102 | * ENET_TDAR[TDAR]. |
107 | */ | 103 | */ |
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
385 | * data. | 381 | * data. |
386 | */ | 382 | */ |
387 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, | 383 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, |
388 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | 384 | skb->len, DMA_TO_DEVICE); |
389 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { | 385 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { |
390 | bdp->cbd_bufaddr = 0; | 386 | bdp->cbd_bufaddr = 0; |
391 | fep->tx_skbuff[index] = NULL; | 387 | fep->tx_skbuff[index] = NULL; |
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
432 | /* If this was the last BD in the ring, start at the beginning again. */ | 428 | /* If this was the last BD in the ring, start at the beginning again. */ |
433 | bdp = fec_enet_get_nextdesc(bdp, fep); | 429 | bdp = fec_enet_get_nextdesc(bdp, fep); |
434 | 430 | ||
431 | skb_tx_timestamp(skb); | ||
432 | |||
435 | fep->cur_tx = bdp; | 433 | fep->cur_tx = bdp; |
436 | 434 | ||
437 | if (fep->cur_tx == fep->dirty_tx) | 435 | if (fep->cur_tx == fep->dirty_tx) |
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
440 | /* Trigger transmission start */ | 438 | /* Trigger transmission start */ |
441 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | 439 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
442 | 440 | ||
443 | skb_tx_timestamp(skb); | ||
444 | |||
445 | return NETDEV_TX_OK; | 441 | return NETDEV_TX_OK; |
446 | } | 442 | } |
447 | 443 | ||
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev) | |||
779 | else | 775 | else |
780 | index = bdp - fep->tx_bd_base; | 776 | index = bdp - fep->tx_bd_base; |
781 | 777 | ||
782 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
783 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | ||
784 | bdp->cbd_bufaddr = 0; | ||
785 | |||
786 | skb = fep->tx_skbuff[index]; | 778 | skb = fep->tx_skbuff[index]; |
779 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, | ||
780 | DMA_TO_DEVICE); | ||
781 | bdp->cbd_bufaddr = 0; | ||
787 | 782 | ||
788 | /* Check for errors. */ | 783 | /* Check for errors. */ |
789 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 784 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
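
Two things change in the fec hunks above: the Tx buffer is mapped and unmapped with skb->len rather than the fixed FEC_ENET_TX_FRSIZE, and skb_tx_timestamp() is taken before the descriptor is handed to the hardware so the frame cannot complete first. A compressed sketch of both points, with the descriptor plumbing elided and demo_ names used as placeholders:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_xmit(struct device *dev, struct sk_buff *skb,
		     dma_addr_t *bufaddr)
{
	/* map exactly the bytes the frame occupies */
	*bufaddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *bufaddr))
		return -ENOMEM;

	skb_tx_timestamp(skb);	/* before the hardware can complete the frame */

	/* ... fill the buffer descriptor and trigger transmission ... */
	return 0;
}

static void demo_tx_done(struct device *dev, struct sk_buff *skb,
			 dma_addr_t bufaddr)
{
	/* unmap with the same length that was mapped */
	dma_unmap_single(dev, bufaddr, skb->len, DMA_TO_DEVICE);
}
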
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 2d1c6bdd3618..7628e0fd8455 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3033 | 3033 | ||
3034 | dev->hw_features = NETIF_F_SG | NETIF_F_TSO | | 3034 | dev->hw_features = NETIF_F_SG | NETIF_F_TSO | |
3035 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; | 3035 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; |
3036 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | | 3036 | dev->features = NETIF_F_SG | NETIF_F_TSO | |
3037 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | | 3037 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | |
3038 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | 3038 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
3039 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; | 3039 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; |
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 895450e9bb3c..ff2d806eaef7 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c | |||
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
718 | e1000_release_phy_80003es2lan(hw); | 718 | e1000_release_phy_80003es2lan(hw); |
719 | 719 | ||
720 | /* Disable IBIST slave mode (far-end loopback) */ | 720 | /* Disable IBIST slave mode (far-end loopback) */ |
721 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 721 | ret_val = |
722 | &kum_reg_data); | 722 | e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
723 | &kum_reg_data); | ||
724 | if (ret_val) | ||
725 | return ret_val; | ||
723 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; | 726 | kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; |
724 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, | 727 | e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, |
725 | kum_reg_data); | 728 | kum_reg_data); |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8d3945ab7334..c30d41d6e426 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6174 | return 0; | 6174 | return 0; |
6175 | } | 6175 | } |
6176 | 6176 | ||
6177 | #ifdef CONFIG_PM_SLEEP | 6177 | #ifdef CONFIG_PM |
6178 | static int e1000_suspend(struct device *dev) | 6178 | static int e1000_suspend(struct device *dev) |
6179 | { | 6179 | { |
6180 | struct pci_dev *pdev = to_pci_dev(dev); | 6180 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev) | |||
6193 | 6193 | ||
6194 | return __e1000_resume(pdev); | 6194 | return __e1000_resume(pdev); |
6195 | } | 6195 | } |
6196 | #endif /* CONFIG_PM_SLEEP */ | 6196 | #endif /* CONFIG_PM */ |
6197 | 6197 | ||
6198 | #ifdef CONFIG_PM_RUNTIME | 6198 | #ifdef CONFIG_PM_RUNTIME |
6199 | static int e1000_runtime_suspend(struct device *dev) | 6199 | static int e1000_runtime_suspend(struct device *dev) |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index da2be59505c0..20e71f4ca426 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |||
1757 | * it across the board. | 1757 | * it across the board. |
1758 | */ | 1758 | */ |
1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1759 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
1760 | if (ret_val) | 1760 | if (ret_val) { |
1761 | /* If the first read fails, another entity may have | 1761 | /* If the first read fails, another entity may have |
1762 | * ownership of the resources, wait and try again to | 1762 | * ownership of the resources, wait and try again to |
1763 | * see if they have relinquished the resources yet. | 1763 | * see if they have relinquished the resources yet. |
1764 | */ | 1764 | */ |
1765 | udelay(usec_interval); | 1765 | if (usec_interval >= 1000) |
1766 | msleep(usec_interval / 1000); | ||
1767 | else | ||
1768 | udelay(usec_interval); | ||
1769 | } | ||
1766 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); | 1770 | ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); |
1767 | if (ret_val) | 1771 | if (ret_val) |
1768 | break; | 1772 | break; |
1769 | if (phy_status & BMSR_LSTATUS) | 1773 | if (phy_status & BMSR_LSTATUS) |
1770 | break; | 1774 | break; |
1771 | if (usec_interval >= 1000) | 1775 | if (usec_interval >= 1000) |
1772 | mdelay(usec_interval / 1000); | 1776 | msleep(usec_interval / 1000); |
1773 | else | 1777 | else |
1774 | udelay(usec_interval); | 1778 | udelay(usec_interval); |
1775 | } | 1779 | } |
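
This hunk and the igb one below apply the same rule when polling for link: busy-waiting is only appropriate for sub-millisecond intervals, and longer waits should sleep. A hedged sketch of that selection, assuming the caller runs in process context; the helper name is illustrative:

#include <linux/delay.h>
#include <linux/types.h>

/* pick the delay primitive by interval length */
static void demo_poll_delay(u32 usec_interval)
{
	if (usec_interval >= 1000)
		msleep(usec_interval / 1000);	/* may sleep; yields the CPU */
	else
		udelay(usec_interval);		/* short interval: busy-wait */
}
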
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index be15938ba213..12b0932204ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( | |||
354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); | 354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); |
355 | int i; | 355 | int i; |
356 | 356 | ||
357 | if (!vsi->tx_rings) | ||
358 | return stats; | ||
359 | |||
357 | rcu_read_lock(); | 360 | rcu_read_lock(); |
358 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 361 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
359 | struct i40e_ring *tx_ring, *rx_ring; | 362 | struct i40e_ring *tx_ring, *rx_ring; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c4c4fe332c7e..ad2b74d95138 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c | |||
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, | |||
1728 | * ownership of the resources, wait and try again to | 1728 | * ownership of the resources, wait and try again to |
1729 | * see if they have relinquished the resources yet. | 1729 | * see if they have relinquished the resources yet. |
1730 | */ | 1730 | */ |
1731 | udelay(usec_interval); | 1731 | if (usec_interval >= 1000) |
1732 | mdelay(usec_interval/1000); | ||
1733 | else | ||
1734 | udelay(usec_interval); | ||
1732 | } | 1735 | } |
1733 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); | 1736 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); |
1734 | if (ret_val) | 1737 | if (ret_val) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc06854296a3..5bcc870f8367 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) | |||
6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); | 6827 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
6828 | } | 6828 | } |
6829 | 6829 | ||
6830 | #ifdef IXGBE_FCOE | 6830 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, |
6831 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 6831 | void *accel_priv) |
6832 | { | 6832 | { |
6833 | struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; | ||
6834 | #ifdef IXGBE_FCOE | ||
6833 | struct ixgbe_adapter *adapter; | 6835 | struct ixgbe_adapter *adapter; |
6834 | struct ixgbe_ring_feature *f; | 6836 | struct ixgbe_ring_feature *f; |
6835 | int txq; | 6837 | int txq; |
6838 | #endif | ||
6839 | |||
6840 | if (fwd_adapter) | ||
6841 | return skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
6842 | |||
6843 | #ifdef IXGBE_FCOE | ||
6836 | 6844 | ||
6837 | /* | 6845 | /* |
6838 | * only execute the code below if protocol is FCoE | 6846 | * only execute the code below if protocol is FCoE |
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6858 | txq -= f->indices; | 6866 | txq -= f->indices; |
6859 | 6867 | ||
6860 | return txq + f->offset; | 6868 | return txq + f->offset; |
6869 | #else | ||
6870 | return __netdev_pick_tx(dev, skb); | ||
6871 | #endif | ||
6861 | } | 6872 | } |
6862 | 6873 | ||
6863 | #endif | ||
6864 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | 6874 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
6865 | struct ixgbe_adapter *adapter, | 6875 | struct ixgbe_adapter *adapter, |
6866 | struct ixgbe_ring *tx_ring) | 6876 | struct ixgbe_ring *tx_ring) |
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) | |||
7629 | kfree(fwd_adapter); | 7639 | kfree(fwd_adapter); |
7630 | } | 7640 | } |
7631 | 7641 | ||
7632 | static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, | ||
7633 | struct net_device *dev, | ||
7634 | void *priv) | ||
7635 | { | ||
7636 | struct ixgbe_fwd_adapter *fwd_adapter = priv; | ||
7637 | unsigned int queue; | ||
7638 | struct ixgbe_ring *tx_ring; | ||
7639 | |||
7640 | queue = skb->queue_mapping + fwd_adapter->tx_base_queue; | ||
7641 | tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; | ||
7642 | |||
7643 | return __ixgbe_xmit_frame(skb, dev, tx_ring); | ||
7644 | } | ||
7645 | |||
7646 | static const struct net_device_ops ixgbe_netdev_ops = { | 7642 | static const struct net_device_ops ixgbe_netdev_ops = { |
7647 | .ndo_open = ixgbe_open, | 7643 | .ndo_open = ixgbe_open, |
7648 | .ndo_stop = ixgbe_close, | 7644 | .ndo_stop = ixgbe_close, |
7649 | .ndo_start_xmit = ixgbe_xmit_frame, | 7645 | .ndo_start_xmit = ixgbe_xmit_frame, |
7650 | #ifdef IXGBE_FCOE | ||
7651 | .ndo_select_queue = ixgbe_select_queue, | 7646 | .ndo_select_queue = ixgbe_select_queue, |
7652 | #endif | ||
7653 | .ndo_set_rx_mode = ixgbe_set_rx_mode, | 7647 | .ndo_set_rx_mode = ixgbe_set_rx_mode, |
7654 | .ndo_validate_addr = eth_validate_addr, | 7648 | .ndo_validate_addr = eth_validate_addr, |
7655 | .ndo_set_mac_address = ixgbe_set_mac, | 7649 | .ndo_set_mac_address = ixgbe_set_mac, |
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
7689 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, | 7683 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, |
7690 | .ndo_dfwd_add_station = ixgbe_fwd_add, | 7684 | .ndo_dfwd_add_station = ixgbe_fwd_add, |
7691 | .ndo_dfwd_del_station = ixgbe_fwd_del, | 7685 | .ndo_dfwd_del_station = ixgbe_fwd_del, |
7692 | .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, | ||
7693 | }; | 7686 | }; |
7694 | 7687 | ||
7695 | /** | 7688 | /** |
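
The ixgbe, lantiq and mlx4 hunks in this series all adapt to the same core change: .ndo_select_queue now receives an accel_priv pointer, so L2-forwarding-offload traffic is steered in the regular queue-selection path instead of a dedicated ndo_dfwd_start_xmit. A sketch of the new callback shape; the demo_ structure is illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_fwd_adapter {
	unsigned int tx_base_queue;	/* first queue reserved for the offloaded macvlan */
};

static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv)
{
	struct demo_fwd_adapter *fwd = accel_priv;

	if (fwd)	/* offloaded traffic: map onto its reserved queue range */
		return skb->queue_mapping + fwd->tx_base_queue;

	return __netdev_pick_tx(dev, skb);	/* normal path */
}
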
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d6f0c0d8cf11..72084f70adbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) | |||
291 | { | 291 | { |
292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); | 292 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
293 | int err; | 293 | int err; |
294 | #ifdef CONFIG_PCI_IOV | ||
294 | u32 current_flags = adapter->flags; | 295 | u32 current_flags = adapter->flags; |
296 | #endif | ||
295 | 297 | ||
296 | err = ixgbe_disable_sriov(adapter); | 298 | err = ixgbe_disable_sriov(adapter); |
297 | 299 | ||
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a6c1f76d8e0..ec94a20d7099 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | static u16 | 621 | static u16 |
622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) | 622 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, |
623 | void *accel_priv) | ||
623 | { | 624 | { |
624 | /* we are currently only using the first queue */ | 625 | /* we are currently only using the first queue */ |
625 | return 0; | 626 | return 0; |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 7354960b583b..c4eeb69a5bee 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c | |||
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus) | |||
92 | if (time_is_before_jiffies(end)) | 92 | if (time_is_before_jiffies(end)) |
93 | ++timedout; | 93 | ++timedout; |
94 | } else { | 94 | } else { |
95 | /* wait_event_timeout does not guarantee a delay of at | ||
96 | * least one whole jiffie, so timeout must be no less | ||
97 | * than two. | ||
98 | */ | ||
99 | if (timeout < 2) | ||
100 | timeout = 2; | ||
95 | wait_event_timeout(dev->smi_busy_wait, | 101 | wait_event_timeout(dev->smi_busy_wait, |
96 | orion_mdio_smi_is_done(dev), | 102 | orion_mdio_smi_is_done(dev), |
97 | timeout); | 103 | timeout); |
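
The comment added above is the whole story: wait_event_timeout() measures in jiffies, and a one-jiffy timeout can expire almost immediately if the current tick is about to roll over. A minimal illustration of the clamp, with an assumed millisecond input:

#include <linux/jiffies.h>

static unsigned long demo_smi_timeout(unsigned int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms);

	/* msecs_to_jiffies(1) is a single jiffy on common HZ values,
	 * so enforce the two-jiffy floor before waiting
	 */
	if (timeout < 2)
		timeout = 2;
	return timeout;
}
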
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b8e232b4ea2d..d5f0d72e5e33 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |||
1378 | 1378 | ||
1379 | dev_kfree_skb_any(skb); | 1379 | dev_kfree_skb_any(skb); |
1380 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1380 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
1381 | rx_desc->data_size, DMA_FROM_DEVICE); | 1381 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | if (rx_done) | 1384 | if (rx_done) |
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1426 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
1427 | rx_desc->data_size, DMA_FROM_DEVICE); | 1427 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1428 | 1428 | ||
1429 | rx_bytes = rx_desc->data_size - | 1429 | rx_bytes = rx_desc->data_size - |
1430 | (ETH_FCS_LEN + MVNETA_MH_SIZE); | 1430 | (ETH_FCS_LEN + MVNETA_MH_SIZE); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f54ebd5a1702..a7fcd593b2db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk | |||
592 | } | 592 | } |
593 | } | 593 | } |
594 | 594 | ||
595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | 595 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
596 | void *accel_priv) | ||
596 | { | 597 | { |
597 | struct mlx4_en_priv *priv = netdev_priv(dev); | 598 | struct mlx4_en_priv *priv = netdev_priv(dev); |
598 | u16 rings_p_up = priv->num_tx_rings_p_up; | 599 | u16 rings_p_up = priv->num_tx_rings_p_up; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5789ea2c934d..01fc6515384d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -2635,6 +2635,8 @@ static int __init mlx4_init(void) | |||
2635 | return -ENOMEM; | 2635 | return -ENOMEM; |
2636 | 2636 | ||
2637 | ret = pci_register_driver(&mlx4_driver); | 2637 | ret = pci_register_driver(&mlx4_driver); |
2638 | if (ret < 0) | ||
2639 | destroy_workqueue(mlx4_wq); | ||
2638 | return ret < 0 ? ret : 0; | 2640 | return ret < 0 ? ret : 0; |
2639 | } | 2641 | } |
2640 | 2642 | ||
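
The one-line mlx4 fix above follows the usual module-init rule: anything allocated before pci_register_driver() must be torn down if registration fails. A sketch of that shape, with illustrative names:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct pci_driver demo_pci_driver;	/* assumed filled in elsewhere */

static int __init demo_init(void)
{
	int ret;

	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;

	ret = pci_register_driver(&demo_pci_driver);
	if (ret < 0)
		destroy_workqueue(demo_wq);	/* undo the earlier allocation */

	return ret < 0 ? ret : 0;
}
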
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f3758de59c05..d5758adceaa2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | |||
714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 714 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
715 | 715 | ||
716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | 716 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); |
717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | 717 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
718 | void *accel_priv); | ||
718 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 719 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
719 | 720 | ||
720 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 721 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 2d045be4b5cf..1e8b9514718b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c | |||
@@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
5150 | { | 5150 | { |
5151 | struct fe_priv *np = netdev_priv(dev); | 5151 | struct fe_priv *np = netdev_priv(dev); |
5152 | u8 __iomem *base = get_hwbase(dev); | 5152 | u8 __iomem *base = get_hwbase(dev); |
5153 | int result; | 5153 | int result, count; |
5154 | memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); | 5154 | |
5155 | count = nv_get_sset_count(dev, ETH_SS_TEST); | ||
5156 | memset(buffer, 0, count * sizeof(u64)); | ||
5155 | 5157 | ||
5156 | if (!nv_link_test(dev)) { | 5158 | if (!nv_link_test(dev)) { |
5157 | test->flags |= ETH_TEST_FL_FAILED; | 5159 | test->flags |= ETH_TEST_FL_FAILED; |
@@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
5195 | return; | 5197 | return; |
5196 | } | 5198 | } |
5197 | 5199 | ||
5198 | if (!nv_loopback_test(dev)) { | 5200 | if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { |
5199 | test->flags |= ETH_TEST_FL_FAILED; | 5201 | test->flags |= ETH_TEST_FL_FAILED; |
5200 | buffer[3] = 1; | 5202 | buffer[3] = 1; |
5201 | } | 5203 | } |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 7692dfd4f262..cc68657f0536 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter, | |||
1604 | u32 seq_number; | 1604 | u32 seq_number; |
1605 | u8 vhdr_len = 0; | 1605 | u8 vhdr_len = 0; |
1606 | 1606 | ||
1607 | if (unlikely(ring > adapter->max_rds_rings)) | 1607 | if (unlikely(ring >= adapter->max_rds_rings)) |
1608 | return NULL; | 1608 | return NULL; |
1609 | 1609 | ||
1610 | rds_ring = &recv_ctx->rds_rings[ring]; | 1610 | rds_ring = &recv_ctx->rds_rings[ring]; |
1611 | 1611 | ||
1612 | index = netxen_get_lro_sts_refhandle(sts_data0); | 1612 | index = netxen_get_lro_sts_refhandle(sts_data0); |
1613 | if (unlikely(index > rds_ring->num_desc)) | 1613 | if (unlikely(index >= rds_ring->num_desc)) |
1614 | return NULL; | 1614 | return NULL; |
1615 | 1615 | ||
1616 | buffer = &rds_ring->rx_buf_arr[index]; | 1616 | buffer = &rds_ring->rx_buf_arr[index]; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 631ea0ac1cd8..f2a7c7166e24 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context { | |||
487 | struct qlcnic_mailbox *mailbox; | 487 | struct qlcnic_mailbox *mailbox; |
488 | u8 extend_lb_time; | 488 | u8 extend_lb_time; |
489 | u8 phys_port_id[ETH_ALEN]; | 489 | u8 phys_port_id[ETH_ALEN]; |
490 | u8 lb_mode; | ||
490 | }; | 491 | }; |
491 | 492 | ||
492 | struct qlcnic_adapter_stats { | 493 | struct qlcnic_adapter_stats { |
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring { | |||
578 | dma_addr_t phys_addr; | 579 | dma_addr_t phys_addr; |
579 | dma_addr_t hw_cons_phys_addr; | 580 | dma_addr_t hw_cons_phys_addr; |
580 | struct netdev_queue *txq; | 581 | struct netdev_queue *txq; |
582 | /* Lock to protect Tx descriptors cleanup */ | ||
583 | spinlock_t tx_clean_lock; | ||
581 | } ____cacheline_internodealigned_in_smp; | 584 | } ____cacheline_internodealigned_in_smp; |
582 | 585 | ||
583 | /* | 586 | /* |
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s { | |||
808 | 811 | ||
809 | #define QLCNIC_ILB_MODE 0x1 | 812 | #define QLCNIC_ILB_MODE 0x1 |
810 | #define QLCNIC_ELB_MODE 0x2 | 813 | #define QLCNIC_ELB_MODE 0x2 |
814 | #define QLCNIC_LB_MODE_MASK 0x3 | ||
811 | 815 | ||
812 | #define QLCNIC_LINKEVENT 0x1 | 816 | #define QLCNIC_LINKEVENT 0x1 |
813 | #define QLCNIC_LB_RESPONSE 0x2 | 817 | #define QLCNIC_LB_RESPONSE 0x2 |
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter { | |||
1093 | struct qlcnic_filter_hash rx_fhash; | 1097 | struct qlcnic_filter_hash rx_fhash; |
1094 | struct list_head vf_mc_list; | 1098 | struct list_head vf_mc_list; |
1095 | 1099 | ||
1096 | spinlock_t tx_clean_lock; | ||
1097 | spinlock_t mac_learn_lock; | 1100 | spinlock_t mac_learn_lock; |
1098 | /* spinlock for catching rcv filters for eswitch traffic */ | 1101 | /* spinlock for catching rcv filters for eswitch traffic */ |
1099 | spinlock_t rx_mac_learn_lock; | 1102 | spinlock_t rx_mac_learn_lock; |
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); | |||
1708 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); | 1711 | void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); |
1709 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); | 1712 | void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); |
1710 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); | 1713 | void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); |
1714 | void qlcnic_update_stats(struct qlcnic_adapter *); | ||
1711 | 1715 | ||
1712 | /* Adapter hardware abstraction */ | 1716 | /* Adapter hardware abstraction */ |
1713 | struct qlcnic_hardware_ops { | 1717 | struct qlcnic_hardware_ops { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b1cb0ffb15c7..f776f99f7915 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data) | |||
447 | 447 | ||
448 | qlcnic_83xx_poll_process_aen(adapter); | 448 | qlcnic_83xx_poll_process_aen(adapter); |
449 | 449 | ||
450 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 450 | if (ahw->diag_test) { |
451 | ahw->diag_cnt++; | 451 | if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) |
452 | ahw->diag_cnt++; | ||
452 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); | 453 | qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); |
453 | return IRQ_HANDLED; | 454 | return IRQ_HANDLED; |
454 | } | 455 | } |
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, | |||
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | 1348 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { |
1348 | /* disable and free mailbox interrupt */ | ||
1349 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
1350 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
1351 | qlcnic_83xx_free_mbx_intr(adapter); | ||
1352 | } | ||
1353 | adapter->ahw->loopback_state = 0; | 1349 | adapter->ahw->loopback_state = 0; |
1354 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); | 1350 | adapter->ahw->hw_ops->setup_link_event(adapter, 1); |
1355 | } | 1351 | } |
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
1363 | { | 1359 | { |
1364 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1360 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1365 | struct qlcnic_host_sds_ring *sds_ring; | 1361 | struct qlcnic_host_sds_ring *sds_ring; |
1366 | int ring, err; | 1362 | int ring; |
1367 | 1363 | ||
1368 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); | 1364 | clear_bit(__QLCNIC_DEV_UP, &adapter->state); |
1369 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 1365 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { |
1370 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { | 1366 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { |
1371 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; | 1367 | sds_ring = &adapter->recv_ctx->sds_rings[ring]; |
1372 | qlcnic_83xx_disable_intr(adapter, sds_ring); | 1368 | if (adapter->flags & QLCNIC_MSIX_ENABLED) |
1373 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) | 1369 | qlcnic_83xx_disable_intr(adapter, sds_ring); |
1374 | qlcnic_83xx_enable_mbx_poll(adapter); | ||
1375 | } | 1370 | } |
1376 | } | 1371 | } |
1377 | 1372 | ||
1378 | qlcnic_fw_destroy_ctx(adapter); | 1373 | qlcnic_fw_destroy_ctx(adapter); |
1379 | qlcnic_detach(adapter); | 1374 | qlcnic_detach(adapter); |
1380 | 1375 | ||
1381 | if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { | ||
1382 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | ||
1383 | err = qlcnic_83xx_setup_mbx_intr(adapter); | ||
1384 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
1385 | if (err) { | ||
1386 | dev_err(&adapter->pdev->dev, | ||
1387 | "%s: failed to setup mbx interrupt\n", | ||
1388 | __func__); | ||
1389 | goto out; | ||
1390 | } | ||
1391 | } | ||
1392 | } | ||
1393 | adapter->ahw->diag_test = 0; | 1376 | adapter->ahw->diag_test = 0; |
1394 | adapter->drv_sds_rings = drv_sds_rings; | 1377 | adapter->drv_sds_rings = drv_sds_rings; |
1395 | 1378 | ||
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, | |||
1399 | if (netif_running(netdev)) | 1382 | if (netif_running(netdev)) |
1400 | __qlcnic_up(adapter, netdev); | 1383 | __qlcnic_up(adapter, netdev); |
1401 | 1384 | ||
1402 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && | ||
1403 | !(adapter->flags & QLCNIC_MSIX_ENABLED)) | ||
1404 | qlcnic_83xx_disable_mbx_poll(adapter); | ||
1405 | out: | 1385 | out: |
1406 | netif_device_attach(netdev); | 1386 | netif_device_attach(netdev); |
1407 | } | 1387 | } |
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
1704 | } | 1684 | } |
1705 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1685 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
1706 | 1686 | ||
1707 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
1708 | if (netif_running(netdev)) { | ||
1709 | netif_carrier_off(netdev); | ||
1710 | netif_tx_stop_all_queues(netdev); | ||
1711 | } | ||
1712 | |||
1713 | ret = qlcnic_do_lb_test(adapter, mode); | 1687 | ret = qlcnic_do_lb_test(adapter, mode); |
1714 | 1688 | ||
1715 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1689 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, | |||
2141 | ahw->link_autoneg = MSB(MSW(data[3])); | 2115 | ahw->link_autoneg = MSB(MSW(data[3])); |
2142 | ahw->module_type = MSB(LSW(data[3])); | 2116 | ahw->module_type = MSB(LSW(data[3])); |
2143 | ahw->has_link_events = 1; | 2117 | ahw->has_link_events = 1; |
2118 | ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; | ||
2144 | qlcnic_advert_link_change(adapter, link_status); | 2119 | qlcnic_advert_link_change(adapter, link_status); |
2145 | } | 2120 | } |
2146 | 2121 | ||
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, | |||
3754 | return; | 3729 | return; |
3755 | } | 3730 | } |
3756 | 3731 | ||
3732 | static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) | ||
3733 | { | ||
3734 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
3735 | u32 offset; | ||
3736 | |||
3737 | offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); | ||
3738 | dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", | ||
3739 | readl(ahw->pci_base0 + offset), | ||
3740 | QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), | ||
3741 | QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), | ||
3742 | QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); | ||
3743 | } | ||
3744 | |||
3757 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | 3745 | static void qlcnic_83xx_mailbox_worker(struct work_struct *work) |
3758 | { | 3746 | { |
3759 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, | 3747 | struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, |
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | |||
3798 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, | 3786 | __func__, cmd->cmd_op, cmd->type, ahw->pci_func, |
3799 | ahw->op_mode); | 3787 | ahw->op_mode); |
3800 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); | 3788 | clear_bit(QLC_83XX_MBX_READY, &mbx->status); |
3789 | qlcnic_dump_mailbox_registers(adapter); | ||
3790 | qlcnic_83xx_get_mbx_data(adapter, cmd); | ||
3801 | qlcnic_dump_mbx(adapter, cmd); | 3791 | qlcnic_dump_mbx(adapter, cmd); |
3802 | qlcnic_83xx_idc_request_reset(adapter, | 3792 | qlcnic_83xx_idc_request_reset(adapter, |
3803 | QLCNIC_FORCE_FW_DUMP_KEY); | 3793 | QLCNIC_FORCE_FW_DUMP_KEY); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 4cae6caa6bfa..a6a33508e401 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, | |||
662 | pci_channel_state_t); | 662 | pci_channel_state_t); |
663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); | 663 | pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); |
664 | void qlcnic_83xx_io_resume(struct pci_dev *); | 664 | void qlcnic_83xx_io_resume(struct pci_dev *); |
665 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); | ||
665 | #endif | 666 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 89208e5b25d6..918e18ddf038 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) | |||
740 | adapter->ahw->idc.err_code = -EIO; | 740 | adapter->ahw->idc.err_code = -EIO; |
741 | dev_err(&adapter->pdev->dev, | 741 | dev_err(&adapter->pdev->dev, |
742 | "%s: Device in unknown state\n", __func__); | 742 | "%s: Device in unknown state\n", __func__); |
743 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | ||
743 | return 0; | 744 | return 0; |
744 | } | 745 | } |
745 | 746 | ||
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
818 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 819 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
819 | struct qlcnic_mailbox *mbx = ahw->mailbox; | 820 | struct qlcnic_mailbox *mbx = ahw->mailbox; |
820 | int ret = 0; | 821 | int ret = 0; |
821 | u32 owner; | ||
822 | u32 val; | 822 | u32 val; |
823 | 823 | ||
824 | /* Perform NIC configuration based ready state entry actions */ | 824 | /* Perform NIC configuration based ready state entry actions */ |
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) | |||
848 | set_bit(__QLCNIC_RESETTING, &adapter->state); | 848 | set_bit(__QLCNIC_RESETTING, &adapter->state); |
849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); | 849 | qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); |
850 | } else { | 850 | } else { |
851 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | 851 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
852 | if (ahw->pci_func == owner) | 852 | __func__); |
853 | qlcnic_dump_fw(adapter); | 853 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); |
854 | } | 854 | } |
855 | return -EIO; | 855 | return -EIO; |
856 | } | 856 | } |
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) | |||
948 | return 0; | 948 | return 0; |
949 | } | 949 | } |
950 | 950 | ||
951 | static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) | 951 | static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) |
952 | { | 952 | { |
953 | dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); | 953 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
954 | u32 val, owner; | ||
955 | |||
956 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | ||
957 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
958 | owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); | ||
959 | if (ahw->pci_func == owner) { | ||
960 | qlcnic_83xx_stop_hw(adapter); | ||
961 | qlcnic_dump_fw(adapter); | ||
962 | } | ||
963 | } | ||
964 | |||
965 | netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", | ||
966 | __func__); | ||
954 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 967 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
955 | adapter->ahw->idc.err_code = -EIO; | 968 | ahw->idc.err_code = -EIO; |
956 | 969 | ||
957 | return 0; | 970 | return; |
958 | } | 971 | } |
959 | 972 | ||
960 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) | 973 | static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) |
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) | |||
1063 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; | 1076 | adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; |
1064 | qlcnic_83xx_periodic_tasks(adapter); | 1077 | qlcnic_83xx_periodic_tasks(adapter); |
1065 | 1078 | ||
1066 | /* Do not reschedule if firmware is in hung state and auto | ||
1067 | * recovery is disabled | ||
1068 | */ | ||
1069 | if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) | ||
1070 | return; | ||
1071 | |||
1072 | /* Re-schedule the function */ | 1079 | /* Re-schedule the function */ |
1073 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) | 1080 | if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) |
1074 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, | 1081 | qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, |
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) | |||
1219 | } | 1226 | } |
1220 | 1227 | ||
1221 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 1228 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
1222 | if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || | 1229 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { |
1223 | !qlcnic_auto_fw_reset) { | 1230 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", |
1224 | dev_err(&adapter->pdev->dev, | 1231 | __func__); |
1225 | "%s:failed, device in non reset mode\n", __func__); | 1232 | qlcnic_83xx_idc_enter_failed_state(adapter, 0); |
1226 | qlcnic_83xx_unlock_driver(adapter); | 1233 | qlcnic_83xx_unlock_driver(adapter); |
1227 | return; | 1234 | return; |
1228 | } | 1235 | } |
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) | |||
1254 | if (size & 0xF) | 1261 | if (size & 0xF) |
1255 | size = (size + 16) & ~0xF; | 1262 | size = (size + 16) & ~0xF; |
1256 | 1263 | ||
1257 | p_cache = kzalloc(size, GFP_KERNEL); | 1264 | p_cache = vzalloc(size); |
1258 | if (p_cache == NULL) | 1265 | if (p_cache == NULL) |
1259 | return -ENOMEM; | 1266 | return -ENOMEM; |
1260 | 1267 | ||
1261 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, | 1268 | ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, |
1262 | size / sizeof(u32)); | 1269 | size / sizeof(u32)); |
1263 | if (ret) { | 1270 | if (ret) { |
1264 | kfree(p_cache); | 1271 | vfree(p_cache); |
1265 | return ret; | 1272 | return ret; |
1266 | } | 1273 | } |
1267 | /* 16 byte write to MS memory */ | 1274 | /* 16 byte write to MS memory */ |
1268 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, | 1275 | ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, |
1269 | size / 16); | 1276 | size / 16); |
1270 | if (ret) { | 1277 | if (ret) { |
1271 | kfree(p_cache); | 1278 | vfree(p_cache); |
1272 | return ret; | 1279 | return ret; |
1273 | } | 1280 | } |
1274 | kfree(p_cache); | 1281 | vfree(p_cache); |
1275 | 1282 | ||
1276 | return ret; | 1283 | return ret; |
1277 | } | 1284 | } |
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, | |||
1939 | p_dev->ahw->reset.seq_index = index; | 1946 | p_dev->ahw->reset.seq_index = index; |
1940 | } | 1947 | } |
1941 | 1948 | ||
1942 | static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) | 1949 | void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) |
1943 | { | 1950 | { |
1944 | p_dev->ahw->reset.seq_index = 0; | 1951 | p_dev->ahw->reset.seq_index = 0; |
1945 | 1952 | ||
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) | |||
1994 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); | 2001 | val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); |
1995 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) | 2002 | if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) |
1996 | qlcnic_dump_fw(adapter); | 2003 | qlcnic_dump_fw(adapter); |
2004 | |||
2005 | if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { | ||
2006 | netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", | ||
2007 | __func__); | ||
2008 | qlcnic_83xx_idc_enter_failed_state(adapter, 1); | ||
2009 | return err; | ||
2010 | } | ||
2011 | |||
1997 | qlcnic_83xx_init_hw(adapter); | 2012 | qlcnic_83xx_init_hw(adapter); |
1998 | 2013 | ||
1999 | if (qlcnic_83xx_copy_bootloader(adapter)) | 2014 | if (qlcnic_83xx_copy_bootloader(adapter)) |
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) | |||
2073 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 2088 | ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
2074 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; | 2089 | adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; |
2075 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; | 2090 | ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; |
2076 | adapter->max_sds_rings = ahw->max_rx_ques; | 2091 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; |
2077 | adapter->max_tx_rings = ahw->max_tx_ques; | 2092 | adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; |
2078 | } else { | 2093 | } else { |
2079 | return -EIO; | 2094 | return -EIO; |
2080 | } | 2095 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index b36c02fafcfd..6b08194aa0d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { | |||
167 | 167 | ||
168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) | 168 | #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) |
169 | 169 | ||
170 | static inline int qlcnic_82xx_statistics(void) | 170 | static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) |
171 | { | 171 | { |
172 | return ARRAY_SIZE(qlcnic_device_gstrings_stats) + | 172 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); | 173 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
174 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
174 | } | 175 | } |
175 | 176 | ||
176 | static inline int qlcnic_83xx_statistics(void) | 177 | static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) |
177 | { | 178 | { |
178 | return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | 179 | return ARRAY_SIZE(qlcnic_gstrings_stats) + |
180 | ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + | ||
179 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + | 181 | ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + |
180 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); | 182 | ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + |
183 | QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; | ||
181 | } | 184 | } |
182 | 185 | ||
183 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) | 186 | static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) |
184 | { | 187 | { |
185 | if (qlcnic_82xx_check(adapter)) | 188 | int len = -1; |
186 | return qlcnic_82xx_statistics(); | 189 | |
187 | else if (qlcnic_83xx_check(adapter)) | 190 | if (qlcnic_82xx_check(adapter)) { |
188 | return qlcnic_83xx_statistics(); | 191 | len = qlcnic_82xx_statistics(adapter); |
189 | else | 192 | if (adapter->flags & QLCNIC_ESWITCH_ENABLED) |
190 | return -1; | 193 | len += ARRAY_SIZE(qlcnic_device_gstrings_stats); |
194 | } else if (qlcnic_83xx_check(adapter)) { | ||
195 | len = qlcnic_83xx_statistics(adapter); | ||
196 | } | ||
197 | |||
198 | return len; | ||
191 | } | 199 | } |
192 | 200 | ||
193 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 | 201 | #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 |
@@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev, | |||
667 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, | 675 | static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, |
668 | u8 rx_ring, u8 tx_ring) | 676 | u8 rx_ring, u8 tx_ring) |
669 | { | 677 | { |
678 | if (rx_ring == 0 || tx_ring == 0) | ||
679 | return -EINVAL; | ||
680 | |||
670 | if (rx_ring != 0) { | 681 | if (rx_ring != 0) { |
671 | if (rx_ring > adapter->max_sds_rings) { | 682 | if (rx_ring > adapter->max_sds_rings) { |
672 | netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | 683 | netdev_err(adapter->netdev, |
684 | "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", | ||
673 | rx_ring, adapter->max_sds_rings); | 685 | rx_ring, adapter->max_sds_rings); |
674 | return -EINVAL; | 686 | return -EINVAL; |
675 | } | 687 | } |
676 | } | 688 | } |
677 | 689 | ||
678 | if (tx_ring != 0) { | 690 | if (tx_ring != 0) { |
679 | if (qlcnic_82xx_check(adapter) && | 691 | if (tx_ring > adapter->max_tx_rings) { |
680 | (tx_ring > adapter->max_tx_rings)) { | ||
681 | netdev_err(adapter->netdev, | 692 | netdev_err(adapter->netdev, |
682 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", | 693 | "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", |
683 | tx_ring, adapter->max_tx_rings); | 694 | tx_ring, adapter->max_tx_rings); |
684 | return -EINVAL; | 695 | return -EINVAL; |
685 | } | 696 | } |
686 | |||
687 | if (qlcnic_83xx_check(adapter) && | ||
688 | (tx_ring > QLCNIC_SINGLE_RING)) { | ||
689 | netdev_err(adapter->netdev, | ||
690 | "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", | ||
691 | tx_ring, QLCNIC_SINGLE_RING); | ||
692 | return -EINVAL; | ||
693 | } | ||
694 | } | 697 | } |
695 | 698 | ||
696 | return 0; | 699 | return 0; |
@@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) | |||
925 | 928 | ||
926 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) | 929 | static int qlcnic_get_sset_count(struct net_device *dev, int sset) |
927 | { | 930 | { |
928 | int len; | ||
929 | 931 | ||
930 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 932 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
931 | switch (sset) { | 933 | switch (sset) { |
932 | case ETH_SS_TEST: | 934 | case ETH_SS_TEST: |
933 | return QLCNIC_TEST_LEN; | 935 | return QLCNIC_TEST_LEN; |
934 | case ETH_SS_STATS: | 936 | case ETH_SS_STATS: |
935 | len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; | 937 | return qlcnic_dev_statistics_len(adapter); |
936 | if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || | ||
937 | qlcnic_83xx_check(adapter)) | ||
938 | return len; | ||
939 | return qlcnic_82xx_statistics(); | ||
940 | default: | 938 | default: |
941 | return -EOPNOTSUPP; | 939 | return -EOPNOTSUPP; |
942 | } | 940 | } |
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev) | |||
948 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 946 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
949 | struct qlcnic_cmd_args cmd; | 947 | struct qlcnic_cmd_args cmd; |
950 | int ret, drv_sds_rings = adapter->drv_sds_rings; | 948 | int ret, drv_sds_rings = adapter->drv_sds_rings; |
949 | int drv_tx_rings = adapter->drv_tx_rings; | ||
951 | 950 | ||
952 | if (qlcnic_83xx_check(adapter)) | 951 | if (qlcnic_83xx_check(adapter)) |
953 | return qlcnic_83xx_interrupt_test(netdev); | 952 | return qlcnic_83xx_interrupt_test(netdev); |
@@ -980,6 +979,7 @@ free_diag_res: | |||
980 | 979 | ||
981 | clear_diag_irq: | 980 | clear_diag_irq: |
982 | adapter->drv_sds_rings = drv_sds_rings; | 981 | adapter->drv_sds_rings = drv_sds_rings; |
982 | adapter->drv_tx_rings = drv_tx_rings; | ||
983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 983 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
984 | 984 | ||
985 | return ret; | 985 | return ret; |
@@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) | |||
1270 | return data; | 1270 | return data; |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | static void qlcnic_update_stats(struct qlcnic_adapter *adapter) | 1273 | void qlcnic_update_stats(struct qlcnic_adapter *adapter) |
1274 | { | 1274 | { |
1275 | struct qlcnic_host_tx_ring *tx_ring; | 1275 | struct qlcnic_host_tx_ring *tx_ring; |
1276 | int ring; | 1276 | int ring; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e9c21e5d0ca9..c4262c23ed7c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | |||
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
134 | struct qlcnic_skb_frag *buffrag; | 134 | struct qlcnic_skb_frag *buffrag; |
135 | int i, j; | 135 | int i, j; |
136 | 136 | ||
137 | spin_lock(&tx_ring->tx_clean_lock); | ||
138 | |||
137 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
138 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
139 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, | |||
157 | } | 159 | } |
158 | cmd_buf++; | 160 | cmd_buf++; |
159 | } | 161 | } |
162 | |||
163 | spin_unlock(&tx_ring->tx_clean_lock); | ||
160 | } | 164 | } |
161 | 165 | ||
162 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) | 166 | void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0149c9495347..ad1531ae3aa8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) | |||
687 | if (adapter->ahw->linkup && !linkup) { | 687 | if (adapter->ahw->linkup && !linkup) { |
688 | netdev_info(netdev, "NIC Link is down\n"); | 688 | netdev_info(netdev, "NIC Link is down\n"); |
689 | adapter->ahw->linkup = 0; | 689 | adapter->ahw->linkup = 0; |
690 | if (netif_running(netdev)) { | 690 | netif_carrier_off(netdev); |
691 | netif_carrier_off(netdev); | ||
692 | netif_tx_stop_all_queues(netdev); | ||
693 | } | ||
694 | } else if (!adapter->ahw->linkup && linkup) { | 691 | } else if (!adapter->ahw->linkup && linkup) { |
692 | /* Do not advertise Link up if the port is in loopback mode */ | ||
693 | if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) | ||
694 | return; | ||
695 | |||
695 | netdev_info(netdev, "NIC Link is up\n"); | 696 | netdev_info(netdev, "NIC Link is up\n"); |
696 | adapter->ahw->linkup = 1; | 697 | adapter->ahw->linkup = 1; |
697 | if (netif_running(netdev)) { | 698 | netif_carrier_on(netdev); |
698 | netif_carrier_on(netdev); | ||
699 | netif_wake_queue(netdev); | ||
700 | } | ||
701 | } | 699 | } |
702 | } | 700 | } |
703 | 701 | ||
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
784 | struct net_device *netdev = adapter->netdev; | 782 | struct net_device *netdev = adapter->netdev; |
785 | struct qlcnic_skb_frag *frag; | 783 | struct qlcnic_skb_frag *frag; |
786 | 784 | ||
787 | if (!spin_trylock(&adapter->tx_clean_lock)) | 785 | if (!spin_trylock(&tx_ring->tx_clean_lock)) |
788 | return 1; | 786 | return 1; |
789 | 787 | ||
790 | sw_consumer = tx_ring->sw_consumer; | 788 | sw_consumer = tx_ring->sw_consumer; |
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
813 | break; | 811 | break; |
814 | } | 812 | } |
815 | 813 | ||
814 | tx_ring->sw_consumer = sw_consumer; | ||
815 | |||
816 | if (count && netif_running(netdev)) { | 816 | if (count && netif_running(netdev)) { |
817 | tx_ring->sw_consumer = sw_consumer; | ||
818 | smp_mb(); | 817 | smp_mb(); |
819 | if (netif_tx_queue_stopped(tx_ring->txq) && | 818 | if (netif_tx_queue_stopped(tx_ring->txq) && |
820 | netif_carrier_ok(netdev)) { | 819 | netif_carrier_ok(netdev)) { |
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, | |||
840 | */ | 839 | */ |
841 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); | 840 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); |
842 | done = (sw_consumer == hw_consumer); | 841 | done = (sw_consumer == hw_consumer); |
843 | spin_unlock(&adapter->tx_clean_lock); | 842 | |
843 | spin_unlock(&tx_ring->tx_clean_lock); | ||
844 | 844 | ||
845 | return done; | 845 | return done; |
846 | } | 846 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 05c1eef8df13..550791b8fbae 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) | |||
1178 | } else { | 1178 | } else { |
1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; | 1179 | adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; |
1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; | 1180 | adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; |
1181 | adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; | ||
1181 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; | 1182 | adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; |
1182 | } | 1183 | } |
1183 | 1184 | ||
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
1755 | if (qlcnic_sriov_vf_check(adapter)) | 1756 | if (qlcnic_sriov_vf_check(adapter)) |
1756 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); | 1757 | qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); |
1757 | smp_mb(); | 1758 | smp_mb(); |
1758 | spin_lock(&adapter->tx_clean_lock); | ||
1759 | netif_carrier_off(netdev); | 1759 | netif_carrier_off(netdev); |
1760 | adapter->ahw->linkup = 0; | 1760 | adapter->ahw->linkup = 0; |
1761 | netif_tx_disable(netdev); | 1761 | netif_tx_disable(netdev); |
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
1776 | 1776 | ||
1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) | 1777 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) |
1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); | 1778 | qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); |
1779 | spin_unlock(&adapter->tx_clean_lock); | ||
1780 | } | 1779 | } |
1781 | 1780 | ||
1782 | /* Usage: During suspend and firmware recovery module */ | 1781 | /* Usage: During suspend and firmware recovery module */ |
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) | |||
1940 | qlcnic_detach(adapter); | 1939 | qlcnic_detach(adapter); |
1941 | 1940 | ||
1942 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | 1941 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; |
1943 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; | ||
1944 | adapter->ahw->diag_test = test; | 1942 | adapter->ahw->diag_test = test; |
1945 | adapter->ahw->linkup = 0; | 1943 | adapter->ahw->linkup = 0; |
1946 | 1944 | ||
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, | |||
2172 | } | 2170 | } |
2173 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); | 2171 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); |
2174 | tx_ring->cmd_buf_arr = cmd_buf_arr; | 2172 | tx_ring->cmd_buf_arr = cmd_buf_arr; |
2173 | spin_lock_init(&tx_ring->tx_clean_lock); | ||
2175 | } | 2174 | } |
2176 | 2175 | ||
2177 | if (qlcnic_83xx_check(adapter) || | 2176 | if (qlcnic_83xx_check(adapter) || |
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2299 | rwlock_init(&adapter->ahw->crb_lock); | 2298 | rwlock_init(&adapter->ahw->crb_lock); |
2300 | mutex_init(&adapter->ahw->mem_lock); | 2299 | mutex_init(&adapter->ahw->mem_lock); |
2301 | 2300 | ||
2302 | spin_lock_init(&adapter->tx_clean_lock); | ||
2303 | INIT_LIST_HEAD(&adapter->mac_list); | 2301 | INIT_LIST_HEAD(&adapter->mac_list); |
2304 | 2302 | ||
2305 | qlcnic_register_dcb(adapter); | 2303 | qlcnic_register_dcb(adapter); |
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) | |||
2782 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 2780 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
2783 | struct net_device_stats *stats = &netdev->stats; | 2781 | struct net_device_stats *stats = &netdev->stats; |
2784 | 2782 | ||
2783 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) | ||
2784 | qlcnic_update_stats(adapter); | ||
2785 | |||
2785 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; | 2786 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; |
2786 | stats->tx_packets = adapter->stats.xmitfinished; | 2787 | stats->tx_packets = adapter->stats.xmitfinished; |
2787 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; | 2788 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; |
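The qlcnic hunks above replace the single adapter-wide tx_clean_lock with a spinlock embedded in each Tx ring: it is initialized in qlcnic_alloc_tx_rings(), taken in qlcnic_release_tx_buffers(), and try-locked in qlcnic_process_cmd_ring(). A minimal sketch of that per-ring locking pattern follows; the demo_* names are illustrative and not the driver's actual structures, and error handling is elided.

/* Sketch only: per-ring cleanup lock, modelled on the change above. */
#include <linux/spinlock.h>

struct demo_tx_ring {
	spinlock_t tx_clean_lock;	/* serializes cleanup on this ring only */
	unsigned int num_desc;
};

static void demo_tx_ring_init(struct demo_tx_ring *ring)
{
	spin_lock_init(&ring->tx_clean_lock);	/* one lock per ring */
}

static void demo_release_tx_buffers(struct demo_tx_ring *ring)
{
	spin_lock(&ring->tx_clean_lock);	/* exclude the completion path */
	/* ... unmap and free ring->num_desc descriptors ... */
	spin_unlock(&ring->tx_clean_lock);
}

static int demo_process_cmd_ring(struct demo_tx_ring *ring)
{
	if (!spin_trylock(&ring->tx_clean_lock))	/* someone else is cleaning */
		return 1;
	/* ... reclaim completed descriptors ... */
	spin_unlock(&ring->tx_clean_lock);
	return 0;
}

Keeping the lock inside the ring lets several Tx queues be cleaned concurrently instead of contending on one adapter-wide lock, which matters once drv_tx_rings can be greater than one.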
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 686f460b1502..024f8161d2fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
75 | num_vfs = sriov->num_vfs; | 75 | num_vfs = sriov->num_vfs; |
76 | max = num_vfs + 1; | 76 | max = num_vfs + 1; |
77 | info->bit_offsets = 0xffff; | 77 | info->bit_offsets = 0xffff; |
78 | info->max_tx_ques = res->num_tx_queues / max; | ||
79 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; | 78 | info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; |
80 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; | 79 | num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; |
81 | 80 | ||
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
86 | info->max_tx_mac_filters = temp; | 85 | info->max_tx_mac_filters = temp; |
87 | info->min_tx_bw = 0; | 86 | info->min_tx_bw = 0; |
88 | info->max_tx_bw = MAX_BW; | 87 | info->max_tx_bw = MAX_BW; |
88 | info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; | ||
89 | } else { | 89 | } else { |
90 | id = qlcnic_sriov_func_to_index(adapter, func); | 90 | id = qlcnic_sriov_func_to_index(adapter, func); |
91 | if (id < 0) | 91 | if (id < 0) |
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, | |||
95 | info->max_tx_bw = vp->max_tx_bw; | 95 | info->max_tx_bw = vp->max_tx_bw; |
96 | info->max_rx_ucast_mac_filters = num_vf_macs; | 96 | info->max_rx_ucast_mac_filters = num_vf_macs; |
97 | info->max_tx_mac_filters = num_vf_macs; | 97 | info->max_tx_mac_filters = num_vf_macs; |
98 | info->max_tx_ques = QLCNIC_SINGLE_RING; | ||
98 | } | 99 | } |
99 | 100 | ||
100 | info->max_rx_ip_addr = res->num_destip / max; | 101 | info->max_rx_ip_addr = res->num_destip / max; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 0c9c4e895595..03517478e589 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "1.00.00.33" | 21 | #define DRV_VERSION "1.00.00.34" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 0780e039b271..8dee1beb9854 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | |||
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { | |||
181 | }; | 181 | }; |
182 | #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) | 182 | #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) |
183 | #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) | 183 | #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) |
184 | #define QLGE_RCV_MAC_ERR_STATS 7 | ||
184 | 185 | ||
185 | static int ql_update_ring_coalescing(struct ql_adapter *qdev) | 186 | static int ql_update_ring_coalescing(struct ql_adapter *qdev) |
186 | { | 187 | { |
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev) | |||
280 | iter++; | 281 | iter++; |
281 | } | 282 | } |
282 | 283 | ||
284 | /* Update receive mac error statistics */ | ||
285 | iter += QLGE_RCV_MAC_ERR_STATS; | ||
286 | |||
283 | /* | 287 | /* |
284 | * Get Per-priority TX pause frame counter statistics. | 288 | * Get Per-priority TX pause frame counter statistics. |
285 | */ | 289 | */ |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index a245dc18d769..449f506d2e8f 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, | |||
2376 | netdev_features_t features) | 2376 | netdev_features_t features) |
2377 | { | 2377 | { |
2378 | int err; | 2378 | int err; |
2379 | /* | ||
2380 | * Since there is no support for separate rx/tx vlan accel | ||
2381 | * enable/disable make sure tx flag is always in same state as rx. | ||
2382 | */ | ||
2383 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | ||
2384 | features |= NETIF_F_HW_VLAN_CTAG_TX; | ||
2385 | else | ||
2386 | features &= ~NETIF_F_HW_VLAN_CTAG_TX; | ||
2387 | 2379 | ||
2388 | /* Update the behavior of vlan accel in the adapter */ | 2380 | /* Update the behavior of vlan accel in the adapter */ |
2389 | err = qlge_update_hw_vlan_features(ndev, features); | 2381 | err = qlge_update_hw_vlan_features(ndev, features); |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2e27837ce6a2..fd844b53e385 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx) | |||
585 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | 585 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + |
586 | efx->type->rx_buffer_padding); | 586 | efx->type->rx_buffer_padding); |
587 | rx_buf_len = (sizeof(struct efx_rx_page_state) + | 587 | rx_buf_len = (sizeof(struct efx_rx_page_state) + |
588 | NET_IP_ALIGN + efx->rx_dma_len); | 588 | efx->rx_ip_align + efx->rx_dma_len); |
589 | if (rx_buf_len <= PAGE_SIZE) { | 589 | if (rx_buf_len <= PAGE_SIZE) { |
590 | efx->rx_scatter = efx->type->always_rx_scatter; | 590 | efx->rx_scatter = efx->type->always_rx_scatter; |
591 | efx->rx_buffer_order = 0; | 591 | efx->rx_buffer_order = 0; |
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx) | |||
645 | WARN_ON(channel->rx_pkt_n_frags); | 645 | WARN_ON(channel->rx_pkt_n_frags); |
646 | } | 646 | } |
647 | 647 | ||
648 | efx_ptp_start_datapath(efx); | ||
649 | |||
648 | if (netif_device_present(efx->net_dev)) | 650 | if (netif_device_present(efx->net_dev)) |
649 | netif_tx_wake_all_queues(efx->net_dev); | 651 | netif_tx_wake_all_queues(efx->net_dev); |
650 | } | 652 | } |
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx) | |||
659 | EFX_ASSERT_RESET_SERIALISED(efx); | 661 | EFX_ASSERT_RESET_SERIALISED(efx); |
660 | BUG_ON(efx->port_enabled); | 662 | BUG_ON(efx->port_enabled); |
661 | 663 | ||
664 | efx_ptp_stop_datapath(efx); | ||
665 | |||
662 | /* Stop RX refill */ | 666 | /* Stop RX refill */ |
663 | efx_for_each_channel(channel, efx) { | 667 | efx_for_each_channel(channel, efx) { |
664 | efx_for_each_channel_rx_queue(rx_queue, channel) | 668 | efx_for_each_channel_rx_queue(rx_queue, channel) |
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx, | |||
2540 | 2544 | ||
2541 | efx->net_dev = net_dev; | 2545 | efx->net_dev = net_dev; |
2542 | efx->rx_prefix_size = efx->type->rx_prefix_size; | 2546 | efx->rx_prefix_size = efx->type->rx_prefix_size; |
2547 | efx->rx_ip_align = | ||
2548 | NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; | ||
2543 | efx->rx_packet_hash_offset = | 2549 | efx->rx_packet_hash_offset = |
2544 | efx->type->rx_hash_offset - efx->type->rx_prefix_size; | 2550 | efx->type->rx_hash_offset - efx->type->rx_prefix_size; |
2545 | spin_lock_init(&efx->stats_lock); | 2551 | spin_lock_init(&efx->stats_lock); |
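The rx_ip_align field added above is computed as (rx_prefix_size + NET_IP_ALIGN) % 4 when NET_IP_ALIGN is non-zero, so the receive buffer offset compensates for the hardware RX prefix and the IP header still lands on a 4-byte boundary. A small self-contained sketch of the arithmetic follows; the prefix sizes of 14 and 16 bytes are assumptions for illustration only, and NET_IP_ALIGN is restated locally so the example builds in user space.

/* Sketch only: the rx_ip_align rule from efx_init_struct() above. */
#include <stdio.h>

#define NET_IP_ALIGN	2	/* typical value where unaligned loads are costly */

static unsigned int rx_ip_align(unsigned int rx_prefix_size)
{
	return NET_IP_ALIGN ? (rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
}

int main(void)
{
	/* 14- and 16-byte prefixes are assumed sizes for illustration. */
	printf("prefix 14 -> offset %u\n", rx_ip_align(14));	/* prints 0 */
	printf("prefix 16 -> offset %u\n", rx_ip_align(16));	/* prints 2 */
	return 0;
}

In both cases offset + prefix + 14-byte Ethernet header is a multiple of 4, which is what NET_IP_ALIGN is meant to achieve; efx_init_rx_buffers() then adds this offset to the DMA address and page offset instead of a bare NET_IP_ALIGN.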
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 366c8e3e3784..4b0bd8a1514d 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param { | |||
50 | static void efx_mcdi_timeout_async(unsigned long context); | 50 | static void efx_mcdi_timeout_async(unsigned long context); |
51 | static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | 51 | static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, |
52 | bool *was_attached_out); | 52 | bool *was_attached_out); |
53 | static bool efx_mcdi_poll_once(struct efx_nic *efx); | ||
53 | 54 | ||
54 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | 55 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) |
55 | { | 56 | { |
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) | |||
237 | } | 238 | } |
238 | } | 239 | } |
239 | 240 | ||
241 | static bool efx_mcdi_poll_once(struct efx_nic *efx) | ||
242 | { | ||
243 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
244 | |||
245 | rmb(); | ||
246 | if (!efx->type->mcdi_poll_response(efx)) | ||
247 | return false; | ||
248 | |||
249 | spin_lock_bh(&mcdi->iface_lock); | ||
250 | efx_mcdi_read_response_header(efx); | ||
251 | spin_unlock_bh(&mcdi->iface_lock); | ||
252 | |||
253 | return true; | ||
254 | } | ||
255 | |||
240 | static int efx_mcdi_poll(struct efx_nic *efx) | 256 | static int efx_mcdi_poll(struct efx_nic *efx) |
241 | { | 257 | { |
242 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 258 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
272 | 288 | ||
273 | time = jiffies; | 289 | time = jiffies; |
274 | 290 | ||
275 | rmb(); | 291 | if (efx_mcdi_poll_once(efx)) |
276 | if (efx->type->mcdi_poll_response(efx)) | ||
277 | break; | 292 | break; |
278 | 293 | ||
279 | if (time_after(time, finish)) | 294 | if (time_after(time, finish)) |
280 | return -ETIMEDOUT; | 295 | return -ETIMEDOUT; |
281 | } | 296 | } |
282 | 297 | ||
283 | spin_lock_bh(&mcdi->iface_lock); | ||
284 | efx_mcdi_read_response_header(efx); | ||
285 | spin_unlock_bh(&mcdi->iface_lock); | ||
286 | |||
287 | /* Return rc=0 like wait_event_timeout() */ | 298 | /* Return rc=0 like wait_event_timeout() */ |
288 | return 0; | 299 | return 0; |
289 | } | 300 | } |
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, | |||
619 | rc = efx_mcdi_await_completion(efx); | 630 | rc = efx_mcdi_await_completion(efx); |
620 | 631 | ||
621 | if (rc != 0) { | 632 | if (rc != 0) { |
633 | netif_err(efx, hw, efx->net_dev, | ||
634 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
635 | cmd, (int)inlen, mcdi->mode); | ||
636 | |||
637 | if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { | ||
638 | netif_err(efx, hw, efx->net_dev, | ||
639 | "MCDI request was completed without an event\n"); | ||
640 | rc = 0; | ||
641 | } | ||
642 | |||
622 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | 643 | /* Close the race with efx_mcdi_ev_cpl() executing just too late |
623 | * and completing a request we've just cancelled, by ensuring | 644 | * and completing a request we've just cancelled, by ensuring |
624 | * that the seqno check therein fails. | 645 | * that the seqno check therein fails. |
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, | |||
627 | ++mcdi->seqno; | 648 | ++mcdi->seqno; |
628 | ++mcdi->credits; | 649 | ++mcdi->credits; |
629 | spin_unlock_bh(&mcdi->iface_lock); | 650 | spin_unlock_bh(&mcdi->iface_lock); |
651 | } | ||
630 | 652 | ||
631 | netif_err(efx, hw, efx->net_dev, | 653 | if (rc == 0) { |
632 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
633 | cmd, (int)inlen, mcdi->mode); | ||
634 | } else { | ||
635 | size_t hdr_len, data_len; | 654 | size_t hdr_len, data_len; |
636 | 655 | ||
637 | /* At the very least we need a memory barrier here to ensure | 656 | /* At the very least we need a memory barrier here to ensure |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b14a717ac3e8..542a0d252ae0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -683,6 +683,8 @@ struct vfdi_status; | |||
683 | * @n_channels: Number of channels in use | 683 | * @n_channels: Number of channels in use |
684 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) | 684 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) |
685 | * @n_tx_channels: Number of channels used for TX | 685 | * @n_tx_channels: Number of channels used for TX |
686 | * @rx_ip_align: RX DMA address offset to have IP header aligned | ||
687 | * in accordance with NET_IP_ALIGN | ||
686 | * @rx_dma_len: Current maximum RX DMA length | 688 | * @rx_dma_len: Current maximum RX DMA length |
687 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | 689 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer |
688 | * @rx_buffer_truesize: Amortised allocation size of an RX buffer, | 690 | * @rx_buffer_truesize: Amortised allocation size of an RX buffer, |
@@ -816,6 +818,7 @@ struct efx_nic { | |||
816 | unsigned rss_spread; | 818 | unsigned rss_spread; |
817 | unsigned tx_channel_offset; | 819 | unsigned tx_channel_offset; |
818 | unsigned n_tx_channels; | 820 | unsigned n_tx_channels; |
821 | unsigned int rx_ip_align; | ||
819 | unsigned int rx_dma_len; | 822 | unsigned int rx_dma_len; |
820 | unsigned int rx_buffer_order; | 823 | unsigned int rx_buffer_order; |
821 | unsigned int rx_buffer_truesize; | 824 | unsigned int rx_buffer_truesize; |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 11b6112d9249..91c63ec79c5f 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); | |||
560 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); | 560 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); |
561 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); | 561 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); |
562 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); | 562 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); |
563 | void efx_ptp_start_datapath(struct efx_nic *efx); | ||
564 | void efx_ptp_stop_datapath(struct efx_nic *efx); | ||
563 | 565 | ||
564 | extern const struct efx_nic_type falcon_a1_nic_type; | 566 | extern const struct efx_nic_type falcon_a1_nic_type; |
565 | extern const struct efx_nic_type falcon_b0_nic_type; | 567 | extern const struct efx_nic_type falcon_b0_nic_type; |
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 03acf57df045..3dd39dcfe36b 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -220,6 +220,7 @@ struct efx_ptp_timeset { | |||
220 | * @evt_list: List of MC receive events awaiting packets | 220 | * @evt_list: List of MC receive events awaiting packets |
221 | * @evt_free_list: List of free events | 221 | * @evt_free_list: List of free events |
222 | * @evt_lock: Lock for manipulating evt_list and evt_free_list | 222 | * @evt_lock: Lock for manipulating evt_list and evt_free_list |
223 | * @evt_overflow: Boolean indicating that event list has overflowed | ||
223 | * @rx_evts: Instantiated events (on evt_list and evt_free_list) | 224 | * @rx_evts: Instantiated events (on evt_list and evt_free_list) |
224 | * @workwq: Work queue for processing pending PTP operations | 225 | * @workwq: Work queue for processing pending PTP operations |
225 | * @work: Work task | 226 | * @work: Work task |
@@ -270,6 +271,7 @@ struct efx_ptp_data { | |||
270 | struct list_head evt_list; | 271 | struct list_head evt_list; |
271 | struct list_head evt_free_list; | 272 | struct list_head evt_free_list; |
272 | spinlock_t evt_lock; | 273 | spinlock_t evt_lock; |
274 | bool evt_overflow; | ||
273 | struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; | 275 | struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; |
274 | struct workqueue_struct *workwq; | 276 | struct workqueue_struct *workwq; |
275 | struct work_struct work; | 277 | struct work_struct work; |
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) | |||
635 | } | 637 | } |
636 | } | 638 | } |
637 | } | 639 | } |
640 | /* If the event overflow flag is set and the event list is now empty | ||
641 | * clear the flag to re-enable the overflow warning message. | ||
642 | */ | ||
643 | if (ptp->evt_overflow && list_empty(&ptp->evt_list)) | ||
644 | ptp->evt_overflow = false; | ||
638 | spin_unlock_bh(&ptp->evt_lock); | 645 | spin_unlock_bh(&ptp->evt_lock); |
639 | } | 646 | } |
640 | 647 | ||
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, | |||
676 | break; | 683 | break; |
677 | } | 684 | } |
678 | } | 685 | } |
686 | /* If the event overflow flag is set and the event list is now empty | ||
687 | * clear the flag to re-enable the overflow warning message. | ||
688 | */ | ||
689 | if (ptp->evt_overflow && list_empty(&ptp->evt_list)) | ||
690 | ptp->evt_overflow = false; | ||
679 | spin_unlock_bh(&ptp->evt_lock); | 691 | spin_unlock_bh(&ptp->evt_lock); |
680 | 692 | ||
681 | return rc; | 693 | return rc; |
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) | |||
705 | __skb_queue_tail(q, skb); | 717 | __skb_queue_tail(q, skb); |
706 | } else if (time_after(jiffies, match->expiry)) { | 718 | } else if (time_after(jiffies, match->expiry)) { |
707 | match->state = PTP_PACKET_STATE_TIMED_OUT; | 719 | match->state = PTP_PACKET_STATE_TIMED_OUT; |
708 | netif_warn(efx, rx_err, efx->net_dev, | 720 | if (net_ratelimit()) |
709 | "PTP packet - no timestamp seen\n"); | 721 | netif_warn(efx, rx_err, efx->net_dev, |
722 | "PTP packet - no timestamp seen\n"); | ||
710 | __skb_queue_tail(q, skb); | 723 | __skb_queue_tail(q, skb); |
711 | } else { | 724 | } else { |
712 | /* Replace unprocessed entry and stop */ | 725 | /* Replace unprocessed entry and stop */ |
@@ -788,9 +801,14 @@ fail: | |||
788 | static int efx_ptp_stop(struct efx_nic *efx) | 801 | static int efx_ptp_stop(struct efx_nic *efx) |
789 | { | 802 | { |
790 | struct efx_ptp_data *ptp = efx->ptp_data; | 803 | struct efx_ptp_data *ptp = efx->ptp_data; |
791 | int rc = efx_ptp_disable(efx); | ||
792 | struct list_head *cursor; | 804 | struct list_head *cursor; |
793 | struct list_head *next; | 805 | struct list_head *next; |
806 | int rc; | ||
807 | |||
808 | if (ptp == NULL) | ||
809 | return 0; | ||
810 | |||
811 | rc = efx_ptp_disable(efx); | ||
794 | 812 | ||
795 | if (ptp->rxfilter_installed) { | 813 | if (ptp->rxfilter_installed) { |
796 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | 814 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, |
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx) | |||
809 | list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { | 827 | list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { |
810 | list_move(cursor, &efx->ptp_data->evt_free_list); | 828 | list_move(cursor, &efx->ptp_data->evt_free_list); |
811 | } | 829 | } |
830 | ptp->evt_overflow = false; | ||
812 | spin_unlock_bh(&efx->ptp_data->evt_lock); | 831 | spin_unlock_bh(&efx->ptp_data->evt_lock); |
813 | 832 | ||
814 | return rc; | 833 | return rc; |
815 | } | 834 | } |
816 | 835 | ||
836 | static int efx_ptp_restart(struct efx_nic *efx) | ||
837 | { | ||
838 | if (efx->ptp_data && efx->ptp_data->enabled) | ||
839 | return efx_ptp_start(efx); | ||
840 | return 0; | ||
841 | } | ||
842 | |||
817 | static void efx_ptp_pps_worker(struct work_struct *work) | 843 | static void efx_ptp_pps_worker(struct work_struct *work) |
818 | { | 844 | { |
819 | struct efx_ptp_data *ptp = | 845 | struct efx_ptp_data *ptp = |
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) | |||
901 | spin_lock_init(&ptp->evt_lock); | 927 | spin_lock_init(&ptp->evt_lock); |
902 | for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) | 928 | for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) |
903 | list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); | 929 | list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); |
930 | ptp->evt_overflow = false; | ||
904 | 931 | ||
905 | ptp->phc_clock_info.owner = THIS_MODULE; | 932 | ptp->phc_clock_info.owner = THIS_MODULE; |
906 | snprintf(ptp->phc_clock_info.name, | 933 | snprintf(ptp->phc_clock_info.name, |
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |||
989 | skb->len >= PTP_MIN_LENGTH && | 1016 | skb->len >= PTP_MIN_LENGTH && |
990 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && | 1017 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && |
991 | likely(skb->protocol == htons(ETH_P_IP)) && | 1018 | likely(skb->protocol == htons(ETH_P_IP)) && |
1019 | skb_transport_header_was_set(skb) && | ||
1020 | skb_network_header_len(skb) >= sizeof(struct iphdr) && | ||
992 | ip_hdr(skb)->protocol == IPPROTO_UDP && | 1021 | ip_hdr(skb)->protocol == IPPROTO_UDP && |
1022 | skb_headlen(skb) >= | ||
1023 | skb_transport_offset(skb) + sizeof(struct udphdr) && | ||
993 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); | 1024 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); |
994 | } | 1025 | } |
995 | 1026 | ||
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, | |||
1106 | { | 1137 | { |
1107 | if ((enable_wanted != efx->ptp_data->enabled) || | 1138 | if ((enable_wanted != efx->ptp_data->enabled) || |
1108 | (enable_wanted && (efx->ptp_data->mode != new_mode))) { | 1139 | (enable_wanted && (efx->ptp_data->mode != new_mode))) { |
1109 | int rc; | 1140 | int rc = 0; |
1110 | 1141 | ||
1111 | if (enable_wanted) { | 1142 | if (enable_wanted) { |
1112 | /* Change of mode requires disable */ | 1143 | /* Change of mode requires disable */ |
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, | |||
1123 | * succeed. | 1154 | * succeed. |
1124 | */ | 1155 | */ |
1125 | efx->ptp_data->mode = new_mode; | 1156 | efx->ptp_data->mode = new_mode; |
1126 | rc = efx_ptp_start(efx); | 1157 | if (netif_running(efx->net_dev)) |
1158 | rc = efx_ptp_start(efx); | ||
1127 | if (rc == 0) { | 1159 | if (rc == 0) { |
1128 | rc = efx_ptp_synchronize(efx, | 1160 | rc = efx_ptp_synchronize(efx, |
1129 | PTP_SYNC_ATTEMPTS * 2); | 1161 | PTP_SYNC_ATTEMPTS * 2); |
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) | |||
1295 | list_add_tail(&evt->link, &ptp->evt_list); | 1327 | list_add_tail(&evt->link, &ptp->evt_list); |
1296 | 1328 | ||
1297 | queue_work(ptp->workwq, &ptp->work); | 1329 | queue_work(ptp->workwq, &ptp->work); |
1298 | } else { | 1330 | } else if (!ptp->evt_overflow) { |
1299 | netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); | 1331 | /* Log a warning message and set the event overflow flag. |
1332 | * The message won't be logged again until the event queue | ||
1333 | * becomes empty. | ||
1334 | */ | ||
1335 | netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); | ||
1336 | ptp->evt_overflow = true; | ||
1300 | } | 1337 | } |
1301 | spin_unlock_bh(&ptp->evt_lock); | 1338 | spin_unlock_bh(&ptp->evt_lock); |
1302 | } | 1339 | } |
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) | |||
1389 | if (rc != 0) | 1426 | if (rc != 0) |
1390 | return rc; | 1427 | return rc; |
1391 | 1428 | ||
1392 | ptp_data->current_adjfreq = delta; | 1429 | ptp_data->current_adjfreq = adjustment_ns; |
1393 | return 0; | 1430 | return 0; |
1394 | } | 1431 | } |
1395 | 1432 | ||
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) | |||
1404 | 1441 | ||
1405 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | 1442 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); |
1406 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); | 1443 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
1407 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); | 1444 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); |
1408 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); | 1445 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); |
1409 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); | 1446 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); |
1410 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | 1447 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), |
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx) | |||
1491 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = | 1528 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = |
1492 | &efx_ptp_channel_type; | 1529 | &efx_ptp_channel_type; |
1493 | } | 1530 | } |
1531 | |||
1532 | void efx_ptp_start_datapath(struct efx_nic *efx) | ||
1533 | { | ||
1534 | if (efx_ptp_restart(efx)) | ||
1535 | netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); | ||
1536 | } | ||
1537 | |||
1538 | void efx_ptp_stop_datapath(struct efx_nic *efx) | ||
1539 | { | ||
1540 | efx_ptp_stop(efx); | ||
1541 | } | ||
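Several of the ptp.c hunks above add an evt_overflow flag so the "PTP event queue overflow" error is logged only once per overflow episode: the flag is set when the free list runs dry and cleared again once the event list has drained, either on expiry or on a match. A minimal sketch of that log-once-until-drained pattern follows; the demo_* names are illustrative and locking is elided for brevity.

/* Sketch only: warn once on overflow, re-arm when the queue drains. */
#include <linux/list.h>
#include <linux/printk.h>

struct demo_evt_queue {
	struct list_head evt_list;	/* events waiting for packets */
	struct list_head evt_free_list;	/* spare event slots */
	bool evt_overflow;
};

static void demo_queue_event(struct demo_evt_queue *q)
{
	if (!list_empty(&q->evt_free_list)) {
		/* take a free slot, fill it in, append it to evt_list */
		list_move_tail(q->evt_free_list.next, &q->evt_list);
	} else if (!q->evt_overflow) {
		pr_err("PTP event queue overflow\n");	/* warn only once */
		q->evt_overflow = true;
	}
}

static void demo_drain_events(struct demo_evt_queue *q)
{
	/* ... consume or expire entries back onto evt_free_list ... */
	if (q->evt_overflow && list_empty(&q->evt_list))
		q->evt_overflow = false;	/* re-arm the warning */
}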
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8f09e686fc23..42488df1f4ec 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, | |||
94 | 94 | ||
95 | void efx_rx_config_page_split(struct efx_nic *efx) | 95 | void efx_rx_config_page_split(struct efx_nic *efx) |
96 | { | 96 | { |
97 | efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, | 97 | efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, |
98 | EFX_RX_BUF_ALIGNMENT); | 98 | EFX_RX_BUF_ALIGNMENT); |
99 | efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : | 99 | efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : |
100 | ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / | 100 | ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / |
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) | |||
189 | do { | 189 | do { |
190 | index = rx_queue->added_count & rx_queue->ptr_mask; | 190 | index = rx_queue->added_count & rx_queue->ptr_mask; |
191 | rx_buf = efx_rx_buffer(rx_queue, index); | 191 | rx_buf = efx_rx_buffer(rx_queue, index); |
192 | rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; | 192 | rx_buf->dma_addr = dma_addr + efx->rx_ip_align; |
193 | rx_buf->page = page; | 193 | rx_buf->page = page; |
194 | rx_buf->page_offset = page_offset + NET_IP_ALIGN; | 194 | rx_buf->page_offset = page_offset + efx->rx_ip_align; |
195 | rx_buf->len = efx->rx_dma_len; | 195 | rx_buf->len = efx->rx_dma_len; |
196 | rx_buf->flags = 0; | 196 | rx_buf->flags = 0; |
197 | ++rx_queue->added_count; | 197 | ++rx_queue->added_count; |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0c9b5d94154f..8bf29eb4a5a0 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -82,6 +82,7 @@ static const char version[] = | |||
82 | #include <linux/mii.h> | 82 | #include <linux/mii.h> |
83 | #include <linux/workqueue.h> | 83 | #include <linux/workqueue.h> |
84 | #include <linux/of.h> | 84 | #include <linux/of.h> |
85 | #include <linux/of_device.h> | ||
85 | 86 | ||
86 | #include <linux/netdevice.h> | 87 | #include <linux/netdevice.h> |
87 | #include <linux/etherdevice.h> | 88 | #include <linux/etherdevice.h> |
@@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * | |||
2184 | } | 2185 | } |
2185 | } | 2186 | } |
2186 | 2187 | ||
2188 | #if IS_BUILTIN(CONFIG_OF) | ||
2189 | static const struct of_device_id smc91x_match[] = { | ||
2190 | { .compatible = "smsc,lan91c94", }, | ||
2191 | { .compatible = "smsc,lan91c111", }, | ||
2192 | {}, | ||
2193 | }; | ||
2194 | MODULE_DEVICE_TABLE(of, smc91x_match); | ||
2195 | #endif | ||
2196 | |||
2187 | /* | 2197 | /* |
2188 | * smc_init(void) | 2198 | * smc_init(void) |
2189 | * Input parameters: | 2199 | * Input parameters: |
@@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * | |||
2198 | static int smc_drv_probe(struct platform_device *pdev) | 2208 | static int smc_drv_probe(struct platform_device *pdev) |
2199 | { | 2209 | { |
2200 | struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); | 2210 | struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); |
2211 | const struct of_device_id *match = NULL; | ||
2201 | struct smc_local *lp; | 2212 | struct smc_local *lp; |
2202 | struct net_device *ndev; | 2213 | struct net_device *ndev; |
2203 | struct resource *res, *ires; | 2214 | struct resource *res, *ires; |
@@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2217 | */ | 2228 | */ |
2218 | 2229 | ||
2219 | lp = netdev_priv(ndev); | 2230 | lp = netdev_priv(ndev); |
2231 | lp->cfg.flags = 0; | ||
2220 | 2232 | ||
2221 | if (pd) { | 2233 | if (pd) { |
2222 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); | 2234 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); |
2223 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); | 2235 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); |
2224 | } else { | 2236 | } |
2237 | |||
2238 | #if IS_BUILTIN(CONFIG_OF) | ||
2239 | match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); | ||
2240 | if (match) { | ||
2241 | struct device_node *np = pdev->dev.of_node; | ||
2242 | u32 val; | ||
2243 | |||
2244 | /* Combination of IO widths supported, default to 16-bit */ | ||
2245 | if (!of_property_read_u32(np, "reg-io-width", &val)) { | ||
2246 | if (val & 1) | ||
2247 | lp->cfg.flags |= SMC91X_USE_8BIT; | ||
2248 | if ((val == 0) || (val & 2)) | ||
2249 | lp->cfg.flags |= SMC91X_USE_16BIT; | ||
2250 | if (val & 4) | ||
2251 | lp->cfg.flags |= SMC91X_USE_32BIT; | ||
2252 | } else { | ||
2253 | lp->cfg.flags |= SMC91X_USE_16BIT; | ||
2254 | } | ||
2255 | } | ||
2256 | #endif | ||
2257 | |||
2258 | if (!pd && !match) { | ||
2225 | lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; | 2259 | lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; |
2226 | lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; | 2260 | lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; |
2227 | lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; | 2261 | lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; |
@@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev) | |||
2370 | return 0; | 2404 | return 0; |
2371 | } | 2405 | } |
2372 | 2406 | ||
2373 | #ifdef CONFIG_OF | ||
2374 | static const struct of_device_id smc91x_match[] = { | ||
2375 | { .compatible = "smsc,lan91c94", }, | ||
2376 | { .compatible = "smsc,lan91c111", }, | ||
2377 | {}, | ||
2378 | }; | ||
2379 | MODULE_DEVICE_TABLE(of, smc91x_match); | ||
2380 | #endif | ||
2381 | |||
2382 | static struct dev_pm_ops smc_drv_pm_ops = { | 2407 | static struct dev_pm_ops smc_drv_pm_ops = { |
2383 | .suspend = smc_drv_suspend, | 2408 | .suspend = smc_drv_suspend, |
2384 | .resume = smc_drv_resume, | 2409 | .resume = smc_drv_resume, |
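In the smc91x probe changes above, the reg-io-width device-tree property is treated as a bitmask of supported bus widths: bit 0 enables 8-bit access, bit 1 (or a value of 0, the default) enables 16-bit access, and bit 2 enables 32-bit access. The following user-space sketch just re-derives a few decodings; the SMC91X_USE_* values are restated locally and the chosen inputs are examples, not values taken from any particular board.

/* Sketch only: decode of the reg-io-width bitmask handled above. */
#include <assert.h>

#define SMC91X_USE_8BIT		(1 << 0)
#define SMC91X_USE_16BIT	(1 << 1)
#define SMC91X_USE_32BIT	(1 << 2)

static unsigned int decode_reg_io_width(unsigned int val)
{
	unsigned int flags = 0;

	if (val & 1)
		flags |= SMC91X_USE_8BIT;
	if (val == 0 || (val & 2))
		flags |= SMC91X_USE_16BIT;	/* 16-bit is the default */
	if (val & 4)
		flags |= SMC91X_USE_32BIT;
	return flags;
}

int main(void)
{
	assert(decode_reg_io_width(0) == SMC91X_USE_16BIT);
	assert(decode_reg_io_width(3) == (SMC91X_USE_8BIT | SMC91X_USE_16BIT));
	assert(decode_reg_io_width(4) == SMC91X_USE_32BIT);
	return 0;
}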
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8a7a23a84ac5..797b56a0efc4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) | 622 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
623 | return -EOPNOTSUPP; | 623 | return -EOPNOTSUPP; |
624 | 624 | ||
625 | if (netif_msg_hw(priv)) { | 625 | priv->adv_ts = 0; |
626 | if (priv->dma_cap.time_stamp) { | 626 | if (priv->dma_cap.atime_stamp && priv->extend_desc) |
627 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); | 627 | priv->adv_ts = 1; |
628 | priv->adv_ts = 0; | 628 | |
629 | } | 629 | if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) |
630 | if (priv->dma_cap.atime_stamp && priv->extend_desc) { | 630 | pr_debug("IEEE 1588-2002 Time Stamp supported\n"); |
631 | pr_debug | 631 | |
632 | ("IEEE 1588-2008 Advanced Time Stamp supported\n"); | 632 | if (netif_msg_hw(priv) && priv->adv_ts) |
633 | priv->adv_ts = 1; | 633 | pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); |
634 | } | ||
635 | } | ||
636 | 634 | ||
637 | priv->hw->ptp = &stmmac_ptp; | 635 | priv->hw->ptp = &stmmac_ptp; |
638 | priv->hwts_tx_en = 0; | 636 | priv->hwts_tx_en = 0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b8b0eeed0f92..7680581ebe12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) | |||
56 | 56 | ||
57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); | 57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); |
58 | 58 | ||
59 | spin_unlock_irqrestore(&priv->lock, flags); | 59 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
60 | 60 | ||
61 | return 0; | 61 | return 0; |
62 | } | 62 | } |
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) | |||
91 | 91 | ||
92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); | 92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); |
93 | 93 | ||
94 | spin_unlock_irqrestore(&priv->lock, flags); | 94 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
95 | 95 | ||
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dd0dd6279b4e..4f1d2549130e 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2019 | ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | 2019 | ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
2020 | | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | 2020 | | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
2021 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM | 2021 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM |
2022 | /*| NETIF_F_FRAGLIST */ | ||
2023 | ; | 2022 | ; |
2024 | ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 2023 | ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
2025 | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; | 2024 | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7536a4c01293..5330fd298705 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, | |||
740 | /* set speed_in input in case RMII mode is used in 100Mbps */ | 740 | /* set speed_in input in case RMII mode is used in 100Mbps */ |
741 | if (phy->speed == 100) | 741 | if (phy->speed == 100) |
742 | mac_control |= BIT(15); | 742 | mac_control |= BIT(15); |
743 | else if (phy->speed == 10) | ||
744 | mac_control |= BIT(18); /* In Band mode */ | ||
743 | 745 | ||
744 | *link = true; | 746 | *link = true; |
745 | } else { | 747 | } else { |
@@ -1151,6 +1153,12 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1151 | * receive descs | 1153 | * receive descs |
1152 | */ | 1154 | */ |
1153 | cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); | 1155 | cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); |
1156 | |||
1157 | if (cpts_register(&priv->pdev->dev, priv->cpts, | ||
1158 | priv->data.cpts_clock_mult, | ||
1159 | priv->data.cpts_clock_shift)) | ||
1160 | dev_err(priv->dev, "error registering cpts device\n"); | ||
1161 | |||
1154 | } | 1162 | } |
1155 | 1163 | ||
1156 | /* Enable Interrupt pacing if configured */ | 1164 | /* Enable Interrupt pacing if configured */ |
@@ -1197,6 +1205,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) | |||
1197 | netif_carrier_off(priv->ndev); | 1205 | netif_carrier_off(priv->ndev); |
1198 | 1206 | ||
1199 | if (cpsw_common_res_usage_state(priv) <= 1) { | 1207 | if (cpsw_common_res_usage_state(priv) <= 1) { |
1208 | cpts_unregister(priv->cpts); | ||
1200 | cpsw_intr_disable(priv); | 1209 | cpsw_intr_disable(priv); |
1201 | cpdma_ctlr_int_ctrl(priv->dma, false); | 1210 | cpdma_ctlr_int_ctrl(priv->dma, false); |
1202 | cpdma_ctlr_stop(priv->dma); | 1211 | cpdma_ctlr_stop(priv->dma); |
@@ -1816,6 +1825,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1816 | } | 1825 | } |
1817 | 1826 | ||
1818 | i++; | 1827 | i++; |
1828 | if (i == data->slaves) | ||
1829 | break; | ||
1819 | } | 1830 | } |
1820 | 1831 | ||
1821 | return 0; | 1832 | return 0; |
@@ -1983,9 +1994,15 @@ static int cpsw_probe(struct platform_device *pdev) | |||
1983 | goto clean_runtime_disable_ret; | 1994 | goto clean_runtime_disable_ret; |
1984 | } | 1995 | } |
1985 | priv->regs = ss_regs; | 1996 | priv->regs = ss_regs; |
1986 | priv->version = __raw_readl(&priv->regs->id_ver); | ||
1987 | priv->host_port = HOST_PORT_NUM; | 1997 | priv->host_port = HOST_PORT_NUM; |
1988 | 1998 | ||
1999 | /* Need to enable clocks with runtime PM api to access module | ||
2000 | * registers | ||
2001 | */ | ||
2002 | pm_runtime_get_sync(&pdev->dev); | ||
2003 | priv->version = readl(&priv->regs->id_ver); | ||
2004 | pm_runtime_put_sync(&pdev->dev); | ||
2005 | |||
1989 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 2006 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1990 | priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); | 2007 | priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); |
1991 | if (IS_ERR(priv->wr_regs)) { | 2008 | if (IS_ERR(priv->wr_regs)) { |
@@ -2091,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2091 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | 2108 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { |
2092 | for (i = res->start; i <= res->end; i++) { | 2109 | for (i = res->start; i <= res->end; i++) { |
2093 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, | 2110 | if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, |
2094 | dev_name(priv->dev), priv)) { | 2111 | dev_name(&pdev->dev), priv)) { |
2095 | dev_err(priv->dev, "error attaching irq\n"); | 2112 | dev_err(priv->dev, "error attaching irq\n"); |
2096 | goto clean_ale_ret; | 2113 | goto clean_ale_ret; |
2097 | } | 2114 | } |
@@ -2155,8 +2172,6 @@ static int cpsw_remove(struct platform_device *pdev) | |||
2155 | unregister_netdev(cpsw_get_slave_ndev(priv, 1)); | 2172 | unregister_netdev(cpsw_get_slave_ndev(priv, 1)); |
2156 | unregister_netdev(ndev); | 2173 | unregister_netdev(ndev); |
2157 | 2174 | ||
2158 | cpts_unregister(priv->cpts); | ||
2159 | |||
2160 | cpsw_ale_destroy(priv->ale); | 2175 | cpsw_ale_destroy(priv->ale); |
2161 | cpdma_chan_destroy(priv->txch); | 2176 | cpdma_chan_destroy(priv->txch); |
2162 | cpdma_chan_destroy(priv->rxch); | 2177 | cpdma_chan_destroy(priv->rxch); |
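Two of the cpsw changes above are lifecycle fixes rather than new features: the CPTS device is now registered in ndo_open and unregistered in ndo_stop (instead of only at driver remove), and the id_ver register is read inside a pm_runtime_get_sync()/pm_runtime_put_sync() pair because the module clocks are not guaranteed to be on at probe time. A minimal sketch of the clock-gated register read follows; only the pm_runtime_* and readl() calls are real APIs, the other names are illustrative.

/* Sketch only: read a version register with the module clocked. */
#include <linux/pm_runtime.h>
#include <linux/io.h>

static u32 demo_read_id_ver(struct device *dev, void __iomem *id_ver_reg)
{
	u32 ver;

	pm_runtime_get_sync(dev);	/* bring the module out of idle */
	ver = readl(id_ver_reg);	/* safe: clocks are enabled here */
	pm_runtime_put_sync(dev);	/* let it idle again */

	return ver;
}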
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 41ba974bf37c..cd9b164a0434 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -61,6 +61,7 @@ | |||
61 | #include <linux/davinci_emac.h> | 61 | #include <linux/davinci_emac.h> |
62 | #include <linux/of.h> | 62 | #include <linux/of.h> |
63 | #include <linux/of_address.h> | 63 | #include <linux/of_address.h> |
64 | #include <linux/of_device.h> | ||
64 | #include <linux/of_irq.h> | 65 | #include <linux/of_irq.h> |
65 | #include <linux/of_net.h> | 66 | #include <linux/of_net.h> |
66 | 67 | ||
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = { | |||
1752 | #endif | 1753 | #endif |
1753 | }; | 1754 | }; |
1754 | 1755 | ||
1756 | static const struct of_device_id davinci_emac_of_match[]; | ||
1757 | |||
1755 | static struct emac_platform_data * | 1758 | static struct emac_platform_data * |
1756 | davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) | 1759 | davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) |
1757 | { | 1760 | { |
1758 | struct device_node *np; | 1761 | struct device_node *np; |
1762 | const struct of_device_id *match; | ||
1763 | const struct emac_platform_data *auxdata; | ||
1759 | struct emac_platform_data *pdata = NULL; | 1764 | struct emac_platform_data *pdata = NULL; |
1760 | const u8 *mac_addr; | 1765 | const u8 *mac_addr; |
1761 | 1766 | ||
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) | |||
1793 | 1798 | ||
1794 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); | 1799 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
1795 | if (!priv->phy_node) | 1800 | if (!priv->phy_node) |
1796 | pdata->phy_id = ""; | 1801 | pdata->phy_id = NULL; |
1802 | |||
1803 | auxdata = pdev->dev.platform_data; | ||
1804 | if (auxdata) { | ||
1805 | pdata->interrupt_enable = auxdata->interrupt_enable; | ||
1806 | pdata->interrupt_disable = auxdata->interrupt_disable; | ||
1807 | } | ||
1808 | |||
1809 | match = of_match_device(davinci_emac_of_match, &pdev->dev); | ||
1810 | if (match && match->data) { | ||
1811 | auxdata = match->data; | ||
1812 | pdata->version = auxdata->version; | ||
1813 | pdata->hw_ram_addr = auxdata->hw_ram_addr; | ||
1814 | } | ||
1797 | 1815 | ||
1798 | pdev->dev.platform_data = pdata; | 1816 | pdev->dev.platform_data = pdata; |
1799 | 1817 | ||
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { | |||
2020 | }; | 2038 | }; |
2021 | 2039 | ||
2022 | #if IS_ENABLED(CONFIG_OF) | 2040 | #if IS_ENABLED(CONFIG_OF) |
2041 | static const struct emac_platform_data am3517_emac_data = { | ||
2042 | .version = EMAC_VERSION_2, | ||
2043 | .hw_ram_addr = 0x01e20000, | ||
2044 | }; | ||
2045 | |||
2023 | static const struct of_device_id davinci_emac_of_match[] = { | 2046 | static const struct of_device_id davinci_emac_of_match[] = { |
2024 | {.compatible = "ti,davinci-dm6467-emac", }, | 2047 | {.compatible = "ti,davinci-dm6467-emac", }, |
2048 | {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, | ||
2025 | {}, | 2049 | {}, |
2026 | }; | 2050 | }; |
2027 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); | 2051 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 628b736e5ae7..0e9fb3301b11 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
2080 | } | 2080 | } |
2081 | 2081 | ||
2082 | /* Return subqueue id on this core (one per core). */ | 2082 | /* Return subqueue id on this core (one per core). */ |
2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | 2083 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, |
2084 | void *accel_priv) | ||
2084 | { | 2085 | { |
2085 | return smp_processor_id(); | 2086 | return smp_processor_id(); |
2086 | } | 2087 | } |
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1f2364126323..2166e879a096 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op) | |||
1017 | platform_set_drvdata(op, ndev); | 1017 | platform_set_drvdata(op, ndev); |
1018 | SET_NETDEV_DEV(ndev, &op->dev); | 1018 | SET_NETDEV_DEV(ndev, &op->dev); |
1019 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ | 1019 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ |
1020 | ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; | 1020 | ndev->features = NETIF_F_SG; |
1021 | ndev->netdev_ops = &temac_netdev_ops; | 1021 | ndev->netdev_ops = &temac_netdev_ops; |
1022 | ndev->ethtool_ops = &temac_ethtool_ops; | 1022 | ndev->ethtool_ops = &temac_ethtool_ops; |
1023 | #if 0 | 1023 | #if 0 |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2ff038d6d20..f9293da19e26 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
1486 | 1486 | ||
1487 | SET_NETDEV_DEV(ndev, &op->dev); | 1487 | SET_NETDEV_DEV(ndev, &op->dev); |
1488 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ | 1488 | ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ |
1489 | ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; | 1489 | ndev->features = NETIF_F_SG; |
1490 | ndev->netdev_ops = &axienet_netdev_ops; | 1490 | ndev->netdev_ops = &axienet_netdev_ops; |
1491 | ndev->ethtool_ops = &axienet_ethtool_ops; | 1491 | ndev->ethtool_ops = &axienet_ethtool_ops; |
1492 | 1492 | ||
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 74234a51c851..fefb8cd5eb65 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) | |||
163 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | 163 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, |
164 | drvdata->base_addr + XEL_TSR_OFFSET); | 164 | drvdata->base_addr + XEL_TSR_OFFSET); |
165 | 165 | ||
166 | /* Enable the Tx interrupts for the second Buffer if | ||
167 | * configured in HW */ | ||
168 | if (drvdata->tx_ping_pong != 0) { | ||
169 | reg_data = __raw_readl(drvdata->base_addr + | ||
170 | XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
171 | __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | ||
172 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
173 | XEL_TSR_OFFSET); | ||
174 | } | ||
175 | |||
176 | /* Enable the Rx interrupts for the first buffer */ | 166 | /* Enable the Rx interrupts for the first buffer */ |
177 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); | 167 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); |
178 | 168 | ||
179 | /* Enable the Rx interrupts for the second Buffer if | ||
180 | * configured in HW */ | ||
181 | if (drvdata->rx_ping_pong != 0) { | ||
182 | __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + | ||
183 | XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); | ||
184 | } | ||
185 | |||
186 | /* Enable the Global Interrupt Enable */ | 169 | /* Enable the Global Interrupt Enable */ |
187 | __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | 170 | __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); |
188 | } | 171 | } |
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) | |||
206 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | 189 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), |
207 | drvdata->base_addr + XEL_TSR_OFFSET); | 190 | drvdata->base_addr + XEL_TSR_OFFSET); |
208 | 191 | ||
209 | /* Disable the Tx interrupts for the second Buffer | ||
210 | * if configured in HW */ | ||
211 | if (drvdata->tx_ping_pong != 0) { | ||
212 | reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
213 | XEL_TSR_OFFSET); | ||
214 | __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | ||
215 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
216 | XEL_TSR_OFFSET); | ||
217 | } | ||
218 | |||
219 | /* Disable the Rx interrupts for the first buffer */ | 192 | /* Disable the Rx interrupts for the first buffer */ |
220 | reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); | 193 | reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); |
221 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | 194 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), |
222 | drvdata->base_addr + XEL_RSR_OFFSET); | 195 | drvdata->base_addr + XEL_RSR_OFFSET); |
223 | |||
224 | /* Disable the Rx interrupts for the second buffer | ||
225 | * if configured in HW */ | ||
226 | if (drvdata->rx_ping_pong != 0) { | ||
227 | |||
228 | reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
229 | XEL_RSR_OFFSET); | ||
230 | __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | ||
231 | drvdata->base_addr + XEL_BUFFER_OFFSET + | ||
232 | XEL_RSR_OFFSET); | ||
233 | } | ||
234 | } | 196 | } |
235 | 197 | ||
236 | /** | 198 | /** |
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, | |||
258 | *to_u16_ptr++ = *from_u16_ptr++; | 220 | *to_u16_ptr++ = *from_u16_ptr++; |
259 | *to_u16_ptr++ = *from_u16_ptr++; | 221 | *to_u16_ptr++ = *from_u16_ptr++; |
260 | 222 | ||
223 | /* This barrier resolves occasional issues seen around | ||
224 | * cases where the data is not properly flushed out | ||
225 | * from the processor store buffers to the destination | ||
226 | * memory locations. | ||
227 | */ | ||
228 | wmb(); | ||
229 | |||
261 | /* Output a word */ | 230 | /* Output a word */ |
262 | *to_u32_ptr++ = align_buffer; | 231 | *to_u32_ptr++ = align_buffer; |
263 | } | 232 | } |
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, | |||
273 | for (; length > 0; length--) | 242 | for (; length > 0; length--) |
274 | *to_u8_ptr++ = *from_u8_ptr++; | 243 | *to_u8_ptr++ = *from_u8_ptr++; |
275 | 244 | ||
245 | /* This barrier resolves occasional issues seen around | ||
246 | * cases where the data is not properly flushed out | ||
247 | * from the processor store buffers to the destination | ||
248 | * memory locations. | ||
249 | */ | ||
250 | wmb(); | ||
276 | *to_u32_ptr = align_buffer; | 251 | *to_u32_ptr = align_buffer; |
277 | } | 252 | } |
278 | } | 253 | } |
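Note: the two wmb() additions above order the partial 16-bit stores that build up align_buffer ahead of the single aligned word store the hardware will read. A condensed sketch of the same pattern, with placeholder names:

#include <linux/kernel.h>
#include <asm/barrier.h>

/* Sketch only: placeholder helper illustrating the barrier placement. */
static void emit_aligned_word(u32 *to_u32_ptr, const u16 *from_u16_ptr)
{
	u32 align_buffer = 0;
	u16 *to_u16_ptr = (u16 *)&align_buffer;

	*to_u16_ptr++ = *from_u16_ptr++;
	*to_u16_ptr++ = *from_u16_ptr++;

	/* Drain the CPU store buffers before the word the device sees. */
	wmb();
	*to_u32_ptr = align_buffer;
}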
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 3169252613fa..5d78c1d08abd 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
571 | case HDLCDRVCTL_CALIBRATE: | 571 | case HDLCDRVCTL_CALIBRATE: |
572 | if(!capable(CAP_SYS_RAWIO)) | 572 | if(!capable(CAP_SYS_RAWIO)) |
573 | return -EPERM; | 573 | return -EPERM; |
574 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) | ||
575 | return -EINVAL; | ||
574 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; | 576 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; |
575 | return 0; | 577 | return 0; |
576 | 578 | ||
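Note: the added check prevents the signed multiplication bi.data.calibrate * s->par.bitrate from overflowing before the division. The generic form of that guard, with placeholder parameters, might look like:

#include <linux/kernel.h>	/* INT_MAX */
#include <linux/errno.h>

/* Sketch only: calibrate/bitrate stand in for the ioctl inputs above. */
static int scaled_calibrate(int calibrate, int bitrate, int *result)
{
	if (bitrate <= 0 || calibrate < 0 || calibrate > INT_MAX / bitrate)
		return -EINVAL;

	*result = calibrate * bitrate / 16;
	return 0;
}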
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 1971411574db..61dd2447e1bb 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1057 | break; | 1057 | break; |
1058 | 1058 | ||
1059 | case SIOCYAMGCFG: | 1059 | case SIOCYAMGCFG: |
1060 | memset(&yi, 0, sizeof(yi)); | ||
1060 | yi.cfg.mask = 0xffffffff; | 1061 | yi.cfg.mask = 0xffffffff; |
1061 | yi.cfg.iobase = yp->iobase; | 1062 | yi.cfg.iobase = yp->iobase; |
1062 | yi.cfg.irq = yp->irq; | 1063 | yi.cfg.irq = yp->irq; |
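Note: the memset added to SIOCYAMGCFG zeroes the whole reply structure before its fields are filled in, so padding and unset members cannot leak kernel stack contents when the structure is later copied to user space. A sketch of that pattern with hypothetical names:

#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch only: struct my_cfg and its fields are placeholders. */
struct my_cfg {
	unsigned int iobase;
	unsigned int irq;
};

static int copy_cfg_to_user(void __user *arg, unsigned int iobase,
			    unsigned int irq)
{
	struct my_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));	/* no uninitialized bytes escape */
	cfg.iobase = iobase;
	cfg.irq = irq;

	return copy_to_user(arg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}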
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 524f713f6017..71baeb3ed905 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
261 | struct sk_buff *skb; | 261 | struct sk_buff *skb; |
262 | 262 | ||
263 | net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; | 263 | net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; |
264 | if (!net) { | 264 | if (!net || net->reg_state != NETREG_REGISTERED) { |
265 | netdev_err(net, "got receive callback but net device" | ||
266 | " not initialized yet\n"); | ||
267 | packet->status = NVSP_STAT_FAIL; | 265 | packet->status = NVSP_STAT_FAIL; |
268 | return 0; | 266 | return 0; |
269 | } | 267 | } |
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
327 | return -EINVAL; | 325 | return -EINVAL; |
328 | 326 | ||
329 | nvdev->start_remove = true; | 327 | nvdev->start_remove = true; |
330 | cancel_delayed_work_sync(&ndevctx->dwork); | ||
331 | cancel_work_sync(&ndevctx->work); | 328 | cancel_work_sync(&ndevctx->work); |
332 | netif_tx_disable(ndev); | 329 | netif_tx_disable(ndev); |
333 | rndis_filter_device_remove(hdev); | 330 | rndis_filter_device_remove(hdev); |
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev, | |||
436 | SET_ETHTOOL_OPS(net, ðtool_ops); | 433 | SET_ETHTOOL_OPS(net, ðtool_ops); |
437 | SET_NETDEV_DEV(net, &dev->device); | 434 | SET_NETDEV_DEV(net, &dev->device); |
438 | 435 | ||
439 | ret = register_netdev(net); | ||
440 | if (ret != 0) { | ||
441 | pr_err("Unable to register netdev.\n"); | ||
442 | free_netdev(net); | ||
443 | goto out; | ||
444 | } | ||
445 | |||
446 | /* Notify the netvsc driver of the new device */ | 436 | /* Notify the netvsc driver of the new device */ |
447 | device_info.ring_size = ring_size; | 437 | device_info.ring_size = ring_size; |
448 | ret = rndis_filter_device_add(dev, &device_info); | 438 | ret = rndis_filter_device_add(dev, &device_info); |
449 | if (ret != 0) { | 439 | if (ret != 0) { |
450 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); | 440 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); |
451 | unregister_netdev(net); | ||
452 | free_netdev(net); | 441 | free_netdev(net); |
453 | hv_set_drvdata(dev, NULL); | 442 | hv_set_drvdata(dev, NULL); |
454 | return ret; | 443 | return ret; |
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev, | |||
457 | 446 | ||
458 | netif_carrier_on(net); | 447 | netif_carrier_on(net); |
459 | 448 | ||
460 | out: | 449 | ret = register_netdev(net); |
450 | if (ret != 0) { | ||
451 | pr_err("Unable to register netdev.\n"); | ||
452 | rndis_filter_device_remove(dev); | ||
453 | free_netdev(net); | ||
454 | } | ||
455 | |||
461 | return ret; | 456 | return ret; |
462 | } | 457 | } |
463 | 458 | ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index acf93798dc67..bc8faaec33f5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, | |||
299 | 299 | ||
300 | if (vlan->fwd_priv) { | 300 | if (vlan->fwd_priv) { |
301 | skb->dev = vlan->lowerdev; | 301 | skb->dev = vlan->lowerdev; |
302 | ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); | 302 | ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); |
303 | } else { | 303 | } else { |
304 | ret = macvlan_queue_xmit(skb, dev); | 304 | ret = macvlan_queue_xmit(skb, dev); |
305 | } | 305 | } |
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = { | |||
338 | .cache_update = eth_header_cache_update, | 338 | .cache_update = eth_header_cache_update, |
339 | }; | 339 | }; |
340 | 340 | ||
341 | static struct rtnl_link_ops macvlan_link_ops; | ||
342 | |||
341 | static int macvlan_open(struct net_device *dev) | 343 | static int macvlan_open(struct net_device *dev) |
342 | { | 344 | { |
343 | struct macvlan_dev *vlan = netdev_priv(dev); | 345 | struct macvlan_dev *vlan = netdev_priv(dev); |
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev) | |||
353 | goto hash_add; | 355 | goto hash_add; |
354 | } | 356 | } |
355 | 357 | ||
356 | if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { | 358 | if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD && |
359 | dev->rtnl_link_ops == &macvlan_link_ops) { | ||
357 | vlan->fwd_priv = | 360 | vlan->fwd_priv = |
358 | lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); | 361 | lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); |
359 | 362 | ||
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev) | |||
362 | */ | 365 | */ |
363 | if (IS_ERR_OR_NULL(vlan->fwd_priv)) { | 366 | if (IS_ERR_OR_NULL(vlan->fwd_priv)) { |
364 | vlan->fwd_priv = NULL; | 367 | vlan->fwd_priv = NULL; |
365 | } else { | 368 | } else |
366 | dev->features &= ~NETIF_F_LLTX; | ||
367 | return 0; | 369 | return 0; |
368 | } | ||
369 | } | 370 | } |
370 | 371 | ||
371 | err = -EBUSY; | 372 | err = -EBUSY; |
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
690 | netdev_features_t features) | 691 | netdev_features_t features) |
691 | { | 692 | { |
692 | struct macvlan_dev *vlan = netdev_priv(dev); | 693 | struct macvlan_dev *vlan = netdev_priv(dev); |
694 | netdev_features_t mask; | ||
695 | |||
696 | features |= NETIF_F_ALL_FOR_ALL; | ||
697 | features &= (vlan->set_features | ~MACVLAN_FEATURES); | ||
698 | mask = features; | ||
699 | |||
700 | features = netdev_increment_features(vlan->lowerdev->features, | ||
701 | features, | ||
702 | mask); | ||
703 | features |= NETIF_F_LLTX; | ||
693 | 704 | ||
694 | return features & (vlan->set_features | ~MACVLAN_FEATURES); | 705 | return features; |
695 | } | 706 | } |
696 | 707 | ||
697 | static const struct ethtool_ops macvlan_ethtool_ops = { | 708 | static const struct ethtool_ops macvlan_ethtool_ops = { |
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused, | |||
1019 | break; | 1030 | break; |
1020 | case NETDEV_FEAT_CHANGE: | 1031 | case NETDEV_FEAT_CHANGE: |
1021 | list_for_each_entry(vlan, &port->vlans, list) { | 1032 | list_for_each_entry(vlan, &port->vlans, list) { |
1022 | vlan->dev->features = dev->features & MACVLAN_FEATURES; | ||
1023 | vlan->dev->gso_max_size = dev->gso_max_size; | 1033 | vlan->dev->gso_max_size = dev->gso_max_size; |
1024 | netdev_features_change(vlan->dev); | 1034 | netdev_update_features(vlan->dev); |
1025 | } | 1035 | } |
1026 | break; | 1036 | break; |
1027 | case NETDEV_UNREGISTER: | 1037 | case NETDEV_UNREGISTER: |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9093004f9b63..2a89da080317 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -770,7 +770,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
770 | int ret; | 770 | int ret; |
771 | int vnet_hdr_len = 0; | 771 | int vnet_hdr_len = 0; |
772 | int vlan_offset = 0; | 772 | int vlan_offset = 0; |
773 | int copied; | 773 | int copied, total; |
774 | 774 | ||
775 | if (q->flags & IFF_VNET_HDR) { | 775 | if (q->flags & IFF_VNET_HDR) { |
776 | struct virtio_net_hdr vnet_hdr; | 776 | struct virtio_net_hdr vnet_hdr; |
@@ -785,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
785 | if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) | 785 | if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) |
786 | return -EFAULT; | 786 | return -EFAULT; |
787 | } | 787 | } |
788 | copied = vnet_hdr_len; | 788 | total = copied = vnet_hdr_len; |
789 | total += skb->len; | ||
789 | 790 | ||
790 | if (!vlan_tx_tag_present(skb)) | 791 | if (!vlan_tx_tag_present(skb)) |
791 | len = min_t(int, skb->len, len); | 792 | len = min_t(int, skb->len, len); |
@@ -800,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
800 | 801 | ||
801 | vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); | 802 | vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); |
802 | len = min_t(int, skb->len + VLAN_HLEN, len); | 803 | len = min_t(int, skb->len + VLAN_HLEN, len); |
804 | total += VLAN_HLEN; | ||
803 | 805 | ||
804 | copy = min_t(int, vlan_offset, len); | 806 | copy = min_t(int, vlan_offset, len); |
805 | ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); | 807 | ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); |
@@ -817,10 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
817 | } | 819 | } |
818 | 820 | ||
819 | ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); | 821 | ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); |
820 | copied += len; | ||
821 | 822 | ||
822 | done: | 823 | done: |
823 | return ret ? ret : copied; | 824 | return ret ? ret : total; |
824 | } | 825 | } |
825 | 826 | ||
826 | static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, | 827 | static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, |
@@ -875,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
875 | } | 876 | } |
876 | 877 | ||
877 | ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); | 878 | ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); |
878 | ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */ | 879 | ret = min_t(ssize_t, ret, len); |
880 | if (ret > 0) | ||
881 | iocb->ki_pos = ret; | ||
879 | out: | 882 | out: |
880 | return ret; | 883 | return ret; |
881 | } | 884 | } |
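Note: the macvtap hunks (and the matching tun.c hunks below) split the accounting in two: copied tracks the offset used for the partial iovec copies, while total is the full on-wire size that the read reports even if the caller's buffer was shorter. A small sketch of the reported length (the helper name is hypothetical):

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Sketch only: computes the value returned to the reader. */
static ssize_t reported_read_len(size_t vnet_hdr_len, const struct sk_buff *skb)
{
	ssize_t total = vnet_hdr_len + skb->len;

	if (vlan_tx_tag_present(skb))	/* tag is inserted inline on copy-out */
		total += VLAN_HLEN;

	return total;	/* returned even when the iovec truncates the copy */
}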
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3ae28f420868..26fa05a472b4 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = { | |||
336 | .resume = genphy_resume, | 336 | .resume = genphy_resume, |
337 | .driver = { .owner = THIS_MODULE,}, | 337 | .driver = { .owner = THIS_MODULE,}, |
338 | }, { | 338 | }, { |
339 | .phy_id = PHY_ID_KSZ8041RNLI, | ||
340 | .phy_id_mask = 0x00fffff0, | ||
341 | .name = "Micrel KSZ8041RNLI", | ||
342 | .features = PHY_BASIC_FEATURES | | ||
343 | SUPPORTED_Pause | SUPPORTED_Asym_Pause, | ||
344 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
345 | .config_init = kszphy_config_init, | ||
346 | .config_aneg = genphy_config_aneg, | ||
347 | .read_status = genphy_read_status, | ||
348 | .ack_interrupt = kszphy_ack_interrupt, | ||
349 | .config_intr = kszphy_config_intr, | ||
350 | .suspend = genphy_suspend, | ||
351 | .resume = genphy_resume, | ||
352 | .driver = { .owner = THIS_MODULE,}, | ||
353 | }, { | ||
339 | .phy_id = PHY_ID_KSZ8051, | 354 | .phy_id = PHY_ID_KSZ8051, |
340 | .phy_id_mask = 0x00fffff0, | 355 | .phy_id_mask = 0x00fffff0, |
341 | .name = "Micrel KSZ8051", | 356 | .name = "Micrel KSZ8051", |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 36c6994436b7..98434b84f041 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev) | |||
565 | int err = 0; | 565 | int err = 0; |
566 | 566 | ||
567 | atomic_set(&phydev->irq_disable, 0); | 567 | atomic_set(&phydev->irq_disable, 0); |
568 | if (request_irq(phydev->irq, phy_interrupt, | 568 | if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", |
569 | IRQF_SHARED, | 569 | phydev) < 0) { |
570 | "phy_interrupt", | ||
571 | phydev) < 0) { | ||
572 | pr_warn("%s: Can't get IRQ %d (PHY)\n", | 570 | pr_warn("%s: Can't get IRQ %d (PHY)\n", |
573 | phydev->bus->name, phydev->irq); | 571 | phydev->bus->name, phydev->irq); |
574 | phydev->irq = PHY_POLL; | 572 | phydev->irq = PHY_POLL; |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 736050d6b451..b75ae5bde673 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1647 | return NETDEV_TX_OK; | 1647 | return NETDEV_TX_OK; |
1648 | } | 1648 | } |
1649 | 1649 | ||
1650 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) | 1650 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, |
1651 | void *accel_priv) | ||
1651 | { | 1652 | { |
1652 | /* | 1653 | /* |
1653 | * This helper function exists to help dev_pick_tx get the correct | 1654 | * This helper function exists to help dev_pick_tx get the correct |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 782e38bfc1ee..ecec8029c5e8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -348,7 +348,8 @@ unlock: | |||
348 | * different rxq no. here. If we could not get rxhash, then we would | 348 | * different rxq no. here. If we could not get rxhash, then we would |
349 | * hope the rxq no. may help here. | 349 | * hope the rxq no. may help here. |
350 | */ | 350 | */ |
351 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) | 351 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, |
352 | void *accel_priv) | ||
352 | { | 353 | { |
353 | struct tun_struct *tun = netdev_priv(dev); | 354 | struct tun_struct *tun = netdev_priv(dev); |
354 | struct tun_flow_entry *e; | 355 | struct tun_flow_entry *e; |
@@ -1184,7 +1185,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1184 | { | 1185 | { |
1185 | struct tun_pi pi = { 0, skb->protocol }; | 1186 | struct tun_pi pi = { 0, skb->protocol }; |
1186 | ssize_t total = 0; | 1187 | ssize_t total = 0; |
1187 | int vlan_offset = 0; | 1188 | int vlan_offset = 0, copied; |
1188 | 1189 | ||
1189 | if (!(tun->flags & TUN_NO_PI)) { | 1190 | if (!(tun->flags & TUN_NO_PI)) { |
1190 | if ((len -= sizeof(pi)) < 0) | 1191 | if ((len -= sizeof(pi)) < 0) |
@@ -1248,6 +1249,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1248 | total += tun->vnet_hdr_sz; | 1249 | total += tun->vnet_hdr_sz; |
1249 | } | 1250 | } |
1250 | 1251 | ||
1252 | copied = total; | ||
1253 | total += skb->len; | ||
1251 | if (!vlan_tx_tag_present(skb)) { | 1254 | if (!vlan_tx_tag_present(skb)) { |
1252 | len = min_t(int, skb->len, len); | 1255 | len = min_t(int, skb->len, len); |
1253 | } else { | 1256 | } else { |
@@ -1262,24 +1265,24 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1262 | 1265 | ||
1263 | vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); | 1266 | vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); |
1264 | len = min_t(int, skb->len + VLAN_HLEN, len); | 1267 | len = min_t(int, skb->len + VLAN_HLEN, len); |
1268 | total += VLAN_HLEN; | ||
1265 | 1269 | ||
1266 | copy = min_t(int, vlan_offset, len); | 1270 | copy = min_t(int, vlan_offset, len); |
1267 | ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); | 1271 | ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); |
1268 | len -= copy; | 1272 | len -= copy; |
1269 | total += copy; | 1273 | copied += copy; |
1270 | if (ret || !len) | 1274 | if (ret || !len) |
1271 | goto done; | 1275 | goto done; |
1272 | 1276 | ||
1273 | copy = min_t(int, sizeof(veth), len); | 1277 | copy = min_t(int, sizeof(veth), len); |
1274 | ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); | 1278 | ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); |
1275 | len -= copy; | 1279 | len -= copy; |
1276 | total += copy; | 1280 | copied += copy; |
1277 | if (ret || !len) | 1281 | if (ret || !len) |
1278 | goto done; | 1282 | goto done; |
1279 | } | 1283 | } |
1280 | 1284 | ||
1281 | skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); | 1285 | skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); |
1282 | total += len; | ||
1283 | 1286 | ||
1284 | done: | 1287 | done: |
1285 | tun->dev->stats.tx_packets++; | 1288 | tun->dev->stats.tx_packets++; |
@@ -1356,6 +1359,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
1356 | ret = tun_do_read(tun, tfile, iocb, iv, len, | 1359 | ret = tun_do_read(tun, tfile, iocb, iv, len, |
1357 | file->f_flags & O_NONBLOCK); | 1360 | file->f_flags & O_NONBLOCK); |
1358 | ret = min_t(ssize_t, ret, len); | 1361 | ret = min_t(ssize_t, ret, len); |
1362 | if (ret > 0) | ||
1363 | iocb->ki_pos = ret; | ||
1359 | out: | 1364 | out: |
1360 | tun_put(tun); | 1365 | tun_put(tun); |
1361 | return ret; | 1366 | return ret; |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 85e4a01670f0..47b0f732b0b1 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM | |||
276 | module will be called cdc_mbim. | 276 | module will be called cdc_mbim. |
277 | 277 | ||
278 | config USB_NET_DM9601 | 278 | config USB_NET_DM9601 |
279 | tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" | 279 | tristate "Davicom DM96xx based USB 10/100 ethernet devices" |
280 | depends on USB_USBNET | 280 | depends on USB_USBNET |
281 | select CRC32 | 281 | select CRC32 |
282 | help | 282 | help |
283 | This option adds support for Davicom DM9601 based USB 1.1 | 283 | This option adds support for Davicom DM9601/DM9620/DM9621A |
284 | 10/100 Ethernet adapters. | 284 | based USB 10/100 Ethernet adapters. |
285 | 285 | ||
286 | config USB_NET_SR9700 | 286 | config USB_NET_SR9700 |
287 | tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" | 287 | tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index c6867f926cff..14aa48fa8d7e 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices | 2 | * Davicom DM96xx USB 10/100Mbps ethernet devices |
3 | * | 3 | * |
4 | * Peter Korsgaard <jacmet@sunsite.dk> | 4 | * Peter Korsgaard <jacmet@sunsite.dk> |
5 | * | 5 | * |
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) | |||
364 | dev->net->ethtool_ops = &dm9601_ethtool_ops; | 364 | dev->net->ethtool_ops = &dm9601_ethtool_ops; |
365 | dev->net->hard_header_len += DM_TX_OVERHEAD; | 365 | dev->net->hard_header_len += DM_TX_OVERHEAD; |
366 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 366 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
367 | dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; | 367 | |
368 | /* dm9620/21a require room for 4 byte padding, even in dm9601 | ||
369 | * mode, so we need +1 to be able to receive full size | ||
370 | * ethernet frames. | ||
371 | */ | ||
372 | dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1; | ||
368 | 373 | ||
369 | dev->mii.dev = dev->net; | 374 | dev->mii.dev = dev->net; |
370 | dev->mii.mdio_read = dm9601_mdio_read; | 375 | dev->mii.mdio_read = dm9601_mdio_read; |
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
468 | static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 473 | static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
469 | gfp_t flags) | 474 | gfp_t flags) |
470 | { | 475 | { |
471 | int len; | 476 | int len, pad; |
472 | 477 | ||
473 | /* format: | 478 | /* format: |
474 | b1: packet length low | 479 | b1: packet length low |
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
476 | b3..n: packet data | 481 | b3..n: packet data |
477 | */ | 482 | */ |
478 | 483 | ||
479 | len = skb->len; | 484 | len = skb->len + DM_TX_OVERHEAD; |
485 | |||
486 | /* workaround for dm962x errata with tx fifo getting out of | ||
487 | * sync if a USB bulk transfer retry happens right after a | ||
488 | * packet with odd / maxpacket length by adding up to 3 bytes | ||
489 | * padding. | ||
490 | */ | ||
491 | while ((len & 1) || !(len % dev->maxpacket)) | ||
492 | len++; | ||
480 | 493 | ||
481 | if (skb_headroom(skb) < DM_TX_OVERHEAD) { | 494 | len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */ |
495 | pad = len - skb->len; | ||
496 | |||
497 | if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) { | ||
482 | struct sk_buff *skb2; | 498 | struct sk_buff *skb2; |
483 | 499 | ||
484 | skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); | 500 | skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags); |
485 | dev_kfree_skb_any(skb); | 501 | dev_kfree_skb_any(skb); |
486 | skb = skb2; | 502 | skb = skb2; |
487 | if (!skb) | 503 | if (!skb) |
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
490 | 506 | ||
491 | __skb_push(skb, DM_TX_OVERHEAD); | 507 | __skb_push(skb, DM_TX_OVERHEAD); |
492 | 508 | ||
493 | /* usbnet adds padding if length is a multiple of packet size | 509 | if (pad) { |
494 | if so, adjust length value in header */ | 510 | memset(skb->data + skb->len, 0, pad); |
495 | if ((skb->len % dev->maxpacket) == 0) | 511 | __skb_put(skb, pad); |
496 | len++; | 512 | } |
497 | 513 | ||
498 | skb->data[0] = len; | 514 | skb->data[0] = len; |
499 | skb->data[1] = len >> 8; | 515 | skb->data[1] = len >> 8; |
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev) | |||
543 | } | 559 | } |
544 | 560 | ||
545 | static const struct driver_info dm9601_info = { | 561 | static const struct driver_info dm9601_info = { |
546 | .description = "Davicom DM9601 USB Ethernet", | 562 | .description = "Davicom DM96xx USB 10/100 Ethernet", |
547 | .flags = FLAG_ETHER | FLAG_LINK_INTR, | 563 | .flags = FLAG_ETHER | FLAG_LINK_INTR, |
548 | .bind = dm9601_bind, | 564 | .bind = dm9601_bind, |
549 | .rx_fixup = dm9601_rx_fixup, | 565 | .rx_fixup = dm9601_rx_fixup, |
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = { | |||
594 | USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ | 610 | USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ |
595 | .driver_info = (unsigned long)&dm9601_info, | 611 | .driver_info = (unsigned long)&dm9601_info, |
596 | }, | 612 | }, |
613 | { | ||
614 | USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */ | ||
615 | .driver_info = (unsigned long)&dm9601_info, | ||
616 | }, | ||
597 | {}, // END | 617 | {}, // END |
598 | }; | 618 | }; |
599 | 619 | ||
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = { | |||
612 | module_usb_driver(dm9601_driver); | 632 | module_usb_driver(dm9601_driver); |
613 | 633 | ||
614 | MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); | 634 | MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); |
615 | MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); | 635 | MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices"); |
616 | MODULE_LICENSE("GPL"); | 636 | MODULE_LICENSE("GPL"); |
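Note: the dm9601 tx_fixup change pads each frame so the USB transfer has an even length and never lands exactly on a multiple of the endpoint's max packet size, working around the dm962x FIFO errata described in the hunk. A worked sketch of the padding rule, assuming DM_TX_OVERHEAD is the 2-byte length header and a hypothetical 64-byte bulk maxpacket:

/* Sketch only: mirrors the loop in the hunk above. */
static int dm96xx_pad_bytes(int skb_len, int maxpacket)
{
	int len = skb_len + 2;			/* payload + 2-byte header */

	/* keep the length even and off any maxpacket boundary */
	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - 2 - skb_len;	/* e.g. skb_len 62, maxpacket 64 -> 2 */
}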
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 86292e6aaf49..1a482344b3f5 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -185,7 +185,6 @@ enum rx_ctrl_state{ | |||
185 | #define BM_REQUEST_TYPE (0xa1) | 185 | #define BM_REQUEST_TYPE (0xa1) |
186 | #define B_NOTIFICATION (0x20) | 186 | #define B_NOTIFICATION (0x20) |
187 | #define W_VALUE (0x0) | 187 | #define W_VALUE (0x0) |
188 | #define W_INDEX (0x2) | ||
189 | #define W_LENGTH (0x2) | 188 | #define W_LENGTH (0x2) |
190 | 189 | ||
191 | #define B_OVERRUN (0x1<<6) | 190 | #define B_OVERRUN (0x1<<6) |
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
1487 | struct uart_icount *icount; | 1486 | struct uart_icount *icount; |
1488 | struct hso_serial_state_notification *serial_state_notification; | 1487 | struct hso_serial_state_notification *serial_state_notification; |
1489 | struct usb_device *usb; | 1488 | struct usb_device *usb; |
1489 | int if_num; | ||
1490 | 1490 | ||
1491 | /* Sanity checks */ | 1491 | /* Sanity checks */ |
1492 | if (!serial) | 1492 | if (!serial) |
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
1495 | handle_usb_error(status, __func__, serial->parent); | 1495 | handle_usb_error(status, __func__, serial->parent); |
1496 | return; | 1496 | return; |
1497 | } | 1497 | } |
1498 | |||
1499 | /* tiocmget is only supported on HSO_PORT_MODEM */ | ||
1498 | tiocmget = serial->tiocmget; | 1500 | tiocmget = serial->tiocmget; |
1499 | if (!tiocmget) | 1501 | if (!tiocmget) |
1500 | return; | 1502 | return; |
1503 | BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); | ||
1504 | |||
1501 | usb = serial->parent->usb; | 1505 | usb = serial->parent->usb; |
1506 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; | ||
1507 | |||
1508 | /* wIndex should be the USB interface number of the port to which the | ||
1509 | * notification applies, which should always be the Modem port. | ||
1510 | */ | ||
1502 | serial_state_notification = &tiocmget->serial_state_notification; | 1511 | serial_state_notification = &tiocmget->serial_state_notification; |
1503 | if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || | 1512 | if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || |
1504 | serial_state_notification->bNotification != B_NOTIFICATION || | 1513 | serial_state_notification->bNotification != B_NOTIFICATION || |
1505 | le16_to_cpu(serial_state_notification->wValue) != W_VALUE || | 1514 | le16_to_cpu(serial_state_notification->wValue) != W_VALUE || |
1506 | le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || | 1515 | le16_to_cpu(serial_state_notification->wIndex) != if_num || |
1507 | le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { | 1516 | le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { |
1508 | dev_warn(&usb->dev, | 1517 | dev_warn(&usb->dev, |
1509 | "hso received invalid serial state notification\n"); | 1518 | "hso received invalid serial state notification\n"); |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 03832d3780aa..f54637828574 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c | |||
@@ -117,7 +117,6 @@ enum { | |||
117 | struct mcs7830_data { | 117 | struct mcs7830_data { |
118 | u8 multi_filter[8]; | 118 | u8 multi_filter[8]; |
119 | u8 config; | 119 | u8 config; |
120 | u8 link_counter; | ||
121 | }; | 120 | }; |
122 | 121 | ||
123 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; | 122 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; |
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb) | |||
561 | { | 560 | { |
562 | u8 *buf = urb->transfer_buffer; | 561 | u8 *buf = urb->transfer_buffer; |
563 | bool link, link_changed; | 562 | bool link, link_changed; |
564 | struct mcs7830_data *data = mcs7830_get_data(dev); | ||
565 | 563 | ||
566 | if (urb->actual_length < 16) | 564 | if (urb->actual_length < 16) |
567 | return; | 565 | return; |
568 | 566 | ||
569 | link = !(buf[1] & 0x20); | 567 | link = !(buf[1] == 0x20); |
570 | link_changed = netif_carrier_ok(dev->net) != link; | 568 | link_changed = netif_carrier_ok(dev->net) != link; |
571 | if (link_changed) { | 569 | if (link_changed) { |
572 | data->link_counter++; | 570 | usbnet_link_change(dev, link, 0); |
573 | /* | 571 | netdev_dbg(dev->net, "Link Status is: %d\n", link); |
574 | track link state 20 times to guard against erroneous | 572 | } |
575 | link state changes reported sometimes by the chip | ||
576 | */ | ||
577 | if (data->link_counter > 20) { | ||
578 | data->link_counter = 0; | ||
579 | usbnet_link_change(dev, link, 0); | ||
580 | netdev_dbg(dev->net, "Link Status is: %d\n", link); | ||
581 | } | ||
582 | } else | ||
583 | data->link_counter = 0; | ||
584 | } | 573 | } |
585 | 574 | ||
586 | static const struct driver_info moschip_info = { | 575 | static const struct driver_info moschip_info = { |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 916241d16c67..5d776447d9c3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -426,10 +426,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
426 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { | 426 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { |
427 | pr_debug("%s: short packet %i\n", dev->name, len); | 427 | pr_debug("%s: short packet %i\n", dev->name, len); |
428 | dev->stats.rx_length_errors++; | 428 | dev->stats.rx_length_errors++; |
429 | if (vi->big_packets) | 429 | if (vi->mergeable_rx_bufs) |
430 | give_pages(rq, buf); | ||
431 | else if (vi->mergeable_rx_bufs) | ||
432 | put_page(virt_to_head_page(buf)); | 430 | put_page(virt_to_head_page(buf)); |
431 | else if (vi->big_packets) | ||
432 | give_pages(rq, buf); | ||
433 | else | 433 | else |
434 | dev_kfree_skb(buf); | 434 | dev_kfree_skb(buf); |
435 | return; | 435 | return; |
@@ -1367,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev) | |||
1367 | 1367 | ||
1368 | static void virtnet_free_queues(struct virtnet_info *vi) | 1368 | static void virtnet_free_queues(struct virtnet_info *vi) |
1369 | { | 1369 | { |
1370 | int i; | ||
1371 | |||
1372 | for (i = 0; i < vi->max_queue_pairs; i++) | ||
1373 | netif_napi_del(&vi->rq[i].napi); | ||
1374 | |||
1370 | kfree(vi->rq); | 1375 | kfree(vi->rq); |
1371 | kfree(vi->sq); | 1376 | kfree(vi->sq); |
1372 | } | 1377 | } |
@@ -1396,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi) | |||
1396 | struct virtqueue *vq = vi->rq[i].vq; | 1401 | struct virtqueue *vq = vi->rq[i].vq; |
1397 | 1402 | ||
1398 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 1403 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
1399 | if (vi->big_packets) | 1404 | if (vi->mergeable_rx_bufs) |
1400 | give_pages(&vi->rq[i], buf); | ||
1401 | else if (vi->mergeable_rx_bufs) | ||
1402 | put_page(virt_to_head_page(buf)); | 1405 | put_page(virt_to_head_page(buf)); |
1406 | else if (vi->big_packets) | ||
1407 | give_pages(&vi->rq[i], buf); | ||
1403 | else | 1408 | else |
1404 | dev_kfree_skb(buf); | 1409 | dev_kfree_skb(buf); |
1405 | --vi->rq[i].num; | 1410 | --vi->rq[i].num; |
@@ -1792,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev) | |||
1792 | if (err) | 1797 | if (err) |
1793 | return err; | 1798 | return err; |
1794 | 1799 | ||
1795 | if (netif_running(vi->dev)) | 1800 | if (netif_running(vi->dev)) { |
1801 | for (i = 0; i < vi->curr_queue_pairs; i++) | ||
1802 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | ||
1803 | schedule_delayed_work(&vi->refill, 0); | ||
1804 | |||
1796 | for (i = 0; i < vi->max_queue_pairs; i++) | 1805 | for (i = 0; i < vi->max_queue_pairs; i++) |
1797 | virtnet_napi_enable(&vi->rq[i]); | 1806 | virtnet_napi_enable(&vi->rq[i]); |
1807 | } | ||
1798 | 1808 | ||
1799 | netif_device_attach(vi->dev); | 1809 | netif_device_attach(vi->dev); |
1800 | 1810 | ||
1801 | for (i = 0; i < vi->curr_queue_pairs; i++) | ||
1802 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | ||
1803 | schedule_delayed_work(&vi->refill, 0); | ||
1804 | |||
1805 | mutex_lock(&vi->config_lock); | 1811 | mutex_lock(&vi->config_lock); |
1806 | vi->config_enable = true; | 1812 | vi->config_enable = true; |
1807 | mutex_unlock(&vi->config_lock); | 1813 | mutex_unlock(&vi->config_lock); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0358c07f7669..ed384fee76ac 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1668 | netdev_dbg(dev, "circular route to %pI4\n", | 1668 | netdev_dbg(dev, "circular route to %pI4\n", |
1669 | &dst->sin.sin_addr.s_addr); | 1669 | &dst->sin.sin_addr.s_addr); |
1670 | dev->stats.collisions++; | 1670 | dev->stats.collisions++; |
1671 | goto tx_error; | 1671 | goto rt_tx_error; |
1672 | } | 1672 | } |
1673 | 1673 | ||
1674 | /* Bypass encapsulation if the destination is local */ | 1674 | /* Bypass encapsulation if the destination is local */ |
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
2440 | /* update header length based on lower device */ | 2440 | /* update header length based on lower device */ |
2441 | dev->hard_header_len = lowerdev->hard_header_len + | 2441 | dev->hard_header_len = lowerdev->hard_header_len + |
2442 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); | 2442 | (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); |
2443 | } | 2443 | } else if (use_ipv6) |
2444 | vxlan->flags |= VXLAN_F_IPV6; | ||
2444 | 2445 | ||
2445 | if (data[IFLA_VXLAN_TOS]) | 2446 | if (data[IFLA_VXLAN_TOS]) |
2446 | vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); | 2447 | vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 8d78253c26ce..a366d6b4626f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c | |||
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
76 | mask2 |= ATH9K_INT_CST; | 76 | mask2 |= ATH9K_INT_CST; |
77 | if (isr2 & AR_ISR_S2_TSFOOR) | 77 | if (isr2 & AR_ISR_S2_TSFOOR) |
78 | mask2 |= ATH9K_INT_TSFOOR; | 78 | mask2 |= ATH9K_INT_TSFOOR; |
79 | |||
80 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
81 | REG_WRITE(ah, AR_ISR_S2, isr2); | ||
82 | isr &= ~AR_ISR_BCNMISC; | ||
83 | } | ||
79 | } | 84 | } |
80 | 85 | ||
81 | isr = REG_READ(ah, AR_ISR_RAC); | 86 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) |
87 | isr = REG_READ(ah, AR_ISR_RAC); | ||
88 | |||
82 | if (isr == 0xffffffff) { | 89 | if (isr == 0xffffffff) { |
83 | *masked = 0; | 90 | *masked = 0; |
84 | return false; | 91 | return false; |
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
97 | 104 | ||
98 | *masked |= ATH9K_INT_TX; | 105 | *masked |= ATH9K_INT_TX; |
99 | 106 | ||
100 | s0_s = REG_READ(ah, AR_ISR_S0_S); | 107 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { |
108 | s0_s = REG_READ(ah, AR_ISR_S0_S); | ||
109 | s1_s = REG_READ(ah, AR_ISR_S1_S); | ||
110 | } else { | ||
111 | s0_s = REG_READ(ah, AR_ISR_S0); | ||
112 | REG_WRITE(ah, AR_ISR_S0, s0_s); | ||
113 | s1_s = REG_READ(ah, AR_ISR_S1); | ||
114 | REG_WRITE(ah, AR_ISR_S1, s1_s); | ||
115 | |||
116 | isr &= ~(AR_ISR_TXOK | | ||
117 | AR_ISR_TXDESC | | ||
118 | AR_ISR_TXERR | | ||
119 | AR_ISR_TXEOL); | ||
120 | } | ||
121 | |||
101 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); | 122 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); |
102 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); | 123 | ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); |
103 | |||
104 | s1_s = REG_READ(ah, AR_ISR_S1_S); | ||
105 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); | 124 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); |
106 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); | 125 | ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); |
107 | } | 126 | } |
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
114 | *masked |= mask2; | 133 | *masked |= mask2; |
115 | } | 134 | } |
116 | 135 | ||
117 | if (AR_SREV_9100(ah)) | 136 | if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) { |
118 | return true; | ||
119 | |||
120 | if (isr & AR_ISR_GENTMR) { | ||
121 | u32 s5_s; | 137 | u32 s5_s; |
122 | 138 | ||
123 | s5_s = REG_READ(ah, AR_ISR_S5_S); | 139 | if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { |
140 | s5_s = REG_READ(ah, AR_ISR_S5_S); | ||
141 | } else { | ||
142 | s5_s = REG_READ(ah, AR_ISR_S5); | ||
143 | } | ||
144 | |||
124 | ah->intr_gen_timer_trigger = | 145 | ah->intr_gen_timer_trigger = |
125 | MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); | 146 | MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); |
126 | 147 | ||
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) | |||
133 | if ((s5_s & AR_ISR_S5_TIM_TIMER) && | 154 | if ((s5_s & AR_ISR_S5_TIM_TIMER) && |
134 | !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) | 155 | !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) |
135 | *masked |= ATH9K_INT_TIM_TIMER; | 156 | *masked |= ATH9K_INT_TIM_TIMER; |
157 | |||
158 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
159 | REG_WRITE(ah, AR_ISR_S5, s5_s); | ||
160 | isr &= ~AR_ISR_GENTMR; | ||
161 | } | ||
136 | } | 162 | } |
137 | 163 | ||
164 | if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { | ||
165 | REG_WRITE(ah, AR_ISR, isr); | ||
166 | REG_READ(ah, AR_ISR); | ||
167 | } | ||
168 | |||
169 | if (AR_SREV_9100(ah)) | ||
170 | return true; | ||
171 | |||
138 | if (sync_cause) { | 172 | if (sync_cause) { |
139 | ath9k_debug_sync_cause(common, sync_cause); | 173 | ath9k_debug_sync_cause(common, sync_cause); |
140 | fatal_int = | 174 | fatal_int = |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 1ec52356b5a1..130657db5c43 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq) | |||
3984 | int quick_drop; | 3984 | int quick_drop; |
3985 | s32 t[3], f[3] = {5180, 5500, 5785}; | 3985 | s32 t[3], f[3] = {5180, 5500, 5785}; |
3986 | 3986 | ||
3987 | if (!(pBase->miscConfiguration & BIT(1))) | 3987 | if (!(pBase->miscConfiguration & BIT(4))) |
3988 | return; | 3988 | return; |
3989 | 3989 | ||
3990 | if (freq < 4000) | 3990 | if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) { |
3991 | quick_drop = eep->modalHeader2G.quick_drop; | 3991 | if (freq < 4000) { |
3992 | else { | 3992 | quick_drop = eep->modalHeader2G.quick_drop; |
3993 | t[0] = eep->base_ext1.quick_drop_low; | 3993 | } else { |
3994 | t[1] = eep->modalHeader5G.quick_drop; | 3994 | t[0] = eep->base_ext1.quick_drop_low; |
3995 | t[2] = eep->base_ext1.quick_drop_high; | 3995 | t[1] = eep->modalHeader5G.quick_drop; |
3996 | quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); | 3996 | t[2] = eep->base_ext1.quick_drop_high; |
3997 | quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); | ||
3998 | } | ||
3999 | REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); | ||
3997 | } | 4000 | } |
3998 | REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); | ||
3999 | } | 4001 | } |
4000 | 4002 | ||
4001 | static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) | 4003 | static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) |
@@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz) | |||
4035 | struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; | 4037 | struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; |
4036 | u8 bias; | 4038 | u8 bias; |
4037 | 4039 | ||
4038 | if (!(eep->baseEepHeader.featureEnable & 0x40)) | 4040 | if (!(eep->baseEepHeader.miscConfiguration & 0x40)) |
4039 | return; | 4041 | return; |
4040 | 4042 | ||
4041 | if (!AR_SREV_9300(ah)) | 4043 | if (!AR_SREV_9300(ah)) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9a2657fdd9cc..608d739d1378 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | |||
127 | struct ath9k_vif_iter_data *iter_data = data; | 127 | struct ath9k_vif_iter_data *iter_data = data; |
128 | int i; | 128 | int i; |
129 | 129 | ||
130 | for (i = 0; i < ETH_ALEN; i++) | 130 | if (iter_data->hw_macaddr != NULL) { |
131 | iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); | 131 | for (i = 0; i < ETH_ALEN; i++) |
132 | iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); | ||
133 | } else { | ||
134 | iter_data->hw_macaddr = mac; | ||
135 | } | ||
132 | } | 136 | } |
133 | 137 | ||
134 | static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, | 138 | static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, |
135 | struct ieee80211_vif *vif) | 139 | struct ieee80211_vif *vif) |
136 | { | 140 | { |
137 | struct ath_common *common = ath9k_hw_common(priv->ah); | 141 | struct ath_common *common = ath9k_hw_common(priv->ah); |
138 | struct ath9k_vif_iter_data iter_data; | 142 | struct ath9k_vif_iter_data iter_data; |
139 | 143 | ||
140 | /* | 144 | /* |
141 | * Use the hardware MAC address as reference, the hardware uses it | 145 | * Pick the MAC address of the first interface as the new hardware |
142 | * together with the BSSID mask when matching addresses. | 146 | * MAC address. The hardware will use it together with the BSSID mask |
147 | * when matching addresses. | ||
143 | */ | 148 | */ |
144 | iter_data.hw_macaddr = common->macaddr; | 149 | iter_data.hw_macaddr = NULL; |
145 | memset(&iter_data.mask, 0xff, ETH_ALEN); | 150 | memset(&iter_data.mask, 0xff, ETH_ALEN); |
146 | 151 | ||
147 | if (vif) | 152 | if (vif) |
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, | |||
153 | ath9k_htc_bssid_iter, &iter_data); | 158 | ath9k_htc_bssid_iter, &iter_data); |
154 | 159 | ||
155 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); | 160 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); |
161 | |||
162 | if (iter_data.hw_macaddr) | ||
163 | memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); | ||
164 | |||
156 | ath_hw_setbssidmask(common); | 165 | ath_hw_setbssidmask(common); |
157 | } | 166 | } |
158 | 167 | ||
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, | |||
1063 | goto out; | 1072 | goto out; |
1064 | } | 1073 | } |
1065 | 1074 | ||
1066 | ath9k_htc_set_bssid_mask(priv, vif); | 1075 | ath9k_htc_set_mac_bssid_mask(priv, vif); |
1067 | 1076 | ||
1068 | priv->vif_slot |= (1 << avp->index); | 1077 | priv->vif_slot |= (1 << avp->index); |
1069 | priv->nvifs++; | 1078 | priv->nvifs++; |
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, | |||
1128 | 1137 | ||
1129 | ath9k_htc_set_opmode(priv); | 1138 | ath9k_htc_set_opmode(priv); |
1130 | 1139 | ||
1131 | ath9k_htc_set_bssid_mask(priv, vif); | 1140 | ath9k_htc_set_mac_bssid_mask(priv, vif); |
1132 | 1141 | ||
1133 | /* | 1142 | /* |
1134 | * Stop ANI only if there are no associated station interfaces. | 1143 | * Stop ANI only if there are no associated station interfaces. |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 54b04155e43b..8918035da3a3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah) | |||
146 | else | 146 | else |
147 | clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; | 147 | clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; |
148 | 148 | ||
149 | if (IS_CHAN_HT40(chan)) | 149 | if (chan) { |
150 | clockrate *= 2; | 150 | if (IS_CHAN_HT40(chan)) |
151 | 151 | clockrate *= 2; | |
152 | if (ah->curchan) { | ||
153 | if (IS_CHAN_HALF_RATE(chan)) | 152 | if (IS_CHAN_HALF_RATE(chan)) |
154 | clockrate /= 2; | 153 | clockrate /= 2; |
155 | if (IS_CHAN_QUARTER_RATE(chan)) | 154 | if (IS_CHAN_QUARTER_RATE(chan)) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 74f452c7b166..21aa09e0e825 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw, | |||
965 | struct ath_common *common = ath9k_hw_common(ah); | 965 | struct ath_common *common = ath9k_hw_common(ah); |
966 | 966 | ||
967 | /* | 967 | /* |
968 | * Use the hardware MAC address as reference, the hardware uses it | 968 | * Pick the MAC address of the first interface as the new hardware |
969 | * together with the BSSID mask when matching addresses. | 969 | * MAC address. The hardware will use it together with the BSSID mask |
970 | * when matching addresses. | ||
970 | */ | 971 | */ |
971 | memset(iter_data, 0, sizeof(*iter_data)); | 972 | memset(iter_data, 0, sizeof(*iter_data)); |
972 | memset(&iter_data->mask, 0xff, ETH_ALEN); | 973 | memset(&iter_data->mask, 0xff, ETH_ALEN); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 09cdbcd09739..b5a19e098f2d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1276 | if (!rts_thresh || (len > rts_thresh)) | 1276 | if (!rts_thresh || (len > rts_thresh)) |
1277 | rts = true; | 1277 | rts = true; |
1278 | } | 1278 | } |
1279 | |||
1280 | if (!aggr) | ||
1281 | len = fi->framelen; | ||
1282 | |||
1279 | ath_buf_set_rate(sc, bf, &info, len, rts); | 1283 | ath_buf_set_rate(sc, bf, &info, len, rts); |
1280 | } | 1284 | } |
1281 | 1285 | ||
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index de9eb2cfbf4b..366339421d4f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c | |||
@@ -2041,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) | |||
2041 | case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: | 2041 | case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: |
2042 | mutex_lock(&wcn->hal_ind_mutex); | 2042 | mutex_lock(&wcn->hal_ind_mutex); |
2043 | msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); | 2043 | msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); |
2044 | msg_ind->msg_len = len; | 2044 | if (msg_ind) { |
2045 | msg_ind->msg = kmalloc(len, GFP_KERNEL); | 2045 | msg_ind->msg_len = len; |
2046 | memcpy(msg_ind->msg, buf, len); | 2046 | msg_ind->msg = kmalloc(len, GFP_KERNEL); |
2047 | list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); | 2047 | memcpy(msg_ind->msg, buf, len); |
2048 | queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); | 2048 | list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); |
2049 | wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); | 2049 | queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); |
2050 | wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); | ||
2051 | } | ||
2050 | mutex_unlock(&wcn->hal_ind_mutex); | 2052 | mutex_unlock(&wcn->hal_ind_mutex); |
2053 | if (msg_ind) | ||
2054 | break; | ||
2055 | /* FIXME: Do something smarter than just printing an error. */ | ||
2056 | wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", | ||
2057 | msg_header->msg_type); | ||
2051 | break; | 2058 | break; |
2052 | default: | 2059 | default: |
2053 | wcn36xx_err("SMD_EVENT (%d) not supported\n", | 2060 | wcn36xx_err("SMD_EVENT (%d) not supported\n", |
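Note: the wcn36xx hunk adds the missing NULL check after kmalloc() before the indication message is filled in and queued, so the out-of-memory case now falls through to an error message instead of a NULL dereference. A condensed sketch of the allocate-check-queue flow, with placeholder types and names:

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: struct ind_msg and the helper name are placeholders. */
struct ind_msg {
	size_t len;
	void *buf;
};

static struct ind_msg *alloc_indication(const void *data, size_t len)
{
	struct ind_msg *msg = kmalloc(sizeof(*msg), GFP_KERNEL);

	if (!msg)
		return NULL;	/* caller logs the failure and drops the event */

	msg->buf = kmemdup(data, len, GFP_KERNEL);
	if (!msg->buf) {
		kfree(msg);
		return NULL;
	}
	msg->len = len;
	return msg;
}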
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b00a7e92225f..54e36fcb3954 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig | |||
@@ -5,6 +5,8 @@ config BRCMSMAC | |||
5 | tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" | 5 | tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" |
6 | depends on MAC80211 | 6 | depends on MAC80211 |
7 | depends on BCMA | 7 | depends on BCMA |
8 | select NEW_LEDS if BCMA_DRIVER_GPIO | ||
9 | select LEDS_CLASS if BCMA_DRIVER_GPIO | ||
8 | select BRCMUTIL | 10 | select BRCMUTIL |
9 | select FW_LOADER | 11 | select FW_LOADER |
10 | select CRC_CCITT | 12 | select CRC_CCITT |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 905704e335d7..abc9ceca70f3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | |||
@@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev, | |||
109 | brcmf_err("Disable F2 failed:%d\n", | 109 | brcmf_err("Disable F2 failed:%d\n", |
110 | err_ret); | 110 | err_ret); |
111 | } | 111 | } |
112 | } else { | ||
113 | err_ret = -ENOENT; | ||
112 | } | 114 | } |
113 | } else if ((regaddr == SDIO_CCCR_ABORT) || | 115 | } else if ((regaddr == SDIO_CCCR_ABORT) || |
114 | (regaddr == SDIO_CCCR_IENx)) { | 116 | (regaddr == SDIO_CCCR_IENx)) { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 85879dbaa402..3c34a72a5d64 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -67,8 +67,8 @@ | |||
67 | #include "iwl-agn-hw.h" | 67 | #include "iwl-agn-hw.h" |
68 | 68 | ||
69 | /* Highest firmware API version supported */ | 69 | /* Highest firmware API version supported */ |
70 | #define IWL7260_UCODE_API_MAX 7 | 70 | #define IWL7260_UCODE_API_MAX 8 |
71 | #define IWL3160_UCODE_API_MAX 7 | 71 | #define IWL3160_UCODE_API_MAX 8 |
72 | 72 | ||
73 | /* Oldest version we won't warn about */ | 73 | /* Oldest version we won't warn about */ |
74 | #define IWL7260_UCODE_API_OK 7 | 74 | #define IWL7260_UCODE_API_OK 7 |
@@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = { | |||
130 | .ht_params = &iwl7000_ht_params, | 130 | .ht_params = &iwl7000_ht_params, |
131 | .nvm_ver = IWL7260_NVM_VERSION, | 131 | .nvm_ver = IWL7260_NVM_VERSION, |
132 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, | 132 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, |
133 | .host_interrupt_operation_mode = true, | ||
133 | }; | 134 | }; |
134 | 135 | ||
135 | const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { | 136 | const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { |
@@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { | |||
140 | .nvm_ver = IWL7260_NVM_VERSION, | 141 | .nvm_ver = IWL7260_NVM_VERSION, |
141 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, | 142 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, |
142 | .high_temp = true, | 143 | .high_temp = true, |
144 | .host_interrupt_operation_mode = true, | ||
143 | }; | 145 | }; |
144 | 146 | ||
145 | const struct iwl_cfg iwl7260_2n_cfg = { | 147 | const struct iwl_cfg iwl7260_2n_cfg = { |
@@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = { | |||
149 | .ht_params = &iwl7000_ht_params, | 151 | .ht_params = &iwl7000_ht_params, |
150 | .nvm_ver = IWL7260_NVM_VERSION, | 152 | .nvm_ver = IWL7260_NVM_VERSION, |
151 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, | 153 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, |
154 | .host_interrupt_operation_mode = true, | ||
152 | }; | 155 | }; |
153 | 156 | ||
154 | const struct iwl_cfg iwl7260_n_cfg = { | 157 | const struct iwl_cfg iwl7260_n_cfg = { |
@@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = { | |||
158 | .ht_params = &iwl7000_ht_params, | 161 | .ht_params = &iwl7000_ht_params, |
159 | .nvm_ver = IWL7260_NVM_VERSION, | 162 | .nvm_ver = IWL7260_NVM_VERSION, |
160 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, | 163 | .nvm_calib_ver = IWL7260_TX_POWER_VERSION, |
164 | .host_interrupt_operation_mode = true, | ||
161 | }; | 165 | }; |
162 | 166 | ||
163 | const struct iwl_cfg iwl3160_2ac_cfg = { | 167 | const struct iwl_cfg iwl3160_2ac_cfg = { |
@@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = { | |||
167 | .ht_params = &iwl7000_ht_params, | 171 | .ht_params = &iwl7000_ht_params, |
168 | .nvm_ver = IWL3160_NVM_VERSION, | 172 | .nvm_ver = IWL3160_NVM_VERSION, |
169 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, | 173 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, |
174 | .host_interrupt_operation_mode = true, | ||
170 | }; | 175 | }; |
171 | 176 | ||
172 | const struct iwl_cfg iwl3160_2n_cfg = { | 177 | const struct iwl_cfg iwl3160_2n_cfg = { |
@@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = { | |||
176 | .ht_params = &iwl7000_ht_params, | 181 | .ht_params = &iwl7000_ht_params, |
177 | .nvm_ver = IWL3160_NVM_VERSION, | 182 | .nvm_ver = IWL3160_NVM_VERSION, |
178 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, | 183 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, |
184 | .host_interrupt_operation_mode = true, | ||
179 | }; | 185 | }; |
180 | 186 | ||
181 | const struct iwl_cfg iwl3160_n_cfg = { | 187 | const struct iwl_cfg iwl3160_n_cfg = { |
@@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = { | |||
185 | .ht_params = &iwl7000_ht_params, | 191 | .ht_params = &iwl7000_ht_params, |
186 | .nvm_ver = IWL3160_NVM_VERSION, | 192 | .nvm_ver = IWL3160_NVM_VERSION, |
187 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, | 193 | .nvm_calib_ver = IWL3160_TX_POWER_VERSION, |
194 | .host_interrupt_operation_mode = true, | ||
188 | }; | 195 | }; |
189 | 196 | ||
190 | const struct iwl_cfg iwl7265_2ac_cfg = { | 197 | const struct iwl_cfg iwl7265_2ac_cfg = { |
@@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = { | |||
196 | .nvm_calib_ver = IWL7265_TX_POWER_VERSION, | 203 | .nvm_calib_ver = IWL7265_TX_POWER_VERSION, |
197 | }; | 204 | }; |
198 | 205 | ||
206 | const struct iwl_cfg iwl7265_2n_cfg = { | ||
207 | .name = "Intel(R) Dual Band Wireless N 7265", | ||
208 | .fw_name_pre = IWL7265_FW_PRE, | ||
209 | IWL_DEVICE_7000, | ||
210 | .ht_params = &iwl7000_ht_params, | ||
211 | .nvm_ver = IWL7265_NVM_VERSION, | ||
212 | .nvm_calib_ver = IWL7265_TX_POWER_VERSION, | ||
213 | }; | ||
214 | |||
215 | const struct iwl_cfg iwl7265_n_cfg = { | ||
216 | .name = "Intel(R) Wireless N 7265", | ||
217 | .fw_name_pre = IWL7265_FW_PRE, | ||
218 | IWL_DEVICE_7000, | ||
219 | .ht_params = &iwl7000_ht_params, | ||
220 | .nvm_ver = IWL7265_NVM_VERSION, | ||
221 | .nvm_calib_ver = IWL7265_TX_POWER_VERSION, | ||
222 | }; | ||
223 | |||
199 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 224 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
200 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | 225 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); |
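The hunk above raises the highest firmware API the 7260/3160 driver will try to 8 while leaving the no-warning floor (UCODE_API_OK) at 7, and declares the two new 7265 N-only configurations. For orientation, a minimal sketch of how the requested firmware file name is usually composed from these macros; the prefix string and __stringify() details are assumptions, not shown in this hunk:

    /* sketch only -- the real macro bodies live elsewhere in iwl-7000.c */
    #define IWL7260_FW_PRE                  "iwlwifi-7260-"
    #define IWL7260_MODULE_FIRMWARE(api)    IWL7260_FW_PRE __stringify(api) ".ucode"
    /* MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)) therefore
     * advertises "iwlwifi-7260-7.ucode"; bumping _API_MAX only changes which
     * newer images the driver is willing to try at load time. */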
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 18f232e8e812..03fd9aa8bfda 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h | |||
@@ -207,6 +207,8 @@ struct iwl_eeprom_params { | |||
207 | * @rx_with_siso_diversity: 1x1 device with rx antenna diversity | 207 | * @rx_with_siso_diversity: 1x1 device with rx antenna diversity |
208 | * @internal_wimax_coex: internal wifi/wimax combo device | 208 | * @internal_wimax_coex: internal wifi/wimax combo device |
209 | * @high_temp: Is this NIC designated to operate at high temperature. | 209 | * @high_temp: Is this NIC designated to operate at high temperature. |
210 | * @host_interrupt_operation_mode: device needs host interrupt operation | ||
211 | * mode set | ||
210 | * | 212 | * |
211 | * We enable the driver to be backward compatible wrt. hardware features. | 213 | * We enable the driver to be backward compatible wrt. hardware features. |
212 | * API differences in uCode shouldn't be handled here but through TLVs | 214 | * API differences in uCode shouldn't be handled here but through TLVs |
@@ -235,6 +237,7 @@ struct iwl_cfg { | |||
235 | enum iwl_led_mode led_mode; | 237 | enum iwl_led_mode led_mode; |
236 | const bool rx_with_siso_diversity; | 238 | const bool rx_with_siso_diversity; |
237 | const bool internal_wimax_coex; | 239 | const bool internal_wimax_coex; |
240 | const bool host_interrupt_operation_mode; | ||
238 | bool high_temp; | 241 | bool high_temp; |
239 | }; | 242 | }; |
240 | 243 | ||
@@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg; | |||
294 | extern const struct iwl_cfg iwl3160_2n_cfg; | 297 | extern const struct iwl_cfg iwl3160_2n_cfg; |
295 | extern const struct iwl_cfg iwl3160_n_cfg; | 298 | extern const struct iwl_cfg iwl3160_n_cfg; |
296 | extern const struct iwl_cfg iwl7265_2ac_cfg; | 299 | extern const struct iwl_cfg iwl7265_2ac_cfg; |
300 | extern const struct iwl_cfg iwl7265_2n_cfg; | ||
301 | extern const struct iwl_cfg iwl7265_n_cfg; | ||
297 | #endif /* CONFIG_IWLMVM */ | 302 | #endif /* CONFIG_IWLMVM */ |
298 | 303 | ||
299 | #endif /* __IWL_CONFIG_H__ */ | 304 | #endif /* __IWL_CONFIG_H__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 54a4fdc631b7..da4eca8b3007 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h | |||
@@ -495,14 +495,11 @@ enum secure_load_status_reg { | |||
495 | * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit | 495 | * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit |
496 | * | 496 | * |
497 | * default interrupt coalescing timer is 64 x 32 = 2048 usecs | 497 | * default interrupt coalescing timer is 64 x 32 = 2048 usecs |
498 | * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs | ||
499 | */ | 498 | */ |
500 | #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) | 499 | #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) |
501 | #define IWL_HOST_INT_TIMEOUT_DEF (0x40) | 500 | #define IWL_HOST_INT_TIMEOUT_DEF (0x40) |
502 | #define IWL_HOST_INT_TIMEOUT_MIN (0x0) | 501 | #define IWL_HOST_INT_TIMEOUT_MIN (0x0) |
503 | #define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) | 502 | #define IWL_HOST_INT_OPER_MODE BIT(31) |
504 | #define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) | ||
505 | #define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) | ||
506 | 503 | ||
507 | /***************************************************************************** | 504 | /***************************************************************************** |
508 | * 7000/3000 series SHR DTS addresses * | 505 | * 7000/3000 series SHR DTS addresses * |
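The calibration-timeout defines are dropped and a single operation-mode bit (bit 31 of CSR_INT_COALESCING) takes their place. How that bit is consumed can be seen in the pcie/rx.c hunk further down; condensed:

    /* condensed from the rx.c hunk below */
    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); /* 64 x 32 usec */
    if (trans->cfg->host_interrupt_operation_mode)
            iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);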
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 5d066cbc5ac7..75b72a956552 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c | |||
@@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) | |||
391 | BT_VALID_LUT | | 391 | BT_VALID_LUT | |
392 | BT_VALID_WIFI_RX_SW_PRIO_BOOST | | 392 | BT_VALID_WIFI_RX_SW_PRIO_BOOST | |
393 | BT_VALID_WIFI_TX_SW_PRIO_BOOST | | 393 | BT_VALID_WIFI_TX_SW_PRIO_BOOST | |
394 | BT_VALID_MULTI_PRIO_LUT | | ||
395 | BT_VALID_CORUN_LUT_20 | | 394 | BT_VALID_CORUN_LUT_20 | |
396 | BT_VALID_CORUN_LUT_40 | | 395 | BT_VALID_CORUN_LUT_40 | |
397 | BT_VALID_ANT_ISOLATION | | 396 | BT_VALID_ANT_ISOLATION | |
@@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, | |||
842 | 841 | ||
843 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], | 842 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], |
844 | lockdep_is_held(&mvm->mutex)); | 843 | lockdep_is_held(&mvm->mutex)); |
844 | |||
845 | /* This can happen if the station has just been removed */ | ||
846 | if (IS_ERR_OR_NULL(sta)) | ||
847 | return; | ||
848 | |||
845 | mvmsta = (void *)sta->drv_priv; | 849 | mvmsta = (void *)sta->drv_priv; |
846 | 850 | ||
847 | data->num_bss_ifaces++; | 851 | data->num_bss_ifaces++; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 6f45966817bb..b9b81e881dd0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c | |||
@@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, | |||
895 | /* new API returns next, not last-used seqno */ | 895 | /* new API returns next, not last-used seqno */ |
896 | if (mvm->fw->ucode_capa.flags & | 896 | if (mvm->fw->ucode_capa.flags & |
897 | IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) | 897 | IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) |
898 | err -= 0x10; | 898 | err = (u16) (err - 0x10); |
899 | } | 899 | } |
900 | 900 | ||
901 | iwl_free_resp(&cmd); | 901 | iwl_free_resp(&cmd); |
@@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, | |||
1549 | if (gtkdata.unhandled_cipher) | 1549 | if (gtkdata.unhandled_cipher) |
1550 | return false; | 1550 | return false; |
1551 | if (!gtkdata.num_keys) | 1551 | if (!gtkdata.num_keys) |
1552 | return true; | 1552 | goto out; |
1553 | if (!gtkdata.last_gtk) | 1553 | if (!gtkdata.last_gtk) |
1554 | return false; | 1554 | return false; |
1555 | 1555 | ||
@@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, | |||
1600 | (void *)&replay_ctr, GFP_KERNEL); | 1600 | (void *)&replay_ctr, GFP_KERNEL); |
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | out: | ||
1603 | mvmvif->seqno_valid = true; | 1604 | mvmvif->seqno_valid = true; |
1604 | /* +0x10 because the set API expects next-to-use, not last-used */ | 1605 | /* +0x10 because the set API expects next-to-use, not last-used */ |
1605 | mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; | 1606 | mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; |
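The (u16) cast matters because the non-QoS sequence counter is a 16-bit quantity: subtracting 0x10 from a value near zero has to wrap around rather than drive the int negative. A minimal illustration (values are made up):

    u16 seq = 0x0008;
    seq = (u16)(seq - 0x10);        /* wraps to 0xfff8 instead of -8 */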
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 9864d713eb2c..a8fe6b41f9a3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c | |||
@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file, | |||
119 | 119 | ||
120 | if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) | 120 | if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) |
121 | return -EINVAL; | 121 | return -EINVAL; |
122 | if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) | ||
123 | return -EINVAL; | ||
124 | if (drain < 0 || drain > 1) | ||
125 | return -EINVAL; | ||
122 | 126 | ||
123 | mutex_lock(&mvm->mutex); | 127 | mutex_lock(&mvm->mutex); |
124 | 128 | ||
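sscanf() only guarantees that two integers were parsed, not that they are usable, so the added checks reject an out-of-range station id or drain value before the mutex is taken. The pattern, condensed from the hunk above:

    if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
            return -EINVAL;
    if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
            return -EINVAL;
    if (drain < 0 || drain > 1)
            return -EINVAL;
    /* only now is sta_id safe to use as an array index */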
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 33cf56fdfc41..95ce4b601fef 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
176 | * P2P Device discoverability, while there are other higher priority | 176 | * P2P Device discoverability, while there are other higher priority |
177 | * events in the system). | 177 | * events in the system). |
178 | */ | 178 | */ |
179 | if (WARN_ONCE(!le32_to_cpu(notif->status), | 179 | if (!le32_to_cpu(notif->status)) { |
180 | "Failed to schedule time event\n")) { | 180 | bool start = le32_to_cpu(notif->action) & |
181 | TE_V2_NOTIF_HOST_EVENT_START; | ||
182 | IWL_WARN(mvm, "Time Event %s notification failure\n", | ||
183 | start ? "start" : "end"); | ||
181 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { | 184 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { |
182 | iwl_mvm_te_clear_data(mvm, te_data); | 185 | iwl_mvm_te_clear_data(mvm, te_data); |
183 | return; | 186 | return; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 941c0c88f982..e6272546395a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -353,6 +353,33 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
353 | 353 | ||
354 | /* 7265 Series */ | 354 | /* 7265 Series */ |
355 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 355 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
356 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | ||
357 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, | ||
358 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, | ||
359 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, | ||
360 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, | ||
361 | {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)}, | ||
362 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, | ||
363 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, | ||
364 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, | ||
365 | {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, | ||
366 | {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, | ||
367 | {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, | ||
368 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, | ||
369 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, | ||
370 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, | ||
371 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, | ||
372 | {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, | ||
373 | {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, | ||
374 | {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, | ||
375 | {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, | ||
376 | {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, | ||
377 | {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, | ||
378 | {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, | ||
379 | {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, | ||
380 | {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, | ||
381 | {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, | ||
382 | {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, | ||
356 | #endif /* CONFIG_IWLMVM */ | 383 | #endif /* CONFIG_IWLMVM */ |
357 | 384 | ||
358 | {0} | 385 | {0} |
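Each new table entry binds one Intel device/subdevice id pair to one of the iwl_cfg structures declared in iwl-config.h. The IWL_PCI_DEVICE() expansion is not part of this patch; a plausible sketch of what it resolves to:

    /* assumed expansion, for illustration only */
    #define IWL_PCI_DEVICE(dev, subdev, cfg)                        \
            .vendor = PCI_VENDOR_ID_INTEL, .device = (dev),         \
            .subvendor = PCI_ANY_ID,       .subdevice = (subdev),   \
            .driver_data = (kernel_ulong_t)&(cfg)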
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index fa22639b63c9..051268c037b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) | |||
477 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); | 477 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); |
478 | } | 478 | } |
479 | 479 | ||
480 | static inline void iwl_nic_error(struct iwl_trans *trans) | ||
481 | { | ||
482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
483 | |||
484 | set_bit(STATUS_FW_ERROR, &trans_pcie->status); | ||
485 | iwl_op_mode_nic_error(trans->op_mode); | ||
486 | } | ||
487 | |||
480 | #endif /* __iwl_trans_int_pcie_h__ */ | 488 | #endif /* __iwl_trans_int_pcie_h__ */ |
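The new iwl_nic_error() helper guarantees that STATUS_FW_ERROR is set in the transport before the op mode is told about the failure, which is what the rx.c and tx.c hunks below rely on when they swap their direct iwl_op_mode_nic_error() calls for it. A call site, condensed from the command-queue watchdog in tx.c:

    if (nfreed++ > 0) {
            IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
                    idx, q->write_ptr, q->read_ptr);
            iwl_nic_error(trans);   /* sets STATUS_FW_ERROR, then notifies op mode */
    }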
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 3f237b42eb36..be3995afa9d0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) | |||
489 | 489 | ||
490 | /* Set interrupt coalescing timer to default (2048 usecs) */ | 490 | /* Set interrupt coalescing timer to default (2048 usecs) */ |
491 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | 491 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); |
492 | |||
493 | /* W/A for interrupt coalescing bug in 7260 and 3160 */ | ||
494 | if (trans->cfg->host_interrupt_operation_mode) | ||
495 | iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); | ||
492 | } | 496 | } |
493 | 497 | ||
494 | static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) | 498 | static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) |
@@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) | |||
796 | iwl_pcie_dump_csr(trans); | 800 | iwl_pcie_dump_csr(trans); |
797 | iwl_dump_fh(trans, NULL); | 801 | iwl_dump_fh(trans, NULL); |
798 | 802 | ||
803 | /* set the ERROR bit before we wake up the caller */ | ||
799 | set_bit(STATUS_FW_ERROR, &trans_pcie->status); | 804 | set_bit(STATUS_FW_ERROR, &trans_pcie->status); |
800 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 805 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
801 | wake_up(&trans_pcie->wait_command_queue); | 806 | wake_up(&trans_pcie->wait_command_queue); |
802 | 807 | ||
803 | local_bh_disable(); | 808 | local_bh_disable(); |
804 | iwl_op_mode_nic_error(trans->op_mode); | 809 | iwl_nic_error(trans); |
805 | local_bh_enable(); | 810 | local_bh_enable(); |
806 | } | 811 | } |
807 | 812 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 5d9337bec67a..cde9c16f6e4f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) | |||
279 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | 279 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
280 | iwl_pcie_apm_init(trans); | 280 | iwl_pcie_apm_init(trans); |
281 | 281 | ||
282 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
283 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
284 | |||
285 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 282 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
286 | 283 | ||
287 | iwl_pcie_set_pwr(trans, false); | 284 | iwl_pcie_set_pwr(trans, false); |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 059c5acad3a0..0adde919a258 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) | |||
207 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | 207 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, |
208 | le32_to_cpu(txq->scratchbufs[i].scratch)); | 208 | le32_to_cpu(txq->scratchbufs[i].scratch)); |
209 | 209 | ||
210 | iwl_op_mode_nic_error(trans->op_mode); | 210 | iwl_nic_error(trans); |
211 | } | 211 | } |
212 | 212 | ||
213 | /* | 213 | /* |
@@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) | |||
1023 | if (nfreed++ > 0) { | 1023 | if (nfreed++ > 0) { |
1024 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | 1024 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", |
1025 | idx, q->write_ptr, q->read_ptr); | 1025 | idx, q->write_ptr, q->read_ptr); |
1026 | iwl_op_mode_nic_error(trans->op_mode); | 1026 | iwl_nic_error(trans); |
1027 | } | 1027 | } |
1028 | } | 1028 | } |
1029 | 1029 | ||
@@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, | |||
1562 | get_cmd_string(trans_pcie, cmd->id)); | 1562 | get_cmd_string(trans_pcie, cmd->id)); |
1563 | ret = -ETIMEDOUT; | 1563 | ret = -ETIMEDOUT; |
1564 | 1564 | ||
1565 | iwl_op_mode_nic_error(trans->op_mode); | 1565 | iwl_nic_error(trans); |
1566 | 1566 | ||
1567 | goto cancel; | 1567 | goto cancel; |
1568 | } | 1568 | } |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9df7bc91a26f..a1b32ee9594a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr { | |||
383 | __le16 rt_chbitmask; | 383 | __le16 rt_chbitmask; |
384 | } __packed; | 384 | } __packed; |
385 | 385 | ||
386 | struct hwsim_radiotap_ack_hdr { | ||
387 | struct ieee80211_radiotap_header hdr; | ||
388 | u8 rt_flags; | ||
389 | u8 pad; | ||
390 | __le16 rt_channel; | ||
391 | __le16 rt_chbitmask; | ||
392 | } __packed; | ||
393 | |||
386 | /* MAC80211_HWSIM netlink family */ | 394 | /* MAC80211_HWSIM netlink family */ |
387 | static struct genl_family hwsim_genl_family = { | 395 | static struct genl_family hwsim_genl_family = { |
388 | .id = GENL_ID_GENERATE, | 396 | .id = GENL_ID_GENERATE, |
@@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, | |||
500 | const u8 *addr) | 508 | const u8 *addr) |
501 | { | 509 | { |
502 | struct sk_buff *skb; | 510 | struct sk_buff *skb; |
503 | struct hwsim_radiotap_hdr *hdr; | 511 | struct hwsim_radiotap_ack_hdr *hdr; |
504 | u16 flags; | 512 | u16 flags; |
505 | struct ieee80211_hdr *hdr11; | 513 | struct ieee80211_hdr *hdr11; |
506 | 514 | ||
@@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, | |||
511 | if (skb == NULL) | 519 | if (skb == NULL) |
512 | return; | 520 | return; |
513 | 521 | ||
514 | hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr)); | 522 | hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr)); |
515 | hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; | 523 | hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; |
516 | hdr->hdr.it_pad = 0; | 524 | hdr->hdr.it_pad = 0; |
517 | hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); | 525 | hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); |
518 | hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | 526 | hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | |
519 | (1 << IEEE80211_RADIOTAP_CHANNEL)); | 527 | (1 << IEEE80211_RADIOTAP_CHANNEL)); |
520 | hdr->rt_flags = 0; | 528 | hdr->rt_flags = 0; |
521 | hdr->rt_rate = 0; | 529 | hdr->pad = 0; |
522 | hdr->rt_channel = cpu_to_le16(chan->center_freq); | 530 | hdr->rt_channel = cpu_to_le16(chan->center_freq); |
523 | flags = IEEE80211_CHAN_2GHZ; | 531 | flags = IEEE80211_CHAN_2GHZ; |
524 | hdr->rt_chbitmask = cpu_to_le16(flags); | 532 | hdr->rt_chbitmask = cpu_to_le16(flags); |
@@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, | |||
1230 | HRTIMER_MODE_REL); | 1238 | HRTIMER_MODE_REL); |
1231 | } else if (!info->enable_beacon) { | 1239 | } else if (!info->enable_beacon) { |
1232 | unsigned int count = 0; | 1240 | unsigned int count = 0; |
1233 | ieee80211_iterate_active_interfaces( | 1241 | ieee80211_iterate_active_interfaces_atomic( |
1234 | data->hw, IEEE80211_IFACE_ITER_NORMAL, | 1242 | data->hw, IEEE80211_IFACE_ITER_NORMAL, |
1235 | mac80211_hwsim_bcn_en_iter, &count); | 1243 | mac80211_hwsim_bcn_en_iter, &count); |
1236 | wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", | 1244 | wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", |
@@ -2003,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
2003 | (hwsim_flags & HWSIM_TX_STAT_ACK)) { | 2011 | (hwsim_flags & HWSIM_TX_STAT_ACK)) { |
2004 | if (skb->len >= 16) { | 2012 | if (skb->len >= 16) { |
2005 | hdr = (struct ieee80211_hdr *) skb->data; | 2013 | hdr = (struct ieee80211_hdr *) skb->data; |
2006 | mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], | 2014 | mac80211_hwsim_monitor_ack(data2->channel, |
2007 | hdr->addr2); | 2015 | hdr->addr2); |
2008 | } | 2016 | } |
2009 | txi->flags |= IEEE80211_TX_STAT_ACK; | 2017 | txi->flags |= IEEE80211_TX_STAT_ACK; |
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 78e8a6666cc6..8bb8988c435c 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) | |||
746 | } | 746 | } |
747 | 747 | ||
748 | static u16 | 748 | static u16 |
749 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) | 749 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, |
750 | void *accel_priv) | ||
750 | { | 751 | { |
751 | skb->priority = cfg80211_classify8021d(skb); | 752 | skb->priority = cfg80211_classify8021d(skb); |
752 | return mwifiex_1d_to_wmm_queue[skb->priority]; | 753 | return mwifiex_1d_to_wmm_queue[skb->priority]; |
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index c8e029df770e..a09398fe9e2a 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
@@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, | |||
319 | if (bss_desc && bss_desc->ssid.ssid_len && | 319 | if (bss_desc && bss_desc->ssid.ssid_len && |
320 | (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. | 320 | (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. |
321 | ssid, &bss_desc->ssid))) { | 321 | ssid, &bss_desc->ssid))) { |
322 | kfree(bss_desc); | 322 | ret = 0; |
323 | return 0; | 323 | goto done; |
324 | } | 324 | } |
325 | 325 | ||
326 | /* Exit Adhoc mode first */ | 326 | /* Exit Adhoc mode first */ |
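Instead of freeing bss_desc inline and returning, the early-exit case now routes through the function's common exit label so there is a single cleanup point. The label itself sits outside this hunk; its assumed shape:

    ret = 0;
    goto done;
    ...
    done:
            /* assumed common exit path, not shown in the hunk */
            kfree(bss_desc);
            return ret;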
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 0f494444bcd1..5a53195d016b 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
740 | }; | 740 | }; |
741 | int index = rtlpci->rx_ring[rx_queue_idx].idx; | 741 | int index = rtlpci->rx_ring[rx_queue_idx].idx; |
742 | 742 | ||
743 | if (rtlpci->driver_is_goingto_unload) | ||
744 | return; | ||
743 | /*RX NORMAL PKT */ | 745 | /*RX NORMAL PKT */ |
744 | while (count--) { | 746 | while (count--) { |
745 | /*rx descriptor */ | 747 | /*rx descriptor */ |
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) | |||
1636 | */ | 1638 | */ |
1637 | set_hal_stop(rtlhal); | 1639 | set_hal_stop(rtlhal); |
1638 | 1640 | ||
1641 | rtlpci->driver_is_goingto_unload = true; | ||
1639 | rtlpriv->cfg->ops->disable_interrupt(hw); | 1642 | rtlpriv->cfg->ops->disable_interrupt(hw); |
1640 | cancel_work_sync(&rtlpriv->works.lps_change_work); | 1643 | cancel_work_sync(&rtlpriv->works.lps_change_work); |
1641 | 1644 | ||
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) | |||
1653 | ppsc->rfchange_inprogress = true; | 1656 | ppsc->rfchange_inprogress = true; |
1654 | spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); | 1657 | spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); |
1655 | 1658 | ||
1656 | rtlpci->driver_is_goingto_unload = true; | ||
1657 | rtlpriv->cfg->ops->hw_disable(hw); | 1659 | rtlpriv->cfg->ops->hw_disable(hw); |
1658 | /* some things are not needed if firmware not available */ | 1660 | /* some things are not needed if firmware not available */ |
1659 | if (!rtlpriv->max_fw_size) | 1661 | if (!rtlpriv->max_fw_size) |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 08ae01b41c83..c47794b9d42f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
@@ -101,6 +101,13 @@ struct xenvif_rx_meta { | |||
101 | 101 | ||
102 | #define MAX_PENDING_REQS 256 | 102 | #define MAX_PENDING_REQS 256 |
103 | 103 | ||
104 | /* It's possible for an skb to have a maximal number of frags | ||
105 | * but still be less than MAX_BUFFER_OFFSET in size. Thus the | ||
106 | * worst-case number of copy operations is MAX_SKB_FRAGS per | ||
107 | * ring slot. | ||
108 | */ | ||
109 | #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) | ||
110 | |||
104 | struct xenvif { | 111 | struct xenvif { |
105 | /* Unique identifier for this interface. */ | 112 | /* Unique identifier for this interface. */ |
106 | domid_t domid; | 113 | domid_t domid; |
@@ -143,13 +150,13 @@ struct xenvif { | |||
143 | */ | 150 | */ |
144 | RING_IDX rx_req_cons_peek; | 151 | RING_IDX rx_req_cons_peek; |
145 | 152 | ||
146 | /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each | 153 | /* This array is allocated separately as it is large */ |
147 | * head/fragment page uses 2 copy operations because it | 154 | struct gnttab_copy *grant_copy_op; |
148 | * straddles two buffers in the frontend. | ||
149 | */ | ||
150 | struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; | ||
151 | struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; | ||
152 | 155 | ||
156 | /* We create one meta structure per ring request we consume, so | ||
157 | * the maximum number is the same as the ring size. | ||
158 | */ | ||
159 | struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; | ||
153 | 160 | ||
154 | u8 fe_dev_addr[6]; | 161 | u8 fe_dev_addr[6]; |
155 | 162 | ||
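The comment added above gives the new worst case: MAX_SKB_FRAGS grant-copy operations per RX ring slot. With typical values that array is far too large to keep embedded in struct xenvif, which is why the interface.c hunk below vmalloc()s it instead. Illustrative numbers only; they depend on page size and kernel version:

    /* assuming 4 KiB pages:
     *   MAX_SKB_FRAGS              ~  17
     *   XEN_NETIF_RX_RING_SIZE     ~ 256
     *   sizeof(struct gnttab_copy) ~  56 bytes
     * => 17 * 256 = 4352 ops  ->  4352 * 56 ~ 240 KiB for grant_copy_op alone */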
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 2329cccf1fa6..fff8cddfed81 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
35 | #include <linux/rtnetlink.h> | 35 | #include <linux/rtnetlink.h> |
36 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
37 | #include <linux/vmalloc.h> | ||
37 | 38 | ||
38 | #include <xen/events.h> | 39 | #include <xen/events.h> |
39 | #include <asm/xen/hypercall.h> | 40 | #include <asm/xen/hypercall.h> |
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
307 | SET_NETDEV_DEV(dev, parent); | 308 | SET_NETDEV_DEV(dev, parent); |
308 | 309 | ||
309 | vif = netdev_priv(dev); | 310 | vif = netdev_priv(dev); |
311 | |||
312 | vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * | ||
313 | MAX_GRANT_COPY_OPS); | ||
314 | if (vif->grant_copy_op == NULL) { | ||
315 | pr_warn("Could not allocate grant copy space for %s\n", name); | ||
316 | free_netdev(dev); | ||
317 | return ERR_PTR(-ENOMEM); | ||
318 | } | ||
319 | |||
310 | vif->domid = domid; | 320 | vif->domid = domid; |
311 | vif->handle = handle; | 321 | vif->handle = handle; |
312 | vif->can_sg = 1; | 322 | vif->can_sg = 1; |
@@ -368,11 +378,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
368 | unsigned long rx_ring_ref, unsigned int tx_evtchn, | 378 | unsigned long rx_ring_ref, unsigned int tx_evtchn, |
369 | unsigned int rx_evtchn) | 379 | unsigned int rx_evtchn) |
370 | { | 380 | { |
381 | struct task_struct *task; | ||
371 | int err = -ENOMEM; | 382 | int err = -ENOMEM; |
372 | 383 | ||
373 | /* Already connected through? */ | 384 | BUG_ON(vif->tx_irq); |
374 | if (vif->tx_irq) | 385 | BUG_ON(vif->task); |
375 | return 0; | ||
376 | 386 | ||
377 | err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); | 387 | err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); |
378 | if (err < 0) | 388 | if (err < 0) |
@@ -411,14 +421,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
411 | } | 421 | } |
412 | 422 | ||
413 | init_waitqueue_head(&vif->wq); | 423 | init_waitqueue_head(&vif->wq); |
414 | vif->task = kthread_create(xenvif_kthread, | 424 | task = kthread_create(xenvif_kthread, |
415 | (void *)vif, "%s", vif->dev->name); | 425 | (void *)vif, "%s", vif->dev->name); |
416 | if (IS_ERR(vif->task)) { | 426 | if (IS_ERR(task)) { |
417 | pr_warn("Could not allocate kthread for %s\n", vif->dev->name); | 427 | pr_warn("Could not allocate kthread for %s\n", vif->dev->name); |
418 | err = PTR_ERR(vif->task); | 428 | err = PTR_ERR(task); |
419 | goto err_rx_unbind; | 429 | goto err_rx_unbind; |
420 | } | 430 | } |
421 | 431 | ||
432 | vif->task = task; | ||
433 | |||
422 | rtnl_lock(); | 434 | rtnl_lock(); |
423 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 435 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
424 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 436 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
@@ -461,8 +473,10 @@ void xenvif_disconnect(struct xenvif *vif) | |||
461 | if (netif_carrier_ok(vif->dev)) | 473 | if (netif_carrier_ok(vif->dev)) |
462 | xenvif_carrier_off(vif); | 474 | xenvif_carrier_off(vif); |
463 | 475 | ||
464 | if (vif->task) | 476 | if (vif->task) { |
465 | kthread_stop(vif->task); | 477 | kthread_stop(vif->task); |
478 | vif->task = NULL; | ||
479 | } | ||
466 | 480 | ||
467 | if (vif->tx_irq) { | 481 | if (vif->tx_irq) { |
468 | if (vif->tx_irq == vif->rx_irq) | 482 | if (vif->tx_irq == vif->rx_irq) |
@@ -483,6 +497,7 @@ void xenvif_free(struct xenvif *vif) | |||
483 | 497 | ||
484 | unregister_netdev(vif->dev); | 498 | unregister_netdev(vif->dev); |
485 | 499 | ||
500 | vfree(vif->grant_copy_op); | ||
486 | free_netdev(vif->dev); | 501 | free_netdev(vif->dev); |
487 | 502 | ||
488 | module_put(THIS_MODULE); | 503 | module_put(THIS_MODULE); |
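Using a local task variable means vif->task only ever holds a pointer that has passed the IS_ERR() check; an ERR_PTR can no longer leak into the field that xenvif_disconnect() (which now also NULLs it after kthread_stop()) would later act on. Condensed:

    task = kthread_create(xenvif_kthread, (void *)vif, "%s", vif->dev->name);
    if (IS_ERR(task)) {
            pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
            err = PTR_ERR(task);
            goto err_rx_unbind;
    }
    vif->task = task;       /* publish only a known-good task_struct */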
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 64f0e0d18b81..78425554a537 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -452,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | /* Set up a GSO prefix descriptor, if necessary */ | 454 | /* Set up a GSO prefix descriptor, if necessary */ |
455 | if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { | 455 | if ((1 << gso_type) & vif->gso_prefix_mask) { |
456 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 456 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); |
457 | meta = npo->meta + npo->meta_prod++; | 457 | meta = npo->meta + npo->meta_prod++; |
458 | meta->gso_type = gso_type; | 458 | meta->gso_type = gso_type; |
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif) | |||
608 | if (!npo.copy_prod) | 608 | if (!npo.copy_prod) |
609 | return; | 609 | return; |
610 | 610 | ||
611 | BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); | 611 | BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); |
612 | gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); | 612 | gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); |
613 | 613 | ||
614 | while ((skb = __skb_dequeue(&rxq)) != NULL) { | 614 | while ((skb = __skb_dequeue(&rxq)) != NULL) { |
@@ -1149,75 +1149,99 @@ static int xenvif_set_skb_gso(struct xenvif *vif, | |||
1149 | return 0; | 1149 | return 0; |
1150 | } | 1150 | } |
1151 | 1151 | ||
1152 | static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len) | 1152 | static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len, |
1153 | unsigned int max) | ||
1153 | { | 1154 | { |
1154 | if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) { | 1155 | if (skb_headlen(skb) >= len) |
1155 | /* If we need to pullup then pullup to the max, so we | 1156 | return 0; |
1156 | * won't need to do it again. | 1157 | |
1157 | */ | 1158 | /* If we need to pullup then pullup to the max, so we |
1158 | int target = min_t(int, skb->len, MAX_TCP_HEADER); | 1159 | * won't need to do it again. |
1159 | __pskb_pull_tail(skb, target - skb_headlen(skb)); | 1160 | */ |
1160 | } | 1161 | if (max > skb->len) |
1162 | max = skb->len; | ||
1163 | |||
1164 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) | ||
1165 | return -ENOMEM; | ||
1166 | |||
1167 | if (skb_headlen(skb) < len) | ||
1168 | return -EPROTO; | ||
1169 | |||
1170 | return 0; | ||
1161 | } | 1171 | } |
1162 | 1172 | ||
1173 | /* This value should be large enough to cover a tagged ethernet header plus | ||
1174 | * maximally sized IP and TCP or UDP headers. | ||
1175 | */ | ||
1176 | #define MAX_IP_HDR_LEN 128 | ||
1177 | |||
1163 | static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, | 1178 | static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, |
1164 | int recalculate_partial_csum) | 1179 | int recalculate_partial_csum) |
1165 | { | 1180 | { |
1166 | struct iphdr *iph = (void *)skb->data; | ||
1167 | unsigned int header_size; | ||
1168 | unsigned int off; | 1181 | unsigned int off; |
1169 | int err = -EPROTO; | 1182 | bool fragment; |
1183 | int err; | ||
1170 | 1184 | ||
1171 | off = sizeof(struct iphdr); | 1185 | fragment = false; |
1172 | 1186 | ||
1173 | header_size = skb->network_header + off + MAX_IPOPTLEN; | 1187 | err = maybe_pull_tail(skb, |
1174 | maybe_pull_tail(skb, header_size); | 1188 | sizeof(struct iphdr), |
1189 | MAX_IP_HDR_LEN); | ||
1190 | if (err < 0) | ||
1191 | goto out; | ||
1175 | 1192 | ||
1176 | off = iph->ihl * 4; | 1193 | if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) |
1194 | fragment = true; | ||
1177 | 1195 | ||
1178 | switch (iph->protocol) { | 1196 | off = ip_hdrlen(skb); |
1179 | case IPPROTO_TCP: | ||
1180 | if (!skb_partial_csum_set(skb, off, | ||
1181 | offsetof(struct tcphdr, check))) | ||
1182 | goto out; | ||
1183 | 1197 | ||
1184 | if (recalculate_partial_csum) { | 1198 | err = -EPROTO; |
1185 | struct tcphdr *tcph = tcp_hdr(skb); | 1199 | |
1200 | if (fragment) | ||
1201 | goto out; | ||
1186 | 1202 | ||
1187 | header_size = skb->network_header + | 1203 | switch (ip_hdr(skb)->protocol) { |
1188 | off + | 1204 | case IPPROTO_TCP: |
1189 | sizeof(struct tcphdr); | 1205 | err = maybe_pull_tail(skb, |
1190 | maybe_pull_tail(skb, header_size); | 1206 | off + sizeof(struct tcphdr), |
1207 | MAX_IP_HDR_LEN); | ||
1208 | if (err < 0) | ||
1209 | goto out; | ||
1191 | 1210 | ||
1192 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | 1211 | if (!skb_partial_csum_set(skb, off, |
1193 | skb->len - off, | 1212 | offsetof(struct tcphdr, check))) { |
1194 | IPPROTO_TCP, 0); | 1213 | err = -EPROTO; |
1214 | goto out; | ||
1195 | } | 1215 | } |
1216 | |||
1217 | if (recalculate_partial_csum) | ||
1218 | tcp_hdr(skb)->check = | ||
1219 | ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
1220 | ip_hdr(skb)->daddr, | ||
1221 | skb->len - off, | ||
1222 | IPPROTO_TCP, 0); | ||
1196 | break; | 1223 | break; |
1197 | case IPPROTO_UDP: | 1224 | case IPPROTO_UDP: |
1198 | if (!skb_partial_csum_set(skb, off, | 1225 | err = maybe_pull_tail(skb, |
1199 | offsetof(struct udphdr, check))) | 1226 | off + sizeof(struct udphdr), |
1227 | MAX_IP_HDR_LEN); | ||
1228 | if (err < 0) | ||
1200 | goto out; | 1229 | goto out; |
1201 | 1230 | ||
1202 | if (recalculate_partial_csum) { | 1231 | if (!skb_partial_csum_set(skb, off, |
1203 | struct udphdr *udph = udp_hdr(skb); | 1232 | offsetof(struct udphdr, check))) { |
1204 | 1233 | err = -EPROTO; | |
1205 | header_size = skb->network_header + | 1234 | goto out; |
1206 | off + | ||
1207 | sizeof(struct udphdr); | ||
1208 | maybe_pull_tail(skb, header_size); | ||
1209 | |||
1210 | udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
1211 | skb->len - off, | ||
1212 | IPPROTO_UDP, 0); | ||
1213 | } | 1235 | } |
1236 | |||
1237 | if (recalculate_partial_csum) | ||
1238 | udp_hdr(skb)->check = | ||
1239 | ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
1240 | ip_hdr(skb)->daddr, | ||
1241 | skb->len - off, | ||
1242 | IPPROTO_UDP, 0); | ||
1214 | break; | 1243 | break; |
1215 | default: | 1244 | default: |
1216 | if (net_ratelimit()) | ||
1217 | netdev_err(vif->dev, | ||
1218 | "Attempting to checksum a non-TCP/UDP packet, " | ||
1219 | "dropping a protocol %d packet\n", | ||
1220 | iph->protocol); | ||
1221 | goto out; | 1245 | goto out; |
1222 | } | 1246 | } |
1223 | 1247 | ||
@@ -1227,121 +1251,142 @@ out: | |||
1227 | return err; | 1251 | return err; |
1228 | } | 1252 | } |
1229 | 1253 | ||
1254 | /* This value should be large enough to cover a tagged ethernet header plus | ||
1255 | * an IPv6 header, all options, and a maximal TCP or UDP header. | ||
1256 | */ | ||
1257 | #define MAX_IPV6_HDR_LEN 256 | ||
1258 | |||
1259 | #define OPT_HDR(type, skb, off) \ | ||
1260 | (type *)(skb_network_header(skb) + (off)) | ||
1261 | |||
1230 | static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, | 1262 | static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, |
1231 | int recalculate_partial_csum) | 1263 | int recalculate_partial_csum) |
1232 | { | 1264 | { |
1233 | int err = -EPROTO; | 1265 | int err; |
1234 | struct ipv6hdr *ipv6h = (void *)skb->data; | ||
1235 | u8 nexthdr; | 1266 | u8 nexthdr; |
1236 | unsigned int header_size; | ||
1237 | unsigned int off; | 1267 | unsigned int off; |
1268 | unsigned int len; | ||
1238 | bool fragment; | 1269 | bool fragment; |
1239 | bool done; | 1270 | bool done; |
1240 | 1271 | ||
1272 | fragment = false; | ||
1241 | done = false; | 1273 | done = false; |
1242 | 1274 | ||
1243 | off = sizeof(struct ipv6hdr); | 1275 | off = sizeof(struct ipv6hdr); |
1244 | 1276 | ||
1245 | header_size = skb->network_header + off; | 1277 | err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); |
1246 | maybe_pull_tail(skb, header_size); | 1278 | if (err < 0) |
1279 | goto out; | ||
1247 | 1280 | ||
1248 | nexthdr = ipv6h->nexthdr; | 1281 | nexthdr = ipv6_hdr(skb)->nexthdr; |
1249 | 1282 | ||
1250 | while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) && | 1283 | len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); |
1251 | !done) { | 1284 | while (off <= len && !done) { |
1252 | switch (nexthdr) { | 1285 | switch (nexthdr) { |
1253 | case IPPROTO_DSTOPTS: | 1286 | case IPPROTO_DSTOPTS: |
1254 | case IPPROTO_HOPOPTS: | 1287 | case IPPROTO_HOPOPTS: |
1255 | case IPPROTO_ROUTING: { | 1288 | case IPPROTO_ROUTING: { |
1256 | struct ipv6_opt_hdr *hp = (void *)(skb->data + off); | 1289 | struct ipv6_opt_hdr *hp; |
1257 | 1290 | ||
1258 | header_size = skb->network_header + | 1291 | err = maybe_pull_tail(skb, |
1259 | off + | 1292 | off + |
1260 | sizeof(struct ipv6_opt_hdr); | 1293 | sizeof(struct ipv6_opt_hdr), |
1261 | maybe_pull_tail(skb, header_size); | 1294 | MAX_IPV6_HDR_LEN); |
1295 | if (err < 0) | ||
1296 | goto out; | ||
1262 | 1297 | ||
1298 | hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); | ||
1263 | nexthdr = hp->nexthdr; | 1299 | nexthdr = hp->nexthdr; |
1264 | off += ipv6_optlen(hp); | 1300 | off += ipv6_optlen(hp); |
1265 | break; | 1301 | break; |
1266 | } | 1302 | } |
1267 | case IPPROTO_AH: { | 1303 | case IPPROTO_AH: { |
1268 | struct ip_auth_hdr *hp = (void *)(skb->data + off); | 1304 | struct ip_auth_hdr *hp; |
1269 | 1305 | ||
1270 | header_size = skb->network_header + | 1306 | err = maybe_pull_tail(skb, |
1271 | off + | 1307 | off + |
1272 | sizeof(struct ip_auth_hdr); | 1308 | sizeof(struct ip_auth_hdr), |
1273 | maybe_pull_tail(skb, header_size); | 1309 | MAX_IPV6_HDR_LEN); |
1310 | if (err < 0) | ||
1311 | goto out; | ||
1274 | 1312 | ||
1313 | hp = OPT_HDR(struct ip_auth_hdr, skb, off); | ||
1275 | nexthdr = hp->nexthdr; | 1314 | nexthdr = hp->nexthdr; |
1276 | off += (hp->hdrlen+2)<<2; | 1315 | off += ipv6_authlen(hp); |
1316 | break; | ||
1317 | } | ||
1318 | case IPPROTO_FRAGMENT: { | ||
1319 | struct frag_hdr *hp; | ||
1320 | |||
1321 | err = maybe_pull_tail(skb, | ||
1322 | off + | ||
1323 | sizeof(struct frag_hdr), | ||
1324 | MAX_IPV6_HDR_LEN); | ||
1325 | if (err < 0) | ||
1326 | goto out; | ||
1327 | |||
1328 | hp = OPT_HDR(struct frag_hdr, skb, off); | ||
1329 | |||
1330 | if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) | ||
1331 | fragment = true; | ||
1332 | |||
1333 | nexthdr = hp->nexthdr; | ||
1334 | off += sizeof(struct frag_hdr); | ||
1277 | break; | 1335 | break; |
1278 | } | 1336 | } |
1279 | case IPPROTO_FRAGMENT: | ||
1280 | fragment = true; | ||
1281 | /* fall through */ | ||
1282 | default: | 1337 | default: |
1283 | done = true; | 1338 | done = true; |
1284 | break; | 1339 | break; |
1285 | } | 1340 | } |
1286 | } | 1341 | } |
1287 | 1342 | ||
1288 | if (!done) { | 1343 | err = -EPROTO; |
1289 | if (net_ratelimit()) | ||
1290 | netdev_err(vif->dev, "Failed to parse packet header\n"); | ||
1291 | goto out; | ||
1292 | } | ||
1293 | 1344 | ||
1294 | if (fragment) { | 1345 | if (!done || fragment) |
1295 | if (net_ratelimit()) | ||
1296 | netdev_err(vif->dev, "Packet is a fragment!\n"); | ||
1297 | goto out; | 1346 | goto out; |
1298 | } | ||
1299 | 1347 | ||
1300 | switch (nexthdr) { | 1348 | switch (nexthdr) { |
1301 | case IPPROTO_TCP: | 1349 | case IPPROTO_TCP: |
1302 | if (!skb_partial_csum_set(skb, off, | 1350 | err = maybe_pull_tail(skb, |
1303 | offsetof(struct tcphdr, check))) | 1351 | off + sizeof(struct tcphdr), |
1352 | MAX_IPV6_HDR_LEN); | ||
1353 | if (err < 0) | ||
1304 | goto out; | 1354 | goto out; |
1305 | 1355 | ||
1306 | if (recalculate_partial_csum) { | 1356 | if (!skb_partial_csum_set(skb, off, |
1307 | struct tcphdr *tcph = tcp_hdr(skb); | 1357 | offsetof(struct tcphdr, check))) { |
1308 | 1358 | err = -EPROTO; | |
1309 | header_size = skb->network_header + | 1359 | goto out; |
1310 | off + | ||
1311 | sizeof(struct tcphdr); | ||
1312 | maybe_pull_tail(skb, header_size); | ||
1313 | |||
1314 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, | ||
1315 | &ipv6h->daddr, | ||
1316 | skb->len - off, | ||
1317 | IPPROTO_TCP, 0); | ||
1318 | } | 1360 | } |
1361 | |||
1362 | if (recalculate_partial_csum) | ||
1363 | tcp_hdr(skb)->check = | ||
1364 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
1365 | &ipv6_hdr(skb)->daddr, | ||
1366 | skb->len - off, | ||
1367 | IPPROTO_TCP, 0); | ||
1319 | break; | 1368 | break; |
1320 | case IPPROTO_UDP: | 1369 | case IPPROTO_UDP: |
1321 | if (!skb_partial_csum_set(skb, off, | 1370 | err = maybe_pull_tail(skb, |
1322 | offsetof(struct udphdr, check))) | 1371 | off + sizeof(struct udphdr), |
1372 | MAX_IPV6_HDR_LEN); | ||
1373 | if (err < 0) | ||
1323 | goto out; | 1374 | goto out; |
1324 | 1375 | ||
1325 | if (recalculate_partial_csum) { | 1376 | if (!skb_partial_csum_set(skb, off, |
1326 | struct udphdr *udph = udp_hdr(skb); | 1377 | offsetof(struct udphdr, check))) { |
1327 | 1378 | err = -EPROTO; | |
1328 | header_size = skb->network_header + | 1379 | goto out; |
1329 | off + | ||
1330 | sizeof(struct udphdr); | ||
1331 | maybe_pull_tail(skb, header_size); | ||
1332 | |||
1333 | udph->check = ~csum_ipv6_magic(&ipv6h->saddr, | ||
1334 | &ipv6h->daddr, | ||
1335 | skb->len - off, | ||
1336 | IPPROTO_UDP, 0); | ||
1337 | } | 1380 | } |
1381 | |||
1382 | if (recalculate_partial_csum) | ||
1383 | udp_hdr(skb)->check = | ||
1384 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
1385 | &ipv6_hdr(skb)->daddr, | ||
1386 | skb->len - off, | ||
1387 | IPPROTO_UDP, 0); | ||
1338 | break; | 1388 | break; |
1339 | default: | 1389 | default: |
1340 | if (net_ratelimit()) | ||
1341 | netdev_err(vif->dev, | ||
1342 | "Attempting to checksum a non-TCP/UDP packet, " | ||
1343 | "dropping a protocol %d packet\n", | ||
1344 | nexthdr); | ||
1345 | goto out; | 1390 | goto out; |
1346 | } | 1391 | } |
1347 | 1392 | ||
@@ -1411,14 +1456,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) | |||
1411 | return false; | 1456 | return false; |
1412 | } | 1457 | } |
1413 | 1458 | ||
1414 | static unsigned xenvif_tx_build_gops(struct xenvif *vif) | 1459 | static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) |
1415 | { | 1460 | { |
1416 | struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; | 1461 | struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; |
1417 | struct sk_buff *skb; | 1462 | struct sk_buff *skb; |
1418 | int ret; | 1463 | int ret; |
1419 | 1464 | ||
1420 | while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX | 1465 | while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX |
1421 | < MAX_PENDING_REQS)) { | 1466 | < MAX_PENDING_REQS) && |
1467 | (skb_queue_len(&vif->tx_queue) < budget)) { | ||
1422 | struct xen_netif_tx_request txreq; | 1468 | struct xen_netif_tx_request txreq; |
1423 | struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; | 1469 | struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; |
1424 | struct page *page; | 1470 | struct page *page; |
@@ -1440,7 +1486,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) | |||
1440 | continue; | 1486 | continue; |
1441 | } | 1487 | } |
1442 | 1488 | ||
1443 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); | 1489 | work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); |
1444 | if (!work_to_do) | 1490 | if (!work_to_do) |
1445 | break; | 1491 | break; |
1446 | 1492 | ||
@@ -1580,14 +1626,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) | |||
1580 | } | 1626 | } |
1581 | 1627 | ||
1582 | 1628 | ||
1583 | static int xenvif_tx_submit(struct xenvif *vif, int budget) | 1629 | static int xenvif_tx_submit(struct xenvif *vif) |
1584 | { | 1630 | { |
1585 | struct gnttab_copy *gop = vif->tx_copy_ops; | 1631 | struct gnttab_copy *gop = vif->tx_copy_ops; |
1586 | struct sk_buff *skb; | 1632 | struct sk_buff *skb; |
1587 | int work_done = 0; | 1633 | int work_done = 0; |
1588 | 1634 | ||
1589 | while (work_done < budget && | 1635 | while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { |
1590 | (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { | ||
1591 | struct xen_netif_tx_request *txp; | 1636 | struct xen_netif_tx_request *txp; |
1592 | u16 pending_idx; | 1637 | u16 pending_idx; |
1593 | unsigned data_len; | 1638 | unsigned data_len; |
@@ -1662,14 +1707,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget) | |||
1662 | if (unlikely(!tx_work_todo(vif))) | 1707 | if (unlikely(!tx_work_todo(vif))) |
1663 | return 0; | 1708 | return 0; |
1664 | 1709 | ||
1665 | nr_gops = xenvif_tx_build_gops(vif); | 1710 | nr_gops = xenvif_tx_build_gops(vif, budget); |
1666 | 1711 | ||
1667 | if (nr_gops == 0) | 1712 | if (nr_gops == 0) |
1668 | return 0; | 1713 | return 0; |
1669 | 1714 | ||
1670 | gnttab_batch_copy(vif->tx_copy_ops, nr_gops); | 1715 | gnttab_batch_copy(vif->tx_copy_ops, nr_gops); |
1671 | 1716 | ||
1672 | work_done = xenvif_tx_submit(vif, nr_gops); | 1717 | work_done = xenvif_tx_submit(vif); |
1673 | 1718 | ||
1674 | return work_done; | 1719 | return work_done; |
1675 | } | 1720 | } |
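The reworked maybe_pull_tail() pulls at least len bytes of header into the linear area, never more than max, and returns -ENOMEM or -EPROTO instead of silently continuing, so every protocol branch in checksum_setup_ip()/checksum_setup_ipv6() follows the same call-check-set sequence. The IPv4 TCP case, condensed from the hunk above:

    err = maybe_pull_tail(skb, off + sizeof(struct tcphdr), MAX_IP_HDR_LEN);
    if (err < 0)
            goto out;
    if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) {
            err = -EPROTO;
            goto out;
    }
    /* only recalculate the pseudo-header checksum once the headers are linear */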
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index de6f8990246f..c6973f101a3e 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -20,7 +20,7 @@ config OF_SELFTEST | |||
20 | depends on OF_IRQ | 20 | depends on OF_IRQ |
21 | help | 21 | help |
22 | This option builds in test cases for the device tree infrastructure | 22 | This option builds in test cases for the device tree infrastructure |
23 | that are executed one at boot time, and the results dumped to the | 23 | that are executed once at boot time, and the results dumped to the |
24 | console. | 24 | console. |
25 | 25 | ||
26 | If unsure, say N here, but this option is safe to enable. | 26 | If unsure, say N here, but this option is safe to enable. |
diff --git a/drivers/of/address.c b/drivers/of/address.c index 4b9317bdb81c..d3dd41c840f1 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range, | |||
69 | (unsigned long long)cp, (unsigned long long)s, | 69 | (unsigned long long)cp, (unsigned long long)s, |
70 | (unsigned long long)da); | 70 | (unsigned long long)da); |
71 | 71 | ||
72 | /* | ||
73 | * If the number of address cells is larger than 2 we assume the | ||
74 | * mapping doesn't specify a physical address. Rather, the address | ||
75 | * specifies an identifier that must match exactly. | ||
76 | */ | ||
77 | if (na > 2 && memcmp(range, addr, na * 4) != 0) | ||
78 | return OF_BAD_ADDR; | ||
79 | |||
80 | if (da < cp || da >= (cp + s)) | 72 | if (da < cp || da >= (cp + s)) |
81 | return OF_BAD_ADDR; | 73 | return OF_BAD_ADDR; |
82 | return da - cp; | 74 | return da - cp; |
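With the exact-match special case removed, of_bus_default_map() is a plain window check plus offset computation: the child address da must fall inside [cp, cp + s). A worked example with made-up numbers:

    /* cp = 0x1000, s = 0x100:
     *   da = 0x1040  ->  inside the window, returns 0x1040 - 0x1000 = 0x40
     *   da = 0x1100  ->  outside (cp + s is exclusive), returns OF_BAD_ADDR */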
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 2fa024b97c43..758b4f8b30b7 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void) | |||
922 | */ | 922 | */ |
923 | void __init unflatten_and_copy_device_tree(void) | 923 | void __init unflatten_and_copy_device_tree(void) |
924 | { | 924 | { |
925 | int size = __be32_to_cpu(initial_boot_params->totalsize); | 925 | int size; |
926 | void *dt = early_init_dt_alloc_memory_arch(size, | 926 | void *dt; |
927 | |||
928 | if (!initial_boot_params) { | ||
929 | pr_warn("No valid device tree found, continuing without\n"); | ||
930 | return; | ||
931 | } | ||
932 | |||
933 | size = __be32_to_cpu(initial_boot_params->totalsize); | ||
934 | dt = early_init_dt_alloc_memory_arch(size, | ||
927 | __alignof__(struct boot_param_header)); | 935 | __alignof__(struct boot_param_header)); |
928 | 936 | ||
929 | if (dt) { | 937 | if (dt) { |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 786b0b47fae4..27212402c532 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
165 | if (of_get_property(ipar, "interrupt-controller", NULL) != | 165 | if (of_get_property(ipar, "interrupt-controller", NULL) != |
166 | NULL) { | 166 | NULL) { |
167 | pr_debug(" -> got it !\n"); | 167 | pr_debug(" -> got it !\n"); |
168 | of_node_put(old); | ||
169 | return 0; | 168 | return 0; |
170 | } | 169 | } |
171 | 170 | ||
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
250 | * Successfully parsed an interrupt-map translation; copy new | 249 | * Successfully parsed an interrupt-map translation; copy new |
251 | * interrupt specifier into the out_irq structure | 250 | * interrupt specifier into the out_irq structure |
252 | */ | 251 | */ |
253 | of_node_put(out_irq->np); | 252 | out_irq->np = newpar; |
254 | out_irq->np = of_node_get(newpar); | ||
255 | 253 | ||
256 | match_array = imap - newaddrsize - newintsize; | 254 | match_array = imap - newaddrsize - newintsize; |
257 | for (i = 0; i < newintsize; i++) | 255 | for (i = 0; i < newintsize; i++) |
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) | |||
268 | } | 266 | } |
269 | fail: | 267 | fail: |
270 | of_node_put(ipar); | 268 | of_node_put(ipar); |
271 | of_node_put(out_irq->np); | ||
272 | of_node_put(newpar); | 269 | of_node_put(newpar); |
273 | 270 | ||
274 | return -EINVAL; | 271 | return -EINVAL; |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index c269e430c760..2aa7b77c7c88 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, | |||
447 | *value = 0; | 447 | *value = 0; |
448 | break; | 448 | break; |
449 | 449 | ||
450 | case PCI_INTERRUPT_LINE: | ||
451 | /* LINE PIN MIN_GNT MAX_LAT */ | ||
452 | *value = 0; | ||
453 | break; | ||
454 | |||
450 | default: | 455 | default: |
451 | *value = 0xffffffff; | 456 | *value = 0xffffffff; |
452 | return PCIBIOS_BAD_REGISTER_NUMBER; | 457 | return PCIBIOS_BAD_REGISTER_NUMBER; |
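Config offset 0x3C (PCI_INTERRUPT_LINE) reads back a whole dword that covers four one-byte registers, and the emulated bridge now answers it with zeros instead of flagging a bad register. For reference:

    /* dword at config offset 0x3C, low byte first:
     *   0x3C  Interrupt Line
     *   0x3D  Interrupt Pin
     *   0x3E  Min_Gnt
     *   0x3F  Max_Lat
     * the software bridge reports 0 for all four */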
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 1cf605f67673..e86439283a5d 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, | |||
279 | 279 | ||
280 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); | 280 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); |
281 | if (ACPI_FAILURE(status)) { | 281 | if (ACPI_FAILURE(status)) { |
282 | acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); | 282 | if (status != AE_NOT_FOUND) |
283 | acpi_handle_warn(handle, | ||
284 | "can't evaluate _ADR (%#x)\n", status); | ||
283 | return AE_OK; | 285 | return AE_OK; |
284 | } | 286 | } |
285 | 287 | ||
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot) | |||
643 | slot->flags &= (~SLOT_ENABLED); | 645 | slot->flags &= (~SLOT_ENABLED); |
644 | } | 646 | } |
645 | 647 | ||
648 | static bool acpiphp_no_hotplug(acpi_handle handle) | ||
649 | { | ||
650 | struct acpi_device *adev = NULL; | ||
651 | |||
652 | acpi_bus_get_device(handle, &adev); | ||
653 | return adev && adev->flags.no_hotplug; | ||
654 | } | ||
655 | |||
656 | static bool slot_no_hotplug(struct acpiphp_slot *slot) | ||
657 | { | ||
658 | struct acpiphp_func *func; | ||
659 | |||
660 | list_for_each_entry(func, &slot->funcs, sibling) | ||
661 | if (acpiphp_no_hotplug(func_to_handle(func))) | ||
662 | return true; | ||
663 | |||
664 | return false; | ||
665 | } | ||
646 | 666 | ||
647 | /** | 667 | /** |
648 | * get_slot_status - get ACPI slot status | 668 | * get_slot_status - get ACPI slot status |
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev) | |||
701 | unsigned long long sta; | 721 | unsigned long long sta; |
702 | 722 | ||
703 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 723 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
704 | alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; | 724 | alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) |
725 | || acpiphp_no_hotplug(handle); | ||
705 | } | 726 | } |
706 | if (!alive) { | 727 | if (!alive) { |
707 | u32 v; | 728 | u32 v; |
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
741 | struct pci_dev *dev, *tmp; | 762 | struct pci_dev *dev, *tmp; |
742 | 763 | ||
743 | mutex_lock(&slot->crit_sect); | 764 | mutex_lock(&slot->crit_sect); |
744 | /* wake up all functions */ | 765 | if (slot_no_hotplug(slot)) { |
745 | if (get_slot_status(slot) == ACPI_STA_ALL) { | 766 | ; /* do nothing */ |
767 | } else if (get_slot_status(slot) == ACPI_STA_ALL) { | ||
746 | /* remove stale devices if any */ | 768 | /* remove stale devices if any */ |
747 | list_for_each_entry_safe(dev, tmp, &bus->devices, | 769 | list_for_each_entry_safe(dev, tmp, &bus->devices, |
748 | bus_list) | 770 | bus_list) |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 577074efbe62..f7ebdba14bde 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | |||
330 | static void pci_acpi_setup(struct device *dev) | 330 | static void pci_acpi_setup(struct device *dev) |
331 | { | 331 | { |
332 | struct pci_dev *pci_dev = to_pci_dev(dev); | 332 | struct pci_dev *pci_dev = to_pci_dev(dev); |
333 | acpi_handle handle = ACPI_HANDLE(dev); | 333 | struct acpi_device *adev = ACPI_COMPANION(dev); |
334 | struct acpi_device *adev; | ||
335 | 334 | ||
336 | if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) | 335 | if (!adev) |
336 | return; | ||
337 | |||
338 | pci_acpi_add_pm_notifier(adev, pci_dev); | ||
339 | if (!adev->wakeup.flags.valid) | ||
337 | return; | 340 | return; |
338 | 341 | ||
339 | device_set_wakeup_capable(dev, true); | 342 | device_set_wakeup_capable(dev, true); |
340 | acpi_pci_sleep_wake(pci_dev, false); | 343 | acpi_pci_sleep_wake(pci_dev, false); |
341 | |||
342 | pci_acpi_add_pm_notifier(adev, pci_dev); | ||
343 | if (adev->wakeup.flags.run_wake) | 344 | if (adev->wakeup.flags.run_wake) |
344 | device_set_run_wake(dev, true); | 345 | device_set_run_wake(dev, true); |
345 | } | 346 | } |
346 | 347 | ||
347 | static void pci_acpi_cleanup(struct device *dev) | 348 | static void pci_acpi_cleanup(struct device *dev) |
348 | { | 349 | { |
349 | acpi_handle handle = ACPI_HANDLE(dev); | 350 | struct acpi_device *adev = ACPI_COMPANION(dev); |
350 | struct acpi_device *adev; | 351 | |
352 | if (!adev) | ||
353 | return; | ||
351 | 354 | ||
352 | if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { | 355 | pci_acpi_remove_pm_notifier(adev); |
356 | if (adev->wakeup.flags.valid) { | ||
353 | device_set_wakeup_capable(dev, false); | 357 | device_set_wakeup_capable(dev, false); |
354 | device_set_run_wake(dev, false); | 358 | device_set_run_wake(dev, false); |
355 | pci_acpi_remove_pm_notifier(adev); | ||
356 | } | 359 | } |
357 | } | 360 | } |
358 | 361 | ||
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 9042fdbd7244..25f0bc659164 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/pm_runtime.h> | 20 | #include <linux/pm_runtime.h> |
21 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
22 | #include <linux/kexec.h> | ||
22 | #include "pci.h" | 23 | #include "pci.h" |
23 | 24 | ||
24 | struct pci_dynid { | 25 | struct pci_dynid { |
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | |||
288 | int error, node; | 289 | int error, node; |
289 | struct drv_dev_and_id ddi = { drv, dev, id }; | 290 | struct drv_dev_and_id ddi = { drv, dev, id }; |
290 | 291 | ||
291 | /* Execute driver initialization on node where the device's | 292 | /* |
292 | bus is attached to. This way the driver likely allocates | 293 | * Execute driver initialization on node where the device is |
293 | its local memory on the right node without any need to | 294 | * attached. This way the driver likely allocates its local memory |
294 | change it. */ | 295 | * on the right node. |
296 | */ | ||
295 | node = dev_to_node(&dev->dev); | 297 | node = dev_to_node(&dev->dev); |
296 | if (node >= 0) { | 298 | |
299 | /* | ||
300 | * On NUMA systems, we are likely to call a PF probe function using | ||
301 | * work_on_cpu(). If that probe calls pci_enable_sriov() (which | ||
302 | * adds the VF devices via pci_bus_add_device()), we may re-enter | ||
303 | * this function to call the VF probe function. Calling | ||
304 | * work_on_cpu() again will cause a lockdep warning. Since VFs are | ||
305 | * always on the same node as the PF, we can work around this by | ||
306 | * avoiding work_on_cpu() when we're already on the correct node. | ||
307 | * | ||
308 | * Preemption is enabled, so it's theoretically unsafe to use | ||
309 | * numa_node_id(), but even if we run the probe function on the | ||
310 | * wrong node, it should be functionally correct. | ||
311 | */ | ||
312 | if (node >= 0 && node != numa_node_id()) { | ||
297 | int cpu; | 313 | int cpu; |
298 | 314 | ||
299 | get_online_cpus(); | 315 | get_online_cpus(); |
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | |||
305 | put_online_cpus(); | 321 | put_online_cpus(); |
306 | } else | 322 | } else |
307 | error = local_pci_probe(&ddi); | 323 | error = local_pci_probe(&ddi); |
324 | |||
308 | return error; | 325 | return error; |
309 | } | 326 | } |
310 | 327 | ||
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev) | |||
399 | pci_msi_shutdown(pci_dev); | 416 | pci_msi_shutdown(pci_dev); |
400 | pci_msix_shutdown(pci_dev); | 417 | pci_msix_shutdown(pci_dev); |
401 | 418 | ||
419 | #ifdef CONFIG_KEXEC | ||
402 | /* | 420 | /* |
403 | * Turn off Bus Master bit on the device to tell it to not | 421 | * If this is a kexec reboot, turn off Bus Master bit on the |
404 | * continue to do DMA. Don't touch devices in D3cold or unknown states. | 422 | * device to tell it to not continue to do DMA. Don't touch |
423 | * devices in D3cold or unknown states. | ||
424 | * If it is not a kexec reboot, firmware will hit the PCI | ||
425 | * devices with big hammer and stop their DMA any way. | ||
405 | */ | 426 | */ |
406 | if (pci_dev->current_state <= PCI_D3hot) | 427 | if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) |
407 | pci_clear_master(pci_dev); | 428 | pci_clear_master(pci_dev); |
429 | #endif | ||
408 | } | 430 | } |
409 | 431 | ||
410 | #ifdef CONFIG_PM | 432 | #ifdef CONFIG_PM |
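Note (illustration, not part of the patch): the comment added to pci_call_probe() above describes a node-aware probe path: hop to a CPU on the device's NUMA node with work_on_cpu() so the driver's allocations land on that node, but skip the hop when the current CPU is already on the right node, which also avoids re-entering work_on_cpu() for SR-IOV VF probes. A minimal sketch of that shape; the wrapper and argument-struct names are invented for the example, only dev_to_node(), numa_node_id(), work_on_cpu() and the cpumask helpers are real interfaces.

#include <linux/pci.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

struct sketch_probe_args {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long sketch_probe_on_node(void *_args)
{
	struct sketch_probe_args *args = _args;

	/* Runs on a CPU of the device's node, so allocations made in the
	 * driver's probe default to node-local memory. */
	return args->drv->probe(args->dev, args->id);
}

static int sketch_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	struct sketch_probe_args args = { drv, dev, id };
	int node = dev_to_node(&dev->dev);
	long error;
	int cpu;

	/* No node preference, or already on the right node: call directly. */
	if (node < 0 || node == numa_node_id())
		return sketch_probe_on_node(&args);

	get_online_cpus();
	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, sketch_probe_on_node, &args);
	else
		error = sketch_probe_on_node(&args); /* node has no online CPU */
	put_online_cpus();

	return error;
}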
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 33120d156668..07369f32e8bb 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
4165 | return 0; | 4165 | return 0; |
4166 | } | 4166 | } |
4167 | 4167 | ||
4168 | bool pci_device_is_present(struct pci_dev *pdev) | ||
4169 | { | ||
4170 | u32 v; | ||
4171 | |||
4172 | return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); | ||
4173 | } | ||
4174 | EXPORT_SYMBOL_GPL(pci_device_is_present); | ||
4175 | |||
4168 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE | 4176 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
4169 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; | 4177 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
4170 | static DEFINE_SPINLOCK(resource_alignment_lock); | 4178 | static DEFINE_SPINLOCK(resource_alignment_lock); |
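Note (illustration, not part of the patch): the new pci_device_is_present() helper simply asks whether the device still answers a vendor-ID config read; pci_bus_read_dev_vendor_id() reports false for the all-ones pattern a removed device returns. A hedged sketch of how a caller might use it; the surrounding recovery function is invented for the example.

#include <linux/pci.h>

static void sketch_recover_device(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		/* Config reads come back all-ones: the card was surprise
		 * removed, so don't try to reset or re-program it. */
		return;
	}

	pci_reset_function(pdev);	/* device still present; safe to reset */
}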
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 1576851028db..cc9337a71529 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c | |||
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev) | |||
24 | if (dev->is_added) { | 24 | if (dev->is_added) { |
25 | pci_proc_detach_device(dev); | 25 | pci_proc_detach_device(dev); |
26 | pci_remove_sysfs_dev_files(dev); | 26 | pci_remove_sysfs_dev_files(dev); |
27 | device_del(&dev->dev); | 27 | device_release_driver(&dev->dev); |
28 | dev->is_added = 0; | 28 | dev->is_added = 0; |
29 | } | 29 | } |
30 | 30 | ||
@@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev) | |||
34 | 34 | ||
35 | static void pci_destroy_dev(struct pci_dev *dev) | 35 | static void pci_destroy_dev(struct pci_dev *dev) |
36 | { | 36 | { |
37 | device_del(&dev->dev); | ||
38 | |||
37 | down_write(&pci_bus_sem); | 39 | down_write(&pci_bus_sem); |
38 | list_del(&dev->bus_list); | 40 | list_del(&dev->bus_list); |
39 | up_write(&pci_bus_sem); | 41 | up_write(&pci_bus_sem); |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a344f3d52361..330ef2d06567 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO | |||
24 | config OMAP_USB2 | 24 | config OMAP_USB2 |
25 | tristate "OMAP USB2 PHY Driver" | 25 | tristate "OMAP USB2 PHY Driver" |
26 | depends on ARCH_OMAP2PLUS | 26 | depends on ARCH_OMAP2PLUS |
27 | depends on USB_PHY | ||
27 | select GENERIC_PHY | 28 | select GENERIC_PHY |
28 | select USB_PHY | ||
29 | select OMAP_CONTROL_USB | 29 | select OMAP_CONTROL_USB |
30 | help | 30 | help |
31 | Enable this to support the transceiver that is part of SOC. This | 31 | Enable this to support the transceiver that is part of SOC. This |
@@ -36,8 +36,8 @@ config OMAP_USB2 | |||
36 | config TWL4030_USB | 36 | config TWL4030_USB |
37 | tristate "TWL4030 USB Transceiver Driver" | 37 | tristate "TWL4030 USB Transceiver Driver" |
38 | depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS | 38 | depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS |
39 | depends on USB_PHY | ||
39 | select GENERIC_PHY | 40 | select GENERIC_PHY |
40 | select USB_PHY | ||
41 | help | 41 | help |
42 | Enable this to support the USB OTG transceiver on TWL4030 | 42 | Enable this to support the USB OTG transceiver on TWL4030 |
43 | family chips (including the TWL5030 and TPS659x0 devices). | 43 | family chips (including the TWL5030 and TPS659x0 devices). |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 03cf8fb81554..58e0e9739028 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
437 | int id; | 437 | int id; |
438 | struct phy *phy; | 438 | struct phy *phy; |
439 | 439 | ||
440 | if (!dev) { | 440 | if (WARN_ON(!dev)) |
441 | dev_WARN(dev, "no device provided for PHY\n"); | 441 | return ERR_PTR(-EINVAL); |
442 | ret = -EINVAL; | ||
443 | goto err0; | ||
444 | } | ||
445 | 442 | ||
446 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); | 443 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); |
447 | if (!phy) { | 444 | if (!phy) |
448 | ret = -ENOMEM; | 445 | return ERR_PTR(-ENOMEM); |
449 | goto err0; | ||
450 | } | ||
451 | 446 | ||
452 | id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); | 447 | id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); |
453 | if (id < 0) { | 448 | if (id < 0) { |
454 | dev_err(dev, "unable to get id\n"); | 449 | dev_err(dev, "unable to get id\n"); |
455 | ret = id; | 450 | ret = id; |
456 | goto err0; | 451 | goto free_phy; |
457 | } | 452 | } |
458 | 453 | ||
459 | device_initialize(&phy->dev); | 454 | device_initialize(&phy->dev); |
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
468 | 463 | ||
469 | ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); | 464 | ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); |
470 | if (ret) | 465 | if (ret) |
471 | goto err1; | 466 | goto put_dev; |
472 | 467 | ||
473 | ret = device_add(&phy->dev); | 468 | ret = device_add(&phy->dev); |
474 | if (ret) | 469 | if (ret) |
475 | goto err1; | 470 | goto put_dev; |
476 | 471 | ||
477 | if (pm_runtime_enabled(dev)) { | 472 | if (pm_runtime_enabled(dev)) { |
478 | pm_runtime_enable(&phy->dev); | 473 | pm_runtime_enable(&phy->dev); |
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, | |||
481 | 476 | ||
482 | return phy; | 477 | return phy; |
483 | 478 | ||
484 | err1: | 479 | put_dev: |
485 | ida_remove(&phy_ida, phy->id); | ||
486 | put_device(&phy->dev); | 480 | put_device(&phy->dev); |
481 | ida_remove(&phy_ida, phy->id); | ||
482 | free_phy: | ||
487 | kfree(phy); | 483 | kfree(phy); |
488 | |||
489 | err0: | ||
490 | return ERR_PTR(ret); | 484 | return ERR_PTR(ret); |
491 | } | 485 | } |
492 | EXPORT_SYMBOL_GPL(phy_create); | 486 | EXPORT_SYMBOL_GPL(phy_create); |
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 2832576d8b12..114f5ef4b73a 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c | |||
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { | |||
512 | 512 | ||
513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { | 513 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
514 | { "INT33B2", 0 }, | 514 | { "INT33B2", 0 }, |
515 | { "INT33FC", 0 }, | ||
515 | { } | 516 | { } |
516 | }; | 517 | }; |
517 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); | 518 | MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); |
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 11bd0d970a52..e2142956a8e5 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h | |||
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info { | |||
254 | #define PINMUX_GPIO(_pin) \ | 254 | #define PINMUX_GPIO(_pin) \ |
255 | [GPIO_##_pin] = { \ | 255 | [GPIO_##_pin] = { \ |
256 | .pin = (u16)-1, \ | 256 | .pin = (u16)-1, \ |
257 | .name = __stringify(name), \ | 257 | .name = __stringify(GPIO_##_pin), \ |
258 | .enum_id = _pin##_DATA, \ | 258 | .enum_id = _pin##_DATA, \ |
259 | } | 259 | } |
260 | 260 | ||
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 5e2054afe840..85ad58c6da17 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -196,6 +196,7 @@ config BATTERY_MAX17040 | |||
196 | config BATTERY_MAX17042 | 196 | config BATTERY_MAX17042 |
197 | tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" | 197 | tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" |
198 | depends on I2C | 198 | depends on I2C |
199 | select REGMAP_I2C | ||
199 | help | 200 | help |
200 | MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries | 201 | MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries |
201 | in handheld and portable equipment. The MAX17042 is configured | 202 | in handheld and portable equipment. The MAX17042 is configured |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 00e667296360..557af943b2f5 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
511 | dev_set_drvdata(dev, psy); | 511 | dev_set_drvdata(dev, psy); |
512 | psy->dev = dev; | 512 | psy->dev = dev; |
513 | 513 | ||
514 | rc = dev_set_name(dev, "%s", psy->name); | ||
515 | if (rc) | ||
516 | goto dev_set_name_failed; | ||
517 | |||
514 | INIT_WORK(&psy->changed_work, power_supply_changed_work); | 518 | INIT_WORK(&psy->changed_work, power_supply_changed_work); |
515 | 519 | ||
516 | rc = power_supply_check_supplies(psy); | 520 | rc = power_supply_check_supplies(psy); |
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
524 | if (rc) | 528 | if (rc) |
525 | goto wakeup_init_failed; | 529 | goto wakeup_init_failed; |
526 | 530 | ||
527 | rc = kobject_set_name(&dev->kobj, "%s", psy->name); | ||
528 | if (rc) | ||
529 | goto kobject_set_name_failed; | ||
530 | |||
531 | rc = device_add(dev); | 531 | rc = device_add(dev); |
532 | if (rc) | 532 | if (rc) |
533 | goto device_add_failed; | 533 | goto device_add_failed; |
@@ -553,11 +553,11 @@ create_triggers_failed: | |||
553 | register_cooler_failed: | 553 | register_cooler_failed: |
554 | psy_unregister_thermal(psy); | 554 | psy_unregister_thermal(psy); |
555 | register_thermal_failed: | 555 | register_thermal_failed: |
556 | wakeup_init_failed: | ||
557 | device_del(dev); | 556 | device_del(dev); |
558 | kobject_set_name_failed: | ||
559 | device_add_failed: | 557 | device_add_failed: |
558 | wakeup_init_failed: | ||
560 | check_supplies_failed: | 559 | check_supplies_failed: |
560 | dev_set_name_failed: | ||
561 | put_device(dev); | 561 | put_device(dev); |
562 | success: | 562 | success: |
563 | return rc; | 563 | return rc; |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 2a786c504460..3c6768378a94 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
833 | return 0; | 833 | return 0; |
834 | } | 834 | } |
835 | 835 | ||
836 | static const struct x86_cpu_id energy_unit_quirk_ids[] = { | ||
837 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
838 | {} | ||
839 | }; | ||
840 | |||
836 | static int rapl_check_unit(struct rapl_package *rp, int cpu) | 841 | static int rapl_check_unit(struct rapl_package *rp, int cpu) |
837 | { | 842 | { |
838 | u64 msr_val; | 843 | u64 msr_val; |
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu) | |||
853 | * time unit: 1/time_unit_divisor Seconds | 858 | * time unit: 1/time_unit_divisor Seconds |
854 | */ | 859 | */ |
855 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 860 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
856 | rp->energy_unit_divisor = 1 << value; | 861 | /* some CPUs have different way to calculate energy unit */ |
857 | 862 | if (x86_match_cpu(energy_unit_quirk_ids)) | |
863 | rp->energy_unit_divisor = 1000000 / (1 << value); | ||
864 | else | ||
865 | rp->energy_unit_divisor = 1 << value; | ||
858 | 866 | ||
859 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 867 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
860 | rp->power_unit_divisor = 1 << value; | 868 | rp->power_unit_divisor = 1 << value; |
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id) | |||
941 | static const struct x86_cpu_id rapl_ids[] = { | 949 | static const struct x86_cpu_id rapl_ids[] = { |
942 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ | 950 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ |
943 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ | 951 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ |
952 | { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ | ||
944 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ | 953 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ |
945 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ | 954 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ |
946 | /* TODO: Add more CPU IDs after testing */ | 955 | /* TODO: Add more CPU IDs after testing */ |
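Note (worked arithmetic, values illustrative rather than taken from the patch): with the default branch above, a unit-field reading of 16 gives energy_unit_divisor = 2^16 = 65536, while a part matching the quirk table that reads 5 gets 1000000 / 2^5 = 31250, so the same raw energy counter is scaled very differently on the two families.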
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 5917fe3dc983..b9f1d24c6812 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c | |||
@@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, | |||
590 | default: | 590 | default: |
591 | return -EINVAL; | 591 | return -EINVAL; |
592 | } | 592 | } |
593 | ret <<= ffs(mask) - 1; | ||
593 | val = ret & mask; | 594 | val = ret & mask; |
594 | val <<= ffs(mask) - 1; | ||
595 | return as3722_update_bits(as3722, reg, mask, val); | 595 | return as3722_update_bits(as3722, reg, mask, val); |
596 | } | 596 | } |
597 | 597 | ||
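Note (illustration, not part of the patch): the one-line reorder above matters because masking before shifting discards the value. Assume, purely for illustration, a two-bit current-limit field covered by mask = 0xc0, so ffs(mask) - 1 = 6, and a selected code of ret = 2:

	/* old order: mask first, then shift -- the field value is lost   */
	val = (2 & 0xc0) << 6;		/* = 0    (wrong)                  */

	/* new order: shift into position first, then mask to the field  */
	val = (2 << 6) & 0xc0;		/* = 0x80 (bits 7:6 = 0b10)        */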
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 3fe13130baec..d85f31385b24 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev) | |||
119 | return ""; | 119 | return ""; |
120 | } | 120 | } |
121 | 121 | ||
122 | static bool have_full_constraints(void) | ||
123 | { | ||
124 | return has_full_constraints || of_have_populated_dt(); | ||
125 | } | ||
126 | |||
122 | /** | 127 | /** |
123 | * of_get_regulator - get a regulator device node based on supply name | 128 | * of_get_regulator - get a regulator device node based on supply name |
124 | * @dev: Device pointer for the consumer (of regulator) device | 129 | * @dev: Device pointer for the consumer (of regulator) device |
@@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, | |||
1340 | * Assume that a regulator is physically present and enabled | 1345 | * Assume that a regulator is physically present and enabled |
1341 | * even if it isn't hooked up and just provide a dummy. | 1346 | * even if it isn't hooked up and just provide a dummy. |
1342 | */ | 1347 | */ |
1343 | if (has_full_constraints && allow_dummy) { | 1348 | if (have_full_constraints() && allow_dummy) { |
1344 | pr_warn("%s supply %s not found, using dummy regulator\n", | 1349 | pr_warn("%s supply %s not found, using dummy regulator\n", |
1345 | devname, id); | 1350 | devname, id); |
1346 | 1351 | ||
@@ -3627,7 +3632,7 @@ int regulator_suspend_finish(void) | |||
3627 | if (error) | 3632 | if (error) |
3628 | ret = error; | 3633 | ret = error; |
3629 | } else { | 3634 | } else { |
3630 | if (!has_full_constraints) | 3635 | if (!have_full_constraints()) |
3631 | goto unlock; | 3636 | goto unlock; |
3632 | if (!ops->disable) | 3637 | if (!ops->disable) |
3633 | goto unlock; | 3638 | goto unlock; |
@@ -3825,7 +3830,7 @@ static int __init regulator_init_complete(void) | |||
3825 | if (!enabled) | 3830 | if (!enabled) |
3826 | goto unlock; | 3831 | goto unlock; |
3827 | 3832 | ||
3828 | if (has_full_constraints) { | 3833 | if (have_full_constraints()) { |
3829 | /* We log since this may kill the system if it | 3834 | /* We log since this may kill the system if it |
3830 | * goes wrong. */ | 3835 | * goes wrong. */ |
3831 | rdev_info(rdev, "disabling\n"); | 3836 | rdev_info(rdev, "disabling\n"); |
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 032df3799efb..8b5e4c712a01 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #define PFUZE100_DEVICEID 0x0 | 39 | #define PFUZE100_DEVICEID 0x0 |
40 | #define PFUZE100_REVID 0x3 | 40 | #define PFUZE100_REVID 0x3 |
41 | #define PFUZE100_FABID 0x3 | 41 | #define PFUZE100_FABID 0x4 |
42 | 42 | ||
43 | #define PFUZE100_SW1ABVOL 0x20 | 43 | #define PFUZE100_SW1ABVOL 0x20 |
44 | #define PFUZE100_SW1CVOL 0x2e | 44 | #define PFUZE100_SW1CVOL 0x2e |
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 333677d68d0e..9e61922d8230 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
@@ -438,7 +438,7 @@ common_reg: | |||
438 | platform_set_drvdata(pdev, s2mps11); | 438 | platform_set_drvdata(pdev, s2mps11); |
439 | 439 | ||
440 | config.dev = &pdev->dev; | 440 | config.dev = &pdev->dev; |
441 | config.regmap = iodev->regmap; | 441 | config.regmap = iodev->regmap_pmic; |
442 | config.driver_data = s2mps11; | 442 | config.driver_data = s2mps11; |
443 | for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { | 443 | for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { |
444 | if (!reg_np) { | 444 | if (!reg_np) { |
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index cbf91e25cf7f..aeb40aad0ae7 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c | |||
@@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) | |||
925 | config.dev = s5m8767->dev; | 925 | config.dev = s5m8767->dev; |
926 | config.init_data = pdata->regulators[i].initdata; | 926 | config.init_data = pdata->regulators[i].initdata; |
927 | config.driver_data = s5m8767; | 927 | config.driver_data = s5m8767; |
928 | config.regmap = iodev->regmap; | 928 | config.regmap = iodev->regmap_pmic; |
929 | config.of_node = pdata->regulators[i].reg_node; | 929 | config.of_node = pdata->regulators[i].reg_node; |
930 | 930 | ||
931 | rdev[i] = devm_regulator_register(&pdev->dev, ®ulators[id], | 931 | rdev[i] = devm_regulator_register(&pdev->dev, ®ulators[id], |
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index c0da95e95702..3281c90691c3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
220 | 220 | ||
221 | at91_alarm_year = tm.tm_year; | 221 | at91_alarm_year = tm.tm_year; |
222 | 222 | ||
223 | tm.tm_mon = alrm->time.tm_mon; | ||
224 | tm.tm_mday = alrm->time.tm_mday; | ||
223 | tm.tm_hour = alrm->time.tm_hour; | 225 | tm.tm_hour = alrm->time.tm_hour; |
224 | tm.tm_min = alrm->time.tm_min; | 226 | tm.tm_min = alrm->time.tm_min; |
225 | tm.tm_sec = alrm->time.tm_sec; | 227 | tm.tm_sec = alrm->time.tm_sec; |
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b7fd02bc0a14..ae8119dc2846 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c | |||
@@ -28,10 +28,20 @@ | |||
28 | #include <linux/mfd/samsung/irq.h> | 28 | #include <linux/mfd/samsung/irq.h> |
29 | #include <linux/mfd/samsung/rtc.h> | 29 | #include <linux/mfd/samsung/rtc.h> |
30 | 30 | ||
31 | /* | ||
32 | * Maximum number of retries for checking changes in UDR field | ||
33 | * of SEC_RTC_UDR_CON register (to limit possible endless loop). | ||
34 | * | ||
35 | * After writing to RTC registers (setting time or alarm) read the UDR field | ||
36 | * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have | ||
37 | * been transferred. | ||
38 | */ | ||
39 | #define UDR_READ_RETRY_CNT 5 | ||
40 | |||
31 | struct s5m_rtc_info { | 41 | struct s5m_rtc_info { |
32 | struct device *dev; | 42 | struct device *dev; |
33 | struct sec_pmic_dev *s5m87xx; | 43 | struct sec_pmic_dev *s5m87xx; |
34 | struct regmap *rtc; | 44 | struct regmap *regmap; |
35 | struct rtc_device *rtc_dev; | 45 | struct rtc_device *rtc_dev; |
36 | int irq; | 46 | int irq; |
37 | int device_type; | 47 | int device_type; |
@@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data) | |||
84 | } | 94 | } |
85 | } | 95 | } |
86 | 96 | ||
97 | /* | ||
98 | * Read RTC_UDR_CON register and wait till UDR field is cleared. | ||
99 | * This indicates that time/alarm update ended. | ||
100 | */ | ||
101 | static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info) | ||
102 | { | ||
103 | int ret, retry = UDR_READ_RETRY_CNT; | ||
104 | unsigned int data; | ||
105 | |||
106 | do { | ||
107 | ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); | ||
108 | } while (--retry && (data & RTC_UDR_MASK) && !ret); | ||
109 | |||
110 | if (!retry) | ||
111 | dev_err(info->dev, "waiting for UDR update, reached max number of retries\n"); | ||
112 | |||
113 | return ret; | ||
114 | } | ||
115 | |||
87 | static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) | 116 | static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) |
88 | { | 117 | { |
89 | int ret; | 118 | int ret; |
90 | unsigned int data; | 119 | unsigned int data; |
91 | 120 | ||
92 | ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); | 121 | ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); |
93 | if (ret < 0) { | 122 | if (ret < 0) { |
94 | dev_err(info->dev, "failed to read update reg(%d)\n", ret); | 123 | dev_err(info->dev, "failed to read update reg(%d)\n", ret); |
95 | return ret; | 124 | return ret; |
@@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) | |||
98 | data |= RTC_TIME_EN_MASK; | 127 | data |= RTC_TIME_EN_MASK; |
99 | data |= RTC_UDR_MASK; | 128 | data |= RTC_UDR_MASK; |
100 | 129 | ||
101 | ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); | 130 | ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); |
102 | if (ret < 0) { | 131 | if (ret < 0) { |
103 | dev_err(info->dev, "failed to write update reg(%d)\n", ret); | 132 | dev_err(info->dev, "failed to write update reg(%d)\n", ret); |
104 | return ret; | 133 | return ret; |
105 | } | 134 | } |
106 | 135 | ||
107 | do { | 136 | ret = s5m8767_wait_for_udr_update(info); |
108 | ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); | ||
109 | } while ((data & RTC_UDR_MASK) && !ret); | ||
110 | 137 | ||
111 | return ret; | 138 | return ret; |
112 | } | 139 | } |
@@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) | |||
116 | int ret; | 143 | int ret; |
117 | unsigned int data; | 144 | unsigned int data; |
118 | 145 | ||
119 | ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); | 146 | ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); |
120 | if (ret < 0) { | 147 | if (ret < 0) { |
121 | dev_err(info->dev, "%s: fail to read update reg(%d)\n", | 148 | dev_err(info->dev, "%s: fail to read update reg(%d)\n", |
122 | __func__, ret); | 149 | __func__, ret); |
@@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) | |||
126 | data &= ~RTC_TIME_EN_MASK; | 153 | data &= ~RTC_TIME_EN_MASK; |
127 | data |= RTC_UDR_MASK; | 154 | data |= RTC_UDR_MASK; |
128 | 155 | ||
129 | ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); | 156 | ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); |
130 | if (ret < 0) { | 157 | if (ret < 0) { |
131 | dev_err(info->dev, "%s: fail to write update reg(%d)\n", | 158 | dev_err(info->dev, "%s: fail to write update reg(%d)\n", |
132 | __func__, ret); | 159 | __func__, ret); |
133 | return ret; | 160 | return ret; |
134 | } | 161 | } |
135 | 162 | ||
136 | do { | 163 | ret = s5m8767_wait_for_udr_update(info); |
137 | ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); | ||
138 | } while ((data & RTC_UDR_MASK) && !ret); | ||
139 | 164 | ||
140 | return ret; | 165 | return ret; |
141 | } | 166 | } |
@@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
178 | u8 data[8]; | 203 | u8 data[8]; |
179 | int ret; | 204 | int ret; |
180 | 205 | ||
181 | ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8); | 206 | ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8); |
182 | if (ret < 0) | 207 | if (ret < 0) |
183 | return ret; | 208 | return ret; |
184 | 209 | ||
@@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
226 | 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday, | 251 | 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday, |
227 | tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); | 252 | tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); |
228 | 253 | ||
229 | ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8); | 254 | ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8); |
230 | if (ret < 0) | 255 | if (ret < 0) |
231 | return ret; | 256 | return ret; |
232 | 257 | ||
@@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
242 | unsigned int val; | 267 | unsigned int val; |
243 | int ret, i; | 268 | int ret, i; |
244 | 269 | ||
245 | ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); | 270 | ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); |
246 | if (ret < 0) | 271 | if (ret < 0) |
247 | return ret; | 272 | return ret; |
248 | 273 | ||
249 | switch (info->device_type) { | 274 | switch (info->device_type) { |
250 | case S5M8763X: | 275 | case S5M8763X: |
251 | s5m8763_data_to_tm(data, &alrm->time); | 276 | s5m8763_data_to_tm(data, &alrm->time); |
252 | ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val); | 277 | ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val); |
253 | if (ret < 0) | 278 | if (ret < 0) |
254 | return ret; | 279 | return ret; |
255 | 280 | ||
256 | alrm->enabled = !!val; | 281 | alrm->enabled = !!val; |
257 | 282 | ||
258 | ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); | 283 | ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); |
259 | if (ret < 0) | 284 | if (ret < 0) |
260 | return ret; | 285 | return ret; |
261 | 286 | ||
@@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
278 | } | 303 | } |
279 | 304 | ||
280 | alrm->pending = 0; | 305 | alrm->pending = 0; |
281 | ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); | 306 | ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); |
282 | if (ret < 0) | 307 | if (ret < 0) |
283 | return ret; | 308 | return ret; |
284 | break; | 309 | break; |
@@ -301,7 +326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) | |||
301 | int ret, i; | 326 | int ret, i; |
302 | struct rtc_time tm; | 327 | struct rtc_time tm; |
303 | 328 | ||
304 | ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); | 329 | ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); |
305 | if (ret < 0) | 330 | if (ret < 0) |
306 | return ret; | 331 | return ret; |
307 | 332 | ||
@@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) | |||
312 | 337 | ||
313 | switch (info->device_type) { | 338 | switch (info->device_type) { |
314 | case S5M8763X: | 339 | case S5M8763X: |
315 | ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0); | 340 | ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0); |
316 | break; | 341 | break; |
317 | 342 | ||
318 | case S5M8767X: | 343 | case S5M8767X: |
319 | for (i = 0; i < 7; i++) | 344 | for (i = 0; i < 7; i++) |
320 | data[i] &= ~ALARM_ENABLE_MASK; | 345 | data[i] &= ~ALARM_ENABLE_MASK; |
321 | 346 | ||
322 | ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); | 347 | ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); |
323 | if (ret < 0) | 348 | if (ret < 0) |
324 | return ret; | 349 | return ret; |
325 | 350 | ||
@@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) | |||
341 | u8 alarm0_conf; | 366 | u8 alarm0_conf; |
342 | struct rtc_time tm; | 367 | struct rtc_time tm; |
343 | 368 | ||
344 | ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); | 369 | ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); |
345 | if (ret < 0) | 370 | if (ret < 0) |
346 | return ret; | 371 | return ret; |
347 | 372 | ||
@@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) | |||
353 | switch (info->device_type) { | 378 | switch (info->device_type) { |
354 | case S5M8763X: | 379 | case S5M8763X: |
355 | alarm0_conf = 0x77; | 380 | alarm0_conf = 0x77; |
356 | ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf); | 381 | ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf); |
357 | break; | 382 | break; |
358 | 383 | ||
359 | case S5M8767X: | 384 | case S5M8767X: |
@@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) | |||
368 | if (data[RTC_YEAR1] & 0x7f) | 393 | if (data[RTC_YEAR1] & 0x7f) |
369 | data[RTC_YEAR1] |= ALARM_ENABLE_MASK; | 394 | data[RTC_YEAR1] |= ALARM_ENABLE_MASK; |
370 | 395 | ||
371 | ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); | 396 | ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); |
372 | if (ret < 0) | 397 | if (ret < 0) |
373 | return ret; | 398 | return ret; |
374 | ret = s5m8767_rtc_set_alarm_reg(info); | 399 | ret = s5m8767_rtc_set_alarm_reg(info); |
@@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
410 | if (ret < 0) | 435 | if (ret < 0) |
411 | return ret; | 436 | return ret; |
412 | 437 | ||
413 | ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); | 438 | ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); |
414 | if (ret < 0) | 439 | if (ret < 0) |
415 | return ret; | 440 | return ret; |
416 | 441 | ||
@@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = { | |||
455 | static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) | 480 | static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) |
456 | { | 481 | { |
457 | int ret; | 482 | int ret; |
458 | ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, | 483 | ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, |
459 | WTSR_ENABLE_MASK, | 484 | WTSR_ENABLE_MASK, |
460 | enable ? WTSR_ENABLE_MASK : 0); | 485 | enable ? WTSR_ENABLE_MASK : 0); |
461 | if (ret < 0) | 486 | if (ret < 0) |
@@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) | |||
466 | static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable) | 491 | static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable) |
467 | { | 492 | { |
468 | int ret; | 493 | int ret; |
469 | ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, | 494 | ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, |
470 | SMPL_ENABLE_MASK, | 495 | SMPL_ENABLE_MASK, |
471 | enable ? SMPL_ENABLE_MASK : 0); | 496 | enable ? SMPL_ENABLE_MASK : 0); |
472 | if (ret < 0) | 497 | if (ret < 0) |
@@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) | |||
481 | int ret; | 506 | int ret; |
482 | struct rtc_time tm; | 507 | struct rtc_time tm; |
483 | 508 | ||
484 | ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read); | 509 | ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read); |
485 | if (ret < 0) { | 510 | if (ret < 0) { |
486 | dev_err(info->dev, "%s: fail to read control reg(%d)\n", | 511 | dev_err(info->dev, "%s: fail to read control reg(%d)\n", |
487 | __func__, ret); | 512 | __func__, ret); |
@@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) | |||
493 | data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); | 518 | data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); |
494 | 519 | ||
495 | info->rtc_24hr_mode = 1; | 520 | info->rtc_24hr_mode = 1; |
496 | ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2); | 521 | ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2); |
497 | if (ret < 0) { | 522 | if (ret < 0) { |
498 | dev_err(info->dev, "%s: fail to write controlm reg(%d)\n", | 523 | dev_err(info->dev, "%s: fail to write controlm reg(%d)\n", |
499 | __func__, ret); | 524 | __func__, ret); |
@@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) | |||
515 | ret = s5m_rtc_set_time(info->dev, &tm); | 540 | ret = s5m_rtc_set_time(info->dev, &tm); |
516 | } | 541 | } |
517 | 542 | ||
518 | ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON, | 543 | ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON, |
519 | RTC_TCON_MASK, tp_read | RTC_TCON_MASK); | 544 | RTC_TCON_MASK, tp_read | RTC_TCON_MASK); |
520 | if (ret < 0) | 545 | if (ret < 0) |
521 | dev_err(info->dev, "%s: fail to update TCON reg(%d)\n", | 546 | dev_err(info->dev, "%s: fail to update TCON reg(%d)\n", |
@@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev) | |||
542 | 567 | ||
543 | info->dev = &pdev->dev; | 568 | info->dev = &pdev->dev; |
544 | info->s5m87xx = s5m87xx; | 569 | info->s5m87xx = s5m87xx; |
545 | info->rtc = s5m87xx->rtc; | 570 | info->regmap = s5m87xx->regmap_rtc; |
546 | info->device_type = s5m87xx->device_type; | 571 | info->device_type = s5m87xx->device_type; |
547 | info->wtsr_smpl = s5m87xx->wtsr_smpl; | 572 | info->wtsr_smpl = s5m87xx->wtsr_smpl; |
548 | 573 | ||
549 | switch (pdata->device_type) { | 574 | switch (pdata->device_type) { |
550 | case S5M8763X: | 575 | case S5M8763X: |
551 | info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0; | 576 | info->irq = regmap_irq_get_virq(s5m87xx->irq_data, |
577 | S5M8763_IRQ_ALARM0); | ||
552 | break; | 578 | break; |
553 | 579 | ||
554 | case S5M8767X: | 580 | case S5M8767X: |
555 | info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1; | 581 | info->irq = regmap_irq_get_virq(s5m87xx->irq_data, |
582 | S5M8767_IRQ_RTCA1); | ||
556 | break; | 583 | break; |
557 | 584 | ||
558 | default: | 585 | default: |
@@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) | |||
596 | if (info->wtsr_smpl) { | 623 | if (info->wtsr_smpl) { |
597 | for (i = 0; i < 3; i++) { | 624 | for (i = 0; i < 3; i++) { |
598 | s5m_rtc_enable_wtsr(info, false); | 625 | s5m_rtc_enable_wtsr(info, false); |
599 | regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val); | 626 | regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val); |
600 | pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); | 627 | pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); |
601 | if (val & WTSR_ENABLE_MASK) | 628 | if (val & WTSR_ENABLE_MASK) |
602 | pr_emerg("%s: fail to disable WTSR\n", | 629 | pr_emerg("%s: fail to disable WTSR\n", |
@@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) | |||
612 | s5m_rtc_enable_smpl(info, false); | 639 | s5m_rtc_enable_smpl(info, false); |
613 | } | 640 | } |
614 | 641 | ||
642 | static int s5m_rtc_resume(struct device *dev) | ||
643 | { | ||
644 | struct s5m_rtc_info *info = dev_get_drvdata(dev); | ||
645 | int ret = 0; | ||
646 | |||
647 | if (device_may_wakeup(dev)) | ||
648 | ret = disable_irq_wake(info->irq); | ||
649 | |||
650 | return ret; | ||
651 | } | ||
652 | |||
653 | static int s5m_rtc_suspend(struct device *dev) | ||
654 | { | ||
655 | struct s5m_rtc_info *info = dev_get_drvdata(dev); | ||
656 | int ret = 0; | ||
657 | |||
658 | if (device_may_wakeup(dev)) | ||
659 | ret = enable_irq_wake(info->irq); | ||
660 | |||
661 | return ret; | ||
662 | } | ||
663 | |||
664 | static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); | ||
665 | |||
615 | static const struct platform_device_id s5m_rtc_id[] = { | 666 | static const struct platform_device_id s5m_rtc_id[] = { |
616 | { "s5m-rtc", 0 }, | 667 | { "s5m-rtc", 0 }, |
617 | }; | 668 | }; |
@@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = { | |||
620 | .driver = { | 671 | .driver = { |
621 | .name = "s5m-rtc", | 672 | .name = "s5m-rtc", |
622 | .owner = THIS_MODULE, | 673 | .owner = THIS_MODULE, |
674 | .pm = &s5m_rtc_pm_ops, | ||
623 | }, | 675 | }, |
624 | .probe = s5m_rtc_probe, | 676 | .probe = s5m_rtc_probe, |
625 | .shutdown = s5m_rtc_shutdown, | 677 | .shutdown = s5m_rtc_shutdown, |
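Note (illustration, not part of the patch): s5m8767_wait_for_udr_update() with UDR_READ_RETRY_CNT above turns a previously unbounded polling loop into a bounded one. As an aside, later kernels added a generic regmap helper for exactly this pattern; a sketch assuming regmap_read_poll_timeout() is available, with the 100 us / 10 ms timings picked arbitrarily for the example.

#include <linux/regmap.h>

static int sketch_wait_for_udr(struct s5m_rtc_info *info)
{
	unsigned int data;

	/* Read SEC_RTC_UDR_CON roughly every 100 us until the UDR bit
	 * clears, giving up with -ETIMEDOUT after 10 ms. */
	return regmap_read_poll_timeout(info->regmap, SEC_RTC_UDR_CON, data,
					!(data & RTC_UDR_MASK), 100, 10000);
}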
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index f64921756ad6..f224d59c4b6b 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c | |||
@@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block) | |||
87 | { | 87 | { |
88 | if (block->gdp) { | 88 | if (block->gdp) { |
89 | del_gendisk(block->gdp); | 89 | del_gendisk(block->gdp); |
90 | block->gdp->queue = NULL; | ||
91 | block->gdp->private_data = NULL; | 90 | block->gdp->private_data = NULL; |
92 | put_disk(block->gdp); | 91 | put_disk(block->gdp); |
93 | block->gdp = NULL; | 92 | block->gdp = NULL; |
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index f7aa080e9b28..1465e9563101 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c | |||
@@ -35,7 +35,6 @@ struct read_info_sccb { | |||
35 | u8 _reserved5[4096 - 112]; /* 112-4095 */ | 35 | u8 _reserved5[4096 - 112]; /* 112-4095 */ |
36 | } __packed __aligned(PAGE_SIZE); | 36 | } __packed __aligned(PAGE_SIZE); |
37 | 37 | ||
38 | static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE); | ||
39 | static __initdata struct read_info_sccb early_read_info_sccb; | 38 | static __initdata struct read_info_sccb early_read_info_sccb; |
40 | static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); | 39 | static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); |
41 | static unsigned long sclp_hsa_size; | 40 | static unsigned long sclp_hsa_size; |
@@ -113,7 +112,7 @@ static void __init sclp_facilities_detect(void) | |||
113 | 112 | ||
114 | bool __init sclp_has_linemode(void) | 113 | bool __init sclp_has_linemode(void) |
115 | { | 114 | { |
116 | struct init_sccb *sccb = &early_event_mask_sccb; | 115 | struct init_sccb *sccb = (void *) &sccb_early; |
117 | 116 | ||
118 | if (sccb->header.response_code != 0x20) | 117 | if (sccb->header.response_code != 0x20) |
119 | return 0; | 118 | return 0; |
@@ -126,7 +125,7 @@ bool __init sclp_has_linemode(void) | |||
126 | 125 | ||
127 | bool __init sclp_has_vt220(void) | 126 | bool __init sclp_has_vt220(void) |
128 | { | 127 | { |
129 | struct init_sccb *sccb = &early_event_mask_sccb; | 128 | struct init_sccb *sccb = (void *) &sccb_early; |
130 | 129 | ||
131 | if (sccb->header.response_code != 0x20) | 130 | if (sccb->header.response_code != 0x20) |
132 | return 0; | 131 | return 0; |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 3f4ca4e09a4c..34629ea913d4 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
942 | return rc; | 942 | return rc; |
943 | } | 943 | } |
944 | 944 | ||
945 | tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); | 945 | tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols); |
946 | if (IS_ERR(tp->screen)) { | 946 | if (IS_ERR(tp->screen)) { |
947 | rc = PTR_ERR(tp->screen); | 947 | rc = PTR_ERR(tp->screen); |
948 | raw3270_put_view(&tp->view); | 948 | raw3270_put_view(&tp->view); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 596480022b0a..38a1257e76e1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, | |||
471 | schedule_delayed_work(&tgt->sess_del_work, 0); | 471 | schedule_delayed_work(&tgt->sess_del_work, 0); |
472 | else | 472 | else |
473 | schedule_delayed_work(&tgt->sess_del_work, | 473 | schedule_delayed_work(&tgt->sess_del_work, |
474 | jiffies - sess->expires); | 474 | sess->expires - jiffies); |
475 | } | 475 | } |
476 | 476 | ||
477 | /* ha->hardware_lock supposed to be held on entry */ | 477 | /* ha->hardware_lock supposed to be held on entry */ |
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) | |||
550 | struct scsi_qla_host *vha = tgt->vha; | 550 | struct scsi_qla_host *vha = tgt->vha; |
551 | struct qla_hw_data *ha = vha->hw; | 551 | struct qla_hw_data *ha = vha->hw; |
552 | struct qla_tgt_sess *sess; | 552 | struct qla_tgt_sess *sess; |
553 | unsigned long flags; | 553 | unsigned long flags, elapsed; |
554 | 554 | ||
555 | spin_lock_irqsave(&ha->hardware_lock, flags); | 555 | spin_lock_irqsave(&ha->hardware_lock, flags); |
556 | while (!list_empty(&tgt->del_sess_list)) { | 556 | while (!list_empty(&tgt->del_sess_list)) { |
557 | sess = list_entry(tgt->del_sess_list.next, typeof(*sess), | 557 | sess = list_entry(tgt->del_sess_list.next, typeof(*sess), |
558 | del_list_entry); | 558 | del_list_entry); |
559 | if (time_after_eq(jiffies, sess->expires)) { | 559 | elapsed = jiffies; |
560 | if (time_after_eq(elapsed, sess->expires)) { | ||
560 | qlt_undelete_sess(sess); | 561 | qlt_undelete_sess(sess); |
561 | 562 | ||
562 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, | 563 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, |
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) | |||
566 | ha->tgt.tgt_ops->put_sess(sess); | 567 | ha->tgt.tgt_ops->put_sess(sess); |
567 | } else { | 568 | } else { |
568 | schedule_delayed_work(&tgt->sess_del_work, | 569 | schedule_delayed_work(&tgt->sess_del_work, |
569 | jiffies - sess->expires); | 570 | sess->expires - elapsed); |
570 | break; | 571 | break; |
571 | } | 572 | } |
572 | } | 573 | } |
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, | |||
4290 | if (rc != 0) { | 4291 | if (rc != 0) { |
4291 | ha->tgt.tgt_ops = NULL; | 4292 | ha->tgt.tgt_ops = NULL; |
4292 | ha->tgt.target_lport_ptr = NULL; | 4293 | ha->tgt.target_lport_ptr = NULL; |
4294 | scsi_host_put(host); | ||
4293 | } | 4295 | } |
4294 | mutex_unlock(&qla_tgt_mutex); | 4296 | mutex_unlock(&qla_tgt_mutex); |
4295 | return rc; | 4297 | return rc; |
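Note (illustration, not part of the patch): the operand swap above is the whole bug; sess->expires holds an absolute jiffies deadline, and schedule_delayed_work() expects the remaining delay, which is deadline minus now, never the reverse. The generic shape of that pattern, with names invented for the sketch:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* "deadline" is an absolute jiffies value. */
static void sketch_rearm_work(struct delayed_work *dwork, unsigned long deadline)
{
	unsigned long now = jiffies;

	if (time_after_eq(now, deadline))
		schedule_delayed_work(dwork, 0);		/* already due */
	else
		schedule_delayed_work(dwork, deadline - now);	/* ticks left */
}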
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 53fee2f9a498..8dfdd2732bdc 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c | |||
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev) | |||
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) | 42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, |
43 | void *accel_priv) | ||
43 | { | 44 | { |
44 | return ClassifyPacket(netdev_priv(dev), skb); | 45 | return ClassifyPacket(netdev_priv(dev), skb); |
45 | } | 46 | } |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 8f02bf66e20b..4964d2a2fc7d 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev, | |||
446 | release_firmware(fw); | 446 | release_firmware(fw); |
447 | } | 447 | } |
448 | 448 | ||
449 | return ret; | 449 | return ret < 0 ? ret : 0; |
450 | } | 450 | } |
451 | EXPORT_SYMBOL_GPL(comedi_load_firmware); | 451 | EXPORT_SYMBOL_GPL(comedi_load_firmware); |
452 | 452 | ||
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c index 432e3f9c3301..c55f234b29e6 100644 --- a/drivers/staging/comedi/drivers/8255_pci.c +++ b/drivers/staging/comedi/drivers/8255_pci.c | |||
@@ -63,7 +63,8 @@ enum pci_8255_boardid { | |||
63 | BOARD_ADLINK_PCI7296, | 63 | BOARD_ADLINK_PCI7296, |
64 | BOARD_CB_PCIDIO24, | 64 | BOARD_CB_PCIDIO24, |
65 | BOARD_CB_PCIDIO24H, | 65 | BOARD_CB_PCIDIO24H, |
66 | BOARD_CB_PCIDIO48H, | 66 | BOARD_CB_PCIDIO48H_OLD, |
67 | BOARD_CB_PCIDIO48H_NEW, | ||
67 | BOARD_CB_PCIDIO96H, | 68 | BOARD_CB_PCIDIO96H, |
68 | BOARD_NI_PCIDIO96, | 69 | BOARD_NI_PCIDIO96, |
69 | BOARD_NI_PCIDIO96B, | 70 | BOARD_NI_PCIDIO96B, |
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = { | |||
106 | .dio_badr = 2, | 107 | .dio_badr = 2, |
107 | .n_8255 = 1, | 108 | .n_8255 = 1, |
108 | }, | 109 | }, |
109 | [BOARD_CB_PCIDIO48H] = { | 110 | [BOARD_CB_PCIDIO48H_OLD] = { |
110 | .name = "cb_pci-dio48h", | 111 | .name = "cb_pci-dio48h", |
111 | .dio_badr = 1, | 112 | .dio_badr = 1, |
112 | .n_8255 = 2, | 113 | .n_8255 = 2, |
113 | }, | 114 | }, |
115 | [BOARD_CB_PCIDIO48H_NEW] = { | ||
116 | .name = "cb_pci-dio48h", | ||
117 | .dio_badr = 2, | ||
118 | .n_8255 = 2, | ||
119 | }, | ||
114 | [BOARD_CB_PCIDIO96H] = { | 120 | [BOARD_CB_PCIDIO96H] = { |
115 | .name = "cb_pci-dio96h", | 121 | .name = "cb_pci-dio96h", |
116 | .dio_badr = 2, | 122 | .dio_badr = 2, |
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { | |||
263 | { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, | 269 | { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, |
264 | { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, | 270 | { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, |
265 | { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, | 271 | { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, |
266 | { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, | 272 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000), |
273 | .driver_data = BOARD_CB_PCIDIO48H_OLD }, | ||
274 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b), | ||
275 | .driver_data = BOARD_CB_PCIDIO48H_NEW }, | ||
267 | { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, | 276 | { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, |
268 | { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, | 277 | { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, |
269 | { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, | 278 | { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, |
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index 99421f90d189..0485d7f39867 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c | |||
@@ -451,7 +451,12 @@ done: | |||
451 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ | 451 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ |
452 | BIT(IIO_CHAN_INFO_SAMP_FREQ), \ | 452 | BIT(IIO_CHAN_INFO_SAMP_FREQ), \ |
453 | .scan_index = idx, \ | 453 | .scan_index = idx, \ |
454 | .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ | 454 | .scan_type = { \ |
455 | .sign = 's', \ | ||
456 | .realbits = 16, \ | ||
457 | .storagebits = 16, \ | ||
458 | .endianness = IIO_BE, \ | ||
459 | }, \ | ||
455 | } | 460 | } |
456 | 461 | ||
457 | static const struct iio_chan_spec hmc5843_channels[] = { | 462 | static const struct iio_chan_spec hmc5843_channels[] = { |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 6bd015ac9d68..96e4eee344ef 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) | |||
88 | 88 | ||
89 | imx_drm_device_put(); | 89 | imx_drm_device_put(); |
90 | 90 | ||
91 | drm_mode_config_cleanup(imxdrm->drm); | 91 | drm_vblank_cleanup(imxdrm->drm); |
92 | drm_kms_helper_poll_fini(imxdrm->drm); | 92 | drm_kms_helper_poll_fini(imxdrm->drm); |
93 | drm_mode_config_cleanup(imxdrm->drm); | ||
93 | 94 | ||
94 | return 0; | 95 | return 0; |
95 | } | 96 | } |
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm, | |||
199 | if (!file->is_master) | 200 | if (!file->is_master) |
200 | return; | 201 | return; |
201 | 202 | ||
202 | for (i = 0; i < 4; i++) | 203 | for (i = 0; i < MAX_CRTC; i++) |
203 | imx_drm_disable_vblank(drm , i); | 204 | imx_drm_disable_vblank(drm, i); |
204 | } | 205 | } |
205 | 206 | ||
206 | static const struct file_operations imx_drm_driver_fops = { | 207 | static const struct file_operations imx_drm_driver_fops = { |
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | |||
376 | struct imx_drm_device *imxdrm = __imx_drm_device(); | 377 | struct imx_drm_device *imxdrm = __imx_drm_device(); |
377 | int ret; | 378 | int ret; |
378 | 379 | ||
379 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
380 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
381 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); | 380 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); |
382 | if (ret) | 381 | if (ret) |
383 | return ret; | 382 | return ret; |
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | |||
385 | drm_crtc_helper_add(imx_drm_crtc->crtc, | 384 | drm_crtc_helper_add(imx_drm_crtc->crtc, |
386 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | 385 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); |
387 | 386 | ||
387 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
388 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
389 | |||
388 | drm_mode_group_reinit(imxdrm->drm); | 390 | drm_mode_group_reinit(imxdrm->drm); |
389 | 391 | ||
390 | return 0; | 392 | return 0; |
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
428 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, | 430 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, |
429 | &imxdrm->drm->primary->mode_group); | 431 | &imxdrm->drm->primary->mode_group); |
430 | if (ret) | 432 | if (ret) |
431 | goto err_init; | 433 | goto err_kms; |
432 | 434 | ||
433 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); | 435 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); |
434 | if (ret) | 436 | if (ret) |
435 | goto err_init; | 437 | goto err_kms; |
436 | 438 | ||
437 | /* | 439 | /* |
438 | * with vblank_disable_allowed = true, vblank interrupt will be disabled | 440 | * with vblank_disable_allowed = true, vblank interrupt will be disabled |
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
441 | */ | 443 | */ |
442 | imxdrm->drm->vblank_disable_allowed = true; | 444 | imxdrm->drm->vblank_disable_allowed = true; |
443 | 445 | ||
444 | if (!imx_drm_device_get()) | 446 | if (!imx_drm_device_get()) { |
445 | ret = -EINVAL; | 447 | ret = -EINVAL; |
448 | goto err_vblank; | ||
449 | } | ||
446 | 450 | ||
447 | ret = 0; | 451 | mutex_unlock(&imxdrm->mutex); |
452 | return 0; | ||
448 | 453 | ||
449 | err_init: | 454 | err_vblank: |
455 | drm_vblank_cleanup(drm); | ||
456 | err_kms: | ||
457 | drm_kms_helper_poll_fini(drm); | ||
458 | drm_mode_config_cleanup(drm); | ||
450 | mutex_unlock(&imxdrm->mutex); | 459 | mutex_unlock(&imxdrm->mutex); |
451 | 460 | ||
452 | return ret; | 461 | return ret; |
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
492 | 501 | ||
493 | mutex_lock(&imxdrm->mutex); | 502 | mutex_lock(&imxdrm->mutex); |
494 | 503 | ||
504 | /* | ||
505 | * The vblank arrays are dimensioned by MAX_CRTC - we can't | ||
506 | * pass IDs greater than this to those functions. | ||
507 | */ | ||
508 | if (imxdrm->pipes >= MAX_CRTC) { | ||
509 | ret = -EINVAL; | ||
510 | goto err_busy; | ||
511 | } | ||
512 | |||
495 | if (imxdrm->drm->open_count) { | 513 | if (imxdrm->drm->open_count) { |
496 | ret = -EBUSY; | 514 | ret = -EBUSY; |
497 | goto err_busy; | 515 | goto err_busy; |
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
528 | return 0; | 546 | return 0; |
529 | 547 | ||
530 | err_register: | 548 | err_register: |
549 | list_del(&imx_drm_crtc->list); | ||
531 | kfree(imx_drm_crtc); | 550 | kfree(imx_drm_crtc); |
532 | err_alloc: | 551 | err_alloc: |
533 | err_busy: | 552 | err_busy: |
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 680f4c8fa081..2c44fef8d58b 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c | |||
@@ -114,7 +114,6 @@ struct imx_tve { | |||
114 | struct drm_encoder encoder; | 114 | struct drm_encoder encoder; |
115 | struct imx_drm_encoder *imx_drm_encoder; | 115 | struct imx_drm_encoder *imx_drm_encoder; |
116 | struct device *dev; | 116 | struct device *dev; |
117 | spinlock_t enable_lock; /* serializes tve_enable/disable */ | ||
118 | spinlock_t lock; /* register lock */ | 117 | spinlock_t lock; /* register lock */ |
119 | bool enabled; | 118 | bool enabled; |
120 | int mode; | 119 | int mode; |
@@ -146,10 +145,8 @@ __releases(&tve->lock) | |||
146 | 145 | ||
147 | static void tve_enable(struct imx_tve *tve) | 146 | static void tve_enable(struct imx_tve *tve) |
148 | { | 147 | { |
149 | unsigned long flags; | ||
150 | int ret; | 148 | int ret; |
151 | 149 | ||
152 | spin_lock_irqsave(&tve->enable_lock, flags); | ||
153 | if (!tve->enabled) { | 150 | if (!tve->enabled) { |
154 | tve->enabled = true; | 151 | tve->enabled = true; |
155 | clk_prepare_enable(tve->clk); | 152 | clk_prepare_enable(tve->clk); |
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve) | |||
169 | TVE_CD_SM_IEN | | 166 | TVE_CD_SM_IEN | |
170 | TVE_CD_LM_IEN | | 167 | TVE_CD_LM_IEN | |
171 | TVE_CD_MON_END_IEN); | 168 | TVE_CD_MON_END_IEN); |
172 | |||
173 | spin_unlock_irqrestore(&tve->enable_lock, flags); | ||
174 | } | 169 | } |
175 | 170 | ||
176 | static void tve_disable(struct imx_tve *tve) | 171 | static void tve_disable(struct imx_tve *tve) |
177 | { | 172 | { |
178 | unsigned long flags; | ||
179 | int ret; | 173 | int ret; |
180 | 174 | ||
181 | spin_lock_irqsave(&tve->enable_lock, flags); | ||
182 | if (tve->enabled) { | 175 | if (tve->enabled) { |
183 | tve->enabled = false; | 176 | tve->enabled = false; |
184 | ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, | 177 | ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, |
185 | TVE_IPU_CLK_EN | TVE_EN, 0); | 178 | TVE_IPU_CLK_EN | TVE_EN, 0); |
186 | clk_disable_unprepare(tve->clk); | 179 | clk_disable_unprepare(tve->clk); |
187 | } | 180 | } |
188 | spin_unlock_irqrestore(&tve->enable_lock, flags); | ||
189 | } | 181 | } |
190 | 182 | ||
191 | static int tve_setup_tvout(struct imx_tve *tve) | 183 | static int tve_setup_tvout(struct imx_tve *tve) |
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev) | |||
601 | 593 | ||
602 | tve->dev = &pdev->dev; | 594 | tve->dev = &pdev->dev; |
603 | spin_lock_init(&tve->lock); | 595 | spin_lock_init(&tve->lock); |
604 | spin_lock_init(&tve->enable_lock); | ||
605 | 596 | ||
606 | ddc_node = of_parse_phandle(np, "ddc", 0); | 597 | ddc_node = of_parse_phandle(np, "ddc", 0); |
607 | if (ddc_node) { | 598 | if (ddc_node) { |
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c index 7a22ce619ed2..97ca6924dbb3 100644 --- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c +++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c | |||
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = { | |||
996 | }, | 996 | }, |
997 | }; | 997 | }; |
998 | 998 | ||
999 | static DEFINE_MUTEX(ipu_client_id_mutex); | ||
999 | static int ipu_client_id; | 1000 | static int ipu_client_id; |
1000 | 1001 | ||
1001 | static int ipu_add_subdevice_pdata(struct device *dev, | ||
1002 | const struct ipu_platform_reg *reg) | ||
1003 | { | ||
1004 | struct platform_device *pdev; | ||
1005 | |||
1006 | pdev = platform_device_register_data(dev, reg->name, ipu_client_id++, | ||
1007 | &reg->pdata, sizeof(struct ipu_platform_reg)); | ||
1008 | |||
1009 | return PTR_ERR_OR_ZERO(pdev); | ||
1010 | } | ||
1011 | |||
1012 | static int ipu_add_client_devices(struct ipu_soc *ipu) | 1002 | static int ipu_add_client_devices(struct ipu_soc *ipu) |
1013 | { | 1003 | { |
1014 | int ret; | 1004 | struct device *dev = ipu->dev; |
1015 | int i; | 1005 | unsigned i; |
1006 | int id, ret; | ||
1007 | |||
1008 | mutex_lock(&ipu_client_id_mutex); | ||
1009 | id = ipu_client_id; | ||
1010 | ipu_client_id += ARRAY_SIZE(client_reg); | ||
1011 | mutex_unlock(&ipu_client_id_mutex); | ||
1016 | 1012 | ||
1017 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1013 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
1018 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1014 | const struct ipu_platform_reg *reg = &client_reg[i]; |
1019 | ret = ipu_add_subdevice_pdata(ipu->dev, reg); | 1015 | struct platform_device *pdev; |
1020 | if (ret) | 1016 | |
1017 | pdev = platform_device_register_data(dev, reg->name, | ||
1018 | id++, &reg->pdata, sizeof(reg->pdata)); | ||
1019 | |||
1020 | if (IS_ERR(pdev)) | ||
1021 | goto err_register; | 1021 | goto err_register; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | return 0; | 1024 | return 0; |
1025 | 1025 | ||
1026 | err_register: | 1026 | err_register: |
1027 | platform_device_unregister_children(to_platform_device(ipu->dev)); | 1027 | platform_device_unregister_children(to_platform_device(dev)); |
1028 | 1028 | ||
1029 | return ret; | 1029 | return ret; |
1030 | } | 1030 | } |
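
The ipu-common.c rework above takes ipu_client_id_mutex once, reserves ARRAY_SIZE(client_reg) consecutive IDs, and only then registers the child platform devices, so two IPUs probing in parallel can no longer interleave their ID assignments. A minimal user-space sketch of that block-reservation pattern, with made-up names and counts rather than the driver's own:

/*
 * Sketch: each probe grabs a contiguous block of IDs under a mutex and
 * hands them out afterwards without holding the lock.
 */
#include <pthread.h>
#include <stdio.h>

#define CLIENTS_PER_IPU 4            /* stands in for ARRAY_SIZE(client_reg) */

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static int next_client_id;

static void *probe_one_ipu(void *arg)
{
	int base, i;

	pthread_mutex_lock(&id_mutex);
	base = next_client_id;
	next_client_id += CLIENTS_PER_IPU;   /* reserve the whole block */
	pthread_mutex_unlock(&id_mutex);

	for (i = 0; i < CLIENTS_PER_IPU; i++)
		printf("ipu %ld registers client id %d\n", (long)arg, base + i);

	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, probe_one_ipu, (void *)0L);
	pthread_create(&b, NULL, probe_one_ipu, (void *)1L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Each probe prints a disjoint range of IDs regardless of how the two threads are scheduled, which is the property the mutex-protected reservation buys.
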
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 235d2b1ec593..eedffed17e39 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c | |||
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, | |||
306 | return NETDEV_TX_OK; | 306 | return NETDEV_TX_OK; |
307 | } | 307 | } |
308 | 308 | ||
309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb) | 309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, |
310 | void *accel_priv) | ||
310 | { | 311 | { |
311 | return (u16)smp_processor_id(); | 312 | return (u16)smp_processor_id(); |
312 | } | 313 | } |
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 17659bb04bef..dd69e344e409 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c | |||
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) | |||
652 | return dscp >> 5; | 652 | return dscp >> 5; |
653 | } | 653 | } |
654 | 654 | ||
655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb) | 655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, |
656 | void *accel_priv) | ||
656 | { | 657 | { |
657 | struct adapter *padapter = rtw_netdev_priv(dev); | 658 | struct adapter *padapter = rtw_netdev_priv(dev); |
658 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; | 659 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; |
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 1aa4a3fd0f1b..56e355b3e7fa 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c | |||
@@ -258,7 +258,8 @@ err: | |||
258 | /* This function maps kernel space memory to user space memory. */ | 258 | /* This function maps kernel space memory to user space memory. */ |
259 | static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) | 259 | static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) |
260 | { | 260 | { |
261 | u32 status; | 261 | struct omap_dsp_platform_data *pdata = |
262 | omap_dspbridge_dev->dev.platform_data; | ||
262 | 263 | ||
263 | /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ | 264 | /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ |
264 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 265 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
@@ -268,13 +269,9 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) | |||
268 | vma->vm_start, vma->vm_end, vma->vm_page_prot, | 269 | vma->vm_start, vma->vm_end, vma->vm_page_prot, |
269 | vma->vm_flags); | 270 | vma->vm_flags); |
270 | 271 | ||
271 | status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 272 | return vm_iomap_memory(vma, |
272 | vma->vm_end - vma->vm_start, | 273 | pdata->phys_mempool_base, |
273 | vma->vm_page_prot); | 274 | pdata->phys_mempool_size); |
274 | if (status != 0) | ||
275 | status = -EAGAIN; | ||
276 | |||
277 | return status; | ||
278 | } | 275 | } |
279 | 276 | ||
280 | static const struct file_operations bridge_fops = { | 277 | static const struct file_operations bridge_fops = { |
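
In the bridge_mmap() change above, the old remap_pfn_range() call used the caller-supplied vma->vm_pgoff directly as the target PFN with no bound on what could be mapped; vm_iomap_memory() instead anchors the mapping at phys_mempool_base and checks the requested offset and length against phys_mempool_size. A rough user-space model of that bounds check (the pool geometry and PAGE_SIZE are illustrative, and the kernel-side details differ):

/*
 * Model: accept an mmap request only if the page offset plus length fits
 * inside the memory pool; otherwise refuse instead of mapping past it.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int check_mmap(unsigned long pool_size, unsigned long pgoff, unsigned long len)
{
	unsigned long off = pgoff * PAGE_SIZE;

	if (off > pool_size || len > pool_size - off)
		return -1;			/* would map past the pool */
	return 0;				/* map pool_base + off for len bytes */
}

int main(void)
{
	unsigned long pool = 16 * PAGE_SIZE;	/* pretend phys_mempool_size */

	printf("whole pool   : %d\n", check_mmap(pool, 0, pool));
	printf("tail page    : %d\n", check_mmap(pool, 15, PAGE_SIZE));
	printf("past the end : %d\n", check_mmap(pool, 15, 2 * PAGE_SIZE));
	return 0;
}
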
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d70e9119e906..00867190413c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np) | |||
465 | */ | 465 | */ |
466 | send_sig(SIGINT, np->np_thread, 1); | 466 | send_sig(SIGINT, np->np_thread, 1); |
467 | kthread_stop(np->np_thread); | 467 | kthread_stop(np->np_thread); |
468 | np->np_thread = NULL; | ||
468 | } | 469 | } |
469 | 470 | ||
470 | np->np_transport->iscsit_free_np(np); | 471 | np->np_transport->iscsit_free_np(np); |
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
823 | if (((hdr->flags & ISCSI_FLAG_CMD_READ) || | 824 | if (((hdr->flags & ISCSI_FLAG_CMD_READ) || |
824 | (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { | 825 | (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { |
825 | /* | 826 | /* |
826 | * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) | 827 | * From RFC-3720 Section 10.3.1: |
827 | * that adds support for RESERVE/RELEASE. There is a bug | 828 | * |
828 | * add with this new functionality that sets R/W bits when | 829 | * "Either or both of R and W MAY be 1 when either the |
829 | * neither CDB carries any READ or WRITE datapayloads. | 830 | * Expected Data Transfer Length and/or Bidirectional Read |
831 | * Expected Data Transfer Length are 0" | ||
832 | * | ||
833 | * For this case, go ahead and clear the unnecssary bits | ||
834 | * to avoid any confusion with ->data_direction. | ||
830 | */ | 835 | */ |
831 | if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { | 836 | hdr->flags &= ~ISCSI_FLAG_CMD_READ; |
832 | hdr->flags &= ~ISCSI_FLAG_CMD_READ; | 837 | hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; |
833 | hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; | ||
834 | goto done; | ||
835 | } | ||
836 | 838 | ||
837 | pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" | 839 | pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" |
838 | " set when Expected Data Transfer Length is 0 for" | 840 | " set when Expected Data Transfer Length is 0 for" |
839 | " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); | 841 | " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); |
840 | return iscsit_add_reject_cmd(cmd, | ||
841 | ISCSI_REASON_BOOKMARK_INVALID, buf); | ||
842 | } | 842 | } |
843 | done: | ||
844 | 843 | ||
845 | if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && | 844 | if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && |
846 | !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { | 845 | !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index e3318edb233d..1c0088fe9e99 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \ | |||
474 | \ | 474 | \ |
475 | if (!capable(CAP_SYS_ADMIN)) \ | 475 | if (!capable(CAP_SYS_ADMIN)) \ |
476 | return -EPERM; \ | 476 | return -EPERM; \ |
477 | \ | 477 | if (count >= sizeof(auth->name)) \ |
478 | return -EINVAL; \ | ||
478 | snprintf(auth->name, sizeof(auth->name), "%s", page); \ | 479 | snprintf(auth->name, sizeof(auth->name), "%s", page); \ |
479 | if (!strncmp("NULL", auth->name, 4)) \ | 480 | if (!strncmp("NULL", auth->name, 4)) \ |
480 | auth->naf_flags &= ~flags; \ | 481 | auth->naf_flags &= ~flags; \ |
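
The configfs store macro above now refuses any write that would not fit in auth->name (including the terminating NUL) instead of letting snprintf() silently truncate the CHAP value. A small stand-alone sketch of the same check, with an invented buffer size:

/*
 * Sketch: reject oversized input before snprintf() rather than storing a
 * silently shortened string.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NAME_LEN 16                      /* stands in for sizeof(auth->name) */

static int store_name(char *dst, size_t dst_len, const char *page, size_t count)
{
	if (count >= dst_len)            /* same check as the configfs store */
		return -EINVAL;
	snprintf(dst, dst_len, "%s", page);
	return 0;
}

int main(void)
{
	char name[NAME_LEN];

	printf("short:    %d\n", store_name(name, sizeof(name), "chapuser", strlen("chapuser")));
	printf("too long: %d\n", store_name(name, sizeof(name), "0123456789abcdef", 16));
	return 0;
}

The second call is rejected up front rather than being stored as a shortened, different credential.
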
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 4eb93b2b6473..e29279e6b577 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -1403,11 +1403,6 @@ old_sess_out: | |||
1403 | 1403 | ||
1404 | out: | 1404 | out: |
1405 | stop = kthread_should_stop(); | 1405 | stop = kthread_should_stop(); |
1406 | if (!stop && signal_pending(current)) { | ||
1407 | spin_lock_bh(&np->np_thread_lock); | ||
1408 | stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); | ||
1409 | spin_unlock_bh(&np->np_thread_lock); | ||
1410 | } | ||
1411 | /* Wait for another socket.. */ | 1406 | /* Wait for another socket.. */ |
1412 | if (!stop) | 1407 | if (!stop) |
1413 | return 1; | 1408 | return 1; |
@@ -1415,7 +1410,6 @@ exit: | |||
1415 | iscsi_stop_login_thread_timer(np); | 1410 | iscsi_stop_login_thread_timer(np); |
1416 | spin_lock_bh(&np->np_thread_lock); | 1411 | spin_lock_bh(&np->np_thread_lock); |
1417 | np->np_thread_state = ISCSI_NP_THREAD_EXIT; | 1412 | np->np_thread_state = ISCSI_NP_THREAD_EXIT; |
1418 | np->np_thread = NULL; | ||
1419 | spin_unlock_bh(&np->np_thread_lock); | 1413 | spin_unlock_bh(&np->np_thread_lock); |
1420 | 1414 | ||
1421 | return 0; | 1415 | return 0; |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 207b340498a3..d06de84b069b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1106 | dev->dev_attrib.block_size = block_size; | 1106 | dev->dev_attrib.block_size = block_size; |
1107 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", | 1107 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", |
1108 | dev, block_size); | 1108 | dev, block_size); |
1109 | |||
1110 | if (dev->dev_attrib.max_bytes_per_io) | ||
1111 | dev->dev_attrib.hw_max_sectors = | ||
1112 | dev->dev_attrib.max_bytes_per_io / block_size; | ||
1113 | |||
1109 | return 0; | 1114 | return 0; |
1110 | } | 1115 | } |
1111 | 1116 | ||
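
The se_dev_set_block_size() hunk above only has an effect for backends that publish max_bytes_per_io (FILEIO, per the following patches): the sector cap has to be rederived whenever the block size changes so the per-command byte budget stays fixed. A quick worked example, assuming the 8 MiB FD_MAX_BYTES value introduced below:

/* hw_max_sectors = max_bytes_per_io / block_size for two common block sizes */
#include <stdio.h>

int main(void)
{
	const unsigned int max_bytes_per_io = 8388608;	/* FD_MAX_BYTES */
	const unsigned int block_sizes[] = { 512, 4096 };

	/* expected output: 512 -> 16384 sectors, 4096 -> 2048 sectors */
	for (int i = 0; i < 2; i++)
		printf("block_size %u -> hw_max_sectors %u\n",
		       block_sizes[i], max_bytes_per_io / block_sizes[i]);
	return 0;
}
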
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0e34cda3271e..78241a53b555 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) | |||
66 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | 66 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" |
67 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | 67 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, |
68 | TARGET_CORE_MOD_VERSION); | 68 | TARGET_CORE_MOD_VERSION); |
69 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" | 69 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", |
70 | " MaxSectors: %u\n", | 70 | hba->hba_id, fd_host->fd_host_id); |
71 | hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); | ||
72 | 71 | ||
73 | return 0; | 72 | return 0; |
74 | } | 73 | } |
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev) | |||
220 | } | 219 | } |
221 | 220 | ||
222 | dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; | 221 | dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; |
223 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; | 222 | dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; |
223 | dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; | ||
224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; | 224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; |
225 | 225 | ||
226 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { | 226 | if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { |
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5bd2399..d7772c167685 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -7,7 +7,10 @@ | |||
7 | #define FD_DEVICE_QUEUE_DEPTH 32 | 7 | #define FD_DEVICE_QUEUE_DEPTH 32 |
8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 |
9 | #define FD_BLOCKSIZE 512 | 9 | #define FD_BLOCKSIZE 512 |
10 | #define FD_MAX_SECTORS 2048 | 10 | /* |
11 | * Limited by the number of iovecs (2048) per vfs_[writev,readv] call | ||
12 | */ | ||
13 | #define FD_MAX_BYTES 8388608 | ||
11 | 14 | ||
12 | #define RRF_EMULATE_CDB 0x01 | 15 | #define RRF_EMULATE_CDB 0x01 |
13 | #define RRF_GOT_LBA 0x02 | 16 | #define RRF_GOT_LBA 0x02 |
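
The new FD_MAX_BYTES constant follows from the iovec limit quoted in its comment: 2048 iovecs, one page each, gives 2048 * 4096 = 8388608 bytes, assuming 4 KiB pages (an assumption, not stated in the patch). The old FD_MAX_SECTORS cap of 2048 corresponded to only 1 MiB at a 512-byte block size. A short check of both figures:

/* Derive FD_MAX_BYTES and compare it with the old sector-based limit. */
#include <stdio.h>

int main(void)
{
	const unsigned long iovecs = 2048, page = 4096;		/* page size assumed */
	const unsigned long old_sectors = 2048, sector = 512;

	printf("FD_MAX_BYTES       = %lu\n", iovecs * page);		/* 8388608 */
	printf("old FD_MAX_SECTORS = %lu bytes\n", old_sectors * sector); /* 1048576 */
	return 0;
}
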
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f697f8baec54..2a573de19a9f 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
279 | acl->se_tpg = tpg; | 279 | acl->se_tpg = tpg; |
280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
281 | spin_lock_init(&acl->stats_lock); | ||
282 | acl->dynamic_node_acl = 1; | 281 | acl->dynamic_node_acl = 1; |
283 | 282 | ||
284 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | 283 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
406 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 405 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
407 | acl->se_tpg = tpg; | 406 | acl->se_tpg = tpg; |
408 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 407 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
409 | spin_lock_init(&acl->stats_lock); | ||
410 | 408 | ||
411 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | 409 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
412 | 410 | ||
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
658 | spin_lock_init(&lun->lun_sep_lock); | 656 | spin_lock_init(&lun->lun_sep_lock); |
659 | init_completion(&lun->lun_ref_comp); | 657 | init_completion(&lun->lun_ref_comp); |
660 | 658 | ||
661 | ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); | ||
662 | if (ret < 0) | ||
663 | return ret; | ||
664 | |||
665 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); | 659 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); |
666 | if (ret < 0) { | 660 | if (ret < 0) |
667 | percpu_ref_cancel_init(&lun->lun_ref); | ||
668 | return ret; | 661 | return ret; |
669 | } | ||
670 | 662 | ||
671 | return 0; | 663 | return 0; |
672 | } | 664 | } |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 0f74945af624..34aacaaae14a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -93,6 +93,7 @@ struct n_tty_data { | |||
93 | size_t canon_head; | 93 | size_t canon_head; |
94 | size_t echo_head; | 94 | size_t echo_head; |
95 | size_t echo_commit; | 95 | size_t echo_commit; |
96 | size_t echo_mark; | ||
96 | DECLARE_BITMAP(char_map, 256); | 97 | DECLARE_BITMAP(char_map, 256); |
97 | 98 | ||
98 | /* private to n_tty_receive_overrun (single-threaded) */ | 99 | /* private to n_tty_receive_overrun (single-threaded) */ |
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata) | |||
336 | { | 337 | { |
337 | ldata->read_head = ldata->canon_head = ldata->read_tail = 0; | 338 | ldata->read_head = ldata->canon_head = ldata->read_tail = 0; |
338 | ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; | 339 | ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; |
340 | ldata->echo_mark = 0; | ||
339 | ldata->line_start = 0; | 341 | ldata->line_start = 0; |
340 | 342 | ||
341 | ldata->erasing = 0; | 343 | ldata->erasing = 0; |
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty) | |||
787 | size_t head; | 789 | size_t head; |
788 | 790 | ||
789 | head = ldata->echo_head; | 791 | head = ldata->echo_head; |
792 | ldata->echo_mark = head; | ||
790 | old = ldata->echo_commit - ldata->echo_tail; | 793 | old = ldata->echo_commit - ldata->echo_tail; |
791 | 794 | ||
792 | /* Process committed echoes if the accumulated # of bytes | 795 | /* Process committed echoes if the accumulated # of bytes |
@@ -810,10 +813,12 @@ static void process_echoes(struct tty_struct *tty) | |||
810 | struct n_tty_data *ldata = tty->disc_data; | 813 | struct n_tty_data *ldata = tty->disc_data; |
811 | size_t echoed; | 814 | size_t echoed; |
812 | 815 | ||
813 | if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail) | 816 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || |
817 | ldata->echo_mark == ldata->echo_tail) | ||
814 | return; | 818 | return; |
815 | 819 | ||
816 | mutex_lock(&ldata->output_lock); | 820 | mutex_lock(&ldata->output_lock); |
821 | ldata->echo_commit = ldata->echo_mark; | ||
817 | echoed = __process_echoes(tty); | 822 | echoed = __process_echoes(tty); |
818 | mutex_unlock(&ldata->output_lock); | 823 | mutex_unlock(&ldata->output_lock); |
819 | 824 | ||
@@ -821,11 +826,13 @@ static void process_echoes(struct tty_struct *tty) | |||
821 | tty->ops->flush_chars(tty); | 826 | tty->ops->flush_chars(tty); |
822 | } | 827 | } |
823 | 828 | ||
829 | /* NB: echo_mark and echo_head should be equivalent here */ | ||
824 | static void flush_echoes(struct tty_struct *tty) | 830 | static void flush_echoes(struct tty_struct *tty) |
825 | { | 831 | { |
826 | struct n_tty_data *ldata = tty->disc_data; | 832 | struct n_tty_data *ldata = tty->disc_data; |
827 | 833 | ||
828 | if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head) | 834 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || |
835 | ldata->echo_commit == ldata->echo_head) | ||
829 | return; | 836 | return; |
830 | 837 | ||
831 | mutex_lock(&ldata->output_lock); | 838 | mutex_lock(&ldata->output_lock); |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4658e3e0ec42..06525f10e364 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) | |||
96 | if (offset == UART_LCR) { | 96 | if (offset == UART_LCR) { |
97 | int tries = 1000; | 97 | int tries = 1000; |
98 | while (tries--) { | 98 | while (tries--) { |
99 | if (value == p->serial_in(p, UART_LCR)) | 99 | unsigned int lcr = p->serial_in(p, UART_LCR); |
100 | if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) | ||
100 | return; | 101 | return; |
101 | dw8250_force_idle(p); | 102 | dw8250_force_idle(p); |
102 | writeb(value, p->membase + (UART_LCR << p->regshift)); | 103 | writeb(value, p->membase + (UART_LCR << p->regshift)); |
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) | |||
132 | if (offset == UART_LCR) { | 133 | if (offset == UART_LCR) { |
133 | int tries = 1000; | 134 | int tries = 1000; |
134 | while (tries--) { | 135 | while (tries--) { |
135 | if (value == p->serial_in(p, UART_LCR)) | 136 | unsigned int lcr = p->serial_in(p, UART_LCR); |
137 | if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) | ||
136 | return; | 138 | return; |
137 | dw8250_force_idle(p); | 139 | dw8250_force_idle(p); |
138 | writel(value, p->membase + (UART_LCR << p->regshift)); | 140 | writel(value, p->membase + (UART_LCR << p->regshift)); |
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); | |||
455 | static const struct acpi_device_id dw8250_acpi_match[] = { | 457 | static const struct acpi_device_id dw8250_acpi_match[] = { |
456 | { "INT33C4", 0 }, | 458 | { "INT33C4", 0 }, |
457 | { "INT33C5", 0 }, | 459 | { "INT33C5", 0 }, |
460 | { "INT3434", 0 }, | ||
461 | { "INT3435", 0 }, | ||
458 | { "80860F0A", 0 }, | 462 | { "80860F0A", 0 }, |
459 | { }, | 463 | { }, |
460 | }; | 464 | }; |
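
In the two dw8250_serial_out hunks above, the LCR write-retry loop compares the value it wrote against what it reads back; if the controller does not store the stick-parity bit, a plain equality test never succeeds and the loop burns all 1000 retries. Masking UART_LCR_SPAR on both sides makes the comparison ignore that bit. A small demonstration, using 0x20 for the stick-parity bit as in the standard 16550 layout (treat the literals here as illustrative):

/* Compare a written LCR value with a read-back that dropped the SPAR bit. */
#include <stdio.h>

#define LCR_SPAR 0x20

int main(void)
{
	unsigned int written   = 0x3b;	/* value with stick parity set */
	unsigned int read_back = 0x1b;	/* controller dropped the SPAR bit */

	printf("plain compare : %s\n",
	       written == read_back ? "match" : "retry forever");
	printf("masked compare: %s\n",
	       (written & ~LCR_SPAR) == (read_back & ~LCR_SPAR) ?
	       "match" : "retry forever");
	return 0;
}
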
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index e46e9f3f19b9..f619ad5b5eae 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c | |||
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) | |||
240 | continue; | 240 | continue; |
241 | } | 241 | } |
242 | 242 | ||
243 | #ifdef SUPPORT_SYSRQ | ||
243 | /* | 244 | /* |
244 | * uart_handle_sysrq_char() doesn't work if | 245 | * uart_handle_sysrq_char() doesn't work if |
245 | * spinlocked, for some reason | 246 | * spinlocked, for some reason |
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) | |||
253 | } | 254 | } |
254 | spin_lock(&port->lock); | 255 | spin_lock(&port->lock); |
255 | } | 256 | } |
257 | #endif | ||
256 | 258 | ||
257 | port->icount.rx++; | 259 | port->icount.rx++; |
258 | 260 | ||
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 22fad8ad5ac2..d8a55e87877f 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c | |||
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) | |||
86 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); | 86 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); |
87 | } | 87 | } |
88 | 88 | ||
89 | /* | ||
90 | * ldsem_cmpxchg() updates @*old with the last-known sem->count value. | ||
91 | * Returns 1 if count was successfully changed; @*old will have @new value. | ||
92 | * Returns 0 if count was not changed; @*old will have most recent sem->count | ||
93 | */ | ||
89 | static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) | 94 | static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) |
90 | { | 95 | { |
91 | long tmp = *old; | 96 | long tmp = atomic_long_cmpxchg(&sem->count, *old, new); |
92 | *old = atomic_long_cmpxchg(&sem->count, *old, new); | 97 | if (tmp == *old) { |
93 | return *old == tmp; | 98 | *old = new; |
99 | return 1; | ||
100 | } else { | ||
101 | *old = tmp; | ||
102 | return 0; | ||
103 | } | ||
94 | } | 104 | } |
95 | 105 | ||
96 | /* | 106 | /* |
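
The rewritten ldsem_cmpxchg() above performs a single atomic compare-and-swap and encodes its outcome exactly as the new comment states: on success it returns 1 with *old set to the new value, on failure it returns 0 with *old refreshed to the count it actually observed, so callers can retry without rereading sem->count. A user-space model of that contract, built on the compiler's compare-exchange builtin:

/*
 * Model of the ldsem_cmpxchg() contract: on failure *old is updated with
 * the value actually seen, so the retry does not need an extra load.
 */
#include <stdio.h>

static long count = 4;

static int ldsem_cmpxchg_model(long *old, long new_val)
{
	if (__atomic_compare_exchange_n(&count, old, new_val, 0,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
		*old = new_val;		/* success: *old mirrors the new count */
		return 1;
	}
	return 0;			/* failure: *old now holds the observed count */
}

int main(void)
{
	long seen = 3;				/* stale snapshot, first try fails */

	while (!ldsem_cmpxchg_model(&seen, seen + 1))
		;				/* retry with the refreshed value */
	printf("count is now %ld (seen %ld)\n", count, seen);
	return 0;
}

The first attempt fails because the snapshot is stale; the retry uses the refreshed value and succeeds, which is the behaviour the old open-coded version only approximated.
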
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 5d8981c5235e..6e73f8cd60e5 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) | |||
642 | : CI_ROLE_GADGET; | 642 | : CI_ROLE_GADGET; |
643 | } | 643 | } |
644 | 644 | ||
645 | /* only update vbus status for peripheral */ | ||
646 | if (ci->role == CI_ROLE_GADGET) | ||
647 | ci_handle_vbus_change(ci); | ||
648 | |||
645 | ret = ci_role_start(ci, ci->role); | 649 | ret = ci_role_start(ci, ci->role); |
646 | if (ret) { | 650 | if (ret) { |
647 | dev_err(dev, "can't start %s role\n", ci_role(ci)->name); | 651 | dev_err(dev, "can't start %s role\n", ci_role(ci)->name); |
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 59e6020ea753..526cd77563d8 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c | |||
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci) | |||
88 | return ret; | 88 | return ret; |
89 | 89 | ||
90 | disable_reg: | 90 | disable_reg: |
91 | regulator_disable(ci->platdata->reg_vbus); | 91 | if (ci->platdata->reg_vbus) |
92 | regulator_disable(ci->platdata->reg_vbus); | ||
92 | 93 | ||
93 | put_hcd: | 94 | put_hcd: |
94 | usb_put_hcd(hcd); | 95 | usb_put_hcd(hcd); |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index b34c81969cba..69d20fbb38a2 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci) | |||
1795 | pm_runtime_no_callbacks(&ci->gadget.dev); | 1795 | pm_runtime_no_callbacks(&ci->gadget.dev); |
1796 | pm_runtime_enable(&ci->gadget.dev); | 1796 | pm_runtime_enable(&ci->gadget.dev); |
1797 | 1797 | ||
1798 | /* Update ci->vbus_active */ | ||
1799 | ci_handle_vbus_change(ci); | ||
1800 | |||
1801 | return retval; | 1798 | return retval; |
1802 | 1799 | ||
1803 | destroy_eps: | 1800 | destroy_eps: |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3e7560f004f8..e8404319ca68 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf) | |||
1515 | 1515 | ||
1516 | static const struct usb_device_id acm_ids[] = { | 1516 | static const struct usb_device_id acm_ids[] = { |
1517 | /* quirky and broken devices */ | 1517 | /* quirky and broken devices */ |
1518 | { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ | ||
1519 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ | ||
1518 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ | 1520 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ |
1519 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | 1521 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ |
1520 | }, | 1522 | }, |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 4d387596f3f0..0b23a8639311 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on) | |||
854 | { | 854 | { |
855 | /* need autopm_get/put here to ensure the usbcore sees the new value */ | 855 | /* need autopm_get/put here to ensure the usbcore sees the new value */ |
856 | int rv = usb_autopm_get_interface(intf); | 856 | int rv = usb_autopm_get_interface(intf); |
857 | if (rv < 0) | ||
858 | goto err; | ||
859 | 857 | ||
860 | intf->needs_remote_wakeup = on; | 858 | intf->needs_remote_wakeup = on; |
861 | usb_autopm_put_interface(intf); | 859 | if (!rv) |
862 | err: | 860 | usb_autopm_put_interface(intf); |
863 | return rv; | 861 | return 0; |
864 | } | 862 | } |
865 | 863 | ||
866 | static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) | 864 | static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a7c04e24ca48..bd9dc3504b51 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -4832,8 +4832,9 @@ static void hub_events(void) | |||
4832 | hub->ports[i - 1]->child; | 4832 | hub->ports[i - 1]->child; |
4833 | 4833 | ||
4834 | dev_dbg(hub_dev, "warm reset port %d\n", i); | 4834 | dev_dbg(hub_dev, "warm reset port %d\n", i); |
4835 | if (!udev || !(portstatus & | 4835 | if (!udev || |
4836 | USB_PORT_STAT_CONNECTION)) { | 4836 | !(portstatus & USB_PORT_STAT_CONNECTION) || |
4837 | udev->state == USB_STATE_NOTATTACHED) { | ||
4837 | status = hub_port_reset(hub, i, | 4838 | status = hub_port_reset(hub, i, |
4838 | NULL, HUB_BH_RESET_TIME, | 4839 | NULL, HUB_BH_RESET_TIME, |
4839 | true); | 4840 | true); |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 74f9cf02da07..a49217ae3533 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev) | |||
455 | if (IS_ERR(regs)) | 455 | if (IS_ERR(regs)) |
456 | return PTR_ERR(regs); | 456 | return PTR_ERR(regs); |
457 | 457 | ||
458 | usb_phy_set_suspend(dwc->usb2_phy, 0); | ||
459 | usb_phy_set_suspend(dwc->usb3_phy, 0); | ||
460 | |||
461 | spin_lock_init(&dwc->lock); | 458 | spin_lock_init(&dwc->lock); |
462 | platform_set_drvdata(pdev, dwc); | 459 | platform_set_drvdata(pdev, dwc); |
463 | 460 | ||
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev) | |||
488 | goto err0; | 485 | goto err0; |
489 | } | 486 | } |
490 | 487 | ||
488 | usb_phy_set_suspend(dwc->usb2_phy, 0); | ||
489 | usb_phy_set_suspend(dwc->usb3_phy, 0); | ||
490 | |||
491 | ret = dwc3_event_buffers_setup(dwc); | 491 | ret = dwc3_event_buffers_setup(dwc); |
492 | if (ret) { | 492 | if (ret) { |
493 | dev_err(dwc->dev, "failed to setup event buffers\n"); | 493 | dev_err(dwc->dev, "failed to setup event buffers\n"); |
@@ -569,6 +569,8 @@ err2: | |||
569 | dwc3_event_buffers_cleanup(dwc); | 569 | dwc3_event_buffers_cleanup(dwc); |
570 | 570 | ||
571 | err1: | 571 | err1: |
572 | usb_phy_set_suspend(dwc->usb2_phy, 1); | ||
573 | usb_phy_set_suspend(dwc->usb3_phy, 1); | ||
572 | dwc3_core_exit(dwc); | 574 | dwc3_core_exit(dwc); |
573 | 575 | ||
574 | err0: | 576 | err0: |
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 95f7649c71a7..21a352079bc2 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c | |||
@@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, | |||
459 | dep = dwc3_wIndex_to_dep(dwc, wIndex); | 459 | dep = dwc3_wIndex_to_dep(dwc, wIndex); |
460 | if (!dep) | 460 | if (!dep) |
461 | return -EINVAL; | 461 | return -EINVAL; |
462 | if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) | ||
463 | break; | ||
462 | ret = __dwc3_gadget_ep_set_halt(dep, set); | 464 | ret = __dwc3_gadget_ep_set_halt(dep, set); |
463 | if (ret) | 465 | if (ret) |
464 | return -EINVAL; | 466 | return -EINVAL; |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 5452c0fce360..02e44fcaf205 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) | |||
1200 | else | 1200 | else |
1201 | dep->flags |= DWC3_EP_STALL; | 1201 | dep->flags |= DWC3_EP_STALL; |
1202 | } else { | 1202 | } else { |
1203 | if (dep->flags & DWC3_EP_WEDGE) | ||
1204 | return 0; | ||
1205 | |||
1206 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | 1203 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, |
1207 | DWC3_DEPCMD_CLEARSTALL, &params); | 1204 | DWC3_DEPCMD_CLEARSTALL, &params); |
1208 | if (ret) | 1205 | if (ret) |
@@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) | |||
1210 | value ? "set" : "clear", | 1207 | value ? "set" : "clear", |
1211 | dep->name); | 1208 | dep->name); |
1212 | else | 1209 | else |
1213 | dep->flags &= ~DWC3_EP_STALL; | 1210 | dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); |
1214 | } | 1211 | } |
1215 | 1212 | ||
1216 | return ret; | 1213 | return ret; |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index a91e6422f930..f66d96ad1f51 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -682,6 +682,7 @@ config USB_CONFIGFS_PHONET | |||
682 | config USB_CONFIGFS_MASS_STORAGE | 682 | config USB_CONFIGFS_MASS_STORAGE |
683 | boolean "Mass storage" | 683 | boolean "Mass storage" |
684 | depends on USB_CONFIGFS | 684 | depends on USB_CONFIGFS |
685 | depends on BLOCK | ||
685 | select USB_F_MASS_STORAGE | 686 | select USB_F_MASS_STORAGE |
686 | help | 687 | help |
687 | The Mass Storage Gadget acts as a USB Mass Storage disk drive. | 688 | The Mass Storage Gadget acts as a USB Mass Storage disk drive. |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 3e7ae707f691..2018ba1a2172 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev) | |||
593 | bitmap_zero(f->endpoints, 32); | 593 | bitmap_zero(f->endpoints, 32); |
594 | } | 594 | } |
595 | cdev->config = NULL; | 595 | cdev->config = NULL; |
596 | cdev->delayed_status = 0; | ||
596 | } | 597 | } |
597 | 598 | ||
598 | static int set_config(struct usb_composite_dev *cdev, | 599 | static int set_config(struct usb_composite_dev *cdev, |
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 774e8b89cdb5..241fc873ffa4 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c | |||
@@ -1304,7 +1304,7 @@ static struct ffs_data *ffs_data_new(void) | |||
1304 | { | 1304 | { |
1305 | struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); | 1305 | struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); |
1306 | if (unlikely(!ffs)) | 1306 | if (unlikely(!ffs)) |
1307 | return 0; | 1307 | return NULL; |
1308 | 1308 | ||
1309 | ENTER(); | 1309 | ENTER(); |
1310 | 1310 | ||
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index a03ba2c83589..b96393908860 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c | |||
@@ -523,7 +523,7 @@ static int fsg_setup(struct usb_function *f, | |||
523 | */ | 523 | */ |
524 | DBG(fsg, "bulk reset request\n"); | 524 | DBG(fsg, "bulk reset request\n"); |
525 | raise_exception(fsg->common, FSG_STATE_RESET); | 525 | raise_exception(fsg->common, FSG_STATE_RESET); |
526 | return DELAYED_STATUS; | 526 | return USB_GADGET_DELAYED_STATUS; |
527 | 527 | ||
528 | case US_BULK_GET_MAX_LUN: | 528 | case US_BULK_GET_MAX_LUN: |
529 | if (ctrl->bRequestType != | 529 | if (ctrl->bRequestType != |
@@ -602,13 +602,14 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) | |||
602 | return true; | 602 | return true; |
603 | } | 603 | } |
604 | 604 | ||
605 | static int sleep_thread(struct fsg_common *common) | 605 | static int sleep_thread(struct fsg_common *common, bool can_freeze) |
606 | { | 606 | { |
607 | int rc = 0; | 607 | int rc = 0; |
608 | 608 | ||
609 | /* Wait until a signal arrives or we are woken up */ | 609 | /* Wait until a signal arrives or we are woken up */ |
610 | for (;;) { | 610 | for (;;) { |
611 | try_to_freeze(); | 611 | if (can_freeze) |
612 | try_to_freeze(); | ||
612 | set_current_state(TASK_INTERRUPTIBLE); | 613 | set_current_state(TASK_INTERRUPTIBLE); |
613 | if (signal_pending(current)) { | 614 | if (signal_pending(current)) { |
614 | rc = -EINTR; | 615 | rc = -EINTR; |
@@ -682,7 +683,7 @@ static int do_read(struct fsg_common *common) | |||
682 | /* Wait for the next buffer to become available */ | 683 | /* Wait for the next buffer to become available */ |
683 | bh = common->next_buffhd_to_fill; | 684 | bh = common->next_buffhd_to_fill; |
684 | while (bh->state != BUF_STATE_EMPTY) { | 685 | while (bh->state != BUF_STATE_EMPTY) { |
685 | rc = sleep_thread(common); | 686 | rc = sleep_thread(common, false); |
686 | if (rc) | 687 | if (rc) |
687 | return rc; | 688 | return rc; |
688 | } | 689 | } |
@@ -937,7 +938,7 @@ static int do_write(struct fsg_common *common) | |||
937 | } | 938 | } |
938 | 939 | ||
939 | /* Wait for something to happen */ | 940 | /* Wait for something to happen */ |
940 | rc = sleep_thread(common); | 941 | rc = sleep_thread(common, false); |
941 | if (rc) | 942 | if (rc) |
942 | return rc; | 943 | return rc; |
943 | } | 944 | } |
@@ -1504,7 +1505,7 @@ static int throw_away_data(struct fsg_common *common) | |||
1504 | } | 1505 | } |
1505 | 1506 | ||
1506 | /* Otherwise wait for something to happen */ | 1507 | /* Otherwise wait for something to happen */ |
1507 | rc = sleep_thread(common); | 1508 | rc = sleep_thread(common, true); |
1508 | if (rc) | 1509 | if (rc) |
1509 | return rc; | 1510 | return rc; |
1510 | } | 1511 | } |
@@ -1625,7 +1626,7 @@ static int send_status(struct fsg_common *common) | |||
1625 | /* Wait for the next buffer to become available */ | 1626 | /* Wait for the next buffer to become available */ |
1626 | bh = common->next_buffhd_to_fill; | 1627 | bh = common->next_buffhd_to_fill; |
1627 | while (bh->state != BUF_STATE_EMPTY) { | 1628 | while (bh->state != BUF_STATE_EMPTY) { |
1628 | rc = sleep_thread(common); | 1629 | rc = sleep_thread(common, true); |
1629 | if (rc) | 1630 | if (rc) |
1630 | return rc; | 1631 | return rc; |
1631 | } | 1632 | } |
@@ -1828,7 +1829,7 @@ static int do_scsi_command(struct fsg_common *common) | |||
1828 | bh = common->next_buffhd_to_fill; | 1829 | bh = common->next_buffhd_to_fill; |
1829 | common->next_buffhd_to_drain = bh; | 1830 | common->next_buffhd_to_drain = bh; |
1830 | while (bh->state != BUF_STATE_EMPTY) { | 1831 | while (bh->state != BUF_STATE_EMPTY) { |
1831 | rc = sleep_thread(common); | 1832 | rc = sleep_thread(common, true); |
1832 | if (rc) | 1833 | if (rc) |
1833 | return rc; | 1834 | return rc; |
1834 | } | 1835 | } |
@@ -2174,7 +2175,7 @@ static int get_next_command(struct fsg_common *common) | |||
2174 | /* Wait for the next buffer to become available */ | 2175 | /* Wait for the next buffer to become available */ |
2175 | bh = common->next_buffhd_to_fill; | 2176 | bh = common->next_buffhd_to_fill; |
2176 | while (bh->state != BUF_STATE_EMPTY) { | 2177 | while (bh->state != BUF_STATE_EMPTY) { |
2177 | rc = sleep_thread(common); | 2178 | rc = sleep_thread(common, true); |
2178 | if (rc) | 2179 | if (rc) |
2179 | return rc; | 2180 | return rc; |
2180 | } | 2181 | } |
@@ -2193,7 +2194,7 @@ static int get_next_command(struct fsg_common *common) | |||
2193 | 2194 | ||
2194 | /* Wait for the CBW to arrive */ | 2195 | /* Wait for the CBW to arrive */ |
2195 | while (bh->state != BUF_STATE_FULL) { | 2196 | while (bh->state != BUF_STATE_FULL) { |
2196 | rc = sleep_thread(common); | 2197 | rc = sleep_thread(common, true); |
2197 | if (rc) | 2198 | if (rc) |
2198 | return rc; | 2199 | return rc; |
2199 | } | 2200 | } |
@@ -2379,7 +2380,7 @@ static void handle_exception(struct fsg_common *common) | |||
2379 | } | 2380 | } |
2380 | if (num_active == 0) | 2381 | if (num_active == 0) |
2381 | break; | 2382 | break; |
2382 | if (sleep_thread(common)) | 2383 | if (sleep_thread(common, true)) |
2383 | return; | 2384 | return; |
2384 | } | 2385 | } |
2385 | 2386 | ||
@@ -2516,7 +2517,7 @@ static int fsg_main_thread(void *common_) | |||
2516 | } | 2517 | } |
2517 | 2518 | ||
2518 | if (!common->running) { | 2519 | if (!common->running) { |
2519 | sleep_thread(common); | 2520 | sleep_thread(common, true); |
2520 | continue; | 2521 | continue; |
2521 | } | 2522 | } |
2522 | 2523 | ||
@@ -3111,7 +3112,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) | |||
3111 | fsg->common->can_stall); | 3112 | fsg->common->can_stall); |
3112 | if (ret) | 3113 | if (ret) |
3113 | return ret; | 3114 | return ret; |
3114 | fsg_common_set_inquiry_string(fsg->common, 0, 0); | 3115 | fsg_common_set_inquiry_string(fsg->common, NULL, NULL); |
3115 | ret = fsg_common_run_thread(fsg->common); | 3116 | ret = fsg_common_run_thread(fsg->common); |
3116 | if (ret) | 3117 | if (ret) |
3117 | return ret; | 3118 | return ret; |
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index 0ac6064aa3b8..409a3c45a36a 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
@@ -54,6 +54,7 @@ | |||
54 | */ | 54 | */ |
55 | #ifdef CONFIG_ARCH_PXA | 55 | #ifdef CONFIG_ARCH_PXA |
56 | #include <mach/pxa25x-udc.h> | 56 | #include <mach/pxa25x-udc.h> |
57 | #include <mach/hardware.h> | ||
57 | #endif | 58 | #endif |
58 | 59 | ||
59 | #ifdef CONFIG_ARCH_LUBBOCK | 60 | #ifdef CONFIG_ARCH_LUBBOCK |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 9875d9c0823f..e20bc109fdd7 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
@@ -1180,6 +1180,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg, | |||
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); | 1182 | static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); |
1183 | static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg); | ||
1183 | 1184 | ||
1184 | /** | 1185 | /** |
1185 | * s3c_hsotg_process_control - process a control request | 1186 | * s3c_hsotg_process_control - process a control request |
@@ -1221,6 +1222,7 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, | |||
1221 | if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { | 1222 | if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { |
1222 | switch (ctrl->bRequest) { | 1223 | switch (ctrl->bRequest) { |
1223 | case USB_REQ_SET_ADDRESS: | 1224 | case USB_REQ_SET_ADDRESS: |
1225 | s3c_hsotg_disconnect(hsotg); | ||
1224 | dcfg = readl(hsotg->regs + DCFG); | 1226 | dcfg = readl(hsotg->regs + DCFG); |
1225 | dcfg &= ~DCFG_DevAddr_MASK; | 1227 | dcfg &= ~DCFG_DevAddr_MASK; |
1226 | dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT; | 1228 | dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT; |
@@ -1245,7 +1247,9 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, | |||
1245 | /* as a fallback, try delivering it to the driver to deal with */ | 1247 | /* as a fallback, try delivering it to the driver to deal with */ |
1246 | 1248 | ||
1247 | if (ret == 0 && hsotg->driver) { | 1249 | if (ret == 0 && hsotg->driver) { |
1250 | spin_unlock(&hsotg->lock); | ||
1248 | ret = hsotg->driver->setup(&hsotg->gadget, ctrl); | 1251 | ret = hsotg->driver->setup(&hsotg->gadget, ctrl); |
1252 | spin_lock(&hsotg->lock); | ||
1249 | if (ret < 0) | 1253 | if (ret < 0) |
1250 | dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); | 1254 | dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); |
1251 | } | 1255 | } |
@@ -1308,10 +1312,12 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep, | |||
1308 | return; | 1312 | return; |
1309 | } | 1313 | } |
1310 | 1314 | ||
1315 | spin_lock(&hsotg->lock); | ||
1311 | if (req->actual == 0) | 1316 | if (req->actual == 0) |
1312 | s3c_hsotg_enqueue_setup(hsotg); | 1317 | s3c_hsotg_enqueue_setup(hsotg); |
1313 | else | 1318 | else |
1314 | s3c_hsotg_process_control(hsotg, req->buf); | 1319 | s3c_hsotg_process_control(hsotg, req->buf); |
1320 | spin_unlock(&hsotg->lock); | ||
1315 | } | 1321 | } |
1316 | 1322 | ||
1317 | /** | 1323 | /** |
@@ -2533,7 +2539,6 @@ irq_retry: | |||
2533 | writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS); | 2539 | writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS); |
2534 | 2540 | ||
2535 | call_gadget(hsotg, suspend); | 2541 | call_gadget(hsotg, suspend); |
2536 | s3c_hsotg_disconnect(hsotg); | ||
2537 | } | 2542 | } |
2538 | 2543 | ||
2539 | if (gintsts & GINTSTS_WkUpInt) { | 2544 | if (gintsts & GINTSTS_WkUpInt) { |
diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h index c74c2fdbd56e..70c891469f57 100644 --- a/drivers/usb/gadget/storage_common.h +++ b/drivers/usb/gadget/storage_common.h | |||
@@ -119,10 +119,6 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun) | |||
119 | return curlun->filp != NULL; | 119 | return curlun->filp != NULL; |
120 | } | 120 | } |
121 | 121 | ||
122 | /* Big enough to hold our biggest descriptor */ | ||
123 | #define EP0_BUFSIZE 256 | ||
124 | #define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ | ||
125 | |||
126 | /* Default size of buffer length. */ | 122 | /* Default size of buffer length. */ |
127 | #define FSG_BUFLEN ((u32)16384) | 123 | #define FSG_BUFLEN ((u32)16384) |
128 | 124 | ||
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index 6c3d7950d2a9..0f8aad78b54f 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c | |||
@@ -370,7 +370,7 @@ err: | |||
370 | return -ENOMEM; | 370 | return -ENOMEM; |
371 | } | 371 | } |
372 | 372 | ||
373 | void bot_cleanup_old_alt(struct f_uas *fu) | 373 | static void bot_cleanup_old_alt(struct f_uas *fu) |
374 | { | 374 | { |
375 | if (!(fu->flags & USBG_ENABLED)) | 375 | if (!(fu->flags & USBG_ENABLED)) |
376 | return; | 376 | return; |
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c index 0dd07ae1555d..f49b0b61ecc8 100644 --- a/drivers/usb/gadget/zero.c +++ b/drivers/usb/gadget/zero.c | |||
@@ -91,17 +91,17 @@ static struct usb_zero_options gzero_options = { | |||
91 | * functional coverage for the "USBCV" test harness from USB-IF. | 91 | * functional coverage for the "USBCV" test harness from USB-IF. |
92 | * It's always set if OTG mode is enabled. | 92 | * It's always set if OTG mode is enabled. |
93 | */ | 93 | */ |
94 | unsigned autoresume = DEFAULT_AUTORESUME; | 94 | static unsigned autoresume = DEFAULT_AUTORESUME; |
95 | module_param(autoresume, uint, S_IRUGO); | 95 | module_param(autoresume, uint, S_IRUGO); |
96 | MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup"); | 96 | MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup"); |
97 | 97 | ||
98 | /* Maximum Autoresume time */ | 98 | /* Maximum Autoresume time */ |
99 | unsigned max_autoresume; | 99 | static unsigned max_autoresume; |
100 | module_param(max_autoresume, uint, S_IRUGO); | 100 | module_param(max_autoresume, uint, S_IRUGO); |
101 | MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup"); | 101 | MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup"); |
102 | 102 | ||
103 | /* Interval between two remote wakeups */ | 103 | /* Interval between two remote wakeups */ |
104 | unsigned autoresume_interval_ms; | 104 | static unsigned autoresume_interval_ms; |
105 | module_param(autoresume_interval_ms, uint, S_IRUGO); | 105 | module_param(autoresume_interval_ms, uint, S_IRUGO); |
106 | MODULE_PARM_DESC(autoresume_interval_ms, | 106 | MODULE_PARM_DESC(autoresume_interval_ms, |
107 | "milliseconds to increase successive wakeup delays"); | 107 | "milliseconds to increase successive wakeup delays"); |
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 418444ebb1b8..8c356af79409 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, | |||
136 | struct ohci_hcd *ohci; | 136 | struct ohci_hcd *ohci; |
137 | int retval; | 137 | int retval; |
138 | struct usb_hcd *hcd = NULL; | 138 | struct usb_hcd *hcd = NULL; |
139 | 139 | struct device *dev = &pdev->dev; | |
140 | if (pdev->num_resources != 2) { | 140 | struct resource *res; |
141 | pr_debug("hcd probe: invalid num_resources"); | 141 | int irq; |
142 | return -ENODEV; | 142 | |
143 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
144 | if (!res) { | ||
145 | dev_dbg(dev, "hcd probe: missing memory resource\n"); | ||
146 | return -ENXIO; | ||
143 | } | 147 | } |
144 | 148 | ||
145 | if ((pdev->resource[0].flags != IORESOURCE_MEM) | 149 | irq = platform_get_irq(pdev, 0); |
146 | || (pdev->resource[1].flags != IORESOURCE_IRQ)) { | 150 | if (irq < 0) { |
147 | pr_debug("hcd probe: invalid resource type\n"); | 151 | dev_dbg(dev, "hcd probe: missing irq resource\n"); |
148 | return -ENODEV; | 152 | return irq; |
149 | } | 153 | } |
150 | 154 | ||
151 | hcd = usb_create_hcd(driver, &pdev->dev, "at91"); | 155 | hcd = usb_create_hcd(driver, &pdev->dev, "at91"); |
152 | if (!hcd) | 156 | if (!hcd) |
153 | return -ENOMEM; | 157 | return -ENOMEM; |
154 | hcd->rsrc_start = pdev->resource[0].start; | 158 | hcd->rsrc_start = res->start; |
155 | hcd->rsrc_len = resource_size(&pdev->resource[0]); | 159 | hcd->rsrc_len = resource_size(res); |
156 | 160 | ||
157 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | 161 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { |
158 | pr_debug("request_mem_region failed\n"); | 162 | pr_debug("request_mem_region failed\n"); |
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, | |||
199 | ohci->num_ports = board->ports; | 203 | ohci->num_ports = board->ports; |
200 | at91_start_hc(pdev); | 204 | at91_start_hc(pdev); |
201 | 205 | ||
202 | retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); | 206 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); |
203 | if (retval == 0) | 207 | if (retval == 0) |
204 | return retval; | 208 | return retval; |
205 | 209 | ||
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index e89ac4d4b87e..9b7435f0dcd6 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index b8dffd59eb25..73f5208714a4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
128 | * any other sleep) on Haswell machines with LPT and LPT-LP | 128 | * any other sleep) on Haswell machines with LPT and LPT-LP |
129 | * with the new Intel BIOS | 129 | * with the new Intel BIOS |
130 | */ | 130 | */ |
131 | xhci->quirks |= XHCI_SPURIOUS_WAKEUP; | 131 | /* Limit the quirk to only known vendors, as this triggers |
132 | * yet another BIOS bug on some other machines | ||
133 | * https://bugzilla.kernel.org/show_bug.cgi?id=66171 | ||
134 | */ | ||
135 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) | ||
136 | xhci->quirks |= XHCI_SPURIOUS_WAKEUP; | ||
132 | } | 137 | } |
133 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && | 138 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
134 | pdev->device == PCI_DEVICE_ID_ASROCK_P67) { | 139 | pdev->device == PCI_DEVICE_ID_ASROCK_P67) { |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1e2f3f495843..53c2e296467f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -2973,8 +2973,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
2973 | } | 2973 | } |
2974 | 2974 | ||
2975 | while (1) { | 2975 | while (1) { |
2976 | if (room_on_ring(xhci, ep_ring, num_trbs)) | 2976 | if (room_on_ring(xhci, ep_ring, num_trbs)) { |
2977 | break; | 2977 | union xhci_trb *trb = ep_ring->enqueue; |
2978 | unsigned int usable = ep_ring->enq_seg->trbs + | ||
2979 | TRBS_PER_SEGMENT - 1 - trb; | ||
2980 | u32 nop_cmd; | ||
2981 | |||
2982 | /* | ||
2983 | * Section 4.11.7.1 TD Fragments states that a link | ||
2984 | * TRB must only occur at the boundary between | ||
2985 | * data bursts (eg 512 bytes for 480M). | ||
2986 | * While it is possible to split a large fragment | ||
2987 | * we don't know the size yet. | ||
2988 | * Simplest solution is to fill the trb before the | ||
2989 | * LINK with nop commands. | ||
2990 | */ | ||
2991 | if (num_trbs == 1 || num_trbs <= usable || usable == 0) | ||
2992 | break; | ||
2993 | |||
2994 | if (ep_ring->type != TYPE_BULK) | ||
2995 | /* | ||
2996 | * While isoc transfers might have a buffer that | ||
2997 | * crosses a 64k boundary it is unlikely. | ||
2998 | * Since we can't add NOPs without generating | ||
2999 | * gaps in the traffic just hope it never | ||
3000 | * happens at the end of the ring. | ||
3001 | * This could be fixed by writing a LINK TRB | ||
3002 | * instead of the first NOP - however the | ||
3003 | * TRB_TYPE_LINK_LE32() calls would all need | ||
3004 | * changing to check the ring length. | ||
3005 | */ | ||
3006 | break; | ||
3007 | |||
3008 | if (num_trbs >= TRBS_PER_SEGMENT) { | ||
3009 | xhci_err(xhci, "Too many fragments %d, max %d\n", | ||
3010 | num_trbs, TRBS_PER_SEGMENT - 1); | ||
3011 | return -ENOMEM; | ||
3012 | } | ||
3013 | |||
3014 | nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | | ||
3015 | ep_ring->cycle_state); | ||
3016 | ep_ring->num_trbs_free -= usable; | ||
3017 | do { | ||
3018 | trb->generic.field[0] = 0; | ||
3019 | trb->generic.field[1] = 0; | ||
3020 | trb->generic.field[2] = 0; | ||
3021 | trb->generic.field[3] = nop_cmd; | ||
3022 | trb++; | ||
3023 | } while (--usable); | ||
3024 | ep_ring->enqueue = trb; | ||
3025 | if (room_on_ring(xhci, ep_ring, num_trbs)) | ||
3026 | break; | ||
3027 | } | ||
2978 | 3028 | ||
2979 | if (ep_ring == xhci->cmd_ring) { | 3029 | if (ep_ring == xhci->cmd_ring) { |
2980 | xhci_err(xhci, "Do not support expand command ring\n"); | 3030 | xhci_err(xhci, "Do not support expand command ring\n"); |
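
The prepare_ring() change above pads the tail of a segment with NOP TRBs whenever a multi-TRB bulk TD would otherwise be split by the link TRB, since per the quoted section a link may only occur on a data-burst boundary. Here "usable" counts the slots left before the link TRB, and padding happens only when the TD needs more than one TRB, does not fit in those slots, and at least one slot remains. A worked example of that decision (TRBS_PER_SEGMENT is taken as 64; treat it as an assumption):

/* Reproduce the "pad with NOPs or enqueue as is" decision for a few cases. */
#include <stdio.h>

#define TRBS_PER_SEGMENT 64

static void decide(unsigned int enq_idx, unsigned int num_trbs)
{
	unsigned int usable = TRBS_PER_SEGMENT - 1 - enq_idx;

	if (num_trbs == 1 || num_trbs <= usable || usable == 0)
		printf("idx %2u, %2u TRBs: enqueue as is (usable %u)\n",
		       enq_idx, num_trbs, usable);
	else
		printf("idx %2u, %2u TRBs: pad %u NOPs up to the link TRB first\n",
		       enq_idx, num_trbs, usable);
}

int main(void)
{
	decide(10, 4);	/* plenty of room, no padding              */
	decide(60, 4);	/* only 3 usable slots left -> pad 3 NOPs  */
	decide(63, 4);	/* already at the link TRB -> no padding   */
	return 0;
}
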
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 0a43329569d1..4d4499b80449 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1809,7 +1809,6 @@ static void musb_free(struct musb *musb) | |||
1809 | disable_irq_wake(musb->nIrq); | 1809 | disable_irq_wake(musb->nIrq); |
1810 | free_irq(musb->nIrq, musb); | 1810 | free_irq(musb->nIrq, musb); |
1811 | } | 1811 | } |
1812 | cancel_work_sync(&musb->irq_work); | ||
1813 | 1812 | ||
1814 | musb_host_free(musb); | 1813 | musb_host_free(musb); |
1815 | } | 1814 | } |
@@ -1896,6 +1895,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
1896 | musb_platform_disable(musb); | 1895 | musb_platform_disable(musb); |
1897 | musb_generic_disable(musb); | 1896 | musb_generic_disable(musb); |
1898 | 1897 | ||
1898 | /* Init IRQ workqueue before request_irq */ | ||
1899 | INIT_WORK(&musb->irq_work, musb_irq_work); | ||
1900 | |||
1899 | /* setup musb parts of the core (especially endpoints) */ | 1901 | /* setup musb parts of the core (especially endpoints) */ |
1900 | status = musb_core_init(plat->config->multipoint | 1902 | status = musb_core_init(plat->config->multipoint |
1901 | ? MUSB_CONTROLLER_MHDRC | 1903 | ? MUSB_CONTROLLER_MHDRC |
@@ -1905,9 +1907,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
1905 | 1907 | ||
1906 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); | 1908 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); |
1907 | 1909 | ||
1908 | /* Init IRQ workqueue before request_irq */ | ||
1909 | INIT_WORK(&musb->irq_work, musb_irq_work); | ||
1910 | |||
1911 | /* attach to the IRQ */ | 1910 | /* attach to the IRQ */ |
1912 | if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { | 1911 | if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { |
1913 | dev_err(dev, "request_irq %d failed!\n", nIrq); | 1912 | dev_err(dev, "request_irq %d failed!\n", nIrq); |
@@ -1981,6 +1980,7 @@ fail4: | |||
1981 | musb_host_cleanup(musb); | 1980 | musb_host_cleanup(musb); |
1982 | 1981 | ||
1983 | fail3: | 1982 | fail3: |
1983 | cancel_work_sync(&musb->irq_work); | ||
1984 | if (musb->dma_controller) | 1984 | if (musb->dma_controller) |
1985 | dma_controller_destroy(musb->dma_controller); | 1985 | dma_controller_destroy(musb->dma_controller); |
1986 | fail2_5: | 1986 | fail2_5: |
@@ -2043,6 +2043,7 @@ static int musb_remove(struct platform_device *pdev) | |||
2043 | if (musb->dma_controller) | 2043 | if (musb->dma_controller) |
2044 | dma_controller_destroy(musb->dma_controller); | 2044 | dma_controller_destroy(musb->dma_controller); |
2045 | 2045 | ||
2046 | cancel_work_sync(&musb->irq_work); | ||
2046 | musb_free(musb); | 2047 | musb_free(musb); |
2047 | device_init_wakeup(dev, 0); | 2048 | device_init_wakeup(dev, 0); |
2048 | return 0; | 2049 | return 0; |
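
The musb_core.c hunks above move INIT_WORK() ahead of request_irq() and defer cancel_work_sync() until teardown, so the work item already exists when the first interrupt can schedule it and is only flushed once it can no longer be re-queued. A generic probe/teardown skeleton showing that ordering; the structure and function names here are hypothetical, not the actual musb code:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_ctrl {
    int irq;
    struct work_struct irq_work;
};

static void my_irq_work(struct work_struct *work)
{
    /* deferred interrupt handling runs here */
}

static irqreturn_t my_isr(int irq, void *data)
{
    struct my_ctrl *ctrl = data;

    /* May run as soon as request_irq() succeeds, so irq_work must exist. */
    schedule_work(&ctrl->irq_work);
    return IRQ_HANDLED;
}

static int my_probe(struct my_ctrl *ctrl)
{
    /* Initialise the work item *before* the IRQ can fire and queue it. */
    INIT_WORK(&ctrl->irq_work, my_irq_work);

    return request_irq(ctrl->irq, my_isr, 0, "my_ctrl", ctrl);
}

static void my_teardown(struct my_ctrl *ctrl)
{
    /* Once the IRQ is gone, nothing can re-queue the work; now flush it. */
    free_irq(ctrl->irq, ctrl);
    cancel_work_sync(&ctrl->irq_work);
}
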
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index ff9d6de2b746..a12bd30401e0 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c | |||
@@ -38,6 +38,7 @@ struct cppi41_dma_channel { | |||
38 | u32 prog_len; | 38 | u32 prog_len; |
39 | u32 transferred; | 39 | u32 transferred; |
40 | u32 packet_sz; | 40 | u32 packet_sz; |
41 | struct list_head tx_check; | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | #define MUSB_DMA_NUM_CHANNELS 15 | 44 | #define MUSB_DMA_NUM_CHANNELS 15 |
@@ -47,6 +48,8 @@ struct cppi41_dma_controller { | |||
47 | struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; | 48 | struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; |
48 | struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; | 49 | struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; |
49 | struct musb *musb; | 50 | struct musb *musb; |
51 | struct hrtimer early_tx; | ||
52 | struct list_head early_tx_list; | ||
50 | u32 rx_mode; | 53 | u32 rx_mode; |
51 | u32 tx_mode; | 54 | u32 tx_mode; |
52 | u32 auto_req; | 55 | u32 auto_req; |
@@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel) | |||
96 | cppi41_channel->usb_toggle = toggle; | 99 | cppi41_channel->usb_toggle = toggle; |
97 | } | 100 | } |
98 | 101 | ||
99 | static void cppi41_dma_callback(void *private_data) | 102 | static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep) |
100 | { | 103 | { |
101 | struct dma_channel *channel = private_data; | 104 | u8 epnum = hw_ep->epnum; |
102 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; | 105 | struct musb *musb = hw_ep->musb; |
103 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; | 106 | void __iomem *epio = musb->endpoints[epnum].regs; |
104 | struct musb *musb = hw_ep->musb; | 107 | u16 csr; |
105 | unsigned long flags; | ||
106 | struct dma_tx_state txstate; | ||
107 | u32 transferred; | ||
108 | 108 | ||
109 | spin_lock_irqsave(&musb->lock, flags); | 109 | csr = musb_readw(epio, MUSB_TXCSR); |
110 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
111 | return false; | ||
112 | return true; | ||
113 | } | ||
110 | 114 | ||
111 | dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, | 115 | static void cppi41_dma_callback(void *private_data); |
112 | &txstate); | ||
113 | transferred = cppi41_channel->prog_len - txstate.residue; | ||
114 | cppi41_channel->transferred += transferred; | ||
115 | 116 | ||
116 | dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", | 117 | static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) |
117 | hw_ep->epnum, cppi41_channel->transferred, | 118 | { |
118 | cppi41_channel->total_len); | 119 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; |
120 | struct musb *musb = hw_ep->musb; | ||
119 | 121 | ||
120 | update_rx_toggle(cppi41_channel); | 122 | if (!cppi41_channel->prog_len) { |
121 | |||
122 | if (cppi41_channel->transferred == cppi41_channel->total_len || | ||
123 | transferred < cppi41_channel->packet_sz) { | ||
124 | 123 | ||
125 | /* done, complete */ | 124 | /* done, complete */ |
126 | cppi41_channel->channel.actual_len = | 125 | cppi41_channel->channel.actual_len = |
@@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data) | |||
150 | remain_bytes, | 149 | remain_bytes, |
151 | direction, | 150 | direction, |
152 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 151 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
153 | if (WARN_ON(!dma_desc)) { | 152 | if (WARN_ON(!dma_desc)) |
154 | spin_unlock_irqrestore(&musb->lock, flags); | ||
155 | return; | 153 | return; |
156 | } | ||
157 | 154 | ||
158 | dma_desc->callback = cppi41_dma_callback; | 155 | dma_desc->callback = cppi41_dma_callback; |
159 | dma_desc->callback_param = channel; | 156 | dma_desc->callback_param = &cppi41_channel->channel; |
160 | cppi41_channel->cookie = dma_desc->tx_submit(dma_desc); | 157 | cppi41_channel->cookie = dma_desc->tx_submit(dma_desc); |
161 | dma_async_issue_pending(dc); | 158 | dma_async_issue_pending(dc); |
162 | 159 | ||
@@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data) | |||
166 | musb_writew(epio, MUSB_RXCSR, csr); | 163 | musb_writew(epio, MUSB_RXCSR, csr); |
167 | } | 164 | } |
168 | } | 165 | } |
166 | } | ||
167 | |||
168 | static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer) | ||
169 | { | ||
170 | struct cppi41_dma_controller *controller; | ||
171 | struct cppi41_dma_channel *cppi41_channel, *n; | ||
172 | struct musb *musb; | ||
173 | unsigned long flags; | ||
174 | enum hrtimer_restart ret = HRTIMER_NORESTART; | ||
175 | |||
176 | controller = container_of(timer, struct cppi41_dma_controller, | ||
177 | early_tx); | ||
178 | musb = controller->musb; | ||
179 | |||
180 | spin_lock_irqsave(&musb->lock, flags); | ||
181 | list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list, | ||
182 | tx_check) { | ||
183 | bool empty; | ||
184 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; | ||
185 | |||
186 | empty = musb_is_tx_fifo_empty(hw_ep); | ||
187 | if (empty) { | ||
188 | list_del_init(&cppi41_channel->tx_check); | ||
189 | cppi41_trans_done(cppi41_channel); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | if (!list_empty(&controller->early_tx_list)) { | ||
194 | ret = HRTIMER_RESTART; | ||
195 | hrtimer_forward_now(&controller->early_tx, | ||
196 | ktime_set(0, 150 * NSEC_PER_USEC)); | ||
197 | } | ||
198 | |||
199 | spin_unlock_irqrestore(&musb->lock, flags); | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | static void cppi41_dma_callback(void *private_data) | ||
204 | { | ||
205 | struct dma_channel *channel = private_data; | ||
206 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; | ||
207 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; | ||
208 | struct musb *musb = hw_ep->musb; | ||
209 | unsigned long flags; | ||
210 | struct dma_tx_state txstate; | ||
211 | u32 transferred; | ||
212 | bool empty; | ||
213 | |||
214 | spin_lock_irqsave(&musb->lock, flags); | ||
215 | |||
216 | dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, | ||
217 | &txstate); | ||
218 | transferred = cppi41_channel->prog_len - txstate.residue; | ||
219 | cppi41_channel->transferred += transferred; | ||
220 | |||
221 | dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", | ||
222 | hw_ep->epnum, cppi41_channel->transferred, | ||
223 | cppi41_channel->total_len); | ||
224 | |||
225 | update_rx_toggle(cppi41_channel); | ||
226 | |||
227 | if (cppi41_channel->transferred == cppi41_channel->total_len || | ||
228 | transferred < cppi41_channel->packet_sz) | ||
229 | cppi41_channel->prog_len = 0; | ||
230 | |||
231 | empty = musb_is_tx_fifo_empty(hw_ep); | ||
232 | if (empty) { | ||
233 | cppi41_trans_done(cppi41_channel); | ||
234 | } else { | ||
235 | struct cppi41_dma_controller *controller; | ||
236 | /* | ||
237 | * On AM335x it has been observed that the TX interrupt fires | ||
238 | * too early, meaning the TXFIFO is not yet empty even though the DMA | ||
239 | * engine says that it is done with the transfer. We don't | ||
240 | * receive a FIFO empty interrupt, so the only thing we can do is | ||
241 | * to poll for the bit. On HS it usually takes 2us, on FS around | ||
242 | * 110us - 150us depending on the transfer size. | ||
243 | * We spin on HS (no longer than 25us) and set up a timer on | ||
244 | * FS to check for the bit and complete the transfer. | ||
245 | */ | ||
246 | controller = cppi41_channel->controller; | ||
247 | |||
248 | if (musb->g.speed == USB_SPEED_HIGH) { | ||
249 | unsigned wait = 25; | ||
250 | |||
251 | do { | ||
252 | empty = musb_is_tx_fifo_empty(hw_ep); | ||
253 | if (empty) | ||
254 | break; | ||
255 | wait--; | ||
256 | if (!wait) | ||
257 | break; | ||
258 | udelay(1); | ||
259 | } while (1); | ||
260 | |||
261 | empty = musb_is_tx_fifo_empty(hw_ep); | ||
262 | if (empty) { | ||
263 | cppi41_trans_done(cppi41_channel); | ||
264 | goto out; | ||
265 | } | ||
266 | } | ||
267 | list_add_tail(&cppi41_channel->tx_check, | ||
268 | &controller->early_tx_list); | ||
269 | if (!hrtimer_active(&controller->early_tx)) { | ||
270 | hrtimer_start_range_ns(&controller->early_tx, | ||
271 | ktime_set(0, 140 * NSEC_PER_USEC), | ||
272 | 40 * NSEC_PER_USEC, | ||
273 | HRTIMER_MODE_REL); | ||
274 | } | ||
275 | } | ||
276 | out: | ||
169 | spin_unlock_irqrestore(&musb->lock, flags); | 277 | spin_unlock_irqrestore(&musb->lock, flags); |
170 | } | 278 | } |
171 | 279 | ||
@@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket, | |||
364 | WARN_ON(1); | 472 | WARN_ON(1); |
365 | return 1; | 473 | return 1; |
366 | } | 474 | } |
475 | if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK) | ||
476 | return 0; | ||
367 | if (cppi41_channel->is_tx) | 477 | if (cppi41_channel->is_tx) |
368 | return 1; | 478 | return 1; |
369 | /* AM335x Advisory 1.0.13. No workaround for device RX mode */ | 479 | /* AM335x Advisory 1.0.13. No workaround for device RX mode */ |
@@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel) | |||
388 | if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE) | 498 | if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE) |
389 | return 0; | 499 | return 0; |
390 | 500 | ||
501 | list_del_init(&cppi41_channel->tx_check); | ||
391 | if (is_tx) { | 502 | if (is_tx) { |
392 | csr = musb_readw(epio, MUSB_TXCSR); | 503 | csr = musb_readw(epio, MUSB_TXCSR); |
393 | csr &= ~MUSB_TXCSR_DMAENAB; | 504 | csr &= ~MUSB_TXCSR_DMAENAB; |
@@ -495,6 +606,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) | |||
495 | cppi41_channel->controller = controller; | 606 | cppi41_channel->controller = controller; |
496 | cppi41_channel->port_num = port; | 607 | cppi41_channel->port_num = port; |
497 | cppi41_channel->is_tx = is_tx; | 608 | cppi41_channel->is_tx = is_tx; |
609 | INIT_LIST_HEAD(&cppi41_channel->tx_check); | ||
498 | 610 | ||
499 | musb_dma = &cppi41_channel->channel; | 611 | musb_dma = &cppi41_channel->channel; |
500 | musb_dma->private_data = cppi41_channel; | 612 | musb_dma->private_data = cppi41_channel; |
@@ -520,6 +632,7 @@ void dma_controller_destroy(struct dma_controller *c) | |||
520 | struct cppi41_dma_controller *controller = container_of(c, | 632 | struct cppi41_dma_controller *controller = container_of(c, |
521 | struct cppi41_dma_controller, controller); | 633 | struct cppi41_dma_controller, controller); |
522 | 634 | ||
635 | hrtimer_cancel(&controller->early_tx); | ||
523 | cppi41_dma_controller_stop(controller); | 636 | cppi41_dma_controller_stop(controller); |
524 | kfree(controller); | 637 | kfree(controller); |
525 | } | 638 | } |
@@ -539,6 +652,9 @@ struct dma_controller *dma_controller_create(struct musb *musb, | |||
539 | if (!controller) | 652 | if (!controller) |
540 | goto kzalloc_fail; | 653 | goto kzalloc_fail; |
541 | 654 | ||
655 | hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
656 | controller->early_tx.function = cppi41_recheck_tx_req; | ||
657 | INIT_LIST_HEAD(&controller->early_tx_list); | ||
542 | controller->musb = musb; | 658 | controller->musb = musb; |
543 | 659 | ||
544 | controller->controller.channel_alloc = cppi41_dma_channel_allocate; | 660 | controller->controller.channel_alloc = cppi41_dma_channel_allocate; |
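
The musb_cppi41.c changes above implement the workaround described in the in-code comment: the DMA completion can arrive before the TX FIFO has drained, so the driver spins briefly on high speed and otherwise re-checks from an hrtimer. A condensed sketch of that spin-then-defer pattern follows; fifo_empty(), complete_transfer() and defer_to_timer() are hypothetical helpers standing in for the driver-specific ones, and the real driver keeps a per-controller list of channels that the timer callback walks:

#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical helpers standing in for the driver-specific ones. */
bool fifo_empty(void *chan);
void complete_transfer(void *chan);
void defer_to_timer(struct hrtimer *timer, void *chan);

static void on_dma_done(struct hrtimer *recheck, void *chan, bool high_speed)
{
    unsigned int spins = 25;

    if (high_speed) {
        /* On HS the FIFO usually drains within a couple of microseconds. */
        while (spins-- && !fifo_empty(chan))
            udelay(1);
    }

    if (fifo_empty(chan)) {
        complete_transfer(chan);
        return;
    }

    /* On FS draining takes ~110-150us: queue the channel and poll later. */
    defer_to_timer(recheck, chan);
    if (!hrtimer_active(recheck))
        hrtimer_start_range_ns(recheck, ktime_set(0, 140 * NSEC_PER_USEC),
                               40 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}
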
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d2d3a173b315..32fb057c03f5 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1796,7 +1796,11 @@ int musb_gadget_setup(struct musb *musb) | |||
1796 | 1796 | ||
1797 | /* this "gadget" abstracts/virtualizes the controller */ | 1797 | /* this "gadget" abstracts/virtualizes the controller */ |
1798 | musb->g.name = musb_driver_name; | 1798 | musb->g.name = musb_driver_name; |
1799 | #if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE) | ||
1799 | musb->g.is_otg = 1; | 1800 | musb->g.is_otg = 1; |
1801 | #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) | ||
1802 | musb->g.is_otg = 0; | ||
1803 | #endif | ||
1800 | 1804 | ||
1801 | musb_g_init_endpoints(musb); | 1805 | musb_g_init_endpoints(musb); |
1802 | 1806 | ||
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 08e2f39027ec..2b41c636a52a 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
@@ -19,8 +19,9 @@ config AB8500_USB | |||
19 | in host mode, low speed. | 19 | in host mode, low speed. |
20 | 20 | ||
21 | config FSL_USB2_OTG | 21 | config FSL_USB2_OTG |
22 | bool "Freescale USB OTG Transceiver Driver" | 22 | tristate "Freescale USB OTG Transceiver Driver" |
23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME | 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME |
24 | depends on USB | ||
24 | select USB_OTG | 25 | select USB_OTG |
25 | select USB_PHY | 26 | select USB_PHY |
26 | help | 27 | help |
@@ -29,6 +30,7 @@ config FSL_USB2_OTG | |||
29 | config ISP1301_OMAP | 30 | config ISP1301_OMAP |
30 | tristate "Philips ISP1301 with OMAP OTG" | 31 | tristate "Philips ISP1301 with OMAP OTG" |
31 | depends on I2C && ARCH_OMAP_OTG | 32 | depends on I2C && ARCH_OMAP_OTG |
33 | depends on USB | ||
32 | select USB_PHY | 34 | select USB_PHY |
33 | help | 35 | help |
34 | If you say yes here you get support for the Philips ISP1301 | 36 | If you say yes here you get support for the Philips ISP1301 |
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 6370e50649d7..0e3c60cb669a 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c | |||
@@ -52,8 +52,7 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
52 | return am_phy->id; | 52 | return am_phy->id; |
53 | } | 53 | } |
54 | 54 | ||
55 | ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, | 55 | ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL); |
56 | USB_PHY_TYPE_USB2, 0, false); | ||
57 | if (ret) | 56 | if (ret) |
58 | return ret; | 57 | return ret; |
59 | 58 | ||
@@ -66,8 +65,6 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
66 | platform_set_drvdata(pdev, am_phy); | 65 | platform_set_drvdata(pdev, am_phy); |
67 | 66 | ||
68 | return 0; | 67 | return 0; |
69 | |||
70 | return ret; | ||
71 | } | 68 | } |
72 | 69 | ||
73 | static int am335x_phy_remove(struct platform_device *pdev) | 70 | static int am335x_phy_remove(struct platform_device *pdev) |
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index fce3a9e9bb5d..aa6d37b3378a 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c | |||
@@ -48,8 +48,9 @@ void usb_nop_xceiv_register(void) | |||
48 | if (pd) | 48 | if (pd) |
49 | return; | 49 | return; |
50 | pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); | 50 | pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); |
51 | if (!pd) { | 51 | if (IS_ERR(pd)) { |
52 | pr_err("Unable to register generic usb transceiver\n"); | 52 | pr_err("Unable to register generic usb transceiver\n"); |
53 | pd = NULL; | ||
53 | return; | 54 | return; |
54 | } | 55 | } |
55 | } | 56 | } |
@@ -150,10 +151,40 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host) | |||
150 | } | 151 | } |
151 | 152 | ||
152 | int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, | 153 | int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, |
153 | enum usb_phy_type type, u32 clk_rate, bool needs_vcc) | 154 | struct usb_phy_gen_xceiv_platform_data *pdata) |
154 | { | 155 | { |
156 | enum usb_phy_type type = USB_PHY_TYPE_USB2; | ||
155 | int err; | 157 | int err; |
156 | 158 | ||
159 | u32 clk_rate = 0; | ||
160 | bool needs_vcc = false; | ||
161 | |||
162 | nop->reset_active_low = true; /* default behaviour */ | ||
163 | |||
164 | if (dev->of_node) { | ||
165 | struct device_node *node = dev->of_node; | ||
166 | enum of_gpio_flags flags = 0; | ||
167 | |||
168 | if (of_property_read_u32(node, "clock-frequency", &clk_rate)) | ||
169 | clk_rate = 0; | ||
170 | |||
171 | needs_vcc = of_property_read_bool(node, "vcc-supply"); | ||
172 | nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios", | ||
173 | 0, &flags); | ||
174 | if (nop->gpio_reset == -EPROBE_DEFER) | ||
175 | return -EPROBE_DEFER; | ||
176 | |||
177 | nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; | ||
178 | |||
179 | } else if (pdata) { | ||
180 | type = pdata->type; | ||
181 | clk_rate = pdata->clk_rate; | ||
182 | needs_vcc = pdata->needs_vcc; | ||
183 | nop->gpio_reset = pdata->gpio_reset; | ||
184 | } else { | ||
185 | nop->gpio_reset = -1; | ||
186 | } | ||
187 | |||
157 | nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg), | 188 | nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg), |
158 | GFP_KERNEL); | 189 | GFP_KERNEL); |
159 | if (!nop->phy.otg) | 190 | if (!nop->phy.otg) |
@@ -218,43 +249,14 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy); | |||
218 | static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) | 249 | static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) |
219 | { | 250 | { |
220 | struct device *dev = &pdev->dev; | 251 | struct device *dev = &pdev->dev; |
221 | struct usb_phy_gen_xceiv_platform_data *pdata = | ||
222 | dev_get_platdata(&pdev->dev); | ||
223 | struct usb_phy_gen_xceiv *nop; | 252 | struct usb_phy_gen_xceiv *nop; |
224 | enum usb_phy_type type = USB_PHY_TYPE_USB2; | ||
225 | int err; | 253 | int err; |
226 | u32 clk_rate = 0; | ||
227 | bool needs_vcc = false; | ||
228 | 254 | ||
229 | nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); | 255 | nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); |
230 | if (!nop) | 256 | if (!nop) |
231 | return -ENOMEM; | 257 | return -ENOMEM; |
232 | 258 | ||
233 | nop->reset_active_low = true; /* default behaviour */ | 259 | err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev)); |
234 | |||
235 | if (dev->of_node) { | ||
236 | struct device_node *node = dev->of_node; | ||
237 | enum of_gpio_flags flags; | ||
238 | |||
239 | if (of_property_read_u32(node, "clock-frequency", &clk_rate)) | ||
240 | clk_rate = 0; | ||
241 | |||
242 | needs_vcc = of_property_read_bool(node, "vcc-supply"); | ||
243 | nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios", | ||
244 | 0, &flags); | ||
245 | if (nop->gpio_reset == -EPROBE_DEFER) | ||
246 | return -EPROBE_DEFER; | ||
247 | |||
248 | nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; | ||
249 | |||
250 | } else if (pdata) { | ||
251 | type = pdata->type; | ||
252 | clk_rate = pdata->clk_rate; | ||
253 | needs_vcc = pdata->needs_vcc; | ||
254 | nop->gpio_reset = pdata->gpio_reset; | ||
255 | } | ||
256 | |||
257 | err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc); | ||
258 | if (err) | 260 | if (err) |
259 | return err; | 261 | return err; |
260 | 262 | ||
@@ -271,8 +273,6 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) | |||
271 | platform_set_drvdata(pdev, nop); | 273 | platform_set_drvdata(pdev, nop); |
272 | 274 | ||
273 | return 0; | 275 | return 0; |
274 | |||
275 | return err; | ||
276 | } | 276 | } |
277 | 277 | ||
278 | static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) | 278 | static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) |
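
The usb_nop_xceiv_register() fix in the phy-generic.c section reflects that platform_device_register_simple() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the result has to be tested with IS_ERR(). A minimal sketch of that calling convention (the device name is illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static struct platform_device *example_pdev;

static void example_register(void)
{
    struct platform_device *pd;

    pd = platform_device_register_simple("example-dev", -1, NULL, 0);
    if (IS_ERR(pd)) {
        /* Failure is encoded in the pointer; a NULL check would miss it. */
        pr_err("example-dev: registration failed: %ld\n", PTR_ERR(pd));
        return;
    }
    example_pdev = pd;
}
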
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h index d2a220d81734..38a81f307b82 100644 --- a/drivers/usb/phy/phy-generic.h +++ b/drivers/usb/phy/phy-generic.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _PHY_GENERIC_H_ | 1 | #ifndef _PHY_GENERIC_H_ |
2 | #define _PHY_GENERIC_H_ | 2 | #define _PHY_GENERIC_H_ |
3 | 3 | ||
4 | #include <linux/usb/usb_phy_gen_xceiv.h> | ||
5 | |||
4 | struct usb_phy_gen_xceiv { | 6 | struct usb_phy_gen_xceiv { |
5 | struct usb_phy phy; | 7 | struct usb_phy phy; |
6 | struct device *dev; | 8 | struct device *dev; |
@@ -14,6 +16,6 @@ int usb_gen_phy_init(struct usb_phy *phy); | |||
14 | void usb_gen_phy_shutdown(struct usb_phy *phy); | 16 | void usb_gen_phy_shutdown(struct usb_phy *phy); |
15 | 17 | ||
16 | int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, | 18 | int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, |
17 | enum usb_phy_type type, u32 clk_rate, bool needs_vcc); | 19 | struct usb_phy_gen_xceiv_platform_data *pdata); |
18 | 20 | ||
19 | #endif | 21 | #endif |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index fdd33b44dbd3..545844b7e796 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
@@ -164,7 +164,7 @@ static int mxs_phy_probe(struct platform_device *pdev) | |||
164 | 164 | ||
165 | mxs_phy->clk = clk; | 165 | mxs_phy->clk = clk; |
166 | 166 | ||
167 | platform_set_drvdata(pdev, &mxs_phy->phy); | 167 | platform_set_drvdata(pdev, mxs_phy); |
168 | 168 | ||
169 | ret = usb_add_phy_dev(&mxs_phy->phy); | 169 | ret = usb_add_phy_dev(&mxs_phy->phy); |
170 | if (ret) | 170 | if (ret) |
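
The phy-mxs-usb.c fix above stores the driver's own wrapper structure in drvdata rather than the embedded usb_phy member, so later platform_get_drvdata() callers get back the type they expect. A small sketch of keeping the two calls consistent; the structure and function names are hypothetical:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/phy.h>

struct my_phy {
    struct usb_phy phy;                /* embedded core object */
    struct clk *clk;                   /* driver-private state */
};

static int my_phy_probe(struct platform_device *pdev)
{
    struct my_phy *mp = devm_kzalloc(&pdev->dev, sizeof(*mp), GFP_KERNEL);

    if (!mp)
        return -ENOMEM;

    mp->phy.dev = &pdev->dev;

    /* Store the wrapper, not &mp->phy, so remove() gets the same type back. */
    platform_set_drvdata(pdev, mp);

    return usb_add_phy_dev(&mp->phy);
}

static int my_phy_remove(struct platform_device *pdev)
{
    struct my_phy *mp = platform_get_drvdata(pdev);

    /* Valid only because probe stored the full wrapper structure. */
    usb_remove_phy(&mp->phy);
    return 0;
}
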
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c index a99a6953f11c..db3ab34cddb4 100644 --- a/drivers/usb/phy/phy-rcar-gen2-usb.c +++ b/drivers/usb/phy/phy-rcar-gen2-usb.c | |||
@@ -107,10 +107,10 @@ static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv) | |||
107 | clk_prepare_enable(priv->clk); | 107 | clk_prepare_enable(priv->clk); |
108 | 108 | ||
109 | /* Set USB channels in the USBHS UGCTRL2 register */ | 109 | /* Set USB channels in the USBHS UGCTRL2 register */ |
110 | val = ioread32(priv->base); | 110 | val = ioread32(priv->base + USBHS_UGCTRL2_REG); |
111 | val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS); | 111 | val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS); |
112 | val |= priv->ugctrl2; | 112 | val |= priv->ugctrl2; |
113 | iowrite32(val, priv->base); | 113 | iowrite32(val, priv->base + USBHS_UGCTRL2_REG); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Shutdown USB channels */ | 116 | /* Shutdown USB channels */ |
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 82232acf1ab6..bbe4f8e6e8d7 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, | |||
876 | 876 | ||
877 | tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, | 877 | tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, |
878 | resource_size(res)); | 878 | resource_size(res)); |
879 | if (!tegra_phy->regs) { | 879 | if (!tegra_phy->pad_regs) { |
880 | dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); | 880 | dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); |
881 | return -ENOMEM; | 881 | return -ENOMEM; |
882 | } | 882 | } |
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 30e8a61552d4..bad57ce77ba5 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c | |||
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module, | |||
127 | 127 | ||
128 | static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) | 128 | static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) |
129 | { | 129 | { |
130 | u8 data, ret = 0; | 130 | u8 data; |
131 | int ret; | ||
131 | 132 | ||
132 | ret = twl_i2c_read_u8(module, &data, address); | 133 | ret = twl_i2c_read_u8(module, &data, address); |
133 | if (ret >= 0) | 134 | if (ret >= 0) |
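
The twl6030_readb() change above matters because twl_i2c_read_u8() can return a negative errno; keeping that in a u8 truncates it and makes the subsequent "ret >= 0" check always true. A tiny stand-alone illustration of the truncation, independent of the driver:

#include <stdio.h>

int main(void)
{
    int err = -5;                      /* e.g. -EIO from a failed I2C read */
    unsigned char as_u8 = err;         /* what a "u8 ret" would have stored */

    /* -5 becomes 251 in a u8, so a "ret >= 0" success check can never fail. */
    printf("int: %d  u8: %u  (u8 >= 0: %d)\n",
           err, (unsigned int)as_u8, as_u8 >= 0);
    return 0;
}
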
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9ced8937a8f3..fb0d537435eb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -2123,6 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty, | |||
2123 | termios->c_cflag |= CRTSCTS; | 2123 | termios->c_cflag |= CRTSCTS; |
2124 | } | 2124 | } |
2125 | 2125 | ||
2126 | /* | ||
2127 | * All FTDI UART chips are limited to CS7/8. We won't pretend to | ||
2128 | * support CS5/6 and revert the CSIZE setting instead. | ||
2129 | */ | ||
2130 | if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) { | ||
2131 | dev_warn(ddev, "requested CSIZE setting not supported\n"); | ||
2132 | |||
2133 | termios->c_cflag &= ~CSIZE; | ||
2134 | if (old_termios) | ||
2135 | termios->c_cflag |= old_termios->c_cflag & CSIZE; | ||
2136 | else | ||
2137 | termios->c_cflag |= CS8; | ||
2138 | } | ||
2139 | |||
2126 | cflag = termios->c_cflag; | 2140 | cflag = termios->c_cflag; |
2127 | 2141 | ||
2128 | if (!old_termios) | 2142 | if (!old_termios) |
@@ -2159,19 +2173,16 @@ no_skip: | |||
2159 | } else { | 2173 | } else { |
2160 | urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; | 2174 | urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; |
2161 | } | 2175 | } |
2162 | if (cflag & CSIZE) { | 2176 | switch (cflag & CSIZE) { |
2163 | switch (cflag & CSIZE) { | 2177 | case CS7: |
2164 | case CS7: | 2178 | urb_value |= 7; |
2165 | urb_value |= 7; | 2179 | dev_dbg(ddev, "Setting CS7\n"); |
2166 | dev_dbg(ddev, "Setting CS7\n"); | 2180 | break; |
2167 | break; | 2181 | default: |
2168 | case CS8: | 2182 | case CS8: |
2169 | urb_value |= 8; | 2183 | urb_value |= 8; |
2170 | dev_dbg(ddev, "Setting CS8\n"); | 2184 | dev_dbg(ddev, "Setting CS8\n"); |
2171 | break; | 2185 | break; |
2172 | default: | ||
2173 | dev_err(ddev, "CSIZE was set but not CS7-CS8\n"); | ||
2174 | } | ||
2175 | } | 2186 | } |
2176 | 2187 | ||
2177 | /* This is needed by the break command since it uses the same command | 2188 | /* This is needed by the break command since it uses the same command |
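
The ftdi_sio.c change above rejects CS5/CS6 requests and falls back to the previous (or CS8) character size, since the hardware only supports 7 and 8 data bits. For context, this is roughly how user space produces the CSIZE bits that the driver now validates, a minimal sketch using the standard termios API (the device path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    struct termios tio;
    int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);  /* illustrative path */

    if (fd < 0 || tcgetattr(fd, &tio) < 0) {
        perror("open/tcgetattr");
        return 1;
    }

    tio.c_cflag &= ~CSIZE;             /* clear the character-size bits ... */
    tio.c_cflag |= CS7;                /* ... and ask for 7 data bits */
    if (tcsetattr(fd, TCSANOW, &tio) < 0)
        perror("tcsetattr");

    /* A request for CS5/CS6 here is what the driver above now reverts. */
    close(fd);
    return 0;
}
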
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 2b01ec8651c2..b63ce023f96f 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
@@ -173,16 +173,8 @@ retry: | |||
173 | clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); | 173 | clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); |
174 | return result; | 174 | return result; |
175 | } | 175 | } |
176 | /* | ||
177 | * Try sending off another urb, unless called from completion handler | ||
178 | * (in which case there will be no free urb or no data). | ||
179 | */ | ||
180 | if (mem_flags != GFP_ATOMIC) | ||
181 | goto retry; | ||
182 | 176 | ||
183 | clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); | 177 | goto retry; /* try sending off another urb */ |
184 | |||
185 | return 0; | ||
186 | } | 178 | } |
187 | EXPORT_SYMBOL_GPL(usb_serial_generic_write_start); | 179 | EXPORT_SYMBOL_GPL(usb_serial_generic_write_start); |
188 | 180 | ||
@@ -208,7 +200,7 @@ int usb_serial_generic_write(struct tty_struct *tty, | |||
208 | return 0; | 200 | return 0; |
209 | 201 | ||
210 | count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); | 202 | count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); |
211 | result = usb_serial_generic_write_start(port, GFP_KERNEL); | 203 | result = usb_serial_generic_write_start(port, GFP_ATOMIC); |
212 | if (result) | 204 | if (result) |
213 | return result; | 205 | return result; |
214 | 206 | ||
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index e5bdd987b9e8..a69da83604c0 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty, | |||
1813 | iflag = tty->termios.c_iflag; | 1813 | iflag = tty->termios.c_iflag; |
1814 | 1814 | ||
1815 | /* Change the number of bits */ | 1815 | /* Change the number of bits */ |
1816 | if (cflag & CSIZE) { | 1816 | switch (cflag & CSIZE) { |
1817 | switch (cflag & CSIZE) { | 1817 | case CS5: |
1818 | case CS5: | 1818 | lData = LCR_BITS_5; |
1819 | lData = LCR_BITS_5; | 1819 | break; |
1820 | break; | ||
1821 | 1820 | ||
1822 | case CS6: | 1821 | case CS6: |
1823 | lData = LCR_BITS_6; | 1822 | lData = LCR_BITS_6; |
1824 | break; | 1823 | break; |
1825 | 1824 | ||
1826 | case CS7: | 1825 | case CS7: |
1827 | lData = LCR_BITS_7; | 1826 | lData = LCR_BITS_7; |
1828 | break; | 1827 | break; |
1829 | default: | 1828 | |
1830 | case CS8: | 1829 | default: |
1831 | lData = LCR_BITS_8; | 1830 | case CS8: |
1832 | break; | 1831 | lData = LCR_BITS_8; |
1833 | } | 1832 | break; |
1834 | } | 1833 | } |
1834 | |||
1835 | /* Change the Parity bit */ | 1835 | /* Change the Parity bit */ |
1836 | if (cflag & PARENB) { | 1836 | if (cflag & PARENB) { |
1837 | if (cflag & PARODD) { | 1837 | if (cflag & PARODD) { |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3d94853b4ab..cc7a24154490 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb); | |||
85 | #define HUAWEI_PRODUCT_K4505 0x1464 | 85 | #define HUAWEI_PRODUCT_K4505 0x1464 |
86 | #define HUAWEI_PRODUCT_K3765 0x1465 | 86 | #define HUAWEI_PRODUCT_K3765 0x1465 |
87 | #define HUAWEI_PRODUCT_K4605 0x14C6 | 87 | #define HUAWEI_PRODUCT_K4605 0x14C6 |
88 | #define HUAWEI_PRODUCT_E173S6 0x1C07 | ||
88 | 89 | ||
89 | #define QUANTA_VENDOR_ID 0x0408 | 90 | #define QUANTA_VENDOR_ID 0x0408 |
90 | #define QUANTA_PRODUCT_Q101 0xEA02 | 91 | #define QUANTA_PRODUCT_Q101 0xEA02 |
@@ -250,6 +251,7 @@ static void option_instat_callback(struct urb *urb); | |||
250 | #define ZTE_PRODUCT_MF628 0x0015 | 251 | #define ZTE_PRODUCT_MF628 0x0015 |
251 | #define ZTE_PRODUCT_MF626 0x0031 | 252 | #define ZTE_PRODUCT_MF626 0x0031 |
252 | #define ZTE_PRODUCT_MC2718 0xffe8 | 253 | #define ZTE_PRODUCT_MC2718 0xffe8 |
254 | #define ZTE_PRODUCT_AC2726 0xfff1 | ||
253 | 255 | ||
254 | #define BENQ_VENDOR_ID 0x04a5 | 256 | #define BENQ_VENDOR_ID 0x04a5 |
255 | #define BENQ_PRODUCT_H10 0x4068 | 257 | #define BENQ_PRODUCT_H10 0x4068 |
@@ -572,6 +574,8 @@ static const struct usb_device_id option_ids[] = { | |||
572 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, | 574 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, |
573 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), | 575 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), |
574 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, | 576 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, |
577 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff), | ||
578 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, | ||
575 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), | 579 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), |
576 | .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, | 580 | .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, |
577 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, | 581 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, |
@@ -634,6 +638,10 @@ static const struct usb_device_id option_ids[] = { | |||
634 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, | 638 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, |
635 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, | 639 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, |
636 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, | 640 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, |
641 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) }, | ||
642 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) }, | ||
643 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) }, | ||
644 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) }, | ||
637 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, | 645 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, |
638 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, | 646 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, |
639 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, | 647 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, |
@@ -688,6 +696,10 @@ static const struct usb_device_id option_ids[] = { | |||
688 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, | 696 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, |
689 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, | 697 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, |
690 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, | 698 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, |
699 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) }, | ||
700 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) }, | ||
701 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) }, | ||
702 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) }, | ||
691 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, | 703 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, |
692 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, | 704 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, |
693 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, | 705 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, |
@@ -742,6 +754,10 @@ static const struct usb_device_id option_ids[] = { | |||
742 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, | 754 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, |
743 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, | 755 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, |
744 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, | 756 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, |
757 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) }, | ||
758 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) }, | ||
759 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) }, | ||
760 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) }, | ||
745 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, | 761 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, |
746 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, | 762 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, |
747 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, | 763 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, |
@@ -796,6 +812,10 @@ static const struct usb_device_id option_ids[] = { | |||
796 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, | 812 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, |
797 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, | 813 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, |
798 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, | 814 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, |
815 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) }, | ||
816 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) }, | ||
817 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) }, | ||
818 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) }, | ||
799 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, | 819 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, |
800 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, | 820 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, |
801 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, | 821 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, |
@@ -850,6 +870,10 @@ static const struct usb_device_id option_ids[] = { | |||
850 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, | 870 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, |
851 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, | 871 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, |
852 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, | 872 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, |
873 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) }, | ||
874 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) }, | ||
875 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) }, | ||
876 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) }, | ||
853 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, | 877 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, |
854 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, | 878 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, |
855 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, | 879 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, |
@@ -904,6 +928,10 @@ static const struct usb_device_id option_ids[] = { | |||
904 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, | 928 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, |
905 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, | 929 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, |
906 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, | 930 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, |
931 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) }, | ||
932 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) }, | ||
933 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) }, | ||
934 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) }, | ||
907 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, | 935 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, |
908 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, | 936 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, |
909 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, | 937 | { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, |
@@ -1426,6 +1454,7 @@ static const struct usb_device_id option_ids[] = { | |||
1426 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, | 1454 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, |
1427 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, | 1455 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, |
1428 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, | 1456 | { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, |
1457 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, | ||
1429 | 1458 | ||
1430 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 1459 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
1431 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 1460 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1e6de4cd079d..1e3318dfa1cb 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty, | |||
361 | 0, 0, buf, 7, 100); | 361 | 0, 0, buf, 7, 100); |
362 | dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); | 362 | dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); |
363 | 363 | ||
364 | if (C_CSIZE(tty)) { | 364 | switch (C_CSIZE(tty)) { |
365 | switch (C_CSIZE(tty)) { | 365 | case CS5: |
366 | case CS5: | 366 | buf[6] = 5; |
367 | buf[6] = 5; | 367 | break; |
368 | break; | 368 | case CS6: |
369 | case CS6: | 369 | buf[6] = 6; |
370 | buf[6] = 6; | 370 | break; |
371 | break; | 371 | case CS7: |
372 | case CS7: | 372 | buf[6] = 7; |
373 | buf[6] = 7; | 373 | break; |
374 | break; | 374 | default: |
375 | default: | 375 | case CS8: |
376 | case CS8: | 376 | buf[6] = 8; |
377 | buf[6] = 8; | ||
378 | } | ||
379 | dev_dbg(&port->dev, "data bits = %d\n", buf[6]); | ||
380 | } | 377 | } |
378 | dev_dbg(&port->dev, "data bits = %d\n", buf[6]); | ||
381 | 379 | ||
382 | /* For reference buf[0]:buf[3] baud rate value */ | 380 | /* For reference buf[0]:buf[3] baud rate value */ |
383 | pl2303_encode_baudrate(tty, port, &buf[0]); | 381 | pl2303_encode_baudrate(tty, port, &buf[0]); |
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 4abac28b5992..5b793c352267 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c | |||
@@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty, | |||
348 | } | 348 | } |
349 | 349 | ||
350 | /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ | 350 | /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ |
351 | if (cflag & CSIZE) { | 351 | switch (cflag & CSIZE) { |
352 | switch (cflag & CSIZE) { | 352 | case CS5: |
353 | case CS5: | 353 | buf[1] |= SET_UART_FORMAT_SIZE_5; |
354 | buf[1] |= SET_UART_FORMAT_SIZE_5; | 354 | break; |
355 | break; | 355 | case CS6: |
356 | case CS6: | 356 | buf[1] |= SET_UART_FORMAT_SIZE_6; |
357 | buf[1] |= SET_UART_FORMAT_SIZE_6; | 357 | break; |
358 | break; | 358 | case CS7: |
359 | case CS7: | 359 | buf[1] |= SET_UART_FORMAT_SIZE_7; |
360 | buf[1] |= SET_UART_FORMAT_SIZE_7; | 360 | break; |
361 | break; | 361 | default: |
362 | default: | 362 | case CS8: |
363 | case CS8: | 363 | buf[1] |= SET_UART_FORMAT_SIZE_8; |
364 | buf[1] |= SET_UART_FORMAT_SIZE_8; | 364 | break; |
365 | break; | ||
366 | } | ||
367 | } | 365 | } |
368 | 366 | ||
369 | /* Set Stop bit2 : 0:1bit 1:2bit */ | 367 | /* Set Stop bit2 : 0:1bit 1:2bit */ |
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index fca4c752a4ed..eae2c873b39f 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c | |||
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = { | |||
281 | { USB_DEVICE(0x19d2, 0xfffd) }, | 281 | { USB_DEVICE(0x19d2, 0xfffd) }, |
282 | { USB_DEVICE(0x19d2, 0xfffc) }, | 282 | { USB_DEVICE(0x19d2, 0xfffc) }, |
283 | { USB_DEVICE(0x19d2, 0xfffb) }, | 283 | { USB_DEVICE(0x19d2, 0xfffb) }, |
284 | /* AC2726, AC8710_V3 */ | 284 | /* AC8710_V3 */ |
285 | { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, | ||
286 | { USB_DEVICE(0x19d2, 0xfff6) }, | 285 | { USB_DEVICE(0x19d2, 0xfff6) }, |
287 | { USB_DEVICE(0x19d2, 0xfff7) }, | 286 | { USB_DEVICE(0x19d2, 0xfff7) }, |
288 | { USB_DEVICE(0x19d2, 0xfff8) }, | 287 | { USB_DEVICE(0x19d2, 0xfff8) }, |
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index e538b72c4e3a..f14e7929ba22 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work); | |||
97 | 97 | ||
98 | static void wusb_dev_free(struct wusb_dev *wusb_dev) | 98 | static void wusb_dev_free(struct wusb_dev *wusb_dev) |
99 | { | 99 | { |
100 | if (wusb_dev) { | 100 | kfree(wusb_dev); |
101 | kfree(wusb_dev->set_gtk_req); | ||
102 | usb_free_urb(wusb_dev->set_gtk_urb); | ||
103 | kfree(wusb_dev); | ||
104 | } | ||
105 | } | 101 | } |
106 | 102 | ||
107 | static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) | 103 | static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) |
108 | { | 104 | { |
109 | struct wusb_dev *wusb_dev; | 105 | struct wusb_dev *wusb_dev; |
110 | struct urb *urb; | ||
111 | struct usb_ctrlrequest *req; | ||
112 | 106 | ||
113 | wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); | 107 | wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); |
114 | if (wusb_dev == NULL) | 108 | if (wusb_dev == NULL) |
@@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) | |||
118 | 112 | ||
119 | INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); | 113 | INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); |
120 | 114 | ||
121 | urb = usb_alloc_urb(0, GFP_KERNEL); | ||
122 | if (urb == NULL) | ||
123 | goto err; | ||
124 | wusb_dev->set_gtk_urb = urb; | ||
125 | |||
126 | req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
127 | if (req == NULL) | ||
128 | goto err; | ||
129 | wusb_dev->set_gtk_req = req; | ||
130 | |||
131 | req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; | ||
132 | req->bRequest = USB_REQ_SET_DESCRIPTOR; | ||
133 | req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); | ||
134 | req->wIndex = 0; | ||
135 | req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); | ||
136 | |||
137 | return wusb_dev; | 115 | return wusb_dev; |
138 | err: | 116 | err: |
139 | wusb_dev_free(wusb_dev); | 117 | wusb_dev_free(wusb_dev); |
@@ -411,9 +389,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, | |||
411 | /* | 389 | /* |
412 | * Refresh the list of keep alives to emit in the MMC | 390 | * Refresh the list of keep alives to emit in the MMC |
413 | * | 391 | * |
414 | * Some devices don't respond to keep alives unless they've been | ||
415 | * authenticated, so skip unauthenticated devices. | ||
416 | * | ||
417 | * We only publish the first four devices that have a coming timeout | 392 | * We only publish the first four devices that have a coming timeout |
418 | * condition. Then when we are done processing those, we go for the | 393 | * condition. Then when we are done processing those, we go for the |
419 | * next ones. We ignore the ones that have timed out already (they'll | 394 | * next ones. We ignore the ones that have timed out already (they'll |
@@ -448,7 +423,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | |||
448 | 423 | ||
449 | if (wusb_dev == NULL) | 424 | if (wusb_dev == NULL) |
450 | continue; | 425 | continue; |
451 | if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) | 426 | if (wusb_dev->usb_dev == NULL) |
452 | continue; | 427 | continue; |
453 | 428 | ||
454 | if (time_after(jiffies, wusb_dev->entry_ts + tt)) { | 429 | if (time_after(jiffies, wusb_dev->entry_ts + tt)) { |
@@ -524,11 +499,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) | |||
524 | * | 499 | * |
525 | * @wusbhc shall be referenced and unlocked | 500 | * @wusbhc shall be referenced and unlocked |
526 | */ | 501 | */ |
527 | static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 502 | static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr) |
528 | { | 503 | { |
504 | struct wusb_dev *wusb_dev; | ||
505 | |||
529 | mutex_lock(&wusbhc->mutex); | 506 | mutex_lock(&wusbhc->mutex); |
530 | wusb_dev->entry_ts = jiffies; | 507 | wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); |
531 | __wusbhc_keep_alive(wusbhc); | 508 | if (wusb_dev == NULL) { |
509 | dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n", | ||
510 | srcaddr); | ||
511 | } else { | ||
512 | wusb_dev->entry_ts = jiffies; | ||
513 | __wusbhc_keep_alive(wusbhc); | ||
514 | } | ||
532 | mutex_unlock(&wusbhc->mutex); | 515 | mutex_unlock(&wusbhc->mutex); |
533 | } | 516 | } |
534 | 517 | ||
@@ -582,14 +565,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, | |||
582 | * | 565 | * |
583 | * @wusbhc shall be referenced and unlocked | 566 | * @wusbhc shall be referenced and unlocked |
584 | */ | 567 | */ |
585 | static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 568 | static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr) |
586 | { | 569 | { |
587 | struct device *dev = wusbhc->dev; | 570 | struct device *dev = wusbhc->dev; |
588 | 571 | struct wusb_dev *wusb_dev; | |
589 | dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr); | ||
590 | 572 | ||
591 | mutex_lock(&wusbhc->mutex); | 573 | mutex_lock(&wusbhc->mutex); |
592 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); | 574 | wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); |
575 | if (wusb_dev == NULL) { | ||
576 | dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n", | ||
577 | srcaddr); | ||
578 | } else { | ||
579 | dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", | ||
580 | wusb_dev->addr); | ||
581 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, | ||
582 | wusb_dev->port_idx)); | ||
583 | } | ||
593 | mutex_unlock(&wusbhc->mutex); | 584 | mutex_unlock(&wusbhc->mutex); |
594 | } | 585 | } |
595 | 586 | ||
@@ -611,30 +602,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, | |||
611 | struct wusb_dn_hdr *dn_hdr, size_t size) | 602 | struct wusb_dn_hdr *dn_hdr, size_t size) |
612 | { | 603 | { |
613 | struct device *dev = wusbhc->dev; | 604 | struct device *dev = wusbhc->dev; |
614 | struct wusb_dev *wusb_dev; | ||
615 | 605 | ||
616 | if (size < sizeof(struct wusb_dn_hdr)) { | 606 | if (size < sizeof(struct wusb_dn_hdr)) { |
617 | dev_err(dev, "DN data shorter than DN header (%d < %d)\n", | 607 | dev_err(dev, "DN data shorter than DN header (%d < %d)\n", |
618 | (int)size, (int)sizeof(struct wusb_dn_hdr)); | 608 | (int)size, (int)sizeof(struct wusb_dn_hdr)); |
619 | return; | 609 | return; |
620 | } | 610 | } |
621 | |||
622 | wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); | ||
623 | if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { | ||
624 | dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", | ||
625 | dn_hdr->bType, srcaddr); | ||
626 | return; | ||
627 | } | ||
628 | |||
629 | switch (dn_hdr->bType) { | 611 | switch (dn_hdr->bType) { |
630 | case WUSB_DN_CONNECT: | 612 | case WUSB_DN_CONNECT: |
631 | wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); | 613 | wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); |
632 | break; | 614 | break; |
633 | case WUSB_DN_ALIVE: | 615 | case WUSB_DN_ALIVE: |
634 | wusbhc_handle_dn_alive(wusbhc, wusb_dev); | 616 | wusbhc_handle_dn_alive(wusbhc, srcaddr); |
635 | break; | 617 | break; |
636 | case WUSB_DN_DISCONNECT: | 618 | case WUSB_DN_DISCONNECT: |
637 | wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); | 619 | wusbhc_handle_dn_disconnect(wusbhc, srcaddr); |
638 | break; | 620 | break; |
639 | case WUSB_DN_MASAVAILCHANGED: | 621 | case WUSB_DN_MASAVAILCHANGED: |
640 | case WUSB_DN_RWAKE: | 622 | case WUSB_DN_RWAKE: |
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index dd88441c8f78..4c40d0dbf53d 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c | |||
@@ -29,19 +29,16 @@ | |||
29 | #include <linux/export.h> | 29 | #include <linux/export.h> |
30 | #include "wusbhc.h" | 30 | #include "wusbhc.h" |
31 | 31 | ||
32 | static void wusbhc_set_gtk_callback(struct urb *urb); | 32 | static void wusbhc_gtk_rekey_work(struct work_struct *work); |
33 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work); | ||
34 | 33 | ||
35 | int wusbhc_sec_create(struct wusbhc *wusbhc) | 34 | int wusbhc_sec_create(struct wusbhc *wusbhc) |
36 | { | 35 | { |
37 | wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); | 36 | wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); |
38 | wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; | 37 | wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; |
39 | wusbhc->gtk.descr.bReserved = 0; | 38 | wusbhc->gtk.descr.bReserved = 0; |
39 | wusbhc->gtk_index = 0; | ||
40 | 40 | ||
41 | wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, | 41 | INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work); |
42 | WUSB_KEY_INDEX_ORIGINATOR_HOST); | ||
43 | |||
44 | INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); | ||
45 | 42 | ||
46 | return 0; | 43 | return 0; |
47 | } | 44 | } |
@@ -113,7 +110,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc) | |||
113 | wusbhc_generate_gtk(wusbhc); | 110 | wusbhc_generate_gtk(wusbhc); |
114 | 111 | ||
115 | result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, | 112 | result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, |
116 | &wusbhc->gtk.descr.bKeyData, key_size); | 113 | &wusbhc->gtk.descr.bKeyData, key_size); |
117 | if (result < 0) | 114 | if (result < 0) |
118 | dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", | 115 | dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", |
119 | result); | 116 | result); |
@@ -129,7 +126,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc) | |||
129 | */ | 126 | */ |
130 | void wusbhc_sec_stop(struct wusbhc *wusbhc) | 127 | void wusbhc_sec_stop(struct wusbhc *wusbhc) |
131 | { | 128 | { |
132 | cancel_work_sync(&wusbhc->gtk_rekey_done_work); | 129 | cancel_work_sync(&wusbhc->gtk_rekey_work); |
133 | } | 130 | } |
134 | 131 | ||
135 | 132 | ||
@@ -185,12 +182,14 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) | |||
185 | static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 182 | static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) |
186 | { | 183 | { |
187 | struct usb_device *usb_dev = wusb_dev->usb_dev; | 184 | struct usb_device *usb_dev = wusb_dev->usb_dev; |
185 | u8 key_index = wusb_key_index(wusbhc->gtk_index, | ||
186 | WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); | ||
188 | 187 | ||
189 | return usb_control_msg( | 188 | return usb_control_msg( |
190 | usb_dev, usb_sndctrlpipe(usb_dev, 0), | 189 | usb_dev, usb_sndctrlpipe(usb_dev, 0), |
191 | USB_REQ_SET_DESCRIPTOR, | 190 | USB_REQ_SET_DESCRIPTOR, |
192 | USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, | 191 | USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, |
193 | USB_DT_KEY << 8 | wusbhc->gtk_index, 0, | 192 | USB_DT_KEY << 8 | key_index, 0, |
194 | &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, | 193 | &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, |
195 | 1000); | 194 | 1000); |
196 | } | 195 | } |
@@ -520,24 +519,55 @@ error_kzalloc: | |||
520 | * Once all connected and authenticated devices have received the new | 519 | * Once all connected and authenticated devices have received the new |
521 | * GTK, switch the host to using it. | 520 | * GTK, switch the host to using it. |
522 | */ | 521 | */ |
523 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work) | 522 | static void wusbhc_gtk_rekey_work(struct work_struct *work) |
524 | { | 523 | { |
525 | struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); | 524 | struct wusbhc *wusbhc = container_of(work, |
525 | struct wusbhc, gtk_rekey_work); | ||
526 | size_t key_size = sizeof(wusbhc->gtk.data); | 526 | size_t key_size = sizeof(wusbhc->gtk.data); |
527 | int port_idx; | ||
528 | struct wusb_dev *wusb_dev, *wusb_dev_next; | ||
529 | LIST_HEAD(rekey_list); | ||
527 | 530 | ||
528 | mutex_lock(&wusbhc->mutex); | 531 | mutex_lock(&wusbhc->mutex); |
532 | /* generate the new key */ | ||
533 | wusbhc_generate_gtk(wusbhc); | ||
534 | /* roll the gtk index. */ | ||
535 | wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1); | ||
536 | /* | ||
537 | * Save all connected devices on a list while holding wusbhc->mutex and | ||
538 | * take a reference to each one. Then submit the set key request to | ||
539 | * them after releasing the lock in order to avoid a deadlock. | ||
540 | */ | ||
541 | for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) { | ||
542 | wusb_dev = wusbhc->port[port_idx].wusb_dev; | ||
543 | if (!wusb_dev || !wusb_dev->usb_dev | ||
544 | || !wusb_dev->usb_dev->authenticated) | ||
545 | continue; | ||
529 | 546 | ||
530 | if (--wusbhc->pending_set_gtks == 0) | 547 | wusb_dev_get(wusb_dev); |
531 | wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); | 548 | list_add_tail(&wusb_dev->rekey_node, &rekey_list); |
532 | 549 | } | |
533 | mutex_unlock(&wusbhc->mutex); | 550 | mutex_unlock(&wusbhc->mutex); |
534 | } | ||
535 | 551 | ||
536 | static void wusbhc_set_gtk_callback(struct urb *urb) | 552 | /* Submit the rekey requests without holding wusbhc->mutex. */ |
537 | { | 553 | list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list, |
538 | struct wusbhc *wusbhc = urb->context; | 554 | rekey_node) { |
555 | list_del_init(&wusb_dev->rekey_node); | ||
556 | dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n", | ||
557 | __func__, wusb_dev->port_idx); | ||
558 | |||
559 | if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) { | ||
560 | dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n", | ||
561 | __func__, wusb_dev->port_idx); | ||
562 | } | ||
563 | wusb_dev_put(wusb_dev); | ||
564 | } | ||
539 | 565 | ||
540 | queue_work(wusbd, &wusbhc->gtk_rekey_done_work); | 566 | /* Switch the host controller to use the new GTK. */ |
567 | mutex_lock(&wusbhc->mutex); | ||
568 | wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, | ||
569 | &wusbhc->gtk.descr.bKeyData, key_size); | ||
570 | mutex_unlock(&wusbhc->mutex); | ||
541 | } | 571 | } |
542 | 572 | ||
543 | /** | 573 | /** |
@@ -553,26 +583,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb) | |||
553 | */ | 583 | */ |
554 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc) | 584 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc) |
555 | { | 585 | { |
556 | static const size_t key_size = sizeof(wusbhc->gtk.data); | 586 | /* |
557 | int p; | 587 | * We need to submit a URB to the downstream WUSB devices in order to |
558 | 588 | * change the group key. This can't be done while holding the | |
559 | wusbhc_generate_gtk(wusbhc); | 589 | * wusbhc->mutex since that is also taken in the urb_enqueue routine |
560 | 590 | * and will cause a deadlock. Instead, queue a work item to do | |
561 | for (p = 0; p < wusbhc->ports_max; p++) { | 591 | * it when the lock is not held |
562 | struct wusb_dev *wusb_dev; | 592 | */ |
563 | 593 | queue_work(wusbd, &wusbhc->gtk_rekey_work); | |
564 | wusb_dev = wusbhc->port[p].wusb_dev; | ||
565 | if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated) | ||
566 | continue; | ||
567 | |||
568 | usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, | ||
569 | usb_sndctrlpipe(wusb_dev->usb_dev, 0), | ||
570 | (void *)wusb_dev->set_gtk_req, | ||
571 | &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, | ||
572 | wusbhc_set_gtk_callback, wusbhc); | ||
573 | if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) | ||
574 | wusbhc->pending_set_gtks++; | ||
575 | } | ||
576 | if (wusbhc->pending_set_gtks == 0) | ||
577 | wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); | ||
578 | } | 594 | } |
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h index 711b1952b114..6bd3b819a6b5 100644 --- a/drivers/usb/wusbcore/wusbhc.h +++ b/drivers/usb/wusbcore/wusbhc.h | |||
@@ -97,6 +97,7 @@ struct wusb_dev { | |||
97 | struct kref refcnt; | 97 | struct kref refcnt; |
98 | struct wusbhc *wusbhc; | 98 | struct wusbhc *wusbhc; |
99 | struct list_head cack_node; /* Connect-Ack list */ | 99 | struct list_head cack_node; /* Connect-Ack list */ |
100 | struct list_head rekey_node; /* GTK rekey list */ | ||
100 | u8 port_idx; | 101 | u8 port_idx; |
101 | u8 addr; | 102 | u8 addr; |
102 | u8 beacon_type:4; | 103 | u8 beacon_type:4; |
@@ -107,8 +108,6 @@ struct wusb_dev { | |||
107 | struct usb_wireless_cap_descriptor *wusb_cap_descr; | 108 | struct usb_wireless_cap_descriptor *wusb_cap_descr; |
108 | struct uwb_mas_bm availability; | 109 | struct uwb_mas_bm availability; |
109 | struct work_struct devconnect_acked_work; | 110 | struct work_struct devconnect_acked_work; |
110 | struct urb *set_gtk_urb; | ||
111 | struct usb_ctrlrequest *set_gtk_req; | ||
112 | struct usb_device *usb_dev; | 111 | struct usb_device *usb_dev; |
113 | }; | 112 | }; |
114 | 113 | ||
@@ -296,8 +295,7 @@ struct wusbhc { | |||
296 | } __attribute__((packed)) gtk; | 295 | } __attribute__((packed)) gtk; |
297 | u8 gtk_index; | 296 | u8 gtk_index; |
298 | u32 gtk_tkid; | 297 | u32 gtk_tkid; |
299 | struct work_struct gtk_rekey_done_work; | 298 | struct work_struct gtk_rekey_work; |
300 | int pending_set_gtks; | ||
301 | 299 | ||
302 | struct usb_encryption_descriptor *ccm1_etd; | 300 | struct usb_encryption_descriptor *ccm1_etd; |
303 | }; | 301 | }; |
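
The wusbcore change above replaces the per-device URB submissions and their completion counting with a single work item that performs the whole GTK rekey using blocking control messages, taking wusbhc->mutex only for the short bookkeeping phases. A minimal sketch of that deferral pattern follows; the names (rekey_ctx, rekey_work_fn, request_rekey) are illustrative, not the wusbcore API.

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct rekey_ctx {
	struct mutex lock;		/* protects the key state */
	struct work_struct rekey_work;	/* runs with the lock dropped */
};

static void rekey_work_fn(struct work_struct *work)
{
	struct rekey_ctx *ctx = container_of(work, struct rekey_ctx, rekey_work);

	mutex_lock(&ctx->lock);
	/* update the key material and snapshot the devices to rekey */
	mutex_unlock(&ctx->lock);

	/* issue the blocking per-device requests without the lock held */
}

static void rekey_ctx_init(struct rekey_ctx *ctx)
{
	mutex_init(&ctx->lock);
	INIT_WORK(&ctx->rekey_work, rekey_work_fn);
}

/* Callers that may already hold ctx->lock only queue the work item. */
static void request_rekey(struct rekey_ctx *ctx)
{
	schedule_work(&ctx->rekey_work);
}

Deferring to a workqueue keeps the slow, sleeping control transfers out of any path that already holds wusbhc->mutex, which is the deadlock the new comment in wusbhc_gtk_rekey() describes.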
diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 9dbea2223401..7d44d669d5b6 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c | |||
@@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos; | |||
91 | #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 | 91 | #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 |
92 | #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 | 92 | #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 |
93 | 93 | ||
94 | #define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp))) | ||
95 | |||
96 | static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value) | ||
97 | { | ||
98 | u32 bpp = info->var.bits_per_pixel; | ||
99 | |||
100 | return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp); | ||
101 | } | ||
102 | |||
94 | /* | 103 | /* |
95 | * Set a single color register. The values supplied are already | 104 | * Set a single color register. The values supplied are already |
96 | * rounded down to the hardware's capabilities (according to the | 105 | * rounded down to the hardware's capabilities (according to the |
@@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | |||
120 | mask <<= info->var.transp.offset; | 129 | mask <<= info->var.transp.offset; |
121 | value |= mask; | 130 | value |= mask; |
122 | } | 131 | } |
123 | pal[regno] = value; | 132 | pal[regno] = offb_cmap_byteswap(info, value); |
124 | return 0; | 133 | return 0; |
125 | } | 134 | } |
126 | 135 | ||
@@ -301,7 +310,7 @@ static struct fb_ops offb_ops = { | |||
301 | static void __iomem *offb_map_reg(struct device_node *np, int index, | 310 | static void __iomem *offb_map_reg(struct device_node *np, int index, |
302 | unsigned long offset, unsigned long size) | 311 | unsigned long offset, unsigned long size) |
303 | { | 312 | { |
304 | const u32 *addrp; | 313 | const __be32 *addrp; |
305 | u64 asize, taddr; | 314 | u64 asize, taddr; |
306 | unsigned int flags; | 315 | unsigned int flags; |
307 | 316 | ||
@@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp | |||
369 | } | 378 | } |
370 | of_node_put(pciparent); | 379 | of_node_put(pciparent); |
371 | } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { | 380 | } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { |
372 | const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; | 381 | #ifdef __BIG_ENDIAN |
382 | const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; | ||
383 | #else | ||
384 | const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 }; | ||
385 | #endif | ||
373 | u64 io_addr = of_translate_address(dp, io_of_addr); | 386 | u64 io_addr = of_translate_address(dp, io_of_addr); |
374 | if (io_addr != OF_BAD_ADDR) { | 387 | if (io_addr != OF_BAD_ADDR) { |
375 | par->cmap_adr = ioremap(io_addr + 0x3c8, 2); | 388 | par->cmap_adr = ioremap(io_addr + 0x3c8, 2); |
@@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) | |||
535 | unsigned int flags, rsize, addr_prop = 0; | 548 | unsigned int flags, rsize, addr_prop = 0; |
536 | unsigned long max_size = 0; | 549 | unsigned long max_size = 0; |
537 | u64 rstart, address = OF_BAD_ADDR; | 550 | u64 rstart, address = OF_BAD_ADDR; |
538 | const u32 *pp, *addrp, *up; | 551 | const __be32 *pp, *addrp, *up; |
539 | u64 asize; | 552 | u64 asize; |
540 | int foreign_endian = 0; | 553 | int foreign_endian = 0; |
541 | 554 | ||
@@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) | |||
551 | if (pp == NULL) | 564 | if (pp == NULL) |
552 | pp = of_get_property(dp, "depth", &len); | 565 | pp = of_get_property(dp, "depth", &len); |
553 | if (pp && len == sizeof(u32)) | 566 | if (pp && len == sizeof(u32)) |
554 | depth = *pp; | 567 | depth = be32_to_cpup(pp); |
555 | 568 | ||
556 | pp = of_get_property(dp, "linux,bootx-width", &len); | 569 | pp = of_get_property(dp, "linux,bootx-width", &len); |
557 | if (pp == NULL) | 570 | if (pp == NULL) |
558 | pp = of_get_property(dp, "width", &len); | 571 | pp = of_get_property(dp, "width", &len); |
559 | if (pp && len == sizeof(u32)) | 572 | if (pp && len == sizeof(u32)) |
560 | width = *pp; | 573 | width = be32_to_cpup(pp); |
561 | 574 | ||
562 | pp = of_get_property(dp, "linux,bootx-height", &len); | 575 | pp = of_get_property(dp, "linux,bootx-height", &len); |
563 | if (pp == NULL) | 576 | if (pp == NULL) |
564 | pp = of_get_property(dp, "height", &len); | 577 | pp = of_get_property(dp, "height", &len); |
565 | if (pp && len == sizeof(u32)) | 578 | if (pp && len == sizeof(u32)) |
566 | height = *pp; | 579 | height = be32_to_cpup(pp); |
567 | 580 | ||
568 | pp = of_get_property(dp, "linux,bootx-linebytes", &len); | 581 | pp = of_get_property(dp, "linux,bootx-linebytes", &len); |
569 | if (pp == NULL) | 582 | if (pp == NULL) |
570 | pp = of_get_property(dp, "linebytes", &len); | 583 | pp = of_get_property(dp, "linebytes", &len); |
571 | if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) | 584 | if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) |
572 | pitch = *pp; | 585 | pitch = be32_to_cpup(pp); |
573 | else | 586 | else |
574 | pitch = width * ((depth + 7) / 8); | 587 | pitch = width * ((depth + 7) / 8); |
575 | 588 | ||
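
The offb changes above stop treating device-tree cell values as native-endian: properties are read through __be32 pointers and converted with be32_to_cpup(), and palette values are byte-swapped on little-endian hosts. A hedged sketch of the property-read pattern; the helper name and the default-value handling are examples only.

#include <linux/of.h>

/* Device-tree cells are stored big-endian; convert before using them. */
static u32 read_dt_u32(struct device_node *np, const char *name, u32 def)
{
	const __be32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len == sizeof(u32))
		return be32_to_cpup(prop);
	return def;
}

Read this way, depth, width, height and linebytes come out the same on big- and little-endian kernels.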
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c444654fc33f..5c4a95b516cf 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb) | |||
285 | { | 285 | { |
286 | __le32 actual = cpu_to_le32(vb->num_pages); | 286 | __le32 actual = cpu_to_le32(vb->num_pages); |
287 | 287 | ||
288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, | 288 | virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, |
289 | &actual); | 289 | &actual); |
290 | } | 290 | } |
291 | 291 | ||
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index a6a2cebb2587..cafa973c43be 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/watchdog.h> | 19 | #include <linux/watchdog.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
22 | #include <linux/miscdevice.h> | ||
23 | 22 | ||
24 | #define PM_RSTC 0x1c | 23 | #define PM_RSTC 0x1c |
25 | #define PM_WDOG 0x24 | 24 | #define PM_WDOG 0x24 |
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index 833e81311848..d1d07f2f69df 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c | |||
@@ -28,7 +28,6 @@ | |||
28 | 28 | ||
29 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/watchdog.h> | 31 | #include <linux/watchdog.h> |
33 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
34 | #include <linux/io.h> | 33 | #include <linux/io.h> |
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c index 70a240297c6d..07f88f54e5c0 100644 --- a/drivers/watchdog/ie6xx_wdt.c +++ b/drivers/watchdog/ie6xx_wdt.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
30 | #include <linux/watchdog.h> | 30 | #include <linux/watchdog.h> |
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
33 | #include <linux/debugfs.h> | 32 | #include <linux/debugfs.h> |
34 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c index 2de486a7eea1..3aa50cfa335f 100644 --- a/drivers/watchdog/jz4740_wdt.c +++ b/drivers/watchdog/jz4740_wdt.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/moduleparam.h> | 17 | #include <linux/moduleparam.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/miscdevice.h> | ||
21 | #include <linux/watchdog.h> | 20 | #include <linux/watchdog.h> |
22 | #include <linux/init.h> | 21 | #include <linux/init.h> |
23 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index a1a3638c579c..20dc73844737 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/moduleparam.h> | 28 | #include <linux/moduleparam.h> |
29 | #include <linux/miscdevice.h> | ||
30 | #include <linux/uaccess.h> | 29 | #include <linux/uaccess.h> |
31 | #include <linux/watchdog.h> | 30 | #include <linux/watchdog.h> |
32 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 6d4f3998e1f6..bdb3f4a5b27c 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/miscdevice.h> | ||
23 | #include <linux/watchdog.h> | 22 | #include <linux/watchdog.h> |
24 | #include <linux/init.h> | 23 | #include <linux/init.h> |
25 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 44edca66d564..f7722a424676 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/moduleparam.h> | 16 | #include <linux/moduleparam.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/miscdevice.h> | ||
20 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
21 | #include <linux/watchdog.h> | 20 | #include <linux/watchdog.h> |
22 | #include <linux/init.h> | 21 | #include <linux/init.h> |
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 1bdcc313e1d9..5bec20f5dc2d 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/moduleparam.h> | 23 | #include <linux/moduleparam.h> |
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/miscdevice.h> | ||
27 | #include <linux/watchdog.h> | 26 | #include <linux/watchdog.h> |
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 53d37fea183e..d92c2d5859ce 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/watchdog.h> | 18 | #include <linux/watchdog.h> |
19 | #include <linux/miscdevice.h> | ||
20 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
21 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
22 | 21 | ||
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 3b9fff9dcf65..131193a7acdf 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c | |||
@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void) | |||
409 | #if defined CONFIG_PNP | 409 | #if defined CONFIG_PNP |
410 | /* now that the user has specified an IO port and we haven't detected | 410 | /* now that the user has specified an IO port and we haven't detected |
411 | * any devices, disable pnp support */ | 411 | * any devices, disable pnp support */ |
412 | if (isapnp) | ||
413 | pnp_unregister_driver(&scl200wdt_pnp_driver); | ||
412 | isapnp = 0; | 414 | isapnp = 0; |
413 | pnp_unregister_driver(&scl200wdt_pnp_driver); | ||
414 | #endif | 415 | #endif |
415 | 416 | ||
416 | if (!request_region(io, io_len, SC1200_MODULE_NAME)) { | 417 | if (!request_region(io, io_len, SC1200_MODULE_NAME)) { |
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index f9b8e06f3558..af3528f84d65 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/miscdevice.h> | ||
30 | #include <linux/watchdog.h> | 29 | #include <linux/watchdog.h> |
31 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
32 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index ef2638fee4a8..c04a1aa158e2 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/moduleparam.h> | 42 | #include <linux/moduleparam.h> |
43 | #include <linux/types.h> | 43 | #include <linux/types.h> |
44 | #include <linux/timer.h> | 44 | #include <linux/timer.h> |
45 | #include <linux/miscdevice.h> | ||
46 | #include <linux/watchdog.h> | 45 | #include <linux/watchdog.h> |
47 | #include <linux/notifier.h> | 46 | #include <linux/notifier.h> |
48 | #include <linux/reboot.h> | 47 | #include <linux/reboot.h> |
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c index d667f6b51d35..bb64ae3f47da 100644 --- a/drivers/watchdog/stmp3xxx_rtc_wdt.c +++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/miscdevice.h> | ||
16 | #include <linux/watchdog.h> | 15 | #include <linux/watchdog.h> |
17 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
18 | #include <linux/stmp3xxx_rtc_wdt.h> | 17 | #include <linux/stmp3xxx_rtc_wdt.h> |
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index 0fd0e8ae62a8..6a447e321dd0 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/miscdevice.h> | ||
17 | #include <linux/watchdog.h> | 16 | #include <linux/watchdog.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
19 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c index e029b5768f2c..5aed9d7ad47e 100644 --- a/drivers/watchdog/ux500_wdt.c +++ b/drivers/watchdog/ux500_wdt.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/miscdevice.h> | ||
16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
17 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
18 | #include <linux/watchdog.h> | 17 | #include <linux/watchdog.h> |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 55ea73f7c70b..4c02e2b94103 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages) | |||
350 | 350 | ||
351 | pfn = page_to_pfn(page); | 351 | pfn = page_to_pfn(page); |
352 | 352 | ||
353 | set_phys_to_machine(pfn, frame_list[i]); | ||
354 | |||
355 | #ifdef CONFIG_XEN_HAVE_PVMMU | 353 | #ifdef CONFIG_XEN_HAVE_PVMMU |
356 | /* Link back into the page tables if not highmem. */ | 354 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
357 | if (xen_pv_domain() && !PageHighMem(page)) { | 355 | set_phys_to_machine(pfn, frame_list[i]); |
358 | int ret; | 356 | |
359 | ret = HYPERVISOR_update_va_mapping( | 357 | /* Link back into the page tables if not highmem. */ |
360 | (unsigned long)__va(pfn << PAGE_SHIFT), | 358 | if (!PageHighMem(page)) { |
361 | mfn_pte(frame_list[i], PAGE_KERNEL), | 359 | int ret; |
362 | 0); | 360 | ret = HYPERVISOR_update_va_mapping( |
363 | BUG_ON(ret); | 361 | (unsigned long)__va(pfn << PAGE_SHIFT), |
362 | mfn_pte(frame_list[i], PAGE_KERNEL), | ||
363 | 0); | ||
364 | BUG_ON(ret); | ||
365 | } | ||
364 | } | 366 | } |
365 | #endif | 367 | #endif |
366 | 368 | ||
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
378 | enum bp_state state = BP_DONE; | 380 | enum bp_state state = BP_DONE; |
379 | unsigned long pfn, i; | 381 | unsigned long pfn, i; |
380 | struct page *page; | 382 | struct page *page; |
381 | struct page *scratch_page; | ||
382 | int ret; | 383 | int ret; |
383 | struct xen_memory_reservation reservation = { | 384 | struct xen_memory_reservation reservation = { |
384 | .address_bits = 0, | 385 | .address_bits = 0, |
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
411 | 412 | ||
412 | scrub_page(page); | 413 | scrub_page(page); |
413 | 414 | ||
415 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
414 | /* | 416 | /* |
415 | * Ballooned out frames are effectively replaced with | 417 | * Ballooned out frames are effectively replaced with |
416 | * a scratch frame. Ensure direct mappings and the | 418 | * a scratch frame. Ensure direct mappings and the |
417 | * p2m are consistent. | 419 | * p2m are consistent. |
418 | */ | 420 | */ |
419 | scratch_page = get_balloon_scratch_page(); | ||
420 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
421 | if (xen_pv_domain() && !PageHighMem(page)) { | ||
422 | ret = HYPERVISOR_update_va_mapping( | ||
423 | (unsigned long)__va(pfn << PAGE_SHIFT), | ||
424 | pfn_pte(page_to_pfn(scratch_page), | ||
425 | PAGE_KERNEL_RO), 0); | ||
426 | BUG_ON(ret); | ||
427 | } | ||
428 | #endif | ||
429 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 421 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
430 | unsigned long p; | 422 | unsigned long p; |
423 | struct page *scratch_page = get_balloon_scratch_page(); | ||
424 | |||
425 | if (!PageHighMem(page)) { | ||
426 | ret = HYPERVISOR_update_va_mapping( | ||
427 | (unsigned long)__va(pfn << PAGE_SHIFT), | ||
428 | pfn_pte(page_to_pfn(scratch_page), | ||
429 | PAGE_KERNEL_RO), 0); | ||
430 | BUG_ON(ret); | ||
431 | } | ||
431 | p = page_to_pfn(scratch_page); | 432 | p = page_to_pfn(scratch_page); |
432 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); | 433 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); |
434 | |||
435 | put_balloon_scratch_page(); | ||
433 | } | 436 | } |
434 | put_balloon_scratch_page(); | 437 | #endif |
435 | 438 | ||
436 | balloon_append(pfn_to_page(pfn)); | 439 | balloon_append(pfn_to_page(pfn)); |
437 | } | 440 | } |
@@ -627,15 +630,17 @@ static int __init balloon_init(void) | |||
627 | if (!xen_domain()) | 630 | if (!xen_domain()) |
628 | return -ENODEV; | 631 | return -ENODEV; |
629 | 632 | ||
630 | for_each_online_cpu(cpu) | 633 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
631 | { | 634 | for_each_online_cpu(cpu) |
632 | per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); | 635 | { |
633 | if (per_cpu(balloon_scratch_page, cpu) == NULL) { | 636 | per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); |
634 | pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); | 637 | if (per_cpu(balloon_scratch_page, cpu) == NULL) { |
635 | return -ENOMEM; | 638 | pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); |
639 | return -ENOMEM; | ||
640 | } | ||
636 | } | 641 | } |
642 | register_cpu_notifier(&balloon_cpu_notifier); | ||
637 | } | 643 | } |
638 | register_cpu_notifier(&balloon_cpu_notifier); | ||
639 | 644 | ||
640 | pr_info("Initialising balloon driver\n"); | 645 | pr_info("Initialising balloon driver\n"); |
641 | 646 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 028387192b60..aa846a48f400 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void) | |||
1176 | gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, | 1176 | gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, |
1177 | PAGE_SIZE * max_nr_gframes); | 1177 | PAGE_SIZE * max_nr_gframes); |
1178 | if (gnttab_shared.addr == NULL) { | 1178 | if (gnttab_shared.addr == NULL) { |
1179 | pr_warn("Failed to ioremap gnttab share frames!\n"); | 1179 | pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n", |
1180 | xen_hvm_resume_frames); | ||
1180 | return -ENOMEM; | 1181 | return -ENOMEM; |
1181 | } | 1182 | } |
1182 | } | 1183 | } |
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8e74590fa1bb..569a13b9e856 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma) | |||
533 | { | 533 | { |
534 | struct page **pages = vma->vm_private_data; | 534 | struct page **pages = vma->vm_private_data; |
535 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 535 | int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
536 | int rc; | ||
536 | 537 | ||
537 | if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) | 538 | if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) |
538 | return; | 539 | return; |
539 | 540 | ||
540 | xen_unmap_domain_mfn_range(vma, numpgs, pages); | 541 | rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); |
541 | free_xenballooned_pages(numpgs, pages); | 542 | if (rc == 0) |
543 | free_xenballooned_pages(numpgs, pages); | ||
544 | else | ||
545 | pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", | ||
546 | numpgs, rc); | ||
542 | kfree(pages); | 547 | kfree(pages); |
543 | } | 548 | } |
544 | 549 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx) | |||
244 | int i; | 244 | int i; |
245 | 245 | ||
246 | for (i = 0; i < ctx->nr_pages; i++) { | 246 | for (i = 0; i < ctx->nr_pages; i++) { |
247 | struct page *page; | ||
247 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, | 248 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
248 | page_count(ctx->ring_pages[i])); | 249 | page_count(ctx->ring_pages[i])); |
249 | put_page(ctx->ring_pages[i]); | 250 | page = ctx->ring_pages[i]; |
251 | if (!page) | ||
252 | continue; | ||
253 | ctx->ring_pages[i] = NULL; | ||
254 | put_page(page); | ||
250 | } | 255 | } |
251 | 256 | ||
252 | put_aio_ring_file(ctx); | 257 | put_aio_ring_file(ctx); |
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, | |||
280 | unsigned long flags; | 285 | unsigned long flags; |
281 | int rc; | 286 | int rc; |
282 | 287 | ||
288 | rc = 0; | ||
289 | |||
290 | /* Make sure the old page hasn't already been changed */ | ||
291 | spin_lock(&mapping->private_lock); | ||
292 | ctx = mapping->private_data; | ||
293 | if (ctx) { | ||
294 | pgoff_t idx; | ||
295 | spin_lock_irqsave(&ctx->completion_lock, flags); | ||
296 | idx = old->index; | ||
297 | if (idx < (pgoff_t)ctx->nr_pages) { | ||
298 | if (ctx->ring_pages[idx] != old) | ||
299 | rc = -EAGAIN; | ||
300 | } else | ||
301 | rc = -EINVAL; | ||
302 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | ||
303 | } else | ||
304 | rc = -EINVAL; | ||
305 | spin_unlock(&mapping->private_lock); | ||
306 | |||
307 | if (rc != 0) | ||
308 | return rc; | ||
309 | |||
283 | /* Writeback must be complete */ | 310 | /* Writeback must be complete */ |
284 | BUG_ON(PageWriteback(old)); | 311 | BUG_ON(PageWriteback(old)); |
285 | put_page(old); | 312 | get_page(new); |
286 | 313 | ||
287 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); | 314 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); |
288 | if (rc != MIGRATEPAGE_SUCCESS) { | 315 | if (rc != MIGRATEPAGE_SUCCESS) { |
289 | get_page(old); | 316 | put_page(new); |
290 | return rc; | 317 | return rc; |
291 | } | 318 | } |
292 | 319 | ||
293 | get_page(new); | ||
294 | |||
295 | /* We can potentially race against kioctx teardown here. Use the | 320 | /* We can potentially race against kioctx teardown here. Use the |
296 | * address_space's private data lock to protect the mapping's | 321 | * address_space's private data lock to protect the mapping's |
297 | * private_data. | 322 | * private_data. |
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, | |||
303 | spin_lock_irqsave(&ctx->completion_lock, flags); | 328 | spin_lock_irqsave(&ctx->completion_lock, flags); |
304 | migrate_page_copy(new, old); | 329 | migrate_page_copy(new, old); |
305 | idx = old->index; | 330 | idx = old->index; |
306 | if (idx < (pgoff_t)ctx->nr_pages) | 331 | if (idx < (pgoff_t)ctx->nr_pages) { |
307 | ctx->ring_pages[idx] = new; | 332 | /* And only do the move if things haven't changed */ |
333 | if (ctx->ring_pages[idx] == old) | ||
334 | ctx->ring_pages[idx] = new; | ||
335 | else | ||
336 | rc = -EAGAIN; | ||
337 | } else | ||
338 | rc = -EINVAL; | ||
308 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | 339 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
309 | } else | 340 | } else |
310 | rc = -EBUSY; | 341 | rc = -EBUSY; |
311 | spin_unlock(&mapping->private_lock); | 342 | spin_unlock(&mapping->private_lock); |
312 | 343 | ||
344 | if (rc == MIGRATEPAGE_SUCCESS) | ||
345 | put_page(old); | ||
346 | else | ||
347 | put_page(new); | ||
348 | |||
313 | return rc; | 349 | return rc; |
314 | } | 350 | } |
315 | #endif | 351 | #endif |
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
326 | struct aio_ring *ring; | 362 | struct aio_ring *ring; |
327 | unsigned nr_events = ctx->max_reqs; | 363 | unsigned nr_events = ctx->max_reqs; |
328 | struct mm_struct *mm = current->mm; | 364 | struct mm_struct *mm = current->mm; |
329 | unsigned long size, populate; | 365 | unsigned long size, unused; |
330 | int nr_pages; | 366 | int nr_pages; |
331 | int i; | 367 | int i; |
332 | struct file *file; | 368 | struct file *file; |
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
347 | return -EAGAIN; | 383 | return -EAGAIN; |
348 | } | 384 | } |
349 | 385 | ||
386 | ctx->aio_ring_file = file; | ||
387 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | ||
388 | / sizeof(struct io_event); | ||
389 | |||
390 | ctx->ring_pages = ctx->internal_pages; | ||
391 | if (nr_pages > AIO_RING_PAGES) { | ||
392 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | ||
393 | GFP_KERNEL); | ||
394 | if (!ctx->ring_pages) { | ||
395 | put_aio_ring_file(ctx); | ||
396 | return -ENOMEM; | ||
397 | } | ||
398 | } | ||
399 | |||
350 | for (i = 0; i < nr_pages; i++) { | 400 | for (i = 0; i < nr_pages; i++) { |
351 | struct page *page; | 401 | struct page *page; |
352 | page = find_or_create_page(file->f_inode->i_mapping, | 402 | page = find_or_create_page(file->f_inode->i_mapping, |
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
358 | SetPageUptodate(page); | 408 | SetPageUptodate(page); |
359 | SetPageDirty(page); | 409 | SetPageDirty(page); |
360 | unlock_page(page); | 410 | unlock_page(page); |
411 | |||
412 | ctx->ring_pages[i] = page; | ||
361 | } | 413 | } |
362 | ctx->aio_ring_file = file; | 414 | ctx->nr_pages = i; |
363 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | ||
364 | / sizeof(struct io_event); | ||
365 | 415 | ||
366 | ctx->ring_pages = ctx->internal_pages; | 416 | if (unlikely(i != nr_pages)) { |
367 | if (nr_pages > AIO_RING_PAGES) { | 417 | aio_free_ring(ctx); |
368 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | 418 | return -EAGAIN; |
369 | GFP_KERNEL); | ||
370 | if (!ctx->ring_pages) { | ||
371 | put_aio_ring_file(ctx); | ||
372 | return -ENOMEM; | ||
373 | } | ||
374 | } | 419 | } |
375 | 420 | ||
376 | ctx->mmap_size = nr_pages * PAGE_SIZE; | 421 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
379 | down_write(&mm->mmap_sem); | 424 | down_write(&mm->mmap_sem); |
380 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, | 425 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
381 | PROT_READ | PROT_WRITE, | 426 | PROT_READ | PROT_WRITE, |
382 | MAP_SHARED | MAP_POPULATE, 0, &populate); | 427 | MAP_SHARED, 0, &unused); |
428 | up_write(&mm->mmap_sem); | ||
383 | if (IS_ERR((void *)ctx->mmap_base)) { | 429 | if (IS_ERR((void *)ctx->mmap_base)) { |
384 | up_write(&mm->mmap_sem); | ||
385 | ctx->mmap_size = 0; | 430 | ctx->mmap_size = 0; |
386 | aio_free_ring(ctx); | 431 | aio_free_ring(ctx); |
387 | return -EAGAIN; | 432 | return -EAGAIN; |
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
389 | 434 | ||
390 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); | 435 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
391 | 436 | ||
392 | /* We must do this while still holding mmap_sem for write, as we | ||
393 | * need to be protected against userspace attempting to mremap() | ||
394 | * or munmap() the ring buffer. | ||
395 | */ | ||
396 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, | ||
397 | 1, 0, ctx->ring_pages, NULL); | ||
398 | |||
399 | /* Dropping the reference here is safe as the page cache will hold | ||
400 | * onto the pages for us. It is also required so that page migration | ||
401 | * can unmap the pages and get the right reference count. | ||
402 | */ | ||
403 | for (i = 0; i < ctx->nr_pages; i++) | ||
404 | put_page(ctx->ring_pages[i]); | ||
405 | |||
406 | up_write(&mm->mmap_sem); | ||
407 | |||
408 | if (unlikely(ctx->nr_pages != nr_pages)) { | ||
409 | aio_free_ring(ctx); | ||
410 | return -EAGAIN; | ||
411 | } | ||
412 | |||
413 | ctx->user_id = ctx->mmap_base; | 437 | ctx->user_id = ctx->mmap_base; |
414 | ctx->nr_events = nr_events; /* trusted copy */ | 438 | ctx->nr_events = nr_events; /* trusted copy */ |
415 | 439 | ||
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
652 | aio_nr += ctx->max_reqs; | 676 | aio_nr += ctx->max_reqs; |
653 | spin_unlock(&aio_nr_lock); | 677 | spin_unlock(&aio_nr_lock); |
654 | 678 | ||
655 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ | 679 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
680 | percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ | ||
656 | 681 | ||
657 | err = ioctx_add_table(ctx, mm); | 682 | err = ioctx_add_table(ctx, mm); |
658 | if (err) | 683 | if (err) |
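
The fs/aio.c changes above make the ring pages owned by the page cache and the kioctx array from the start, and make aio_migratepage() revalidate ctx->ring_pages[idx] under completion_lock both before and after the copy, returning -EAGAIN when it lost a race. The reference discipline reduces to the sketch below; do_move() stands in for the real migration call and is purely hypothetical.

	get_page(new);				/* pin the replacement first */
	rc = do_move(mapping, new, old);	/* hypothetical move operation */
	if (rc == 0)
		put_page(old);			/* success: drop the old reference */
	else
		put_page(new);			/* failure: release the pin */

Taking the reference on the new page before the move and dropping exactly one of the two afterwards keeps the counts balanced on every exit path.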
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 45d98d01028f..9c01509dd8ab 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | |||
767 | if (!path) | 767 | if (!path) |
768 | return -ENOMEM; | 768 | return -ENOMEM; |
769 | 769 | ||
770 | if (metadata) { | ||
771 | key.objectid = bytenr; | ||
772 | key.type = BTRFS_METADATA_ITEM_KEY; | ||
773 | key.offset = offset; | ||
774 | } else { | ||
775 | key.objectid = bytenr; | ||
776 | key.type = BTRFS_EXTENT_ITEM_KEY; | ||
777 | key.offset = offset; | ||
778 | } | ||
779 | |||
780 | if (!trans) { | 770 | if (!trans) { |
781 | path->skip_locking = 1; | 771 | path->skip_locking = 1; |
782 | path->search_commit_root = 1; | 772 | path->search_commit_root = 1; |
783 | } | 773 | } |
774 | |||
775 | search_again: | ||
776 | key.objectid = bytenr; | ||
777 | key.offset = offset; | ||
778 | if (metadata) | ||
779 | key.type = BTRFS_METADATA_ITEM_KEY; | ||
780 | else | ||
781 | key.type = BTRFS_EXTENT_ITEM_KEY; | ||
782 | |||
784 | again: | 783 | again: |
785 | ret = btrfs_search_slot(trans, root->fs_info->extent_root, | 784 | ret = btrfs_search_slot(trans, root->fs_info->extent_root, |
786 | &key, path, 0, 0); | 785 | &key, path, 0, 0); |
@@ -788,7 +787,6 @@ again: | |||
788 | goto out_free; | 787 | goto out_free; |
789 | 788 | ||
790 | if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { | 789 | if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { |
791 | metadata = 0; | ||
792 | if (path->slots[0]) { | 790 | if (path->slots[0]) { |
793 | path->slots[0]--; | 791 | path->slots[0]--; |
794 | btrfs_item_key_to_cpu(path->nodes[0], &key, | 792 | btrfs_item_key_to_cpu(path->nodes[0], &key, |
@@ -855,7 +853,7 @@ again: | |||
855 | mutex_lock(&head->mutex); | 853 | mutex_lock(&head->mutex); |
856 | mutex_unlock(&head->mutex); | 854 | mutex_unlock(&head->mutex); |
857 | btrfs_put_delayed_ref(&head->node); | 855 | btrfs_put_delayed_ref(&head->node); |
858 | goto again; | 856 | goto search_again; |
859 | } | 857 | } |
860 | if (head->extent_op && head->extent_op->update_flags) | 858 | if (head->extent_op && head->extent_op->update_flags) |
861 | extent_flags |= head->extent_op->flags_to_set; | 859 | extent_flags |= head->extent_op->flags_to_set; |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a111622598b0..21da5762b0b1 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, | |||
2121 | 2121 | ||
2122 | err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); | 2122 | err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); |
2123 | if (err == -EINTR) | 2123 | if (err == -EINTR) |
2124 | goto out; | 2124 | goto out_drop_write; |
2125 | dentry = lookup_one_len(vol_args->name, parent, namelen); | 2125 | dentry = lookup_one_len(vol_args->name, parent, namelen); |
2126 | if (IS_ERR(dentry)) { | 2126 | if (IS_ERR(dentry)) { |
2127 | err = PTR_ERR(dentry); | 2127 | err = PTR_ERR(dentry); |
@@ -2284,6 +2284,7 @@ out_dput: | |||
2284 | dput(dentry); | 2284 | dput(dentry); |
2285 | out_unlock_dir: | 2285 | out_unlock_dir: |
2286 | mutex_unlock(&dir->i_mutex); | 2286 | mutex_unlock(&dir->i_mutex); |
2287 | out_drop_write: | ||
2287 | mnt_drop_write_file(file); | 2288 | mnt_drop_write_file(file); |
2288 | out: | 2289 | out: |
2289 | kfree(vol_args); | 2290 | kfree(vol_args); |
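
The snap_destroy fix above sends the -EINTR path to a label that skips the mutex_unlock but still drops the write reference. A small sketch of the unwind idiom, with hypothetical function and variable names:

static int do_locked_op(struct file *file, struct inode *dir)
{
	int err;

	err = mnt_want_write_file(file);
	if (err)
		return err;

	err = mutex_lock_killable(&dir->i_mutex);
	if (err)
		goto out_drop_write;	/* lock not taken: skip the unlock */

	/* ... the actual operation ... */

	mutex_unlock(&dir->i_mutex);
out_drop_write:
	mnt_drop_write_file(file);
	return err;
}

Each label undoes only the steps that completed before the failure, which is what the new out_drop_write label restores.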
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ce459a7cb16d..429c73c374b8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid) | |||
571 | root_objectid == BTRFS_CHUNK_TREE_OBJECTID || | 571 | root_objectid == BTRFS_CHUNK_TREE_OBJECTID || |
572 | root_objectid == BTRFS_DEV_TREE_OBJECTID || | 572 | root_objectid == BTRFS_DEV_TREE_OBJECTID || |
573 | root_objectid == BTRFS_TREE_LOG_OBJECTID || | 573 | root_objectid == BTRFS_TREE_LOG_OBJECTID || |
574 | root_objectid == BTRFS_CSUM_TREE_OBJECTID) | 574 | root_objectid == BTRFS_CSUM_TREE_OBJECTID || |
575 | root_objectid == BTRFS_UUID_TREE_OBJECTID || | ||
576 | root_objectid == BTRFS_QUOTA_TREE_OBJECTID) | ||
575 | return 1; | 577 | return 1; |
576 | return 0; | 578 | return 0; |
577 | } | 579 | } |
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) | |||
1264 | } | 1266 | } |
1265 | 1267 | ||
1266 | /* | 1268 | /* |
1267 | * helper to update/delete the 'address of tree root -> reloc tree' | 1269 | * helper to delete the 'address of tree root -> reloc tree' |
1268 | * mapping | 1270 | * mapping |
1269 | */ | 1271 | */ |
1270 | static int __update_reloc_root(struct btrfs_root *root, int del) | 1272 | static void __del_reloc_root(struct btrfs_root *root) |
1271 | { | 1273 | { |
1272 | struct rb_node *rb_node; | 1274 | struct rb_node *rb_node; |
1273 | struct mapping_node *node = NULL; | 1275 | struct mapping_node *node = NULL; |
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del) | |||
1275 | 1277 | ||
1276 | spin_lock(&rc->reloc_root_tree.lock); | 1278 | spin_lock(&rc->reloc_root_tree.lock); |
1277 | rb_node = tree_search(&rc->reloc_root_tree.rb_root, | 1279 | rb_node = tree_search(&rc->reloc_root_tree.rb_root, |
1278 | root->commit_root->start); | 1280 | root->node->start); |
1279 | if (rb_node) { | 1281 | if (rb_node) { |
1280 | node = rb_entry(rb_node, struct mapping_node, rb_node); | 1282 | node = rb_entry(rb_node, struct mapping_node, rb_node); |
1281 | rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); | 1283 | rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); |
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del) | |||
1283 | spin_unlock(&rc->reloc_root_tree.lock); | 1285 | spin_unlock(&rc->reloc_root_tree.lock); |
1284 | 1286 | ||
1285 | if (!node) | 1287 | if (!node) |
1286 | return 0; | 1288 | return; |
1287 | BUG_ON((struct btrfs_root *)node->data != root); | 1289 | BUG_ON((struct btrfs_root *)node->data != root); |
1288 | 1290 | ||
1289 | if (!del) { | 1291 | spin_lock(&root->fs_info->trans_lock); |
1290 | spin_lock(&rc->reloc_root_tree.lock); | 1292 | list_del_init(&root->root_list); |
1291 | node->bytenr = root->node->start; | 1293 | spin_unlock(&root->fs_info->trans_lock); |
1292 | rb_node = tree_insert(&rc->reloc_root_tree.rb_root, | 1294 | kfree(node); |
1293 | node->bytenr, &node->rb_node); | 1295 | } |
1294 | spin_unlock(&rc->reloc_root_tree.lock); | 1296 | |
1295 | if (rb_node) | 1297 | /* |
1296 | backref_tree_panic(rb_node, -EEXIST, node->bytenr); | 1298 | * helper to update the 'address of tree root -> reloc tree' |
1297 | } else { | 1299 | * mapping |
1298 | spin_lock(&root->fs_info->trans_lock); | 1300 | */ |
1299 | list_del_init(&root->root_list); | 1301 | static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) |
1300 | spin_unlock(&root->fs_info->trans_lock); | 1302 | { |
1301 | kfree(node); | 1303 | struct rb_node *rb_node; |
1304 | struct mapping_node *node = NULL; | ||
1305 | struct reloc_control *rc = root->fs_info->reloc_ctl; | ||
1306 | |||
1307 | spin_lock(&rc->reloc_root_tree.lock); | ||
1308 | rb_node = tree_search(&rc->reloc_root_tree.rb_root, | ||
1309 | root->node->start); | ||
1310 | if (rb_node) { | ||
1311 | node = rb_entry(rb_node, struct mapping_node, rb_node); | ||
1312 | rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); | ||
1302 | } | 1313 | } |
1314 | spin_unlock(&rc->reloc_root_tree.lock); | ||
1315 | |||
1316 | if (!node) | ||
1317 | return 0; | ||
1318 | BUG_ON((struct btrfs_root *)node->data != root); | ||
1319 | |||
1320 | spin_lock(&rc->reloc_root_tree.lock); | ||
1321 | node->bytenr = new_bytenr; | ||
1322 | rb_node = tree_insert(&rc->reloc_root_tree.rb_root, | ||
1323 | node->bytenr, &node->rb_node); | ||
1324 | spin_unlock(&rc->reloc_root_tree.lock); | ||
1325 | if (rb_node) | ||
1326 | backref_tree_panic(rb_node, -EEXIST, node->bytenr); | ||
1303 | return 0; | 1327 | return 0; |
1304 | } | 1328 | } |
1305 | 1329 | ||
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, | |||
1420 | { | 1444 | { |
1421 | struct btrfs_root *reloc_root; | 1445 | struct btrfs_root *reloc_root; |
1422 | struct btrfs_root_item *root_item; | 1446 | struct btrfs_root_item *root_item; |
1423 | int del = 0; | ||
1424 | int ret; | 1447 | int ret; |
1425 | 1448 | ||
1426 | if (!root->reloc_root) | 1449 | if (!root->reloc_root) |
@@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, | |||
1432 | if (root->fs_info->reloc_ctl->merge_reloc_tree && | 1455 | if (root->fs_info->reloc_ctl->merge_reloc_tree && |
1433 | btrfs_root_refs(root_item) == 0) { | 1456 | btrfs_root_refs(root_item) == 0) { |
1434 | root->reloc_root = NULL; | 1457 | root->reloc_root = NULL; |
1435 | del = 1; | 1458 | __del_reloc_root(reloc_root); |
1436 | } | 1459 | } |
1437 | 1460 | ||
1438 | __update_reloc_root(reloc_root, del); | ||
1439 | |||
1440 | if (reloc_root->commit_root != reloc_root->node) { | 1461 | if (reloc_root->commit_root != reloc_root->node) { |
1441 | btrfs_set_root_node(root_item, reloc_root->node); | 1462 | btrfs_set_root_node(root_item, reloc_root->node); |
1442 | free_extent_buffer(reloc_root->commit_root); | 1463 | free_extent_buffer(reloc_root->commit_root); |
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list) | |||
2287 | while (!list_empty(list)) { | 2308 | while (!list_empty(list)) { |
2288 | reloc_root = list_entry(list->next, struct btrfs_root, | 2309 | reloc_root = list_entry(list->next, struct btrfs_root, |
2289 | root_list); | 2310 | root_list); |
2290 | __update_reloc_root(reloc_root, 1); | 2311 | __del_reloc_root(reloc_root); |
2291 | free_extent_buffer(reloc_root->node); | 2312 | free_extent_buffer(reloc_root->node); |
2292 | free_extent_buffer(reloc_root->commit_root); | 2313 | free_extent_buffer(reloc_root->commit_root); |
2293 | kfree(reloc_root); | 2314 | kfree(reloc_root); |
@@ -2332,7 +2353,7 @@ again: | |||
2332 | 2353 | ||
2333 | ret = merge_reloc_root(rc, root); | 2354 | ret = merge_reloc_root(rc, root); |
2334 | if (ret) { | 2355 | if (ret) { |
2335 | __update_reloc_root(reloc_root, 1); | 2356 | __del_reloc_root(reloc_root); |
2336 | free_extent_buffer(reloc_root->node); | 2357 | free_extent_buffer(reloc_root->node); |
2337 | free_extent_buffer(reloc_root->commit_root); | 2358 | free_extent_buffer(reloc_root->commit_root); |
2338 | kfree(reloc_root); | 2359 | kfree(reloc_root); |
@@ -2388,6 +2409,13 @@ out: | |||
2388 | btrfs_std_error(root->fs_info, ret); | 2409 | btrfs_std_error(root->fs_info, ret); |
2389 | if (!list_empty(&reloc_roots)) | 2410 | if (!list_empty(&reloc_roots)) |
2390 | free_reloc_roots(&reloc_roots); | 2411 | free_reloc_roots(&reloc_roots); |
2412 | |||
2413 | /* new reloc root may be added */ | ||
2414 | mutex_lock(&root->fs_info->reloc_mutex); | ||
2415 | list_splice_init(&rc->reloc_roots, &reloc_roots); | ||
2416 | mutex_unlock(&root->fs_info->reloc_mutex); | ||
2417 | if (!list_empty(&reloc_roots)) | ||
2418 | free_reloc_roots(&reloc_roots); | ||
2391 | } | 2419 | } |
2392 | 2420 | ||
2393 | BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); | 2421 | BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); |
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, | |||
4522 | BUG_ON(rc->stage == UPDATE_DATA_PTRS && | 4550 | BUG_ON(rc->stage == UPDATE_DATA_PTRS && |
4523 | root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); | 4551 | root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); |
4524 | 4552 | ||
4553 | if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { | ||
4554 | if (buf == root->node) | ||
4555 | __update_reloc_root(root, cow->start); | ||
4556 | } | ||
4557 | |||
4525 | level = btrfs_header_level(buf); | 4558 | level = btrfs_header_level(buf); |
4526 | if (btrfs_header_generation(buf) <= | 4559 | if (btrfs_header_generation(buf) <= |
4527 | btrfs_root_last_snapshot(&root->root_item)) | 4560 | btrfs_root_last_snapshot(&root->root_item)) |
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6837fe87f3a6..945d1db98f26 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | |||
4723 | } | 4723 | } |
4724 | 4724 | ||
4725 | if (!access_ok(VERIFY_READ, arg->clone_sources, | 4725 | if (!access_ok(VERIFY_READ, arg->clone_sources, |
4726 | sizeof(*arg->clone_sources * | 4726 | sizeof(*arg->clone_sources) * |
4727 | arg->clone_sources_count))) { | 4727 | arg->clone_sources_count)) { |
4728 | ret = -EFAULT; | 4728 | ret = -EFAULT; |
4729 | goto out; | 4729 | goto out; |
4730 | } | 4730 | } |
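
The send.c fix above moves a parenthesis: the access_ok() length was computed as sizeof(*arg->clone_sources * arg->clone_sources_count), which is the size of the product's type, not the byte length of the array. A standalone illustration (types and values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long long *clone_sources = NULL;
	unsigned long count = 8;

	/* Misparenthesised: sizeof sees the whole product expression and
	 * reports the size of its (promoted) type, independent of count. */
	size_t wrong = sizeof(*clone_sources * count);

	/* Intended: element size scaled by the element count. */
	size_t right = sizeof(*clone_sources) * count;

	printf("wrong=%zu right=%zu\n", wrong, right);
	return 0;
}

On a typical LP64 build this prints wrong=8 right=64, which is why the original check could approve a user buffer far smaller than the range actually read.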
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2d8ac1bf0cf9..d71a11d13dfa 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
432 | } else { | 432 | } else { |
433 | printk(KERN_INFO "btrfs: setting nodatacow\n"); | 433 | printk(KERN_INFO "btrfs: setting nodatacow\n"); |
434 | } | 434 | } |
435 | info->compress_type = BTRFS_COMPRESS_NONE; | ||
436 | btrfs_clear_opt(info->mount_opt, COMPRESS); | 435 | btrfs_clear_opt(info->mount_opt, COMPRESS); |
437 | btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); | 436 | btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); |
438 | btrfs_set_opt(info->mount_opt, NODATACOW); | 437 | btrfs_set_opt(info->mount_opt, NODATACOW); |
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
461 | btrfs_set_fs_incompat(info, COMPRESS_LZO); | 460 | btrfs_set_fs_incompat(info, COMPRESS_LZO); |
462 | } else if (strncmp(args[0].from, "no", 2) == 0) { | 461 | } else if (strncmp(args[0].from, "no", 2) == 0) { |
463 | compress_type = "no"; | 462 | compress_type = "no"; |
464 | info->compress_type = BTRFS_COMPRESS_NONE; | ||
465 | btrfs_clear_opt(info->mount_opt, COMPRESS); | 463 | btrfs_clear_opt(info->mount_opt, COMPRESS); |
466 | btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); | 464 | btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); |
467 | compress_force = false; | 465 | compress_force = false; |
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
474 | btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); | 472 | btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); |
475 | pr_info("btrfs: force %s compression\n", | 473 | pr_info("btrfs: force %s compression\n", |
476 | compress_type); | 474 | compress_type); |
477 | } else | 475 | } else if (btrfs_test_opt(root, COMPRESS)) { |
478 | pr_info("btrfs: use %s compression\n", | 476 | pr_info("btrfs: use %s compression\n", |
479 | compress_type); | 477 | compress_type); |
478 | } | ||
480 | break; | 479 | break; |
481 | case Opt_ssd: | 480 | case Opt_ssd: |
482 | printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); | 481 | printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 1e561c059539..ec3ba43b9faa 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page) | |||
210 | if (err < 0) { | 210 | if (err < 0) { |
211 | SetPageError(page); | 211 | SetPageError(page); |
212 | goto out; | 212 | goto out; |
213 | } else if (err < PAGE_CACHE_SIZE) { | 213 | } else { |
214 | if (err < PAGE_CACHE_SIZE) { | ||
214 | /* zero fill remainder of page */ | 215 | /* zero fill remainder of page */ |
215 | zero_user_segment(page, err, PAGE_CACHE_SIZE); | 216 | zero_user_segment(page, err, PAGE_CACHE_SIZE); |
217 | } else { | ||
218 | flush_dcache_page(page); | ||
219 | } | ||
216 | } | 220 | } |
217 | SetPageUptodate(page); | 221 | SetPageUptodate(page); |
218 | 222 | ||
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9a8e396aed89..278fd2891288 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
978 | struct ceph_mds_reply_inode *ininfo; | 978 | struct ceph_mds_reply_inode *ininfo; |
979 | struct ceph_vino vino; | 979 | struct ceph_vino vino; |
980 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); | 980 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
981 | int i = 0; | ||
982 | int err = 0; | 981 | int err = 0; |
983 | 982 | ||
984 | dout("fill_trace %p is_dentry %d is_target %d\n", req, | 983 | dout("fill_trace %p is_dentry %d is_target %d\n", req, |
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1039 | } | 1038 | } |
1040 | } | 1039 | } |
1041 | 1040 | ||
1041 | if (rinfo->head->is_target) { | ||
1042 | vino.ino = le64_to_cpu(rinfo->targeti.in->ino); | ||
1043 | vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); | ||
1044 | |||
1045 | in = ceph_get_inode(sb, vino); | ||
1046 | if (IS_ERR(in)) { | ||
1047 | err = PTR_ERR(in); | ||
1048 | goto done; | ||
1049 | } | ||
1050 | req->r_target_inode = in; | ||
1051 | |||
1052 | err = fill_inode(in, &rinfo->targeti, NULL, | ||
1053 | session, req->r_request_started, | ||
1054 | (le32_to_cpu(rinfo->head->result) == 0) ? | ||
1055 | req->r_fmode : -1, | ||
1056 | &req->r_caps_reservation); | ||
1057 | if (err < 0) { | ||
1058 | pr_err("fill_inode badness %p %llx.%llx\n", | ||
1059 | in, ceph_vinop(in)); | ||
1060 | goto done; | ||
1061 | } | ||
1062 | } | ||
1063 | |||
1042 | /* | 1064 | /* |
1043 | * ignore null lease/binding on snapdir ENOENT, or else we | 1065 | * ignore null lease/binding on snapdir ENOENT, or else we |
1044 | * will have trouble splicing in the virtual snapdir later | 1066 | * will have trouble splicing in the virtual snapdir later |
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1108 | ceph_dentry(req->r_old_dentry)->offset); | 1130 | ceph_dentry(req->r_old_dentry)->offset); |
1109 | 1131 | ||
1110 | dn = req->r_old_dentry; /* use old_dentry */ | 1132 | dn = req->r_old_dentry; /* use old_dentry */ |
1111 | in = dn->d_inode; | ||
1112 | } | 1133 | } |
1113 | 1134 | ||
1114 | /* null dentry? */ | 1135 | /* null dentry? */ |
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1130 | } | 1151 | } |
1131 | 1152 | ||
1132 | /* attach proper inode */ | 1153 | /* attach proper inode */ |
1133 | ininfo = rinfo->targeti.in; | 1154 | if (!dn->d_inode) { |
1134 | vino.ino = le64_to_cpu(ininfo->ino); | 1155 | ihold(in); |
1135 | vino.snap = le64_to_cpu(ininfo->snapid); | ||
1136 | in = dn->d_inode; | ||
1137 | if (!in) { | ||
1138 | in = ceph_get_inode(sb, vino); | ||
1139 | if (IS_ERR(in)) { | ||
1140 | pr_err("fill_trace bad get_inode " | ||
1141 | "%llx.%llx\n", vino.ino, vino.snap); | ||
1142 | err = PTR_ERR(in); | ||
1143 | d_drop(dn); | ||
1144 | goto done; | ||
1145 | } | ||
1146 | dn = splice_dentry(dn, in, &have_lease, true); | 1156 | dn = splice_dentry(dn, in, &have_lease, true); |
1147 | if (IS_ERR(dn)) { | 1157 | if (IS_ERR(dn)) { |
1148 | err = PTR_ERR(dn); | 1158 | err = PTR_ERR(dn); |
1149 | goto done; | 1159 | goto done; |
1150 | } | 1160 | } |
1151 | req->r_dentry = dn; /* may have spliced */ | 1161 | req->r_dentry = dn; /* may have spliced */ |
1152 | ihold(in); | 1162 | } else if (dn->d_inode && dn->d_inode != in) { |
1153 | } else if (ceph_ino(in) == vino.ino && | ||
1154 | ceph_snap(in) == vino.snap) { | ||
1155 | ihold(in); | ||
1156 | } else { | ||
1157 | dout(" %p links to %p %llx.%llx, not %llx.%llx\n", | 1163 | dout(" %p links to %p %llx.%llx, not %llx.%llx\n", |
1158 | dn, in, ceph_ino(in), ceph_snap(in), | 1164 | dn, dn->d_inode, ceph_vinop(dn->d_inode), |
1159 | vino.ino, vino.snap); | 1165 | ceph_vinop(in)); |
1160 | have_lease = false; | 1166 | have_lease = false; |
1161 | in = NULL; | ||
1162 | } | 1167 | } |
1163 | 1168 | ||
1164 | if (have_lease) | 1169 | if (have_lease) |
1165 | update_dentry_lease(dn, rinfo->dlease, session, | 1170 | update_dentry_lease(dn, rinfo->dlease, session, |
1166 | req->r_request_started); | 1171 | req->r_request_started); |
1167 | dout(" final dn %p\n", dn); | 1172 | dout(" final dn %p\n", dn); |
1168 | i++; | 1173 | } else if (!req->r_aborted && |
1169 | } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || | 1174 | (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || |
1170 | req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) { | 1175 | req->r_op == CEPH_MDS_OP_MKSNAP)) { |
1171 | struct dentry *dn = req->r_dentry; | 1176 | struct dentry *dn = req->r_dentry; |
1172 | 1177 | ||
1173 | /* fill out a snapdir LOOKUPSNAP dentry */ | 1178 | /* fill out a snapdir LOOKUPSNAP dentry */ |
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1177 | ininfo = rinfo->targeti.in; | 1182 | ininfo = rinfo->targeti.in; |
1178 | vino.ino = le64_to_cpu(ininfo->ino); | 1183 | vino.ino = le64_to_cpu(ininfo->ino); |
1179 | vino.snap = le64_to_cpu(ininfo->snapid); | 1184 | vino.snap = le64_to_cpu(ininfo->snapid); |
1180 | in = ceph_get_inode(sb, vino); | ||
1181 | if (IS_ERR(in)) { | ||
1182 | pr_err("fill_inode get_inode badness %llx.%llx\n", | ||
1183 | vino.ino, vino.snap); | ||
1184 | err = PTR_ERR(in); | ||
1185 | d_delete(dn); | ||
1186 | goto done; | ||
1187 | } | ||
1188 | dout(" linking snapped dir %p to dn %p\n", in, dn); | 1185 | dout(" linking snapped dir %p to dn %p\n", in, dn); |
1186 | ihold(in); | ||
1189 | dn = splice_dentry(dn, in, NULL, true); | 1187 | dn = splice_dentry(dn, in, NULL, true); |
1190 | if (IS_ERR(dn)) { | 1188 | if (IS_ERR(dn)) { |
1191 | err = PTR_ERR(dn); | 1189 | err = PTR_ERR(dn); |
1192 | goto done; | 1190 | goto done; |
1193 | } | 1191 | } |
1194 | req->r_dentry = dn; /* may have spliced */ | 1192 | req->r_dentry = dn; /* may have spliced */ |
1195 | ihold(in); | ||
1196 | rinfo->head->is_dentry = 1; /* fool notrace handlers */ | ||
1197 | } | ||
1198 | |||
1199 | if (rinfo->head->is_target) { | ||
1200 | vino.ino = le64_to_cpu(rinfo->targeti.in->ino); | ||
1201 | vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); | ||
1202 | |||
1203 | if (in == NULL || ceph_ino(in) != vino.ino || | ||
1204 | ceph_snap(in) != vino.snap) { | ||
1205 | in = ceph_get_inode(sb, vino); | ||
1206 | if (IS_ERR(in)) { | ||
1207 | err = PTR_ERR(in); | ||
1208 | goto done; | ||
1209 | } | ||
1210 | } | ||
1211 | req->r_target_inode = in; | ||
1212 | |||
1213 | err = fill_inode(in, | ||
1214 | &rinfo->targeti, NULL, | ||
1215 | session, req->r_request_started, | ||
1216 | (le32_to_cpu(rinfo->head->result) == 0) ? | ||
1217 | req->r_fmode : -1, | ||
1218 | &req->r_caps_reservation); | ||
1219 | if (err < 0) { | ||
1220 | pr_err("fill_inode badness %p %llx.%llx\n", | ||
1221 | in, ceph_vinop(in)); | ||
1222 | goto done; | ||
1223 | } | ||
1224 | } | 1193 | } |
1225 | |||
1226 | done: | 1194 | done: |
1227 | dout("fill_trace done err=%d\n", err); | 1195 | dout("fill_trace done err=%d\n", err); |
1228 | return err; | 1196 | return err; |
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
1272 | struct qstr dname; | 1240 | struct qstr dname; |
1273 | struct dentry *dn; | 1241 | struct dentry *dn; |
1274 | struct inode *in; | 1242 | struct inode *in; |
1275 | int err = 0, i; | 1243 | int err = 0, ret, i; |
1276 | struct inode *snapdir = NULL; | 1244 | struct inode *snapdir = NULL; |
1277 | struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; | 1245 | struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; |
1278 | struct ceph_dentry_info *di; | 1246 | struct ceph_dentry_info *di; |
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, | |||
1305 | ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); | 1273 | ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); |
1306 | } | 1274 | } |
1307 | 1275 | ||
1276 | /* FIXME: release caps/leases if error occurs */ | ||
1308 | for (i = 0; i < rinfo->dir_nr; i++) { | 1277 | for (i = 0; i < rinfo->dir_nr; i++) { |
1309 | struct ceph_vino vino; | 1278 | struct ceph_vino vino; |
1310 | 1279 | ||
@@ -1329,9 +1298,10 @@ retry_lookup: | |||
1329 | err = -ENOMEM; | 1298 | err = -ENOMEM; |
1330 | goto out; | 1299 | goto out; |
1331 | } | 1300 | } |
1332 | err = ceph_init_dentry(dn); | 1301 | ret = ceph_init_dentry(dn); |
1333 | if (err < 0) { | 1302 | if (ret < 0) { |
1334 | dput(dn); | 1303 | dput(dn); |
1304 | err = ret; | ||
1335 | goto out; | 1305 | goto out; |
1336 | } | 1306 | } |
1337 | } else if (dn->d_inode && | 1307 | } else if (dn->d_inode && |
@@ -1351,9 +1321,6 @@ retry_lookup: | |||
1351 | spin_unlock(&parent->d_lock); | 1321 | spin_unlock(&parent->d_lock); |
1352 | } | 1322 | } |
1353 | 1323 | ||
1354 | di = dn->d_fsdata; | ||
1355 | di->offset = ceph_make_fpos(frag, i + r_readdir_offset); | ||
1356 | |||
1357 | /* inode */ | 1324 | /* inode */ |
1358 | if (dn->d_inode) { | 1325 | if (dn->d_inode) { |
1359 | in = dn->d_inode; | 1326 | in = dn->d_inode; |
@@ -1366,26 +1333,39 @@ retry_lookup: | |||
1366 | err = PTR_ERR(in); | 1333 | err = PTR_ERR(in); |
1367 | goto out; | 1334 | goto out; |
1368 | } | 1335 | } |
1369 | dn = splice_dentry(dn, in, NULL, false); | ||
1370 | if (IS_ERR(dn)) | ||
1371 | dn = NULL; | ||
1372 | } | 1336 | } |
1373 | 1337 | ||
1374 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, | 1338 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, |
1375 | req->r_request_started, -1, | 1339 | req->r_request_started, -1, |
1376 | &req->r_caps_reservation) < 0) { | 1340 | &req->r_caps_reservation) < 0) { |
1377 | pr_err("fill_inode badness on %p\n", in); | 1341 | pr_err("fill_inode badness on %p\n", in); |
1342 | if (!dn->d_inode) | ||
1343 | iput(in); | ||
1344 | d_drop(dn); | ||
1378 | goto next_item; | 1345 | goto next_item; |
1379 | } | 1346 | } |
1380 | if (dn) | 1347 | |
1381 | update_dentry_lease(dn, rinfo->dir_dlease[i], | 1348 | if (!dn->d_inode) { |
1382 | req->r_session, | 1349 | dn = splice_dentry(dn, in, NULL, false); |
1383 | req->r_request_started); | 1350 | if (IS_ERR(dn)) { |
1351 | err = PTR_ERR(dn); | ||
1352 | dn = NULL; | ||
1353 | goto next_item; | ||
1354 | } | ||
1355 | } | ||
1356 | |||
1357 | di = dn->d_fsdata; | ||
1358 | di->offset = ceph_make_fpos(frag, i + r_readdir_offset); | ||
1359 | |||
1360 | update_dentry_lease(dn, rinfo->dir_dlease[i], | ||
1361 | req->r_session, | ||
1362 | req->r_request_started); | ||
1384 | next_item: | 1363 | next_item: |
1385 | if (dn) | 1364 | if (dn) |
1386 | dput(dn); | 1365 | dput(dn); |
1387 | } | 1366 | } |
1388 | req->r_did_prepopulate = true; | 1367 | if (err == 0) |
1368 | req->r_did_prepopulate = true; | ||
1389 | 1369 | ||
1390 | out: | 1370 | out: |
1391 | if (snapdir) { | 1371 | if (snapdir) { |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index aa3397620342..2c29db6a247e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, | |||
477 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); | 477 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); |
478 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); | 478 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); |
479 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); | 479 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); |
480 | extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 480 | extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon, |
481 | const unsigned char *path, | 481 | struct cifs_sb_info *cifs_sb, |
482 | struct cifs_sb_info *cifs_sb, unsigned int xid); | 482 | struct cifs_fattr *fattr, |
483 | const unsigned char *path); | ||
483 | extern int mdfour(unsigned char *, unsigned char *, int); | 484 | extern int mdfour(unsigned char *, unsigned char *, int); |
484 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, | 485 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, |
485 | const struct nls_table *codepage); | 486 | const struct nls_table *codepage); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 124aa0230c1b..d707edb6b852 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -4010,7 +4010,7 @@ QFileInfoRetry: | |||
4010 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4010 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4011 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4011 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4012 | if (rc) { | 4012 | if (rc) { |
4013 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4013 | cifs_dbg(FYI, "Send error in QFileInfo = %d", rc); |
4014 | } else { /* decode response */ | 4014 | } else { /* decode response */ |
4015 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4015 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4016 | 4016 | ||
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry: | |||
4179 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4179 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4180 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4180 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4181 | if (rc) { | 4181 | if (rc) { |
4182 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4182 | cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc); |
4183 | } else { /* decode response */ | 4183 | } else { /* decode response */ |
4184 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4184 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4185 | 4185 | ||
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry: | |||
4263 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4263 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4264 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4264 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4265 | if (rc) { | 4265 | if (rc) { |
4266 | cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc); | 4266 | cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc); |
4267 | } else { /* decode response */ | 4267 | } else { /* decode response */ |
4268 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4268 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4269 | 4269 | ||
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 11ff5f116b20..a514e0a65f69 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry) | |||
193 | static int | 193 | static int |
194 | cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, | 194 | cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, |
195 | struct tcon_link *tlink, unsigned oflags, umode_t mode, | 195 | struct tcon_link *tlink, unsigned oflags, umode_t mode, |
196 | __u32 *oplock, struct cifs_fid *fid, int *created) | 196 | __u32 *oplock, struct cifs_fid *fid) |
197 | { | 197 | { |
198 | int rc = -ENOENT; | 198 | int rc = -ENOENT; |
199 | int create_options = CREATE_NOT_DIR; | 199 | int create_options = CREATE_NOT_DIR; |
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, | |||
349 | .device = 0, | 349 | .device = 0, |
350 | }; | 350 | }; |
351 | 351 | ||
352 | *created |= FILE_CREATED; | ||
353 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { | 352 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { |
354 | args.uid = current_fsuid(); | 353 | args.uid = current_fsuid(); |
355 | if (inode->i_mode & S_ISGID) | 354 | if (inode->i_mode & S_ISGID) |
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, | |||
480 | cifs_add_pending_open(&fid, tlink, &open); | 479 | cifs_add_pending_open(&fid, tlink, &open); |
481 | 480 | ||
482 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, | 481 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, |
483 | &oplock, &fid, opened); | 482 | &oplock, &fid); |
484 | 483 | ||
485 | if (rc) { | 484 | if (rc) { |
486 | cifs_del_pending_open(&open); | 485 | cifs_del_pending_open(&open); |
487 | goto out; | 486 | goto out; |
488 | } | 487 | } |
489 | 488 | ||
489 | if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) | ||
490 | *opened |= FILE_CREATED; | ||
491 | |||
490 | rc = finish_open(file, direntry, generic_file_open, opened); | 492 | rc = finish_open(file, direntry, generic_file_open, opened); |
491 | if (rc) { | 493 | if (rc) { |
492 | if (server->ops->close) | 494 | if (server->ops->close) |
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
529 | struct TCP_Server_Info *server; | 531 | struct TCP_Server_Info *server; |
530 | struct cifs_fid fid; | 532 | struct cifs_fid fid; |
531 | __u32 oplock; | 533 | __u32 oplock; |
532 | int created = FILE_CREATED; | ||
533 | 534 | ||
534 | cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n", | 535 | cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n", |
535 | inode, direntry->d_name.name, direntry); | 536 | inode, direntry->d_name.name, direntry); |
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
546 | server->ops->new_lease_key(&fid); | 547 | server->ops->new_lease_key(&fid); |
547 | 548 | ||
548 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, | 549 | rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, |
549 | &oplock, &fid, &created); | 550 | &oplock, &fid); |
550 | if (!rc && server->ops->close) | 551 | if (!rc && server->ops->close) |
551 | server->ops->close(xid, tcon, &fid); | 552 | server->ops->close(xid, tcon, &fid); |
552 | 553 | ||
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 36f9ebb93ceb..49719b8228e5 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
383 | 383 | ||
384 | /* check for Minshall+French symlinks */ | 384 | /* check for Minshall+French symlinks */ |
385 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { | 385 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { |
386 | int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); | 386 | int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr, |
387 | full_path); | ||
387 | if (tmprc) | 388 | if (tmprc) |
388 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); | 389 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); |
389 | } | 390 | } |
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, | |||
799 | 800 | ||
800 | /* check for Minshall+French symlinks */ | 801 | /* check for Minshall+French symlinks */ |
801 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { | 802 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { |
802 | tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid); | 803 | tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr, |
804 | full_path); | ||
803 | if (tmprc) | 805 | if (tmprc) |
804 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); | 806 | cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc); |
805 | } | 807 | } |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index cc0234710ddb..92aee08483a5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, | |||
354 | 354 | ||
355 | 355 | ||
356 | int | 356 | int |
357 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 357 | CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon, |
358 | const unsigned char *path, | 358 | struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, |
359 | struct cifs_sb_info *cifs_sb, unsigned int xid) | 359 | const unsigned char *path) |
360 | { | 360 | { |
361 | int rc = 0; | 361 | int rc; |
362 | u8 *buf = NULL; | 362 | u8 *buf = NULL; |
363 | unsigned int link_len = 0; | 363 | unsigned int link_len = 0; |
364 | unsigned int bytes_read = 0; | 364 | unsigned int bytes_read = 0; |
365 | struct cifs_tcon *ptcon; | ||
366 | 365 | ||
367 | if (!CIFSCouldBeMFSymlink(fattr)) | 366 | if (!CIFSCouldBeMFSymlink(fattr)) |
368 | /* it's not a symlink */ | 367 | /* it's not a symlink */ |
369 | return 0; | 368 | return 0; |
370 | 369 | ||
371 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | 370 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); |
372 | if (!buf) { | 371 | if (!buf) |
373 | rc = -ENOMEM; | 372 | return -ENOMEM; |
374 | goto out; | ||
375 | } | ||
376 | 373 | ||
377 | ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); | 374 | if (tcon->ses->server->ops->query_mf_symlink) |
378 | if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) | 375 | rc = tcon->ses->server->ops->query_mf_symlink(path, buf, |
379 | rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, | 376 | &bytes_read, cifs_sb, xid); |
380 | &bytes_read, cifs_sb, xid); | ||
381 | else | 377 | else |
382 | goto out; | 378 | rc = -ENOSYS; |
383 | 379 | ||
384 | if (rc != 0) | 380 | if (rc) |
385 | goto out; | 381 | goto out; |
386 | 382 | ||
387 | if (bytes_read == 0) /* not a symlink */ | 383 | if (bytes_read == 0) /* not a symlink */ |
diff --git a/fs/dcache.c b/fs/dcache.c index 4bdb300b16e2..6055d61811d3 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char | |||
192 | if (!tcount) | 192 | if (!tcount) |
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
195 | mask = ~(~0ul << tcount*8); | 195 | mask = bytemask_from_count(tcount); |
196 | return unlikely(!!((a ^ b) & mask)); | 196 | return unlikely(!!((a ^ b) & mask)); |
197 | } | 197 | } |
198 | 198 | ||
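As an aside on the fs/dcache.c hunk above: the open-coded mask ~(~0ul << tcount*8) and its replacement bytemask_from_count(tcount) compute the same thing, a mask covering the low tcount bytes of the tail word so the final word compare ignores padding bytes. A minimal userspace sketch of that arithmetic (not kernel code; assumes a 64-bit unsigned long and 0 < cnt < sizeof(unsigned long), values are illustrative):

	#include <stdio.h>

	static unsigned long bytemask_from_count(unsigned long cnt)
	{
		return ~(~0ul << (cnt * 8));	/* same expression the old code used inline */
	}

	int main(void)
	{
		unsigned long a = 0x00ffeeddccbbaa99ul;	/* tail word of one name, 3 junk high bytes */
		unsigned long b = 0x1234abddccbbaa99ul;	/* tail word of the other, different junk   */
		unsigned long cnt = 5;			/* only 5 bytes belong to the name          */
		unsigned long mask = bytemask_from_count(cnt);

		printf("mask  = %016lx\n", mask);		/* 000000ffffffffff */
		printf("match = %d\n", !((a ^ b) & mask));	/* 1: junk bytes are ignored */
		return 0;
	}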
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8b5e2584c840..af903128891c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1907 | } | 1907 | } |
1908 | } | 1908 | } |
1909 | } | 1909 | } |
1910 | if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) { | ||
1911 | tep = tf.file->private_data; | ||
1912 | mutex_lock_nested(&tep->mtx, 1); | ||
1913 | } | ||
1914 | 1910 | ||
1915 | /* | 1911 | /* |
1916 | * Try to lookup the file inside our RB tree, Since we grabbed "mtx" | 1912 | * Try to lookup the file inside our RB tree, Since we grabbed "mtx" |
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 288534920fe5..20d6697bd638 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, | |||
1493 | sb->s_blocksize - offset : towrite; | 1493 | sb->s_blocksize - offset : towrite; |
1494 | 1494 | ||
1495 | tmp_bh.b_state = 0; | 1495 | tmp_bh.b_state = 0; |
1496 | tmp_bh.b_size = sb->s_blocksize; | ||
1496 | err = ext2_get_block(inode, blk, &tmp_bh, 1); | 1497 | err = ext2_get_block(inode, blk, &tmp_bh, 1); |
1497 | if (err < 0) | 1498 | if (err < 0) |
1498 | goto out; | 1499 | goto out; |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e6185031c1cc..ece55565b9cd 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -268,6 +268,16 @@ struct ext4_io_submit { | |||
268 | /* Translate # of blks to # of clusters */ | 268 | /* Translate # of blks to # of clusters */ |
269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ | 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ |
270 | (sbi)->s_cluster_bits) | 270 | (sbi)->s_cluster_bits) |
271 | /* Mask out the low bits to get the starting block of the cluster */ | ||
272 | #define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ | ||
273 | ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
274 | #define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ | ||
275 | ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
276 | /* Get the cluster offset */ | ||
277 | #define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ | ||
278 | ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
279 | #define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ | ||
280 | ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
271 | 281 | ||
272 | /* | 282 | /* |
273 | * Structure of a blocks group descriptor | 283 | * Structure of a blocks group descriptor |
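To make the new bigalloc helpers above concrete: CMASK rounds a block number down to the first block of its cluster, COFF yields the offset within the cluster, both by masking with s_cluster_ratio - 1. A small userspace sketch under simplified types (the struct and unsigned long long stand in for ext4_sb_info and ext4_fsblk_t; a cluster ratio of 16 is just an example):

	#include <stdio.h>

	struct sbi { unsigned int s_cluster_ratio; };

	#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & ~((unsigned long long)(s)->s_cluster_ratio - 1))
	#define EXT4_PBLK_COFF(s, pblk)  ((pblk) &  ((unsigned long long)(s)->s_cluster_ratio - 1))

	int main(void)
	{
		struct sbi s = { .s_cluster_ratio = 16 };	/* 16 blocks per cluster */
		unsigned long long pblk = 1000005;

		printf("cluster start: %llu\n", EXT4_PBLK_CMASK(&s, pblk));	/* 1000000 */
		printf("cluster off  : %llu\n", EXT4_PBLK_COFF(&s, pblk));	/* 5       */
		return 0;
	}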
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 17ac112ab101..3fe29de832c8 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
259 | if (WARN_ON_ONCE(err)) { | 259 | if (WARN_ON_ONCE(err)) { |
260 | ext4_journal_abort_handle(where, line, __func__, bh, | 260 | ext4_journal_abort_handle(where, line, __func__, bh, |
261 | handle, err); | 261 | handle, err); |
262 | ext4_error_inode(inode, where, line, | ||
263 | bh->b_blocknr, | ||
264 | "journal_dirty_metadata failed: " | ||
265 | "handle type %u started at line %u, " | ||
266 | "credits %u/%u, errcode %d", | ||
267 | handle->h_type, | ||
268 | handle->h_line_no, | ||
269 | handle->h_requested_credits, | ||
270 | handle->h_buffer_credits, err); | ||
262 | } | 271 | } |
263 | } else { | 272 | } else { |
264 | if (inode) | 273 | if (inode) |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 35f65cf4f318..3384dc4bed40 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) | |||
360 | { | 360 | { |
361 | ext4_fsblk_t block = ext4_ext_pblock(ext); | 361 | ext4_fsblk_t block = ext4_ext_pblock(ext); |
362 | int len = ext4_ext_get_actual_len(ext); | 362 | int len = ext4_ext_get_actual_len(ext); |
363 | ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); | ||
364 | ext4_lblk_t last = lblock + len - 1; | ||
363 | 365 | ||
364 | if (len == 0) | 366 | if (lblock > last) |
365 | return 0; | 367 | return 0; |
366 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); | 368 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
367 | } | 369 | } |
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode, | |||
387 | if (depth == 0) { | 389 | if (depth == 0) { |
388 | /* leaf entries */ | 390 | /* leaf entries */ |
389 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); | 391 | struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
392 | struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; | ||
393 | ext4_fsblk_t pblock = 0; | ||
394 | ext4_lblk_t lblock = 0; | ||
395 | ext4_lblk_t prev = 0; | ||
396 | int len = 0; | ||
390 | while (entries) { | 397 | while (entries) { |
391 | if (!ext4_valid_extent(inode, ext)) | 398 | if (!ext4_valid_extent(inode, ext)) |
392 | return 0; | 399 | return 0; |
400 | |||
401 | /* Check for overlapping extents */ | ||
402 | lblock = le32_to_cpu(ext->ee_block); | ||
403 | len = ext4_ext_get_actual_len(ext); | ||
404 | if ((lblock <= prev) && prev) { | ||
405 | pblock = ext4_ext_pblock(ext); | ||
406 | es->s_last_error_block = cpu_to_le64(pblock); | ||
407 | return 0; | ||
408 | } | ||
393 | ext++; | 409 | ext++; |
394 | entries--; | 410 | entries--; |
411 | prev = lblock + len - 1; | ||
395 | } | 412 | } |
396 | } else { | 413 | } else { |
397 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); | 414 | struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
1834 | depth = ext_depth(inode); | 1851 | depth = ext_depth(inode); |
1835 | if (!path[depth].p_ext) | 1852 | if (!path[depth].p_ext) |
1836 | goto out; | 1853 | goto out; |
1837 | b2 = le32_to_cpu(path[depth].p_ext->ee_block); | 1854 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
1838 | b2 &= ~(sbi->s_cluster_ratio - 1); | ||
1839 | 1855 | ||
1840 | /* | 1856 | /* |
1841 | * get the next allocated block if the extent in the path | 1857 | * get the next allocated block if the extent in the path |
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
1845 | b2 = ext4_ext_next_allocated_block(path); | 1861 | b2 = ext4_ext_next_allocated_block(path); |
1846 | if (b2 == EXT_MAX_BLOCKS) | 1862 | if (b2 == EXT_MAX_BLOCKS) |
1847 | goto out; | 1863 | goto out; |
1848 | b2 &= ~(sbi->s_cluster_ratio - 1); | 1864 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
1849 | } | 1865 | } |
1850 | 1866 | ||
1851 | /* check for wrap through zero on extent logical start block*/ | 1867 | /* check for wrap through zero on extent logical start block*/ |
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
2504 | * extent, we have to mark the cluster as used (store negative | 2520 | * extent, we have to mark the cluster as used (store negative |
2505 | * cluster number in partial_cluster). | 2521 | * cluster number in partial_cluster). |
2506 | */ | 2522 | */ |
2507 | unaligned = pblk & (sbi->s_cluster_ratio - 1); | 2523 | unaligned = EXT4_PBLK_COFF(sbi, pblk); |
2508 | if (unaligned && (ee_len == num) && | 2524 | if (unaligned && (ee_len == num) && |
2509 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) | 2525 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) |
2510 | *partial_cluster = EXT4_B2C(sbi, pblk); | 2526 | *partial_cluster = EXT4_B2C(sbi, pblk); |
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2598 | * accidentally freeing it later on | 2614 | * accidentally freeing it later on |
2599 | */ | 2615 | */ |
2600 | pblk = ext4_ext_pblock(ex); | 2616 | pblk = ext4_ext_pblock(ex); |
2601 | if (pblk & (sbi->s_cluster_ratio - 1)) | 2617 | if (EXT4_PBLK_COFF(sbi, pblk)) |
2602 | *partial_cluster = | 2618 | *partial_cluster = |
2603 | -((long long)EXT4_B2C(sbi, pblk)); | 2619 | -((long long)EXT4_B2C(sbi, pblk)); |
2604 | ex--; | 2620 | ex--; |
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) | |||
3753 | { | 3769 | { |
3754 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 3770 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
3755 | ext4_lblk_t lblk_start, lblk_end; | 3771 | ext4_lblk_t lblk_start, lblk_end; |
3756 | lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); | 3772 | lblk_start = EXT4_LBLK_CMASK(sbi, lblk); |
3757 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; | 3773 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; |
3758 | 3774 | ||
3759 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); | 3775 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); |
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
3812 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); | 3828 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); |
3813 | 3829 | ||
3814 | /* Check towards left side */ | 3830 | /* Check towards left side */ |
3815 | c_offset = lblk_start & (sbi->s_cluster_ratio - 1); | 3831 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start); |
3816 | if (c_offset) { | 3832 | if (c_offset) { |
3817 | lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); | 3833 | lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); |
3818 | lblk_to = lblk_from + c_offset - 1; | 3834 | lblk_to = lblk_from + c_offset - 1; |
3819 | 3835 | ||
3820 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) | 3836 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) |
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
3822 | } | 3838 | } |
3823 | 3839 | ||
3824 | /* Now check towards right. */ | 3840 | /* Now check towards right. */ |
3825 | c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); | 3841 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); |
3826 | if (allocated_clusters && c_offset) { | 3842 | if (allocated_clusters && c_offset) { |
3827 | lblk_from = lblk_start + num_blks; | 3843 | lblk_from = lblk_start + num_blks; |
3828 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; | 3844 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; |
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
4030 | struct ext4_ext_path *path) | 4046 | struct ext4_ext_path *path) |
4031 | { | 4047 | { |
4032 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4048 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
4033 | ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4049 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4034 | ext4_lblk_t ex_cluster_start, ex_cluster_end; | 4050 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
4035 | ext4_lblk_t rr_cluster_start; | 4051 | ext4_lblk_t rr_cluster_start; |
4036 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); | 4052 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
4048 | (rr_cluster_start == ex_cluster_start)) { | 4064 | (rr_cluster_start == ex_cluster_start)) { |
4049 | if (rr_cluster_start == ex_cluster_end) | 4065 | if (rr_cluster_start == ex_cluster_end) |
4050 | ee_start += ee_len - 1; | 4066 | ee_start += ee_len - 1; |
4051 | map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + | 4067 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
4052 | c_offset; | ||
4053 | map->m_len = min(map->m_len, | 4068 | map->m_len = min(map->m_len, |
4054 | (unsigned) sbi->s_cluster_ratio - c_offset); | 4069 | (unsigned) sbi->s_cluster_ratio - c_offset); |
4055 | /* | 4070 | /* |
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
4203 | */ | 4218 | */ |
4204 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; | 4219 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; |
4205 | newex.ee_block = cpu_to_le32(map->m_lblk); | 4220 | newex.ee_block = cpu_to_le32(map->m_lblk); |
4206 | cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4221 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4207 | 4222 | ||
4208 | /* | 4223 | /* |
4209 | * If we are doing bigalloc, check to see if the extent returned | 4224 | * If we are doing bigalloc, check to see if the extent returned |
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
4271 | * needed so that future calls to get_implied_cluster_alloc() | 4286 | * needed so that future calls to get_implied_cluster_alloc() |
4272 | * work correctly. | 4287 | * work correctly. |
4273 | */ | 4288 | */ |
4274 | offset = map->m_lblk & (sbi->s_cluster_ratio - 1); | 4289 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
4275 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); | 4290 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
4276 | ar.goal -= offset; | 4291 | ar.goal -= offset; |
4277 | ar.logical -= offset; | 4292 | ar.logical -= offset; |
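The key addition in the fs/ext4/extents.c diff above is the overlap check in ext4_valid_extent_entries(): while walking leaf extents in order, each extent's logical start must lie beyond the last block of the previous one, with prev == 0 treated as "no previous extent". A minimal userspace sketch of that check (illustrative types and data, not the kernel structures):

	#include <stdio.h>

	struct ext { unsigned int lblock; unsigned int len; };

	static int entries_valid(const struct ext *e, int n)
	{
		unsigned int prev = 0;
		int i;

		for (i = 0; i < n; i++) {
			unsigned int lblock = e[i].lblock;

			if (lblock <= prev && prev)	/* starts inside the previous extent */
				return 0;
			prev = lblock + e[i].len - 1;	/* last block covered so far */
		}
		return 1;
	}

	int main(void)
	{
		struct ext ok[]  = { {0, 10}, {10, 5}, {100, 1} };
		struct ext bad[] = { {0, 10}, {8, 5} };		/* second extent overlaps the first */

		printf("%d %d\n", entries_valid(ok, 3), entries_valid(bad, 2));	/* 1 0 */
		return 0;
	}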
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 075763474118..61d49ff22c81 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file, | |||
1206 | */ | 1206 | */ |
1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | 1207 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) |
1208 | { | 1208 | { |
1209 | int retries = 0; | ||
1210 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1209 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1211 | struct ext4_inode_info *ei = EXT4_I(inode); | 1210 | struct ext4_inode_info *ei = EXT4_I(inode); |
1212 | unsigned int md_needed; | 1211 | unsigned int md_needed; |
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | |||
1218 | * in order to allocate nrblocks | 1217 | * in order to allocate nrblocks |
1219 | * worse case is one extent per block | 1218 | * worse case is one extent per block |
1220 | */ | 1219 | */ |
1221 | repeat: | ||
1222 | spin_lock(&ei->i_block_reservation_lock); | 1220 | spin_lock(&ei->i_block_reservation_lock); |
1223 | /* | 1221 | /* |
1224 | * ext4_calc_metadata_amount() has side effects, which we have | 1222 | * ext4_calc_metadata_amount() has side effects, which we have |
@@ -1238,10 +1236,6 @@ repeat: | |||
1238 | ei->i_da_metadata_calc_len = save_len; | 1236 | ei->i_da_metadata_calc_len = save_len; |
1239 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1237 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
1240 | spin_unlock(&ei->i_block_reservation_lock); | 1238 | spin_unlock(&ei->i_block_reservation_lock); |
1241 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
1242 | cond_resched(); | ||
1243 | goto repeat; | ||
1244 | } | ||
1245 | return -ENOSPC; | 1239 | return -ENOSPC; |
1246 | } | 1240 | } |
1247 | ei->i_reserved_meta_blocks += md_needed; | 1241 | ei->i_reserved_meta_blocks += md_needed; |
@@ -1255,7 +1249,6 @@ repeat: | |||
1255 | */ | 1249 | */ |
1256 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | 1250 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) |
1257 | { | 1251 | { |
1258 | int retries = 0; | ||
1259 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1252 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1260 | struct ext4_inode_info *ei = EXT4_I(inode); | 1253 | struct ext4_inode_info *ei = EXT4_I(inode); |
1261 | unsigned int md_needed; | 1254 | unsigned int md_needed; |
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | |||
1277 | * in order to allocate nrblocks | 1270 | * in order to allocate nrblocks |
1278 | * worse case is one extent per block | 1271 | * worse case is one extent per block |
1279 | */ | 1272 | */ |
1280 | repeat: | ||
1281 | spin_lock(&ei->i_block_reservation_lock); | 1273 | spin_lock(&ei->i_block_reservation_lock); |
1282 | /* | 1274 | /* |
1283 | * ext4_calc_metadata_amount() has side effects, which we have | 1275 | * ext4_calc_metadata_amount() has side effects, which we have |
@@ -1297,10 +1289,6 @@ repeat: | |||
1297 | ei->i_da_metadata_calc_len = save_len; | 1289 | ei->i_da_metadata_calc_len = save_len; |
1298 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1290 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
1299 | spin_unlock(&ei->i_block_reservation_lock); | 1291 | spin_unlock(&ei->i_block_reservation_lock); |
1300 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
1301 | cond_resched(); | ||
1302 | goto repeat; | ||
1303 | } | ||
1304 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); | 1292 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); |
1305 | return -ENOSPC; | 1293 | return -ENOSPC; |
1306 | } | 1294 | } |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4d113efa024c..04a5c7504be9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head) | |||
3442 | { | 3442 | { |
3443 | struct ext4_prealloc_space *pa; | 3443 | struct ext4_prealloc_space *pa; |
3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); | 3444 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); |
3445 | |||
3446 | BUG_ON(atomic_read(&pa->pa_count)); | ||
3447 | BUG_ON(pa->pa_deleted == 0); | ||
3445 | kmem_cache_free(ext4_pspace_cachep, pa); | 3448 | kmem_cache_free(ext4_pspace_cachep, pa); |
3446 | } | 3449 | } |
3447 | 3450 | ||
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |||
3455 | ext4_group_t grp; | 3458 | ext4_group_t grp; |
3456 | ext4_fsblk_t grp_blk; | 3459 | ext4_fsblk_t grp_blk; |
3457 | 3460 | ||
3458 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | ||
3459 | return; | ||
3460 | |||
3461 | /* in this short window concurrent discard can set pa_deleted */ | 3461 | /* in this short window concurrent discard can set pa_deleted */ |
3462 | spin_lock(&pa->pa_lock); | 3462 | spin_lock(&pa->pa_lock); |
3463 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { | ||
3464 | spin_unlock(&pa->pa_lock); | ||
3465 | return; | ||
3466 | } | ||
3467 | |||
3463 | if (pa->pa_deleted == 1) { | 3468 | if (pa->pa_deleted == 1) { |
3464 | spin_unlock(&pa->pa_lock); | 3469 | spin_unlock(&pa->pa_lock); |
3465 | return; | 3470 | return; |
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |||
4121 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | 4126 | ext4_get_group_no_and_offset(sb, goal, &group, &block); |
4122 | 4127 | ||
4123 | /* set up allocation goals */ | 4128 | /* set up allocation goals */ |
4124 | ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); | 4129 | ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); |
4125 | ac->ac_status = AC_STATUS_CONTINUE; | 4130 | ac->ac_status = AC_STATUS_CONTINUE; |
4126 | ac->ac_sb = sb; | 4131 | ac->ac_sb = sb; |
4127 | ac->ac_inode = ar->inode; | 4132 | ac->ac_inode = ar->inode; |
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
4663 | * blocks at the beginning or the end unless we are explicitly | 4668 | * blocks at the beginning or the end unless we are explicitly |
4664 | * requested to avoid doing so. | 4669 | * requested to avoid doing so. |
4665 | */ | 4670 | */ |
4666 | overflow = block & (sbi->s_cluster_ratio - 1); | 4671 | overflow = EXT4_PBLK_COFF(sbi, block); |
4667 | if (overflow) { | 4672 | if (overflow) { |
4668 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { | 4673 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { |
4669 | overflow = sbi->s_cluster_ratio - overflow; | 4674 | overflow = sbi->s_cluster_ratio - overflow; |
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
4677 | count += overflow; | 4682 | count += overflow; |
4678 | } | 4683 | } |
4679 | } | 4684 | } |
4680 | overflow = count & (sbi->s_cluster_ratio - 1); | 4685 | overflow = EXT4_LBLK_COFF(sbi, count); |
4681 | if (overflow) { | 4686 | if (overflow) { |
4682 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { | 4687 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { |
4683 | if (count > overflow) | 4688 | if (count > overflow) |
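The ext4_mb_put_pa() change above closes a race by dropping the reference and testing pa_deleted under the same pa_lock, instead of decrementing first and locking afterwards. Purely as a pattern sketch (pthreads, illustrative names, not the kernel's atomic/spinlock code): the decision "did I drop the last reference, and is the object still live?" has to be made atomically with respect to a concurrent discard.

	#include <pthread.h>
	#include <stdio.h>

	struct pa {
		pthread_mutex_t lock;
		int count;		/* stands in for pa_count   */
		int deleted;		/* stands in for pa_deleted */
	};

	/* Drop one reference; return 1 if the caller now owns teardown. */
	static int put_pa(struct pa *pa)
	{
		int free_it = 0;

		pthread_mutex_lock(&pa->lock);
		if (--pa->count == 0 && !pa->deleted) {
			pa->deleted = 1;	/* mark deleted while still holding the lock */
			free_it = 1;
		}
		pthread_mutex_unlock(&pa->lock);
		return free_it;
	}

	int main(void)
	{
		struct pa pa = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
		printf("must free: %d\n", put_pa(&pa));	/* 1 */
		return 0;
	}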
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c977f4e4e63b..1f7784de05b6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb) | |||
792 | } | 792 | } |
793 | 793 | ||
794 | ext4_es_unregister_shrinker(sbi); | 794 | ext4_es_unregister_shrinker(sbi); |
795 | del_timer(&sbi->s_err_report); | 795 | del_timer_sync(&sbi->s_err_report); |
796 | ext4_release_system_zone(sb); | 796 | ext4_release_system_zone(sb); |
797 | ext4_mb_release(sb); | 797 | ext4_mb_release(sb); |
798 | ext4_ext_release(sb); | 798 | ext4_ext_release(sb); |
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb) | |||
3316 | } | 3316 | } |
3317 | 3317 | ||
3318 | 3318 | ||
3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | 3319 | static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb) |
3320 | { | 3320 | { |
3321 | ext4_fsblk_t resv_clusters; | 3321 | ext4_fsblk_t resv_clusters; |
3322 | 3322 | ||
3323 | /* | 3323 | /* |
3324 | * There's no need to reserve anything when we aren't using extents. | ||
3325 | * The space estimates are exact, there are no unwritten extents, | ||
3326 | * hole punching doesn't need new metadata... This is needed especially | ||
3327 | * to keep ext2/3 backward compatibility. | ||
3328 | */ | ||
3329 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) | ||
3330 | return 0; | ||
3331 | /* | ||
3324 | * By default we reserve 2% or 4096 clusters, whichever is smaller. | 3332 | * By default we reserve 2% or 4096 clusters, whichever is smaller. |
3325 | * This should cover the situations where we can not afford to run | 3333 | * This should cover the situations where we can not afford to run |
3326 | * out of space like for example punch hole, or converting | 3334 | * out of space like for example punch hole, or converting |
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) | |||
3328 | * allocation would require 1, or 2 blocks, higher numbers are | 3336 | * allocation would require 1, or 2 blocks, higher numbers are |
3329 | * very rare. | 3337 | * very rare. |
3330 | */ | 3338 | */ |
3331 | resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; | 3339 | resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >> |
3340 | EXT4_SB(sb)->s_cluster_bits; | ||
3332 | 3341 | ||
3333 | do_div(resv_clusters, 50); | 3342 | do_div(resv_clusters, 50); |
3334 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); | 3343 | resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); |
@@ -4071,10 +4080,10 @@ no_journal: | |||
4071 | "available"); | 4080 | "available"); |
4072 | } | 4081 | } |
4073 | 4082 | ||
4074 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi)); | 4083 | err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb)); |
4075 | if (err) { | 4084 | if (err) { |
4076 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " | 4085 | ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " |
4077 | "reserved pool", ext4_calculate_resv_clusters(sbi)); | 4086 | "reserved pool", ext4_calculate_resv_clusters(sb)); |
4078 | goto failed_mount4a; | 4087 | goto failed_mount4a; |
4079 | } | 4088 | } |
4080 | 4089 | ||
@@ -4184,7 +4193,7 @@ failed_mount_wq: | |||
4184 | } | 4193 | } |
4185 | failed_mount3: | 4194 | failed_mount3: |
4186 | ext4_es_unregister_shrinker(sbi); | 4195 | ext4_es_unregister_shrinker(sbi); |
4187 | del_timer(&sbi->s_err_report); | 4196 | del_timer_sync(&sbi->s_err_report); |
4188 | if (sbi->s_flex_groups) | 4197 | if (sbi->s_flex_groups) |
4189 | ext4_kvfree(sbi->s_flex_groups); | 4198 | ext4_kvfree(sbi->s_flex_groups); |
4190 | percpu_counter_destroy(&sbi->s_freeclusters_counter); | 4199 | percpu_counter_destroy(&sbi->s_freeclusters_counter); |
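The sizing rule in ext4_calculate_resv_clusters() after the change above: reserve nothing when the extents feature is absent (ext2/3-compatible filesystems have exact space estimates), otherwise reserve 2% of the clusters, capped at 4096. A small userspace sketch of that arithmetic (function and parameter names here are illustrative, not the kernel API):

	#include <stdio.h>

	static unsigned long long resv_clusters(unsigned long long total_blocks,
						unsigned int cluster_bits,
						int has_extents)
	{
		unsigned long long resv;

		if (!has_extents)
			return 0;			/* keep ext2/3 behaviour */

		resv = total_blocks >> cluster_bits;	/* blocks -> clusters */
		resv /= 50;				/* 2% */
		if (resv > 4096)
			resv = 4096;
		return resv;
	}

	int main(void)
	{
		/* 1 TiB of 4K blocks, 1 block per cluster, extents on -> capped at 4096 */
		printf("%llu\n", resv_clusters(268435456ULL, 0, 1));
		/* small non-extent filesystem -> 0 */
		printf("%llu\n", resv_clusters(262144ULL, 0, 0));
		return 0;
	}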
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index b7fc035a6943..73f3e4ee4037 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
986 | { | 986 | { |
987 | struct file *file = iocb->ki_filp; | 987 | struct file *file = iocb->ki_filp; |
988 | struct inode *inode = file->f_mapping->host; | 988 | struct inode *inode = file->f_mapping->host; |
989 | struct address_space *mapping = inode->i_mapping; | ||
989 | struct gfs2_inode *ip = GFS2_I(inode); | 990 | struct gfs2_inode *ip = GFS2_I(inode); |
990 | struct gfs2_holder gh; | 991 | struct gfs2_holder gh; |
991 | int rv; | 992 | int rv; |
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
1006 | if (rv != 1) | 1007 | if (rv != 1) |
1007 | goto out; /* dio not valid, fall back to buffered i/o */ | 1008 | goto out; /* dio not valid, fall back to buffered i/o */ |
1008 | 1009 | ||
1010 | /* | ||
1011 | * Now since we are holding a deferred (CW) lock at this point, you | ||
1012 | * might be wondering why this is ever needed. There is a case however | ||
1013 | * where we've granted a deferred local lock against a cached exclusive | ||
1014 | * glock. That is ok provided all granted local locks are deferred, but | ||
1015 | * it also means that it is possible to encounter pages which are | ||
1016 | * cached and possibly also mapped. So here we check for that and sort | ||
1017 | * them out ahead of the dio. The glock state machine will take care of | ||
1018 | * everything else. | ||
1019 | * | ||
1020 | * If in fact the cached glock state (gl->gl_state) is deferred (CW) in | ||
1021 | * the first place, mapping->nr_pages will always be zero. | ||
1022 | * the first place, mapping->nrpages will always be zero. | ||
1023 | if (mapping->nrpages) { | ||
1024 | loff_t lstart = offset & (PAGE_CACHE_SIZE - 1); | ||
1025 | loff_t len = iov_length(iov, nr_segs); | ||
1026 | loff_t end = PAGE_ALIGN(offset + len) - 1; | ||
1027 | |||
1028 | rv = 0; | ||
1029 | if (len == 0) | ||
1030 | goto out; | ||
1031 | if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) | ||
1032 | unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len); | ||
1033 | rv = filemap_write_and_wait_range(mapping, lstart, end); | ||
1034 | if (rv) | ||
1035 | return rv; | ||
1036 | truncate_inode_pages_range(mapping, lstart, end); | ||
1037 | } | ||
1038 | |||
1009 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1039 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1010 | offset, nr_segs, gfs2_get_block_direct, | 1040 | offset, nr_segs, gfs2_get_block_direct, |
1011 | NULL, NULL, 0); | 1041 | NULL, NULL, 0); |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index c8420f7e4db6..6f7a47c05259 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) | |||
1655 | struct task_struct *gh_owner = NULL; | 1655 | struct task_struct *gh_owner = NULL; |
1656 | char flags_buf[32]; | 1656 | char flags_buf[32]; |
1657 | 1657 | ||
1658 | rcu_read_lock(); | ||
1658 | if (gh->gh_owner_pid) | 1659 | if (gh->gh_owner_pid) |
1659 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); | 1660 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); |
1660 | gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", | 1661 | gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", |
@@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) | |||
1664 | gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, | 1665 | gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, |
1665 | gh_owner ? gh_owner->comm : "(ended)", | 1666 | gh_owner ? gh_owner->comm : "(ended)", |
1666 | (void *)gh->gh_ip); | 1667 | (void *)gh->gh_ip); |
1668 | rcu_read_unlock(); | ||
1667 | return 0; | 1669 | return 0; |
1668 | } | 1670 | } |
1669 | 1671 | ||
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index db908f697139..f88dcd925010 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl) | |||
192 | 192 | ||
193 | if (ip && !S_ISREG(ip->i_inode.i_mode)) | 193 | if (ip && !S_ISREG(ip->i_inode.i_mode)) |
194 | ip = NULL; | 194 | ip = NULL; |
195 | if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) | 195 | if (ip) { |
196 | unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); | 196 | if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) |
197 | unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); | ||
198 | inode_dio_wait(&ip->i_inode); | ||
199 | } | ||
197 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) | 200 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) |
198 | return; | 201 | return; |
199 | 202 | ||
@@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh) | |||
410 | return error; | 413 | return error; |
411 | } | 414 | } |
412 | 415 | ||
416 | if (gh->gh_state != LM_ST_DEFERRED) | ||
417 | inode_dio_wait(&ip->i_inode); | ||
418 | |||
413 | if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && | 419 | if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && |
414 | (gl->gl_state == LM_ST_EXCLUSIVE) && | 420 | (gl->gl_state == LM_ST_EXCLUSIVE) && |
415 | (gh->gh_state == LM_ST_EXCLUSIVE)) { | 421 | (gh->gh_state == LM_ST_EXCLUSIVE)) { |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 610613fb65b5..9dcb9777a5f8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) | |||
551 | struct buffer_head *bh = bd->bd_bh; | 551 | struct buffer_head *bh = bd->bd_bh; |
552 | struct gfs2_glock *gl = bd->bd_gl; | 552 | struct gfs2_glock *gl = bd->bd_gl; |
553 | 553 | ||
554 | gfs2_remove_from_ail(bd); | ||
555 | bd->bd_bh = NULL; | ||
556 | bh->b_private = NULL; | 554 | bh->b_private = NULL; |
557 | bd->bd_blkno = bh->b_blocknr; | 555 | bd->bd_blkno = bh->b_blocknr; |
556 | gfs2_remove_from_ail(bd); /* drops ref on bh */ | ||
557 | bd->bd_bh = NULL; | ||
558 | bd->bd_ops = &gfs2_revoke_lops; | 558 | bd->bd_ops = &gfs2_revoke_lops; |
559 | sdp->sd_log_num_revoke++; | 559 | sdp->sd_log_num_revoke++; |
560 | atomic_inc(&gl->gl_revokes); | 560 | atomic_inc(&gl->gl_revokes); |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 932415050540..52f177be3bf8 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int | |||
258 | struct address_space *mapping = bh->b_page->mapping; | 258 | struct address_space *mapping = bh->b_page->mapping; |
259 | struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); | 259 | struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); |
260 | struct gfs2_bufdata *bd = bh->b_private; | 260 | struct gfs2_bufdata *bd = bh->b_private; |
261 | int was_pinned = 0; | ||
261 | 262 | ||
262 | if (test_clear_buffer_pinned(bh)) { | 263 | if (test_clear_buffer_pinned(bh)) { |
263 | trace_gfs2_pin(bd, 0); | 264 | trace_gfs2_pin(bd, 0); |
@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int | |||
273 | tr->tr_num_databuf_rm++; | 274 | tr->tr_num_databuf_rm++; |
274 | } | 275 | } |
275 | tr->tr_touched = 1; | 276 | tr->tr_touched = 1; |
277 | was_pinned = 1; | ||
276 | brelse(bh); | 278 | brelse(bh); |
277 | } | 279 | } |
278 | if (bd) { | 280 | if (bd) { |
279 | spin_lock(&sdp->sd_ail_lock); | 281 | spin_lock(&sdp->sd_ail_lock); |
280 | if (bd->bd_tr) { | 282 | if (bd->bd_tr) { |
281 | gfs2_trans_add_revoke(sdp, bd); | 283 | gfs2_trans_add_revoke(sdp, bd); |
284 | } else if (was_pinned) { | ||
285 | bh->b_private = NULL; | ||
286 | kmem_cache_free(gfs2_bufdata_cachep, bd); | ||
282 | } | 287 | } |
283 | spin_unlock(&sdp->sd_ail_lock); | 288 | spin_unlock(&sdp->sd_ail_lock); |
284 | } | 289 | } |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 82303b474958..52fa88314f5c 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, | |||
1366 | if (IS_ERR(s)) | 1366 | if (IS_ERR(s)) |
1367 | goto error_bdev; | 1367 | goto error_bdev; |
1368 | 1368 | ||
1369 | if (s->s_root) | 1369 | if (s->s_root) { |
1370 | /* | ||
1371 | * s_umount nests inside bd_mutex during | ||
1372 | * __invalidate_device(). blkdev_put() acquires | ||
1373 | * bd_mutex and can't be called under s_umount. Drop | ||
1374 | * s_umount temporarily. This is safe as we're | ||
1375 | * holding an active reference. | ||
1376 | */ | ||
1377 | up_write(&s->s_umount); | ||
1370 | blkdev_put(bdev, mode); | 1378 | blkdev_put(bdev, mode); |
1379 | down_write(&s->s_umount); | ||
1380 | } | ||
1371 | 1381 | ||
1372 | memset(&args, 0, sizeof(args)); | 1382 | memset(&args, 0, sizeof(args)); |
1373 | args.ar_quota = GFS2_QUOTA_DEFAULT; | 1383 | args.ar_quota = GFS2_QUOTA_DEFAULT; |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 52032647dd4a..5fa344afb49a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
702 | read_lock(&journal->j_state_lock); | 702 | read_lock(&journal->j_state_lock); |
703 | #ifdef CONFIG_JBD2_DEBUG | 703 | #ifdef CONFIG_JBD2_DEBUG |
704 | if (!tid_geq(journal->j_commit_request, tid)) { | 704 | if (!tid_geq(journal->j_commit_request, tid)) { |
705 | printk(KERN_EMERG | 705 | printk(KERN_ERR |
706 | "%s: error: j_commit_request=%d, tid=%d\n", | 706 | "%s: error: j_commit_request=%d, tid=%d\n", |
707 | __func__, journal->j_commit_request, tid); | 707 | __func__, journal->j_commit_request, tid); |
708 | } | 708 | } |
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) | |||
718 | } | 718 | } |
719 | read_unlock(&journal->j_state_lock); | 719 | read_unlock(&journal->j_state_lock); |
720 | 720 | ||
721 | if (unlikely(is_journal_aborted(journal))) { | 721 | if (unlikely(is_journal_aborted(journal))) |
722 | printk(KERN_EMERG "journal commit I/O error\n"); | ||
723 | err = -EIO; | 722 | err = -EIO; |
724 | } | ||
725 | return err; | 723 | return err; |
726 | } | 724 | } |
727 | 725 | ||
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal) | |||
1527 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && | 1525 | if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && |
1528 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1526 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
1529 | /* Can't have checksum v1 and v2 on at the same time! */ | 1527 | /* Can't have checksum v1 and v2 on at the same time! */ |
1530 | printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " | 1528 | printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " |
1531 | "at the same time!\n"); | 1529 | "at the same time!\n"); |
1532 | goto out; | 1530 | goto out; |
1533 | } | 1531 | } |
1534 | 1532 | ||
1535 | if (!jbd2_verify_csum_type(journal, sb)) { | 1533 | if (!jbd2_verify_csum_type(journal, sb)) { |
1536 | printk(KERN_ERR "JBD: Unknown checksum type\n"); | 1534 | printk(KERN_ERR "JBD2: Unknown checksum type\n"); |
1537 | goto out; | 1535 | goto out; |
1538 | } | 1536 | } |
1539 | 1537 | ||
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal) | |||
1541 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { | 1539 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
1542 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); | 1540 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); |
1543 | if (IS_ERR(journal->j_chksum_driver)) { | 1541 | if (IS_ERR(journal->j_chksum_driver)) { |
1544 | printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); | 1542 | printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); |
1545 | err = PTR_ERR(journal->j_chksum_driver); | 1543 | err = PTR_ERR(journal->j_chksum_driver); |
1546 | journal->j_chksum_driver = NULL; | 1544 | journal->j_chksum_driver = NULL; |
1547 | goto out; | 1545 | goto out; |
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal) | |||
1550 | 1548 | ||
1551 | /* Check superblock checksum */ | 1549 | /* Check superblock checksum */ |
1552 | if (!jbd2_superblock_csum_verify(journal, sb)) { | 1550 | if (!jbd2_superblock_csum_verify(journal, sb)) { |
1553 | printk(KERN_ERR "JBD: journal checksum error\n"); | 1551 | printk(KERN_ERR "JBD2: journal checksum error\n"); |
1554 | goto out; | 1552 | goto out; |
1555 | } | 1553 | } |
1556 | 1554 | ||
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, | |||
1836 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", | 1834 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", |
1837 | 0, 0); | 1835 | 0, 0); |
1838 | if (IS_ERR(journal->j_chksum_driver)) { | 1836 | if (IS_ERR(journal->j_chksum_driver)) { |
1839 | printk(KERN_ERR "JBD: Cannot load crc32c " | 1837 | printk(KERN_ERR "JBD2: Cannot load crc32c " |
1840 | "driver.\n"); | 1838 | "driver.\n"); |
1841 | journal->j_chksum_driver = NULL; | 1839 | journal->j_chksum_driver = NULL; |
1842 | return 0; | 1840 | return 0; |
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void) | |||
2645 | #ifdef CONFIG_JBD2_DEBUG | 2643 | #ifdef CONFIG_JBD2_DEBUG |
2646 | int n = atomic_read(&nr_journal_heads); | 2644 | int n = atomic_read(&nr_journal_heads); |
2647 | if (n) | 2645 | if (n) |
2648 | printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n); | 2646 | printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n); |
2649 | #endif | 2647 | #endif |
2650 | jbd2_remove_jbd_stats_proc_entry(); | 2648 | jbd2_remove_jbd_stats_proc_entry(); |
2651 | jbd2_journal_destroy_caches(); | 2649 | jbd2_journal_destroy_caches(); |
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 3929c50428b1..3b6bb19d60b1 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c | |||
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal, | |||
594 | be32_to_cpu(tmp->h_sequence))) { | 594 | be32_to_cpu(tmp->h_sequence))) { |
595 | brelse(obh); | 595 | brelse(obh); |
596 | success = -EIO; | 596 | success = -EIO; |
597 | printk(KERN_ERR "JBD: Invalid " | 597 | printk(KERN_ERR "JBD2: Invalid " |
598 | "checksum recovering " | 598 | "checksum recovering " |
599 | "block %llu in log\n", | 599 | "block %llu in log\n", |
600 | blocknr); | 600 | blocknr); |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 7aa9a32573bb..8360674c85bc 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -932,7 +932,7 @@ repeat: | |||
932 | jbd2_alloc(jh2bh(jh)->b_size, | 932 | jbd2_alloc(jh2bh(jh)->b_size, |
933 | GFP_NOFS); | 933 | GFP_NOFS); |
934 | if (!frozen_buffer) { | 934 | if (!frozen_buffer) { |
935 | printk(KERN_EMERG | 935 | printk(KERN_ERR |
936 | "%s: OOM for frozen_buffer\n", | 936 | "%s: OOM for frozen_buffer\n", |
937 | __func__); | 937 | __func__); |
938 | JBUFFER_TRACE(jh, "oom!"); | 938 | JBUFFER_TRACE(jh, "oom!"); |
@@ -1166,7 +1166,7 @@ repeat: | |||
1166 | if (!jh->b_committed_data) { | 1166 | if (!jh->b_committed_data) { |
1167 | committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); | 1167 | committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); |
1168 | if (!committed_data) { | 1168 | if (!committed_data) { |
1169 | printk(KERN_EMERG "%s: No memory for committed data\n", | 1169 | printk(KERN_ERR "%s: No memory for committed data\n", |
1170 | __func__); | 1170 | __func__); |
1171 | err = -ENOMEM; | 1171 | err = -ENOMEM; |
1172 | goto out; | 1172 | goto out; |
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1290 | * once a transaction -bzzz | 1290 | * once a transaction -bzzz |
1291 | */ | 1291 | */ |
1292 | jh->b_modified = 1; | 1292 | jh->b_modified = 1; |
1293 | J_ASSERT_JH(jh, handle->h_buffer_credits > 0); | 1293 | if (handle->h_buffer_credits <= 0) { |
1294 | ret = -ENOSPC; | ||
1295 | goto out_unlock_bh; | ||
1296 | } | ||
1294 | handle->h_buffer_credits--; | 1297 | handle->h_buffer_credits--; |
1295 | } | 1298 | } |
1296 | 1299 | ||
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1305 | JBUFFER_TRACE(jh, "fastpath"); | 1308 | JBUFFER_TRACE(jh, "fastpath"); |
1306 | if (unlikely(jh->b_transaction != | 1309 | if (unlikely(jh->b_transaction != |
1307 | journal->j_running_transaction)) { | 1310 | journal->j_running_transaction)) { |
1308 | printk(KERN_EMERG "JBD: %s: " | 1311 | printk(KERN_ERR "JBD2: %s: " |
1309 | "jh->b_transaction (%llu, %p, %u) != " | 1312 | "jh->b_transaction (%llu, %p, %u) != " |
1310 | "journal->j_running_transaction (%p, %u)", | 1313 | "journal->j_running_transaction (%p, %u)", |
1311 | journal->j_devname, | 1314 | journal->j_devname, |
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1332 | JBUFFER_TRACE(jh, "already on other transaction"); | 1335 | JBUFFER_TRACE(jh, "already on other transaction"); |
1333 | if (unlikely(jh->b_transaction != | 1336 | if (unlikely(jh->b_transaction != |
1334 | journal->j_committing_transaction)) { | 1337 | journal->j_committing_transaction)) { |
1335 | printk(KERN_EMERG "JBD: %s: " | 1338 | printk(KERN_ERR "JBD2: %s: " |
1336 | "jh->b_transaction (%llu, %p, %u) != " | 1339 | "jh->b_transaction (%llu, %p, %u) != " |
1337 | "journal->j_committing_transaction (%p, %u)", | 1340 | "journal->j_committing_transaction (%p, %u)", |
1338 | journal->j_devname, | 1341 | journal->j_devname, |
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1345 | ret = -EINVAL; | 1348 | ret = -EINVAL; |
1346 | } | 1349 | } |
1347 | if (unlikely(jh->b_next_transaction != transaction)) { | 1350 | if (unlikely(jh->b_next_transaction != transaction)) { |
1348 | printk(KERN_EMERG "JBD: %s: " | 1351 | printk(KERN_ERR "JBD2: %s: " |
1349 | "jh->b_next_transaction (%llu, %p, %u) != " | 1352 | "jh->b_next_transaction (%llu, %p, %u) != " |
1350 | "transaction (%p, %u)", | 1353 | "transaction (%p, %u)", |
1351 | journal->j_devname, | 1354 | journal->j_devname, |
@@ -1373,7 +1376,6 @@ out_unlock_bh: | |||
1373 | jbd2_journal_put_journal_head(jh); | 1376 | jbd2_journal_put_journal_head(jh); |
1374 | out: | 1377 | out: |
1375 | JBUFFER_TRACE(jh, "exit"); | 1378 | JBUFFER_TRACE(jh, "exit"); |
1376 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ | ||
1377 | return ret; | 1379 | return ret; |
1378 | } | 1380 | } |
1379 | 1381 | ||
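The change above in jbd2_journal_dirty_metadata() turns the J_ASSERT_JH() on handle->h_buffer_credits into a soft -ENOSPC failure (and drops the WARN_ON at the exit), so running out of journal credits no longer has to be treated as a bug. A minimal, stand-alone sketch of that assert-to-error-return pattern; the handle structure and take_credit() helper are invented for illustration and are not part of the kernel code:

    #include <errno.h>
    #include <stdio.h>

    struct handle {
            int buffer_credits;             /* credits left on this handle */
    };

    /* Consume one credit; fail softly instead of asserting. */
    static int take_credit(struct handle *h)
    {
            if (h->buffer_credits <= 0)
                    return -ENOSPC;         /* caller aborts the operation */
            h->buffer_credits--;
            return 0;
    }

    int main(void)
    {
            struct handle h = { .buffer_credits = 1 };

            printf("first:  %d\n", take_credit(&h));        /* 0 */
            printf("second: %d\n", take_credit(&h));        /* -ENOSPC (-28) */
            return 0;
    }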
diff --git a/fs/namei.c b/fs/namei.c index c53d3a9547f9..3531deebad30 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) | |||
1598 | * do a "get_unaligned()" if this helps and is sufficiently | 1598 | * do a "get_unaligned()" if this helps and is sufficiently |
1599 | * fast. | 1599 | * fast. |
1600 | * | 1600 | * |
1601 | * - Little-endian machines (so that we can generate the mask | ||
1602 | * of low bytes efficiently). Again, we *could* do a byte | ||
1603 | * swapping load on big-endian architectures if that is not | ||
1604 | * expensive enough to make the optimization worthless. | ||
1605 | * | ||
1606 | * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we | 1601 | * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we |
1607 | * do not trap on the (extremely unlikely) case of a page | 1602 | * do not trap on the (extremely unlikely) case of a page |
1608 | * crossing operation. | 1603 | * crossing operation. |
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len) | |||
1646 | if (!len) | 1641 | if (!len) |
1647 | goto done; | 1642 | goto done; |
1648 | } | 1643 | } |
1649 | mask = ~(~0ul << len*8); | 1644 | mask = bytemask_from_count(len); |
1650 | hash += mask & a; | 1645 | hash += mask & a; |
1651 | done: | 1646 | done: |
1652 | return fold_hash(hash); | 1647 | return fold_hash(hash); |
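The open-coded trailing-byte mask that full_name_hash() used here is replaced by bytemask_from_count(), which is added to include/linux/dcache.h further down in this patch. A small stand-alone check of the little-endian equivalence; the macro body is copied from that hunk and the test harness itself is illustrative only:

    #include <assert.h>
    #include <stdio.h>

    /* little-endian definition from the dcache.h hunk below */
    #define bytemask_from_count(cnt)        (~(~0ul << (cnt)*8))

    int main(void)
    {
            /* only partial final words reach this path, so len < sizeof(long) */
            for (unsigned int len = 1; len < sizeof(unsigned long); len++) {
                    unsigned long mask = bytemask_from_count(len);

                    /* identical to the mask the old code computed inline */
                    assert(mask == ~(~0ul << len*8));
                    printf("len=%u mask=%#lx\n", len, mask); /* len=3 -> 0xffffff */
            }
            return 0;
    }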
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 9186c7ce0b14..b6af150c96b8 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | static void | 134 | static void |
135 | nfsd_reply_cache_unhash(struct svc_cacherep *rp) | ||
136 | { | ||
137 | hlist_del_init(&rp->c_hash); | ||
138 | list_del_init(&rp->c_lru); | ||
139 | } | ||
140 | |||
141 | static void | ||
135 | nfsd_reply_cache_free_locked(struct svc_cacherep *rp) | 142 | nfsd_reply_cache_free_locked(struct svc_cacherep *rp) |
136 | { | 143 | { |
137 | if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { | 144 | if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { |
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) | |||
417 | rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); | 424 | rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); |
418 | if (nfsd_cache_entry_expired(rp) || | 425 | if (nfsd_cache_entry_expired(rp) || |
419 | num_drc_entries >= max_drc_entries) { | 426 | num_drc_entries >= max_drc_entries) { |
420 | lru_put_end(rp); | 427 | nfsd_reply_cache_unhash(rp); |
421 | prune_cache_entries(); | 428 | prune_cache_entries(); |
422 | goto search_cache; | 429 | goto search_cache; |
423 | } | 430 | } |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 28955d4b7218..124fc43c7090 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, | |||
292 | { | 292 | { |
293 | struct proc_dir_entry *pde = PDE(file_inode(file)); | 293 | struct proc_dir_entry *pde = PDE(file_inode(file)); |
294 | unsigned long rv = -EIO; | 294 | unsigned long rv = -EIO; |
295 | unsigned long (*get_area)(struct file *, unsigned long, unsigned long, | 295 | |
296 | unsigned long, unsigned long) = NULL; | ||
297 | if (use_pde(pde)) { | 296 | if (use_pde(pde)) { |
297 | typeof(proc_reg_get_unmapped_area) *get_area; | ||
298 | |||
299 | get_area = pde->proc_fops->get_unmapped_area; | ||
298 | #ifdef CONFIG_MMU | 300 | #ifdef CONFIG_MMU |
299 | get_area = current->mm->get_unmapped_area; | 301 | if (!get_area) |
302 | get_area = current->mm->get_unmapped_area; | ||
300 | #endif | 303 | #endif |
301 | if (pde->proc_fops->get_unmapped_area) | 304 | |
302 | get_area = pde->proc_fops->get_unmapped_area; | ||
303 | if (get_area) | 305 | if (get_area) |
304 | rv = get_area(file, orig_addr, len, pgoff, flags); | 306 | rv = get_area(file, orig_addr, len, pgoff, flags); |
307 | else | ||
308 | rv = orig_addr; | ||
305 | unuse_pde(pde); | 309 | unuse_pde(pde); |
306 | } | 310 | } |
307 | return rv; | 311 | return rv; |
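The rewritten proc_reg_get_unmapped_area() now declares its helper pointer as typeof(proc_reg_get_unmapped_area) *get_area, so the pointer's type is derived from the function's own prototype instead of being spelled out (and kept in sync) by hand. A stand-alone illustration of that typeof idiom; the two area functions are made up for the example:

    #include <stdio.h>

    static unsigned long default_area(unsigned long addr, unsigned long len)
    {
            (void)len;
            return addr;                    /* pretend placement policy */
    }

    static unsigned long fancy_area(unsigned long addr, unsigned long len)
    {
            return addr + len;              /* pretend placement policy */
    }

    int main(void)
    {
            /* pointer type derived from an existing function, as in the hunk above */
            typeof(default_area) *get_area = NULL;

            if (!get_area)
                    get_area = fancy_area;  /* fall back, like the CONFIG_MMU branch */

            printf("%#lx\n", get_area(0x1000, 0x200));
            return 0;
    }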
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b8e93a40a5d3..78c3c2097787 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi) | |||
443 | pstore_get_records(0); | 443 | pstore_get_records(0); |
444 | 444 | ||
445 | kmsg_dump_register(&pstore_dumper); | 445 | kmsg_dump_register(&pstore_dumper); |
446 | pstore_register_console(); | 446 | |
447 | pstore_register_ftrace(); | 447 | if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { |
448 | pstore_register_console(); | ||
449 | pstore_register_ftrace(); | ||
450 | } | ||
448 | 451 | ||
449 | if (pstore_update_ms >= 0) { | 452 | if (pstore_update_ms >= 0) { |
450 | pstore_timer.expires = jiffies + | 453 | pstore_timer.expires = jiffies + |
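With this hunk, the high-traffic front ends (console and ftrace dumping) are only wired up for backends that do not advertise themselves as fragile. A backend that can only cope with oops/panic dumps would set PSTORE_FLAGS_FRAGILE in its struct pstore_info before registering. A rough sketch of such a registration, assuming the struct pstore_info layout of this kernel; the backend name and callbacks are invented:

    static struct pstore_info example_pstore = {
            .owner  = THIS_MODULE,
            .name   = "example",            /* hypothetical backend */
            .flags  = PSTORE_FLAGS_FRAGILE, /* skip console/ftrace front ends */
            .open   = example_pstore_open,
            .read   = example_pstore_read,
            .write  = example_pstore_write,
            .erase  = example_pstore_erase,
    };

    /* somewhere in the backend's init path */
    ret = pstore_register(&example_pstore);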
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b94f93685093..35e7d08fe629 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
609 | struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; | 609 | struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; |
610 | struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; | 610 | struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; |
611 | struct sysfs_open_file *of; | 611 | struct sysfs_open_file *of; |
612 | bool has_read, has_write, has_mmap; | 612 | bool has_read, has_write; |
613 | int error = -EACCES; | 613 | int error = -EACCES; |
614 | 614 | ||
615 | /* need attr_sd for attr and ops, its parent for kobj */ | 615 | /* need attr_sd for attr and ops, its parent for kobj */ |
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
621 | 621 | ||
622 | has_read = battr->read || battr->mmap; | 622 | has_read = battr->read || battr->mmap; |
623 | has_write = battr->write || battr->mmap; | 623 | has_write = battr->write || battr->mmap; |
624 | has_mmap = battr->mmap; | ||
625 | } else { | 624 | } else { |
626 | const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); | 625 | const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); |
627 | 626 | ||
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
633 | 632 | ||
634 | has_read = ops->show; | 633 | has_read = ops->show; |
635 | has_write = ops->store; | 634 | has_write = ops->store; |
636 | has_mmap = false; | ||
637 | } | 635 | } |
638 | 636 | ||
639 | /* check perms and supported operations */ | 637 | /* check perms and supported operations */ |
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file) | |||
661 | * open file has a separate mutex, it's okay as long as those don't | 659 | * open file has a separate mutex, it's okay as long as those don't |
662 | * happen on the same file. At this point, we can't easily give | 660 | * happen on the same file. At this point, we can't easily give |
663 | * each file a separate locking class. Let's differentiate on | 661 | * each file a separate locking class. Let's differentiate on |
664 | * whether the file has mmap or not for now. | 662 | * whether the file is bin or not for now. |
665 | */ | 663 | */ |
666 | if (has_mmap) | 664 | if (sysfs_is_bin(attr_sd)) |
667 | mutex_init(&of->mutex); | 665 | mutex_init(&of->mutex); |
668 | else | 666 | else |
669 | mutex_init(&of->mutex); | 667 | mutex_init(&of->mutex); |
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c index 739e0a52deda..5549d69ddb45 100644 --- a/fs/xfs/xfs_attr_remote.c +++ b/fs/xfs/xfs_attr_remote.c | |||
@@ -110,7 +110,7 @@ xfs_attr3_rmt_verify( | |||
110 | if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) | 110 | if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) |
111 | return false; | 111 | return false; |
112 | if (be32_to_cpu(rmt->rm_offset) + | 112 | if (be32_to_cpu(rmt->rm_offset) + |
113 | be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX) | 113 | be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX) |
114 | return false; | 114 | return false; |
115 | if (rmt->rm_owner == 0) | 115 | if (rmt->rm_owner == 0) |
116 | return false; | 116 | return false; |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3ef11b22e750..3b2c14b6f0fb 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent( | |||
1635 | * blocks at the end of the file which do not start at the previous data block, | 1635 | * blocks at the end of the file which do not start at the previous data block, |
1636 | * we will try to align the new blocks at stripe unit boundaries. | 1636 | * we will try to align the new blocks at stripe unit boundaries. |
1637 | * | 1637 | * |
1638 | * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be | 1638 | * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be |
1639 | * at, or past the EOF. | 1639 | * at, or past the EOF. |
1640 | */ | 1640 | */ |
1641 | STATIC int | 1641 | STATIC int |
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof( | |||
1650 | bma->aeof = 0; | 1650 | bma->aeof = 0; |
1651 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | 1651 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, |
1652 | &is_empty); | 1652 | &is_empty); |
1653 | if (error || is_empty) | 1653 | if (error) |
1654 | return error; | 1654 | return error; |
1655 | 1655 | ||
1656 | if (is_empty) { | ||
1657 | bma->aeof = 1; | ||
1658 | return 0; | ||
1659 | } | ||
1660 | |||
1656 | /* | 1661 | /* |
1657 | * Check if we are allocation or past the last extent, or at least into | 1662 | * Check if we are allocation or past the last extent, or at least into |
1658 | * the last delayed allocated extent. | 1663 | * the last delayed allocated extent. |
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc( | |||
3643 | int isaligned; | 3648 | int isaligned; |
3644 | int tryagain; | 3649 | int tryagain; |
3645 | int error; | 3650 | int error; |
3651 | int stripe_align; | ||
3646 | 3652 | ||
3647 | ASSERT(ap->length); | 3653 | ASSERT(ap->length); |
3648 | 3654 | ||
3649 | mp = ap->ip->i_mount; | 3655 | mp = ap->ip->i_mount; |
3656 | |||
3657 | /* stripe alignment for allocation is determined by mount parameters */ | ||
3658 | stripe_align = 0; | ||
3659 | if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) | ||
3660 | stripe_align = mp->m_swidth; | ||
3661 | else if (mp->m_dalign) | ||
3662 | stripe_align = mp->m_dalign; | ||
3663 | |||
3650 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; | 3664 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; |
3651 | if (unlikely(align)) { | 3665 | if (unlikely(align)) { |
3652 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, | 3666 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, |
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc( | |||
3655 | ASSERT(!error); | 3669 | ASSERT(!error); |
3656 | ASSERT(ap->length); | 3670 | ASSERT(ap->length); |
3657 | } | 3671 | } |
3672 | |||
3673 | |||
3658 | nullfb = *ap->firstblock == NULLFSBLOCK; | 3674 | nullfb = *ap->firstblock == NULLFSBLOCK; |
3659 | fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); | 3675 | fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); |
3660 | if (nullfb) { | 3676 | if (nullfb) { |
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc( | |||
3730 | */ | 3746 | */ |
3731 | if (!ap->flist->xbf_low && ap->aeof) { | 3747 | if (!ap->flist->xbf_low && ap->aeof) { |
3732 | if (!ap->offset) { | 3748 | if (!ap->offset) { |
3733 | args.alignment = mp->m_dalign; | 3749 | args.alignment = stripe_align; |
3734 | atype = args.type; | 3750 | atype = args.type; |
3735 | isaligned = 1; | 3751 | isaligned = 1; |
3736 | /* | 3752 | /* |
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc( | |||
3755 | * of minlen+alignment+slop doesn't go up | 3771 | * of minlen+alignment+slop doesn't go up |
3756 | * between the calls. | 3772 | * between the calls. |
3757 | */ | 3773 | */ |
3758 | if (blen > mp->m_dalign && blen <= args.maxlen) | 3774 | if (blen > stripe_align && blen <= args.maxlen) |
3759 | nextminlen = blen - mp->m_dalign; | 3775 | nextminlen = blen - stripe_align; |
3760 | else | 3776 | else |
3761 | nextminlen = args.minlen; | 3777 | nextminlen = args.minlen; |
3762 | if (nextminlen + mp->m_dalign > args.minlen + 1) | 3778 | if (nextminlen + stripe_align > args.minlen + 1) |
3763 | args.minalignslop = | 3779 | args.minalignslop = |
3764 | nextminlen + mp->m_dalign - | 3780 | nextminlen + stripe_align - |
3765 | args.minlen - 1; | 3781 | args.minlen - 1; |
3766 | else | 3782 | else |
3767 | args.minalignslop = 0; | 3783 | args.minalignslop = 0; |
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc( | |||
3783 | */ | 3799 | */ |
3784 | args.type = atype; | 3800 | args.type = atype; |
3785 | args.fsbno = ap->blkno; | 3801 | args.fsbno = ap->blkno; |
3786 | args.alignment = mp->m_dalign; | 3802 | args.alignment = stripe_align; |
3787 | args.minlen = nextminlen; | 3803 | args.minlen = nextminlen; |
3788 | args.minalignslop = 0; | 3804 | args.minalignslop = 0; |
3789 | isaligned = 1; | 3805 | isaligned = 1; |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 5887e41c0323..82e0dab46ee5 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -287,6 +287,7 @@ xfs_bmapi_allocate( | |||
287 | INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); | 287 | INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); |
288 | queue_work(xfs_alloc_wq, &args->work); | 288 | queue_work(xfs_alloc_wq, &args->work); |
289 | wait_for_completion(&done); | 289 | wait_for_completion(&done); |
290 | destroy_work_on_stack(&args->work); | ||
290 | return args->result; | 291 | return args->result; |
291 | } | 292 | } |
292 | 293 | ||
@@ -1187,7 +1188,12 @@ xfs_zero_remaining_bytes( | |||
1187 | XFS_BUF_UNWRITE(bp); | 1188 | XFS_BUF_UNWRITE(bp); |
1188 | XFS_BUF_READ(bp); | 1189 | XFS_BUF_READ(bp); |
1189 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); | 1190 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); |
1190 | xfsbdstrat(mp, bp); | 1191 | |
1192 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
1193 | error = XFS_ERROR(EIO); | ||
1194 | break; | ||
1195 | } | ||
1196 | xfs_buf_iorequest(bp); | ||
1191 | error = xfs_buf_iowait(bp); | 1197 | error = xfs_buf_iowait(bp); |
1192 | if (error) { | 1198 | if (error) { |
1193 | xfs_buf_ioerror_alert(bp, | 1199 | xfs_buf_ioerror_alert(bp, |
@@ -1200,7 +1206,12 @@ xfs_zero_remaining_bytes( | |||
1200 | XFS_BUF_UNDONE(bp); | 1206 | XFS_BUF_UNDONE(bp); |
1201 | XFS_BUF_UNREAD(bp); | 1207 | XFS_BUF_UNREAD(bp); |
1202 | XFS_BUF_WRITE(bp); | 1208 | XFS_BUF_WRITE(bp); |
1203 | xfsbdstrat(mp, bp); | 1209 | |
1210 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
1211 | error = XFS_ERROR(EIO); | ||
1212 | break; | ||
1213 | } | ||
1214 | xfs_buf_iorequest(bp); | ||
1204 | error = xfs_buf_iowait(bp); | 1215 | error = xfs_buf_iowait(bp); |
1205 | if (error) { | 1216 | if (error) { |
1206 | xfs_buf_ioerror_alert(bp, | 1217 | xfs_buf_ioerror_alert(bp, |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77dcb00..afe7645e4b2b 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -698,7 +698,11 @@ xfs_buf_read_uncached( | |||
698 | bp->b_flags |= XBF_READ; | 698 | bp->b_flags |= XBF_READ; |
699 | bp->b_ops = ops; | 699 | bp->b_ops = ops; |
700 | 700 | ||
701 | xfsbdstrat(target->bt_mount, bp); | 701 | if (XFS_FORCED_SHUTDOWN(target->bt_mount)) { |
702 | xfs_buf_relse(bp); | ||
703 | return NULL; | ||
704 | } | ||
705 | xfs_buf_iorequest(bp); | ||
702 | xfs_buf_iowait(bp); | 706 | xfs_buf_iowait(bp); |
703 | return bp; | 707 | return bp; |
704 | } | 708 | } |
@@ -1089,7 +1093,7 @@ xfs_bioerror( | |||
1089 | * This is meant for userdata errors; metadata bufs come with | 1093 | * This is meant for userdata errors; metadata bufs come with |
1090 | * iodone functions attached, so that we can track down errors. | 1094 | * iodone functions attached, so that we can track down errors. |
1091 | */ | 1095 | */ |
1092 | STATIC int | 1096 | int |
1093 | xfs_bioerror_relse( | 1097 | xfs_bioerror_relse( |
1094 | struct xfs_buf *bp) | 1098 | struct xfs_buf *bp) |
1095 | { | 1099 | { |
@@ -1152,7 +1156,7 @@ xfs_bwrite( | |||
1152 | ASSERT(xfs_buf_islocked(bp)); | 1156 | ASSERT(xfs_buf_islocked(bp)); |
1153 | 1157 | ||
1154 | bp->b_flags |= XBF_WRITE; | 1158 | bp->b_flags |= XBF_WRITE; |
1155 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); | 1159 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL); |
1156 | 1160 | ||
1157 | xfs_bdstrat_cb(bp); | 1161 | xfs_bdstrat_cb(bp); |
1158 | 1162 | ||
@@ -1164,25 +1168,6 @@ xfs_bwrite( | |||
1164 | return error; | 1168 | return error; |
1165 | } | 1169 | } |
1166 | 1170 | ||
1167 | /* | ||
1168 | * Wrapper around bdstrat so that we can stop data from going to disk in case | ||
1169 | * we are shutting down the filesystem. Typically user data goes thru this | ||
1170 | * path; one of the exceptions is the superblock. | ||
1171 | */ | ||
1172 | void | ||
1173 | xfsbdstrat( | ||
1174 | struct xfs_mount *mp, | ||
1175 | struct xfs_buf *bp) | ||
1176 | { | ||
1177 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
1178 | trace_xfs_bdstrat_shut(bp, _RET_IP_); | ||
1179 | xfs_bioerror_relse(bp); | ||
1180 | return; | ||
1181 | } | ||
1182 | |||
1183 | xfs_buf_iorequest(bp); | ||
1184 | } | ||
1185 | |||
1186 | STATIC void | 1171 | STATIC void |
1187 | _xfs_buf_ioend( | 1172 | _xfs_buf_ioend( |
1188 | xfs_buf_t *bp, | 1173 | xfs_buf_t *bp, |
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg( | |||
1516 | struct xfs_buf *bp; | 1501 | struct xfs_buf *bp; |
1517 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); | 1502 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); |
1518 | list_del_init(&bp->b_lru); | 1503 | list_del_init(&bp->b_lru); |
1504 | if (bp->b_flags & XBF_WRITE_FAIL) { | ||
1505 | xfs_alert(btp->bt_mount, | ||
1506 | "Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n" | ||
1507 | "Please run xfs_repair to determine the extent of the problem.", | ||
1508 | (long long)bp->b_bn); | ||
1509 | } | ||
1519 | xfs_buf_rele(bp); | 1510 | xfs_buf_rele(bp); |
1520 | } | 1511 | } |
1521 | if (loop++ != 0) | 1512 | if (loop++ != 0) |
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit( | |||
1799 | 1790 | ||
1800 | blk_start_plug(&plug); | 1791 | blk_start_plug(&plug); |
1801 | list_for_each_entry_safe(bp, n, io_list, b_list) { | 1792 | list_for_each_entry_safe(bp, n, io_list, b_list) { |
1802 | bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); | 1793 | bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); |
1803 | bp->b_flags |= XBF_WRITE; | 1794 | bp->b_flags |= XBF_WRITE; |
1804 | 1795 | ||
1805 | if (!wait) { | 1796 | if (!wait) { |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index e65683361017..1cf21a4a9f22 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
@@ -45,6 +45,7 @@ typedef enum { | |||
45 | #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ | 45 | #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ |
46 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ | 46 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ |
47 | #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ | 47 | #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ |
48 | #define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */ | ||
48 | 49 | ||
49 | /* I/O hints for the BIO layer */ | 50 | /* I/O hints for the BIO layer */ |
50 | #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ | 51 | #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ |
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t; | |||
70 | { XBF_ASYNC, "ASYNC" }, \ | 71 | { XBF_ASYNC, "ASYNC" }, \ |
71 | { XBF_DONE, "DONE" }, \ | 72 | { XBF_DONE, "DONE" }, \ |
72 | { XBF_STALE, "STALE" }, \ | 73 | { XBF_STALE, "STALE" }, \ |
74 | { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ | ||
73 | { XBF_SYNCIO, "SYNCIO" }, \ | 75 | { XBF_SYNCIO, "SYNCIO" }, \ |
74 | { XBF_FUA, "FUA" }, \ | 76 | { XBF_FUA, "FUA" }, \ |
75 | { XBF_FLUSH, "FLUSH" }, \ | 77 | { XBF_FLUSH, "FLUSH" }, \ |
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t; | |||
80 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ | 82 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ |
81 | { _XBF_COMPOUND, "COMPOUND" } | 83 | { _XBF_COMPOUND, "COMPOUND" } |
82 | 84 | ||
85 | |||
83 | /* | 86 | /* |
84 | * Internal state flags. | 87 | * Internal state flags. |
85 | */ | 88 | */ |
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *); | |||
269 | 272 | ||
270 | /* Buffer Read and Write Routines */ | 273 | /* Buffer Read and Write Routines */ |
271 | extern int xfs_bwrite(struct xfs_buf *bp); | 274 | extern int xfs_bwrite(struct xfs_buf *bp); |
272 | |||
273 | extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); | ||
274 | |||
275 | extern void xfs_buf_ioend(xfs_buf_t *, int); | 275 | extern void xfs_buf_ioend(xfs_buf_t *, int); |
276 | extern void xfs_buf_ioerror(xfs_buf_t *, int); | 276 | extern void xfs_buf_ioerror(xfs_buf_t *, int); |
277 | extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); | 277 | extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); |
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, | |||
282 | #define xfs_buf_zero(bp, off, len) \ | 282 | #define xfs_buf_zero(bp, off, len) \ |
283 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) | 283 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) |
284 | 284 | ||
285 | extern int xfs_bioerror_relse(struct xfs_buf *); | ||
286 | |||
285 | static inline int xfs_buf_geterror(xfs_buf_t *bp) | 287 | static inline int xfs_buf_geterror(xfs_buf_t *bp) |
286 | { | 288 | { |
287 | return bp ? bp->b_error : ENOMEM; | 289 | return bp ? bp->b_error : ENOMEM; |
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void); | |||
301 | 303 | ||
302 | #define XFS_BUF_ZEROFLAGS(bp) \ | 304 | #define XFS_BUF_ZEROFLAGS(bp) \ |
303 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ | 305 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ |
304 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) | 306 | XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \ |
307 | XBF_WRITE_FAIL)) | ||
305 | 308 | ||
306 | void xfs_buf_stale(struct xfs_buf *bp); | 309 | void xfs_buf_stale(struct xfs_buf *bp); |
307 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) | 310 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a64f67ba25d3..2227b9b050bb 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -496,6 +496,14 @@ xfs_buf_item_unpin( | |||
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
499 | /* | ||
500 | * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30 | ||
501 | * seconds so as to not spam logs too much on repeated detection of the same | ||
502 | * buffer being bad. | ||
503 | */ | ||
504 | |||
505 | DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10); | ||
506 | |||
499 | STATIC uint | 507 | STATIC uint |
500 | xfs_buf_item_push( | 508 | xfs_buf_item_push( |
501 | struct xfs_log_item *lip, | 509 | struct xfs_log_item *lip, |
@@ -524,6 +532,14 @@ xfs_buf_item_push( | |||
524 | 532 | ||
525 | trace_xfs_buf_item_push(bip); | 533 | trace_xfs_buf_item_push(bip); |
526 | 534 | ||
535 | /* has a previous flush failed due to IO errors? */ | ||
536 | if ((bp->b_flags & XBF_WRITE_FAIL) && | ||
537 | ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { | ||
538 | xfs_warn(bp->b_target->bt_mount, | ||
539 | "Detected failing async write on buffer block 0x%llx. Retrying async write.\n", | ||
540 | (long long)bp->b_bn); | ||
541 | } | ||
542 | |||
527 | if (!xfs_buf_delwri_queue(bp, buffer_list)) | 543 | if (!xfs_buf_delwri_queue(bp, buffer_list)) |
528 | rval = XFS_ITEM_FLUSHING; | 544 | rval = XFS_ITEM_FLUSHING; |
529 | xfs_buf_unlock(bp); | 545 | xfs_buf_unlock(bp); |
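The new warning is throttled with the standard ratelimit helpers: DEFINE_RATELIMIT_STATE(name, interval, burst) sets up the state and the ratelimit check gates each message, here 10 messages per 30 seconds. A compact sketch of the same pattern with an invented message, assuming the usual <linux/ratelimit.h> interface; it is kernel code and not buildable on its own:

    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    /* at most 10 messages every 30 seconds, as in the hunk above */
    static DEFINE_RATELIMIT_STATE(example_rl, 30 * HZ, 10);

    static void report_failing_write(unsigned long long blkno)
    {
            if (__ratelimit(&example_rl))
                    pr_warn("example: failing async write on block 0x%llx, retrying\n",
                            blkno);
    }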
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks( | |||
1096 | 1112 | ||
1097 | xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ | 1113 | xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ |
1098 | 1114 | ||
1099 | if (!XFS_BUF_ISSTALE(bp)) { | 1115 | if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { |
1100 | bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; | 1116 | bp->b_flags |= XBF_WRITE | XBF_ASYNC | |
1117 | XBF_DONE | XBF_WRITE_FAIL; | ||
1101 | xfs_buf_iorequest(bp); | 1118 | xfs_buf_iorequest(bp); |
1102 | } else { | 1119 | } else { |
1103 | xfs_buf_relse(bp); | 1120 | xfs_buf_relse(bp); |
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 56369d4509d5..48c7d18f68c3 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c | |||
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup( | |||
2067 | */ | 2067 | */ |
2068 | int /* error */ | 2068 | int /* error */ |
2069 | xfs_dir2_node_removename( | 2069 | xfs_dir2_node_removename( |
2070 | xfs_da_args_t *args) /* operation arguments */ | 2070 | struct xfs_da_args *args) /* operation arguments */ |
2071 | { | 2071 | { |
2072 | xfs_da_state_blk_t *blk; /* leaf block */ | 2072 | struct xfs_da_state_blk *blk; /* leaf block */ |
2073 | int error; /* error return value */ | 2073 | int error; /* error return value */ |
2074 | int rval; /* operation return value */ | 2074 | int rval; /* operation return value */ |
2075 | xfs_da_state_t *state; /* btree cursor */ | 2075 | struct xfs_da_state *state; /* btree cursor */ |
2076 | 2076 | ||
2077 | trace_xfs_dir2_node_removename(args); | 2077 | trace_xfs_dir2_node_removename(args); |
2078 | 2078 | ||
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename( | |||
2084 | state->mp = args->dp->i_mount; | 2084 | state->mp = args->dp->i_mount; |
2085 | state->blocksize = state->mp->m_dirblksize; | 2085 | state->blocksize = state->mp->m_dirblksize; |
2086 | state->node_ents = state->mp->m_dir_node_ents; | 2086 | state->node_ents = state->mp->m_dir_node_ents; |
2087 | /* | 2087 | |
2088 | * Look up the entry we're deleting, set up the cursor. | 2088 | /* Look up the entry we're deleting, set up the cursor. */ |
2089 | */ | ||
2090 | error = xfs_da3_node_lookup_int(state, &rval); | 2089 | error = xfs_da3_node_lookup_int(state, &rval); |
2091 | if (error) | 2090 | if (error) |
2092 | rval = error; | 2091 | goto out_free; |
2093 | /* | 2092 | |
2094 | * Didn't find it, upper layer screwed up. | 2093 | /* Didn't find it, upper layer screwed up. */ |
2095 | */ | ||
2096 | if (rval != EEXIST) { | 2094 | if (rval != EEXIST) { |
2097 | xfs_da_state_free(state); | 2095 | error = rval; |
2098 | return rval; | 2096 | goto out_free; |
2099 | } | 2097 | } |
2098 | |||
2100 | blk = &state->path.blk[state->path.active - 1]; | 2099 | blk = &state->path.blk[state->path.active - 1]; |
2101 | ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); | 2100 | ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); |
2102 | ASSERT(state->extravalid); | 2101 | ASSERT(state->extravalid); |
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename( | |||
2107 | error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, | 2106 | error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, |
2108 | &state->extrablk, &rval); | 2107 | &state->extrablk, &rval); |
2109 | if (error) | 2108 | if (error) |
2110 | return error; | 2109 | goto out_free; |
2111 | /* | 2110 | /* |
2112 | * Fix the hash values up the btree. | 2111 | * Fix the hash values up the btree. |
2113 | */ | 2112 | */ |
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename( | |||
2122 | */ | 2121 | */ |
2123 | if (!error) | 2122 | if (!error) |
2124 | error = xfs_dir2_node_to_leaf(state); | 2123 | error = xfs_dir2_node_to_leaf(state); |
2124 | out_free: | ||
2125 | xfs_da_state_free(state); | 2125 | xfs_da_state_free(state); |
2126 | return error; | 2126 | return error; |
2127 | } | 2127 | } |
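The reworked xfs_dir2_node_removename() routes every failure through the new out_free label so the xfs_da_state allocated at the top is always freed, instead of leaking on the early returns. A stand-alone sketch of that single-exit cleanup idiom; the cursor structure and the lookup/remove steps are placeholders, not the real XFS helpers:

    #include <errno.h>
    #include <stdlib.h>

    struct cursor { int dummy; };

    static int lookup(struct cursor *c)       { (void)c; return 0; } /* placeholder */
    static int remove_entry(struct cursor *c) { (void)c; return 0; } /* placeholder */

    static int removename(void)
    {
            struct cursor *state;
            int error;

            state = malloc(sizeof(*state));
            if (!state)
                    return -ENOMEM;

            error = lookup(state);
            if (error)
                    goto out_free;          /* every failure path frees the cursor */

            error = remove_entry(state);
    out_free:
            free(state);                    /* single exit: nothing leaks */
            return error;
    }

    int main(void)
    {
            return removename() ? 1 : 0;
    }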
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 8367d6dc18c9..4f11ef011139 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c | |||
@@ -157,7 +157,7 @@ xfs_ioc_trim( | |||
157 | struct xfs_mount *mp, | 157 | struct xfs_mount *mp, |
158 | struct fstrim_range __user *urange) | 158 | struct fstrim_range __user *urange) |
159 | { | 159 | { |
160 | struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; | 160 | struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); |
161 | unsigned int granularity = q->limits.discard_granularity; | 161 | unsigned int granularity = q->limits.discard_granularity; |
162 | struct fstrim_range range; | 162 | struct fstrim_range range; |
163 | xfs_daddr_t start, end, minlen; | 163 | xfs_daddr_t start, end, minlen; |
@@ -180,7 +180,8 @@ xfs_ioc_trim( | |||
180 | * matter as trimming blocks is an advisory interface. | 180 | * matter as trimming blocks is an advisory interface. |
181 | */ | 181 | */ |
182 | if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || | 182 | if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || |
183 | range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) | 183 | range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) || |
184 | range.len < mp->m_sb.sb_blocksize) | ||
184 | return -XFS_ERROR(EINVAL); | 185 | return -XFS_ERROR(EINVAL); |
185 | 186 | ||
186 | start = BTOBB(range.start); | 187 | start = BTOBB(range.start); |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a6e54b3319bd..02fb943cbf22 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -220,6 +220,8 @@ xfs_growfs_data_private( | |||
220 | */ | 220 | */ |
221 | nfree = 0; | 221 | nfree = 0; |
222 | for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { | 222 | for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { |
223 | __be32 *agfl_bno; | ||
224 | |||
223 | /* | 225 | /* |
224 | * AG freespace header block | 226 | * AG freespace header block |
225 | */ | 227 | */ |
@@ -279,8 +281,10 @@ xfs_growfs_data_private( | |||
279 | agfl->agfl_seqno = cpu_to_be32(agno); | 281 | agfl->agfl_seqno = cpu_to_be32(agno); |
280 | uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); | 282 | uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid); |
281 | } | 283 | } |
284 | |||
285 | agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); | ||
282 | for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) | 286 | for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) |
283 | agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); | 287 | agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); |
284 | 288 | ||
285 | error = xfs_bwrite(bp); | 289 | error = xfs_bwrite(bp); |
286 | xfs_buf_relse(bp); | 290 | xfs_buf_relse(bp); |
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 4d613401a5e0..33ad9a77791f 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c | |||
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle( | |||
442 | return -XFS_ERROR(EPERM); | 442 | return -XFS_ERROR(EPERM); |
443 | if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) | 443 | if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) |
444 | return -XFS_ERROR(EFAULT); | 444 | return -XFS_ERROR(EFAULT); |
445 | if (al_hreq.buflen > XATTR_LIST_MAX) | 445 | if (al_hreq.buflen < sizeof(struct attrlist) || |
446 | al_hreq.buflen > XATTR_LIST_MAX) | ||
446 | return -XFS_ERROR(EINVAL); | 447 | return -XFS_ERROR(EINVAL); |
447 | 448 | ||
448 | /* | 449 | /* |
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index e8fb1231db81..a7992f8de9d3 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c | |||
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle( | |||
356 | if (copy_from_user(&al_hreq, arg, | 356 | if (copy_from_user(&al_hreq, arg, |
357 | sizeof(compat_xfs_fsop_attrlist_handlereq_t))) | 357 | sizeof(compat_xfs_fsop_attrlist_handlereq_t))) |
358 | return -XFS_ERROR(EFAULT); | 358 | return -XFS_ERROR(EFAULT); |
359 | if (al_hreq.buflen > XATTR_LIST_MAX) | 359 | if (al_hreq.buflen < sizeof(struct attrlist) || |
360 | al_hreq.buflen > XATTR_LIST_MAX) | ||
360 | return -XFS_ERROR(EINVAL); | 361 | return -XFS_ERROR(EINVAL); |
361 | 362 | ||
362 | /* | 363 | /* |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 27e0e544e963..104455b8046c 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
@@ -618,7 +618,8 @@ xfs_setattr_nonsize( | |||
618 | } | 618 | } |
619 | if (!gid_eq(igid, gid)) { | 619 | if (!gid_eq(igid, gid)) { |
620 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { | 620 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { |
621 | ASSERT(!XFS_IS_PQUOTA_ON(mp)); | 621 | ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) || |
622 | !XFS_IS_PQUOTA_ON(mp)); | ||
622 | ASSERT(mask & ATTR_GID); | 623 | ASSERT(mask & ATTR_GID); |
623 | ASSERT(gdqp); | 624 | ASSERT(gdqp); |
624 | olddquot2 = xfs_qm_vop_chown(tp, ip, | 625 | olddquot2 = xfs_qm_vop_chown(tp, ip, |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b6b669df40f3..eae16920655b 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -193,7 +193,10 @@ xlog_bread_noalign( | |||
193 | bp->b_io_length = nbblks; | 193 | bp->b_io_length = nbblks; |
194 | bp->b_error = 0; | 194 | bp->b_error = 0; |
195 | 195 | ||
196 | xfsbdstrat(log->l_mp, bp); | 196 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) |
197 | return XFS_ERROR(EIO); | ||
198 | |||
199 | xfs_buf_iorequest(bp); | ||
197 | error = xfs_buf_iowait(bp); | 200 | error = xfs_buf_iowait(bp); |
198 | if (error) | 201 | if (error) |
199 | xfs_buf_ioerror_alert(bp, __func__); | 202 | xfs_buf_ioerror_alert(bp, __func__); |
@@ -4397,7 +4400,13 @@ xlog_do_recover( | |||
4397 | XFS_BUF_READ(bp); | 4400 | XFS_BUF_READ(bp); |
4398 | XFS_BUF_UNASYNC(bp); | 4401 | XFS_BUF_UNASYNC(bp); |
4399 | bp->b_ops = &xfs_sb_buf_ops; | 4402 | bp->b_ops = &xfs_sb_buf_ops; |
4400 | xfsbdstrat(log->l_mp, bp); | 4403 | |
4404 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { | ||
4405 | xfs_buf_relse(bp); | ||
4406 | return XFS_ERROR(EIO); | ||
4407 | } | ||
4408 | |||
4409 | xfs_buf_iorequest(bp); | ||
4401 | error = xfs_buf_iowait(bp); | 4410 | error = xfs_buf_iowait(bp); |
4402 | if (error) { | 4411 | if (error) { |
4403 | xfs_buf_ioerror_alert(bp, __func__); | 4412 | xfs_buf_ioerror_alert(bp, __func__); |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 14a4996cfec6..dd88f0e27bd8 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -134,8 +134,6 @@ xfs_qm_dqpurge( | |||
134 | { | 134 | { |
135 | struct xfs_mount *mp = dqp->q_mount; | 135 | struct xfs_mount *mp = dqp->q_mount; |
136 | struct xfs_quotainfo *qi = mp->m_quotainfo; | 136 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
137 | struct xfs_dquot *gdqp = NULL; | ||
138 | struct xfs_dquot *pdqp = NULL; | ||
139 | 137 | ||
140 | xfs_dqlock(dqp); | 138 | xfs_dqlock(dqp); |
141 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { | 139 | if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { |
@@ -143,21 +141,6 @@ xfs_qm_dqpurge( | |||
143 | return EAGAIN; | 141 | return EAGAIN; |
144 | } | 142 | } |
145 | 143 | ||
146 | /* | ||
147 | * If this quota has a hint attached, prepare for releasing it now. | ||
148 | */ | ||
149 | gdqp = dqp->q_gdquot; | ||
150 | if (gdqp) { | ||
151 | xfs_dqlock(gdqp); | ||
152 | dqp->q_gdquot = NULL; | ||
153 | } | ||
154 | |||
155 | pdqp = dqp->q_pdquot; | ||
156 | if (pdqp) { | ||
157 | xfs_dqlock(pdqp); | ||
158 | dqp->q_pdquot = NULL; | ||
159 | } | ||
160 | |||
161 | dqp->dq_flags |= XFS_DQ_FREEING; | 144 | dqp->dq_flags |= XFS_DQ_FREEING; |
162 | 145 | ||
163 | xfs_dqflock(dqp); | 146 | xfs_dqflock(dqp); |
@@ -206,11 +189,47 @@ xfs_qm_dqpurge( | |||
206 | XFS_STATS_DEC(xs_qm_dquot_unused); | 189 | XFS_STATS_DEC(xs_qm_dquot_unused); |
207 | 190 | ||
208 | xfs_qm_dqdestroy(dqp); | 191 | xfs_qm_dqdestroy(dqp); |
192 | return 0; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Release the group or project dquot pointers the user dquots may be carrying | ||
197 | * around as a hint, and proceed to purge the user dquot cache if requested. | ||
198 | */ | ||
199 | STATIC int | ||
200 | xfs_qm_dqpurge_hints( | ||
201 | struct xfs_dquot *dqp, | ||
202 | void *data) | ||
203 | { | ||
204 | struct xfs_dquot *gdqp = NULL; | ||
205 | struct xfs_dquot *pdqp = NULL; | ||
206 | uint flags = *((uint *)data); | ||
207 | |||
208 | xfs_dqlock(dqp); | ||
209 | if (dqp->dq_flags & XFS_DQ_FREEING) { | ||
210 | xfs_dqunlock(dqp); | ||
211 | return EAGAIN; | ||
212 | } | ||
213 | |||
214 | /* If this quota has a hint attached, prepare for releasing it now */ | ||
215 | gdqp = dqp->q_gdquot; | ||
216 | if (gdqp) | ||
217 | dqp->q_gdquot = NULL; | ||
218 | |||
219 | pdqp = dqp->q_pdquot; | ||
220 | if (pdqp) | ||
221 | dqp->q_pdquot = NULL; | ||
222 | |||
223 | xfs_dqunlock(dqp); | ||
209 | 224 | ||
210 | if (gdqp) | 225 | if (gdqp) |
211 | xfs_qm_dqput(gdqp); | 226 | xfs_qm_dqrele(gdqp); |
212 | if (pdqp) | 227 | if (pdqp) |
213 | xfs_qm_dqput(pdqp); | 228 | xfs_qm_dqrele(pdqp); |
229 | |||
230 | if (flags & XFS_QMOPT_UQUOTA) | ||
231 | return xfs_qm_dqpurge(dqp, NULL); | ||
232 | |||
214 | return 0; | 233 | return 0; |
215 | } | 234 | } |
216 | 235 | ||
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all( | |||
222 | struct xfs_mount *mp, | 241 | struct xfs_mount *mp, |
223 | uint flags) | 242 | uint flags) |
224 | { | 243 | { |
225 | if (flags & XFS_QMOPT_UQUOTA) | 244 | /* |
226 | xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); | 245 | * We have to release group/project dquot hint(s) from the user dquot |
246 | * at first if they are there, otherwise we would run into an infinite | ||
247 | * loop while walking through the radix tree to purge other types of dquots | ||
248 | * since their refcount is not zero if the user dquot refers to them | ||
249 | * as a hint. | ||
250 | * | ||
251 | * Calling the special xfs_qm_dqpurge_hints() will end up going through the | ||
252 | * general xfs_qm_dqpurge() against the user dquot cache if requested. | ||
253 | */ | ||
254 | xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags); | ||
255 | |||
227 | if (flags & XFS_QMOPT_GQUOTA) | 256 | if (flags & XFS_QMOPT_GQUOTA) |
228 | xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); | 257 | xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); |
229 | if (flags & XFS_QMOPT_PQUOTA) | 258 | if (flags & XFS_QMOPT_PQUOTA) |
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach( | |||
2082 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2111 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2083 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 2112 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
2084 | 2113 | ||
2085 | if (udqp) { | 2114 | if (udqp && XFS_IS_UQUOTA_ON(mp)) { |
2086 | ASSERT(ip->i_udquot == NULL); | 2115 | ASSERT(ip->i_udquot == NULL); |
2087 | ASSERT(XFS_IS_UQUOTA_ON(mp)); | ||
2088 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); | 2116 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); |
2089 | 2117 | ||
2090 | ip->i_udquot = xfs_qm_dqhold(udqp); | 2118 | ip->i_udquot = xfs_qm_dqhold(udqp); |
2091 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); | 2119 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); |
2092 | } | 2120 | } |
2093 | if (gdqp) { | 2121 | if (gdqp && XFS_IS_GQUOTA_ON(mp)) { |
2094 | ASSERT(ip->i_gdquot == NULL); | 2122 | ASSERT(ip->i_gdquot == NULL); |
2095 | ASSERT(XFS_IS_GQUOTA_ON(mp)); | ||
2096 | ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); | 2123 | ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); |
2097 | ip->i_gdquot = xfs_qm_dqhold(gdqp); | 2124 | ip->i_gdquot = xfs_qm_dqhold(gdqp); |
2098 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); | 2125 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); |
2099 | } | 2126 | } |
2100 | if (pdqp) { | 2127 | if (pdqp && XFS_IS_PQUOTA_ON(mp)) { |
2101 | ASSERT(ip->i_pdquot == NULL); | 2128 | ASSERT(ip->i_pdquot == NULL); |
2102 | ASSERT(XFS_IS_PQUOTA_ON(mp)); | ||
2103 | ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); | 2129 | ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); |
2104 | 2130 | ||
2105 | ip->i_pdquot = xfs_qm_dqhold(pdqp); | 2131 | ip->i_pdquot = xfs_qm_dqhold(pdqp); |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index c035d11b7734..647b6f1d8923 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map( | |||
314 | ASSERT(bp->b_iodone == NULL); | 314 | ASSERT(bp->b_iodone == NULL); |
315 | XFS_BUF_READ(bp); | 315 | XFS_BUF_READ(bp); |
316 | bp->b_ops = ops; | 316 | bp->b_ops = ops; |
317 | xfsbdstrat(tp->t_mountp, bp); | 317 | |
318 | /* | ||
319 | * XXX(hch): clean up the error handling here to be less | ||
320 | * of a mess.. | ||
321 | */ | ||
322 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
323 | trace_xfs_bdstrat_shut(bp, _RET_IP_); | ||
324 | xfs_bioerror_relse(bp); | ||
325 | } else { | ||
326 | xfs_buf_iorequest(bp); | ||
327 | } | ||
328 | |||
318 | error = xfs_buf_iowait(bp); | 329 | error = xfs_buf_iowait(bp); |
319 | if (error) { | 330 | if (error) { |
320 | xfs_buf_ioerror_alert(bp, __func__); | 331 | xfs_buf_ioerror_alert(bp, __func__); |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c602c7718421..ddabed1f51c2 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -169,7 +169,8 @@ struct acpi_device_flags { | |||
169 | u32 ejectable:1; | 169 | u32 ejectable:1; |
170 | u32 power_manageable:1; | 170 | u32 power_manageable:1; |
171 | u32 match_driver:1; | 171 | u32 match_driver:1; |
172 | u32 reserved:27; | 172 | u32 no_hotplug:1; |
173 | u32 reserved:26; | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | /* File System */ | 176 | /* File System */ |
@@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj; | |||
344 | extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); | 345 | extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); |
345 | void acpi_bus_private_data_handler(acpi_handle, void *); | 346 | void acpi_bus_private_data_handler(acpi_handle, void *); |
346 | int acpi_bus_get_private_data(acpi_handle, void **); | 347 | int acpi_bus_get_private_data(acpi_handle, void **); |
348 | void acpi_bus_no_hotplug(acpi_handle handle); | ||
347 | extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); | 349 | extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); |
348 | extern int register_acpi_notifier(struct notifier_block *); | 350 | extern int register_acpi_notifier(struct notifier_block *); |
349 | extern int unregister_acpi_notifier(struct notifier_block *); | 351 | extern int unregister_acpi_notifier(struct notifier_block *); |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28e4d0e..db0923458940 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | |||
217 | #endif | 217 | #endif |
218 | 218 | ||
219 | #ifndef pte_accessible | 219 | #ifndef pte_accessible |
220 | # define pte_accessible(pte) ((void)(pte),1) | 220 | # define pte_accessible(mm, pte) ((void)(pte), 1) |
221 | #endif | 221 | #endif |
222 | 222 | ||
223 | #ifndef flush_tlb_fix_spurious_fault | 223 | #ifndef flush_tlb_fix_spurious_fault |
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) | |||
599 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 599 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
600 | barrier(); | 600 | barrier(); |
601 | #endif | 601 | #endif |
602 | if (pmd_none(pmdval)) | 602 | if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) |
603 | return 1; | 603 | return 1; |
604 | if (unlikely(pmd_bad(pmdval))) { | 604 | if (unlikely(pmd_bad(pmdval))) { |
605 | if (!pmd_trans_huge(pmdval)) | 605 | pmd_clear_bad(pmd); |
606 | pmd_clear_bad(pmd); | ||
607 | return 1; | 606 | return 1; |
608 | } | 607 | } |
609 | return 0; | 608 | return 0; |
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b420ac8f..1cd3f5d767a8 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h | |||
@@ -3,13 +3,11 @@ | |||
3 | 3 | ||
4 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
5 | 5 | ||
6 | /* | 6 | #define PREEMPT_ENABLED (0) |
7 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | 7 | |
8 | * that think a non-zero value indicates we cannot preempt. | ||
9 | */ | ||
10 | static __always_inline int preempt_count(void) | 8 | static __always_inline int preempt_count(void) |
11 | { | 9 | { |
12 | return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; | 10 | return current_thread_info()->preempt_count; |
13 | } | 11 | } |
14 | 12 | ||
15 | static __always_inline int *preempt_count_ptr(void) | 13 | static __always_inline int *preempt_count_ptr(void) |
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) | |||
17 | return &current_thread_info()->preempt_count; | 15 | return &current_thread_info()->preempt_count;
18 | } | 16 | } |
19 | 17 | ||
20 | /* | ||
21 | * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | ||
22 | * alternative is loosing a reschedule. Better schedule too often -- also this | ||
23 | * should be a very rare operation. | ||
24 | */ | ||
25 | static __always_inline void preempt_count_set(int pc) | 18 | static __always_inline void preempt_count_set(int pc) |
26 | { | 19 | { |
27 | *preempt_count_ptr() = pc; | 20 | *preempt_count_ptr() = pc; |
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) | |||
41 | task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ | 34 | task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ |
42 | } while (0) | 35 | } while (0) |
43 | 36 | ||
44 | /* | ||
45 | * We fold the NEED_RESCHED bit into the preempt count such that | ||
46 | * preempt_enable() can decrement and test for needing to reschedule with a | ||
47 | * single instruction. | ||
48 | * | ||
49 | * We invert the actual bit, so that when the decrement hits 0 we know we both | ||
50 | * need to resched (the bit is cleared) and can resched (no preempt count). | ||
51 | */ | ||
52 | |||
53 | static __always_inline void set_preempt_need_resched(void) | 37 | static __always_inline void set_preempt_need_resched(void) |
54 | { | 38 | { |
55 | *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; | ||
56 | } | 39 | } |
57 | 40 | ||
58 | static __always_inline void clear_preempt_need_resched(void) | 41 | static __always_inline void clear_preempt_need_resched(void) |
59 | { | 42 | { |
60 | *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; | ||
61 | } | 43 | } |
62 | 44 | ||
63 | static __always_inline bool test_preempt_need_resched(void) | 45 | static __always_inline bool test_preempt_need_resched(void) |
64 | { | 46 | { |
65 | return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); | 47 | return false; |
66 | } | 48 | } |
67 | 49 | ||
68 | /* | 50 | /* |
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) | |||
81 | 63 | ||
82 | static __always_inline bool __preempt_count_dec_and_test(void) | 64 | static __always_inline bool __preempt_count_dec_and_test(void) |
83 | { | 65 | { |
84 | return !--*preempt_count_ptr(); | 66 | /* |
67 | * Because load-store architectures cannot do per-cpu atomic | ||
68 | * operations, we cannot use PREEMPT_NEED_RESCHED because it might get | ||
69 | * lost. | ||
70 | */ | ||
71 | return !--*preempt_count_ptr() && tif_need_resched(); | ||
85 | } | 72 | } |
86 | 73 | ||
87 | /* | 74 | /* |
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) | |||
89 | */ | 76 | */ |
90 | static __always_inline bool should_resched(void) | 77 | static __always_inline bool should_resched(void) |
91 | { | 78 | { |
92 | return unlikely(!*preempt_count_ptr()); | 79 | return unlikely(!preempt_count() && tif_need_resched()); |
93 | } | 80 | } |
94 | 81 | ||
95 | #ifdef CONFIG_PREEMPT | 82 | #ifdef CONFIG_PREEMPT |
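With the NEED_RESCHED folding gone from the generic header, a preemption point now fires only when the count drops to zero and TIF_NEED_RESCHED is set, which is why __preempt_count_dec_and_test() and should_resched() both test tif_need_resched() explicitly. A tiny userspace model of that combined check, with the two pieces of state reduced to plain variables:

    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count = 1;           /* stand-in for the thread_info field */
    static bool need_resched;               /* stand-in for TIF_NEED_RESCHED */

    /* mirrors __preempt_count_dec_and_test(): both conditions must hold */
    static bool dec_and_test(void)
    {
            return !--preempt_count && need_resched;
    }

    int main(void)
    {
            printf("%d\n", dec_and_test()); /* 0: count hit zero, no resched pending */

            preempt_count = 1;
            need_resched = true;
            printf("%d\n", dec_and_test()); /* 1: preemptible and resched pending */
            return 0;
    }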
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 3f21f1b72e45..d3909effd725 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h | |||
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
49 | return (val + c->high_bits) & ~rhs; | 49 | return (val + c->high_bits) & ~rhs; |
50 | } | 50 | } |
51 | 51 | ||
52 | #ifndef zero_bytemask | ||
53 | #ifdef CONFIG_64BIT | ||
54 | #define zero_bytemask(mask) (~0ul << fls64(mask)) | ||
55 | #else | ||
56 | #define zero_bytemask(mask) (~0ul << fls(mask)) | ||
57 | #endif /* CONFIG_64BIT */ | ||
58 | #endif /* zero_bytemask */ | ||
59 | |||
52 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 60 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 64ebede184f1..6a626a507b8c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
@@ -44,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) | |||
44 | if (sg_is_last(sg)) | 44 | if (sg_is_last(sg)) |
45 | return NULL; | 45 | return NULL; |
46 | 46 | ||
47 | return (++sg)->length ? sg : (void *)sg_page(sg); | 47 | return (++sg)->length ? sg : sg_chain_ptr(sg); |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, | 50 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 87578c109e48..49376aec2fbb 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -600,7 +600,7 @@ | |||
600 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 600 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
601 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 601 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
602 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 602 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
603 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 603 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
604 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 604 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
605 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 605 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
606 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 606 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h index 9a193b84238a..a89df3be1686 100644 --- a/include/linux/assoc_array.h +++ b/include/linux/assoc_array.h | |||
@@ -41,10 +41,10 @@ struct assoc_array_ops { | |||
41 | /* Is this the object we're looking for? */ | 41 | /* Is this the object we're looking for? */ |
42 | bool (*compare_object)(const void *object, const void *index_key); | 42 | bool (*compare_object)(const void *object, const void *index_key); |
43 | 43 | ||
44 | /* How different are two objects, to a bit position in their keys? (or | 44 | /* How different is an object from an index key, to a bit position in |
45 | * -1 if they're the same) | 45 | * their keys? (or -1 if they're the same) |
46 | */ | 46 | */ |
47 | int (*diff_objects)(const void *a, const void *b); | 47 | int (*diff_objects)(const void *object, const void *index_key); |
48 | 48 | ||
49 | /* Method to free an object. */ | 49 | /* Method to free an object. */ |
50 | void (*free_object)(void *object); | 50 | void (*free_object)(void *object); |
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 669fef5c745a..3e0fbe441763 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h | |||
@@ -3,6 +3,6 @@ | |||
3 | 3 | ||
4 | #include <uapi/linux/auxvec.h> | 4 | #include <uapi/linux/auxvec.h> |
5 | 5 | ||
6 | #define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ | 6 | #define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ |
7 | /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ | 7 | /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ |
8 | #endif /* _LINUX_AUXVEC_H */ | 8 | #endif /* _LINUX_AUXVEC_H */ |
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 973ce10c40b6..dc1bd3dcf11f 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
@@ -28,8 +28,6 @@ | |||
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #define uninitialized_var(x) x | ||
32 | |||
33 | #ifndef __HAVE_BUILTIN_BSWAP16__ | 31 | #ifndef __HAVE_BUILTIN_BSWAP16__ |
34 | /* icc has this, but it's called _bswap16 */ | 32 | /* icc has this, but it's called _bswap16 */ |
35 | #define __HAVE_BUILTIN_BSWAP16__ | 33 | #define __HAVE_BUILTIN_BSWAP16__ |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ee5fe9d77ae8..dc196bbcf227 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -280,14 +280,6 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) | |||
280 | policy->cpuinfo.max_freq); | 280 | policy->cpuinfo.max_freq); |
281 | } | 281 | } |
282 | 282 | ||
283 | #ifdef CONFIG_CPU_FREQ | ||
284 | void cpufreq_suspend(void); | ||
285 | void cpufreq_resume(void); | ||
286 | #else | ||
287 | static inline void cpufreq_suspend(void) {} | ||
288 | static inline void cpufreq_resume(void) {} | ||
289 | #endif | ||
290 | |||
291 | /********************************************************************* | 283 | /********************************************************************* |
292 | * CPUFREQ NOTIFIER INTERFACE * | 284 | * CPUFREQ NOTIFIER INTERFACE * |
293 | *********************************************************************/ | 285 | *********************************************************************/ |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 57e87e749a48..bf72e9ac6de0 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -29,8 +29,10 @@ struct vfsmount; | |||
29 | /* The hash is always the low bits of hash_len */ | 29 | /* The hash is always the low bits of hash_len */ |
30 | #ifdef __LITTLE_ENDIAN | 30 | #ifdef __LITTLE_ENDIAN |
31 | #define HASH_LEN_DECLARE u32 hash; u32 len; | 31 | #define HASH_LEN_DECLARE u32 hash; u32 len; |
32 | #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) | ||
32 | #else | 33 | #else |
33 | #define HASH_LEN_DECLARE u32 len; u32 hash; | 34 | #define HASH_LEN_DECLARE u32 len; u32 hash; |
35 | #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) | ||
34 | #endif | 36 | #endif |
35 | 37 | ||
36 | /* | 38 | /* |
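The bytemask_from_count(cnt) macro added above produces a word-sized mask covering the first cnt bytes, with the byte order matching the HASH_LEN_DECLARE layout for that endianness. A small userspace sketch of the little-endian variant, assuming a 64-bit unsigned long:

#include <stdio.h>

/* Little-endian variant from the hunk above: the cnt low-order bytes set. */
#define bytemask_from_count(cnt)        (~(~0ul << (cnt)*8))

int main(void)
{
        /* Only partial words (1..7 bytes) are of interest; cnt == 8 would
         * shift by the full word width.
         */
        for (int cnt = 1; cnt <= 7; cnt++)
                printf("cnt=%d  mask=%016lx\n", cnt, bytemask_from_count(cnt));
        /* e.g. cnt=3 prints 0000000000ffffff: the three low-order bytes. */
        return 0;
}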
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 206a2af6b62b..b914ca3f57ba 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h | |||
@@ -42,6 +42,8 @@ struct hid_sensor_hub_attribute_info { | |||
42 | s32 units; | 42 | s32 units; |
43 | s32 unit_expo; | 43 | s32 unit_expo; |
44 | s32 size; | 44 | s32 size; |
45 | s32 logical_minimum; | ||
46 | s32 logical_maximum; | ||
45 | }; | 47 | }; |
46 | 48 | ||
47 | /** | 49 | /** |
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 4f945d3ed49f..8323775ac21d 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h | |||
@@ -117,4 +117,16 @@ | |||
117 | #define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 | 117 | #define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 |
118 | #define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 | 118 | #define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 |
119 | 119 | ||
120 | /* Power state enumerations */ | ||
121 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00 | ||
122 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01 | ||
123 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02 | ||
124 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03 | ||
125 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04 | ||
126 | #define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05 | ||
127 | |||
128 | /* Report State enumerations */ | ||
129 | #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00 | ||
130 | #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01 | ||
131 | |||
120 | #endif | 132 | #endif |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 9649ff0c63f8..bd7e98752222 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -142,7 +142,10 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page) | |||
142 | return 0; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | #define isolate_huge_page(p, l) false | 145 | static inline bool isolate_huge_page(struct page *page, struct list_head *list) |
146 | { | ||
147 | return false; | ||
148 | } | ||
146 | #define putback_active_hugepage(p) do {} while (0) | 149 | #define putback_active_hugepage(p) do {} while (0) |
147 | #define is_hugepage_active(x) false | 150 | #define is_hugepage_active(x) false |
148 | 151 | ||
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5d89d1b808a6..c56c350324e4 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <uapi/linux/ipv6.h> | 4 | #include <uapi/linux/ipv6.h> |
5 | 5 | ||
6 | #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) | 6 | #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) |
7 | #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) | ||
7 | /* | 8 | /* |
8 | * This structure contains configuration options per IPv6 link. | 9 | * This structure contains configuration options per IPv6 link. |
9 | */ | 10 | */ |
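The new ipv6_authlen() is needed because the AH extension header encodes its length in 32-bit words minus two, whereas the generic extension headers covered by ipv6_optlen() count 64-bit words minus one. A quick userspace check of the arithmetic (the header values are illustrative):

#include <stdio.h>

struct ext_hdr { unsigned char nexthdr, hdrlen; };

#define ipv6_optlen(p)  (((p)->hdrlen + 1) << 3)        /* 64-bit units */
#define ipv6_authlen(p) (((p)->hdrlen + 2) << 2)        /* 32-bit units (AH) */

int main(void)
{
        struct ext_hdr ah  = { .hdrlen = 4 };   /* AH with a 96-bit ICV */
        struct ext_hdr opt = { .hdrlen = 0 };   /* minimal options header */

        printf("AH length      = %d bytes\n", ipv6_authlen(&ah));  /* 24 */
        printf("options length = %d bytes\n", ipv6_optlen(&opt));  /*  8 */
        return 0;
}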
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d4e98d13eff4..ecb87544cc5d 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -193,7 +193,8 @@ extern int _cond_resched(void); | |||
193 | (__x < 0) ? -__x : __x; \ | 193 | (__x < 0) ? -__x : __x; \ |
194 | }) | 194 | }) |
195 | 195 | ||
196 | #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) | 196 | #if defined(CONFIG_MMU) && \ |
197 | (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) | ||
197 | void might_fault(void); | 198 | void might_fault(void); |
198 | #else | 199 | #else |
199 | static inline void might_fault(void) { } | 200 | static inline void might_fault(void) { } |
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d78d28a733b1..5fd33dc1fe3a 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; | |||
198 | extern size_t vmcoreinfo_size; | 198 | extern size_t vmcoreinfo_size; |
199 | extern size_t vmcoreinfo_max_size; | 199 | extern size_t vmcoreinfo_max_size; |
200 | 200 | ||
201 | /* flag to track if kexec reboot is in progress */ | ||
202 | extern bool kexec_in_progress; | ||
203 | |||
201 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, | 204 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, |
202 | unsigned long long *crash_size, unsigned long long *crash_base); | 205 | unsigned long long *crash_size, unsigned long long *crash_base); |
203 | int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, | 206 | int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 0e23c26485f4..9b503376738f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -418,6 +418,7 @@ enum { | |||
418 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ | 418 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ |
419 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ | 419 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ |
420 | ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ | 420 | ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ |
421 | ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ | ||
421 | 422 | ||
422 | /* DMA mask for user DMA control: User visible values; DO NOT | 423 | /* DMA mask for user DMA control: User visible values; DO NOT |
423 | renumber */ | 424 | renumber */ |
diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3832db..4bfde0e99ed5 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #define USE_CMPXCHG_LOCKREF \ | 20 | #define USE_CMPXCHG_LOCKREF \ |
21 | (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ | 21 | (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ |
22 | IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) | 22 | IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) |
23 | 23 | ||
24 | struct lockref { | 24 | struct lockref { |
25 | union { | 25 | union { |
diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5e9f6e..c45c089bfdac 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) | |||
133 | return ret; | 133 | return ret; |
134 | } | 134 | } |
135 | 135 | ||
136 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
137 | |||
138 | #ifndef mul_u64_u32_shr | ||
139 | static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | ||
140 | { | ||
141 | return (u64)(((unsigned __int128)a * mul) >> shift); | ||
142 | } | ||
143 | #endif /* mul_u64_u32_shr */ | ||
144 | |||
145 | #else | ||
146 | |||
147 | #ifndef mul_u64_u32_shr | ||
148 | static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | ||
149 | { | ||
150 | u32 ah, al; | ||
151 | u64 ret; | ||
152 | |||
153 | al = a; | ||
154 | ah = a >> 32; | ||
155 | |||
156 | ret = ((u64)al * mul) >> shift; | ||
157 | if (ah) | ||
158 | ret += ((u64)ah * mul) << (32 - shift); | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | #endif /* mul_u64_u32_shr */ | ||
163 | |||
164 | #endif | ||
165 | |||
136 | #endif /* _LINUX_MATH64_H */ | 166 | #endif /* _LINUX_MATH64_H */ |
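The mul_u64_u32_shr() helper added above computes (a * mul) >> shift without losing the high bits of the intermediate product, using __int128 where available and a split 32-bit fallback otherwise. A userspace sketch comparing the fallback against a 128-bit reference, assuming a 64-bit gcc/clang that provides unsigned __int128 (kernel types renamed to their stdint equivalents):

#include <stdint.h>
#include <stdio.h>

/* Portable fallback, mirroring the non-__int128 branch of the hunk. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        uint32_t ah = a >> 32, al = (uint32_t)a;
        uint64_t ret;

        ret = ((uint64_t)al * mul) >> shift;
        if (ah)
                ret += ((uint64_t)ah * mul) << (32 - shift);
        return ret;
}

int main(void)
{
        /* Scale a tick count by a fixed-point factor of mult / 2^32. */
        uint64_t ticks = 123456789012345ULL;
        uint32_t mult = 2935555556U;            /* ~0.6835 in 0.32 format */

        uint64_t fast = mul_u64_u32_shr(ticks, mult, 32);
        uint64_t ref  = (uint64_t)(((unsigned __int128)ticks * mult) >> 32);

        printf("fallback=%llu reference=%llu\n",
               (unsigned long long)fast, (unsigned long long)ref);
        return 0;
}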
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 2d0c9071bcfb..cab2dd279076 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h | |||
@@ -39,7 +39,8 @@ enum sec_device_type { | |||
39 | struct sec_pmic_dev { | 39 | struct sec_pmic_dev { |
40 | struct device *dev; | 40 | struct device *dev; |
41 | struct sec_platform_data *pdata; | 41 | struct sec_platform_data *pdata; |
42 | struct regmap *regmap; | 42 | struct regmap *regmap_pmic; |
43 | struct regmap *regmap_rtc; | ||
43 | struct i2c_client *i2c; | 44 | struct i2c_client *i2c; |
44 | struct i2c_client *rtc; | 45 | struct i2c_client *rtc; |
45 | 46 | ||
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index ad05ce60c1c9..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #define PHY_ID_KSZ8021 0x00221555 | 22 | #define PHY_ID_KSZ8021 0x00221555 |
23 | #define PHY_ID_KSZ8031 0x00221556 | 23 | #define PHY_ID_KSZ8031 0x00221556 |
24 | #define PHY_ID_KSZ8041 0x00221510 | 24 | #define PHY_ID_KSZ8041 0x00221510 |
25 | /* undocumented */ | ||
26 | #define PHY_ID_KSZ8041RNLI 0x00221537 | ||
25 | #define PHY_ID_KSZ8051 0x00221550 | 27 | #define PHY_ID_KSZ8051 0x00221550 |
26 | /* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ | 28 | /* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ |
27 | #define PHY_ID_KSZ8001 0x0022161A | 29 | #define PHY_ID_KSZ8001 0x0022161A |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b58b20d..f015c059e159 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
55 | struct page *newpage, struct page *page); | 55 | struct page *newpage, struct page *page); |
56 | extern int migrate_page_move_mapping(struct address_space *mapping, | 56 | extern int migrate_page_move_mapping(struct address_space *mapping, |
57 | struct page *newpage, struct page *page, | 57 | struct page *newpage, struct page *page, |
58 | struct buffer_head *head, enum migrate_mode mode); | 58 | struct buffer_head *head, enum migrate_mode mode, |
59 | int extra_count); | ||
59 | #else | 60 | #else |
60 | 61 | ||
61 | static inline void putback_lru_pages(struct list_head *l) {} | 62 | static inline void putback_lru_pages(struct list_head *l) {} |
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
90 | #endif /* CONFIG_MIGRATION */ | 91 | #endif /* CONFIG_MIGRATION */ |
91 | 92 | ||
92 | #ifdef CONFIG_NUMA_BALANCING | 93 | #ifdef CONFIG_NUMA_BALANCING |
94 | extern bool pmd_trans_migrating(pmd_t pmd); | ||
95 | extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); | ||
93 | extern int migrate_misplaced_page(struct page *page, | 96 | extern int migrate_misplaced_page(struct page *page, |
94 | struct vm_area_struct *vma, int node); | 97 | struct vm_area_struct *vma, int node); |
95 | extern bool migrate_ratelimited(int node); | 98 | extern bool migrate_ratelimited(int node); |
96 | #else | 99 | #else |
100 | static inline bool pmd_trans_migrating(pmd_t pmd) | ||
101 | { | ||
102 | return false; | ||
103 | } | ||
104 | static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) | ||
105 | { | ||
106 | } | ||
97 | static inline int migrate_misplaced_page(struct page *page, | 107 | static inline int migrate_misplaced_page(struct page *page, |
98 | struct vm_area_struct *vma, int node) | 108 | struct vm_area_struct *vma, int node) |
99 | { | 109 | { |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..35527173cf50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a | |||
1317 | #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ | 1317 | #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ |
1318 | 1318 | ||
1319 | #if USE_SPLIT_PTE_PTLOCKS | 1319 | #if USE_SPLIT_PTE_PTLOCKS |
1320 | #if BLOATED_SPINLOCKS | 1320 | #if ALLOC_SPLIT_PTLOCKS |
1321 | extern bool ptlock_alloc(struct page *page); | 1321 | extern bool ptlock_alloc(struct page *page); |
1322 | extern void ptlock_free(struct page *page); | 1322 | extern void ptlock_free(struct page *page); |
1323 | 1323 | ||
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) | |||
1325 | { | 1325 | { |
1326 | return page->ptl; | 1326 | return page->ptl; |
1327 | } | 1327 | } |
1328 | #else /* BLOATED_SPINLOCKS */ | 1328 | #else /* ALLOC_SPLIT_PTLOCKS */ |
1329 | static inline bool ptlock_alloc(struct page *page) | 1329 | static inline bool ptlock_alloc(struct page *page) |
1330 | { | 1330 | { |
1331 | return true; | 1331 | return true; |
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) | |||
1339 | { | 1339 | { |
1340 | return &page->ptl; | 1340 | return &page->ptl; |
1341 | } | 1341 | } |
1342 | #endif /* BLOATED_SPINLOCKS */ | 1342 | #endif /* ALLOC_SPLIT_PTLOCKS */ |
1343 | 1343 | ||
1344 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) | 1344 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) |
1345 | { | 1345 | { |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd299418a934..290901a8c1de 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -26,6 +26,7 @@ struct address_space; | |||
26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) | 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) |
27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ | 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ |
28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) | 28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) |
29 | #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * Each physical page in the system has a struct page associated with | 32 | * Each physical page in the system has a struct page associated with |
@@ -155,7 +156,7 @@ struct page { | |||
155 | * system if PG_buddy is set. | 156 | * system if PG_buddy is set. |
156 | */ | 157 | */ |
157 | #if USE_SPLIT_PTE_PTLOCKS | 158 | #if USE_SPLIT_PTE_PTLOCKS |
158 | #if BLOATED_SPINLOCKS | 159 | #if ALLOC_SPLIT_PTLOCKS |
159 | spinlock_t *ptl; | 160 | spinlock_t *ptl; |
160 | #else | 161 | #else |
161 | spinlock_t ptl; | 162 | spinlock_t ptl; |
@@ -443,6 +444,14 @@ struct mm_struct { | |||
443 | /* numa_scan_seq prevents two threads setting pte_numa */ | 444 | /* numa_scan_seq prevents two threads setting pte_numa */ |
444 | int numa_scan_seq; | 445 | int numa_scan_seq; |
445 | #endif | 446 | #endif |
447 | #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) | ||
448 | /* | ||
449 | * An operation with batched TLB flushing is going on. Anything that | ||
450 | * can move process memory needs to flush the TLB when moving a | ||
451 | * PROT_NONE or PROT_NUMA mapped page. | ||
452 | */ | ||
453 | bool tlb_flush_pending; | ||
454 | #endif | ||
446 | struct uprobes_state uprobes_state; | 455 | struct uprobes_state uprobes_state; |
447 | }; | 456 | }; |
448 | 457 | ||
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) | |||
459 | return mm->cpu_vm_mask_var; | 468 | return mm->cpu_vm_mask_var; |
460 | } | 469 | } |
461 | 470 | ||
471 | #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) | ||
472 | /* | ||
473 | * Memory barriers to keep this state in sync are graciously provided by | ||
474 | * the page table locks, outside of which no page table modifications happen. | ||
475 | * The barriers below prevent the compiler from re-ordering the instructions | ||
476 | * around the memory barriers that are already present in the code. | ||
477 | */ | ||
478 | static inline bool mm_tlb_flush_pending(struct mm_struct *mm) | ||
479 | { | ||
480 | barrier(); | ||
481 | return mm->tlb_flush_pending; | ||
482 | } | ||
483 | static inline void set_tlb_flush_pending(struct mm_struct *mm) | ||
484 | { | ||
485 | mm->tlb_flush_pending = true; | ||
486 | |||
487 | /* | ||
488 | * Guarantee that the tlb_flush_pending store does not leak into the | ||
489 | * critical section updating the page tables | ||
490 | */ | ||
491 | smp_mb__before_spinlock(); | ||
492 | } | ||
493 | /* Clearing is done after a TLB flush, which also provides a barrier. */ | ||
494 | static inline void clear_tlb_flush_pending(struct mm_struct *mm) | ||
495 | { | ||
496 | barrier(); | ||
497 | mm->tlb_flush_pending = false; | ||
498 | } | ||
499 | #else | ||
500 | static inline bool mm_tlb_flush_pending(struct mm_struct *mm) | ||
501 | { | ||
502 | return false; | ||
503 | } | ||
504 | static inline void set_tlb_flush_pending(struct mm_struct *mm) | ||
505 | { | ||
506 | } | ||
507 | static inline void clear_tlb_flush_pending(struct mm_struct *mm) | ||
508 | { | ||
509 | } | ||
510 | #endif | ||
511 | |||
462 | #endif /* _LINUX_MM_TYPES_H */ | 512 | #endif /* _LINUX_MM_TYPES_H */ |
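A rough sketch of the intended calling pattern for the new helpers: the updater publishes tlb_flush_pending before rewriting PTEs and clears it once the TLB flush has happened, while code that may move pages polls mm_tlb_flush_pending(). The surrounding function and the flush call below are illustrative, not taken from this patch:

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/* Illustrative only: how an updater might bracket a PTE rewrite. */
static void example_change_protection(struct mm_struct *mm)
{
        set_tlb_flush_pending(mm);      /* published before any PTE changes */

        /* ... take the page-table locks and rewrite the PTEs of the range ... */

        flush_tlb_mm(mm);               /* hardware TLBs are consistent again */
        clear_tlb_flush_pending(mm);    /* mm_tlb_flush_pending() now false */
}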
diff --git a/include/linux/net.h b/include/linux/net.h index 4bcee94cef93..69be3e6079c8 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -181,7 +181,7 @@ struct proto_ops { | |||
181 | int offset, size_t size, int flags); | 181 | int offset, size_t size, int flags); |
182 | ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, | 182 | ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, |
183 | struct pipe_inode_info *pipe, size_t len, unsigned int flags); | 183 | struct pipe_inode_info *pipe, size_t len, unsigned int flags); |
184 | void (*set_peek_off)(struct sock *sk, int val); | 184 | int (*set_peek_off)(struct sock *sk, int val); |
185 | }; | 185 | }; |
186 | 186 | ||
187 | #define DECLARE_SOCKADDR(type, dst, src) \ | 187 | #define DECLARE_SOCKADDR(type, dst, src) \ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7f0ed423a360..ce2a1f5f9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -769,7 +769,8 @@ struct netdev_phys_port_id { | |||
769 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | 769 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) |
770 | * Required can not be NULL. | 770 | * Required can not be NULL. |
771 | * | 771 | * |
772 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); | 772 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
773 | * void *accel_priv); | ||
773 | * Called to decide which queue to use when device supports multiple | 774 | * Called to decide which queue to use when device supports multiple |
774 | * transmit queues. | 775 | * transmit queues. |
775 | * | 776 | * |
@@ -990,7 +991,8 @@ struct net_device_ops { | |||
990 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, | 991 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, |
991 | struct net_device *dev); | 992 | struct net_device *dev); |
992 | u16 (*ndo_select_queue)(struct net_device *dev, | 993 | u16 (*ndo_select_queue)(struct net_device *dev, |
993 | struct sk_buff *skb); | 994 | struct sk_buff *skb, |
995 | void *accel_priv); | ||
994 | void (*ndo_change_rx_flags)(struct net_device *dev, | 996 | void (*ndo_change_rx_flags)(struct net_device *dev, |
995 | int flags); | 997 | int flags); |
996 | void (*ndo_set_rx_mode)(struct net_device *dev); | 998 | void (*ndo_set_rx_mode)(struct net_device *dev); |
@@ -1255,7 +1257,7 @@ struct net_device { | |||
1255 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 1257 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
1256 | unsigned char addr_assign_type; /* hw address assignment type */ | 1258 | unsigned char addr_assign_type; /* hw address assignment type */ |
1257 | unsigned char addr_len; /* hardware address length */ | 1259 | unsigned char addr_len; /* hardware address length */ |
1258 | unsigned char neigh_priv_len; | 1260 | unsigned short neigh_priv_len; |
1259 | unsigned short dev_id; /* Used to differentiate devices | 1261 | unsigned short dev_id; /* Used to differentiate devices |
1260 | * that share the same link | 1262 | * that share the same link |
1261 | * layer address | 1263 | * layer address |
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, | |||
1529 | } | 1531 | } |
1530 | 1532 | ||
1531 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 1533 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
1532 | struct sk_buff *skb); | 1534 | struct sk_buff *skb, |
1535 | void *accel_priv); | ||
1533 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); | 1536 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); |
1534 | 1537 | ||
1535 | /* | 1538 | /* |
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev); | |||
1819 | void dev_disable_lro(struct net_device *dev); | 1822 | void dev_disable_lro(struct net_device *dev); |
1820 | int dev_loopback_xmit(struct sk_buff *newskb); | 1823 | int dev_loopback_xmit(struct sk_buff *newskb); |
1821 | int dev_queue_xmit(struct sk_buff *skb); | 1824 | int dev_queue_xmit(struct sk_buff *skb); |
1825 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); | ||
1822 | int register_netdevice(struct net_device *dev); | 1826 | int register_netdevice(struct net_device *dev); |
1823 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | 1827 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
1824 | void unregister_netdevice_many(struct list_head *head); | 1828 | void unregister_netdevice_many(struct list_head *head); |
@@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, | |||
1912 | return dev->header_ops->parse(skb, haddr); | 1916 | return dev->header_ops->parse(skb, haddr); |
1913 | } | 1917 | } |
1914 | 1918 | ||
1919 | static inline int dev_rebuild_header(struct sk_buff *skb) | ||
1920 | { | ||
1921 | const struct net_device *dev = skb->dev; | ||
1922 | |||
1923 | if (!dev->header_ops || !dev->header_ops->rebuild) | ||
1924 | return 0; | ||
1925 | return dev->header_ops->rebuild(skb); | ||
1926 | } | ||
1927 | |||
1915 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 1928 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
1916 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); | 1929 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
1917 | static inline int unregister_gifconf(unsigned int family) | 1930 | static inline int unregister_gifconf(unsigned int family) |
@@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier); | |||
2417 | int dev_get_phys_port_id(struct net_device *dev, | 2430 | int dev_get_phys_port_id(struct net_device *dev, |
2418 | struct netdev_phys_port_id *ppid); | 2431 | struct netdev_phys_port_id *ppid); |
2419 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2432 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2420 | struct netdev_queue *txq, void *accel_priv); | 2433 | struct netdev_queue *txq); |
2421 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 2434 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
2422 | 2435 | ||
2423 | extern int netdev_budget; | 2436 | extern int netdev_budget; |
@@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev, | |||
3008 | dev->gso_max_size = size; | 3021 | dev->gso_max_size = size; |
3009 | } | 3022 | } |
3010 | 3023 | ||
3024 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, | ||
3025 | int pulled_hlen, u16 mac_offset, | ||
3026 | int mac_len) | ||
3027 | { | ||
3028 | skb->protocol = protocol; | ||
3029 | skb->encapsulation = 1; | ||
3030 | skb_push(skb, pulled_hlen); | ||
3031 | skb_reset_transport_header(skb); | ||
3032 | skb->mac_header = mac_offset; | ||
3033 | skb->network_header = skb->mac_header + mac_len; | ||
3034 | skb->mac_len = mac_len; | ||
3035 | } | ||
3036 | |||
3011 | static inline bool netif_is_macvlan(struct net_device *dev) | 3037 | static inline bool netif_is_macvlan(struct net_device *dev) |
3012 | { | 3038 | { |
3013 | return dev->priv_flags & IFF_MACVLAN; | 3039 | return dev->priv_flags & IFF_MACVLAN; |
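With the prototype change above, ndo_select_queue() and netdev_pick_tx() carry an accel_priv pointer so that callers such as the new dev_queue_xmit_accel() can hand driver-private context down to queue selection. A hedged sketch of a driver callback under the new signature (the driver name, queue policy and ops table are illustrative):

#include <linux/netdevice.h>

/* Illustrative driver callback using the new three-argument prototype. */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                void *accel_priv)
{
        /* With no accelerated upper device involved, fall back to the
         * generic flow-hash based selection.
         */
        if (!accel_priv)
                return __netdev_pick_tx(dev, skb);

        /* Otherwise accel_priv identifies the forwarding context; a real
         * driver would map it to the queue range reserved for that upper
         * device.  Here we simply pretend queue 0 belongs to it.
         */
        return 0;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_select_queue       = example_select_queue,
        /* ... other required ops ... */
};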
diff --git a/include/linux/pci.h b/include/linux/pci.h index 1084a15175e0..a13d6825e586 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -960,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno); | |||
960 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 960 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
961 | int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); | 961 | int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); |
962 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); | 962 | int pci_select_bars(struct pci_dev *dev, unsigned long flags); |
963 | bool pci_device_is_present(struct pci_dev *pdev); | ||
963 | 964 | ||
964 | /* ROM control related routines */ | 965 | /* ROM control related routines */ |
965 | int pci_enable_rom(struct pci_dev *pdev); | 966 | int pci_enable_rom(struct pci_dev *pdev); |
@@ -1567,65 +1568,65 @@ enum pci_fixup_pass { | |||
1567 | /* Anonymous variables would be nice... */ | 1568 | /* Anonymous variables would be nice... */ |
1568 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ | 1569 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ |
1569 | class_shift, hook) \ | 1570 | class_shift, hook) \ |
1570 | static const struct pci_fixup __pci_fixup_##name __used \ | 1571 | static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ |
1571 | __attribute__((__section__(#section), aligned((sizeof(void *))))) \ | 1572 | __attribute__((__section__(#section), aligned((sizeof(void *))))) \ |
1572 | = { vendor, device, class, class_shift, hook }; | 1573 | = { vendor, device, class, class_shift, hook }; |
1573 | 1574 | ||
1574 | #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ | 1575 | #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ |
1575 | class_shift, hook) \ | 1576 | class_shift, hook) \ |
1576 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1577 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
1577 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1578 | hook, vendor, device, class, class_shift, hook) |
1578 | #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ | 1579 | #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ |
1579 | class_shift, hook) \ | 1580 | class_shift, hook) \ |
1580 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 1581 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
1581 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1582 | hook, vendor, device, class, class_shift, hook) |
1582 | #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ | 1583 | #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ |
1583 | class_shift, hook) \ | 1584 | class_shift, hook) \ |
1584 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 1585 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
1585 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1586 | hook, vendor, device, class, class_shift, hook) |
1586 | #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ | 1587 | #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ |
1587 | class_shift, hook) \ | 1588 | class_shift, hook) \ |
1588 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 1589 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
1589 | vendor##device##hook, vendor, device, class, class_shift, hook) | 1590 | hook, vendor, device, class, class_shift, hook) |
1590 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ | 1591 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ |
1591 | class_shift, hook) \ | 1592 | class_shift, hook) \ |
1592 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1593 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1593 | resume##vendor##device##hook, vendor, device, class, \ | 1594 | resume##hook, vendor, device, class, \ |
1594 | class_shift, hook) | 1595 | class_shift, hook) |
1595 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ | 1596 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ |
1596 | class_shift, hook) \ | 1597 | class_shift, hook) \ |
1597 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1598 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1598 | resume_early##vendor##device##hook, vendor, device, \ | 1599 | resume_early##hook, vendor, device, \ |
1599 | class, class_shift, hook) | 1600 | class, class_shift, hook) |
1600 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ | 1601 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ |
1601 | class_shift, hook) \ | 1602 | class_shift, hook) \ |
1602 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1603 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1603 | suspend##vendor##device##hook, vendor, device, class, \ | 1604 | suspend##hook, vendor, device, class, \ |
1604 | class_shift, hook) | 1605 | class_shift, hook) |
1605 | 1606 | ||
1606 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ | 1607 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ |
1607 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1608 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
1608 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1609 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1609 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ | 1610 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ |
1610 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 1611 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
1611 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1612 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1612 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ | 1613 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ |
1613 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 1614 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
1614 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1615 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1615 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ | 1616 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ |
1616 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 1617 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
1617 | vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) | 1618 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1618 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ | 1619 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ |
1619 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1620 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1620 | resume##vendor##device##hook, vendor, device, \ | 1621 | resume##hook, vendor, device, \ |
1621 | PCI_ANY_ID, 0, hook) | 1622 | PCI_ANY_ID, 0, hook) |
1622 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ | 1623 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ |
1623 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1624 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1624 | resume_early##vendor##device##hook, vendor, device, \ | 1625 | resume_early##hook, vendor, device, \ |
1625 | PCI_ANY_ID, 0, hook) | 1626 | PCI_ANY_ID, 0, hook) |
1626 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ | 1627 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ |
1627 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1628 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1628 | suspend##vendor##device##hook, vendor, device, \ | 1629 | suspend##hook, vendor, device, \ |
1629 | PCI_ANY_ID, 0, hook) | 1630 | PCI_ANY_ID, 0, hook) |
1630 | 1631 | ||
1631 | #ifdef CONFIG_PCI_QUIRKS | 1632 | #ifdef CONFIG_PCI_QUIRKS |
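The fixup macros now name the generated entry after the hook plus __LINE__ instead of pasting vendor##device##hook, so declarations that previously expanded to the same symbol (for example the same vendor/device/hook triple declared twice, or declarations emitted from another macro) no longer collide. Call sites are unchanged; an illustrative quirk declaration still looks like this (the device IDs and hook are made up):

#include <linux/pci.h>

/* Illustrative quirk: the device IDs and hook name are invented. */
static void quirk_example_disable_msi(struct pci_dev *dev)
{
        dev->no_msi = 1;
        dev_info(&dev->dev, "MSI disabled by example quirk\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234,
                        quirk_example_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5678,
                        quirk_example_disable_msi);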
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 57e890abe1f0..a5fc7d01aad6 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -69,6 +69,7 @@ | |||
69 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ | 69 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
70 | extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | 70 | extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
71 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | 71 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
72 | extern __PCPU_ATTRS(sec) __typeof__(type) name; \ | ||
72 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ | 73 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ |
73 | __typeof__(type) name | 74 | __typeof__(type) name |
74 | #else | 75 | #else |
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d0a8a7..ece0c6bbfcc5 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
@@ -51,6 +51,7 @@ struct pstore_info { | |||
51 | char *buf; | 51 | char *buf; |
52 | size_t bufsize; | 52 | size_t bufsize; |
53 | struct mutex read_mutex; /* serialize open/read/close */ | 53 | struct mutex read_mutex; /* serialize open/read/close */ |
54 | int flags; | ||
54 | int (*open)(struct pstore_info *psi); | 55 | int (*open)(struct pstore_info *psi); |
55 | int (*close)(struct pstore_info *psi); | 56 | int (*close)(struct pstore_info *psi); |
56 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, | 57 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, |
@@ -70,6 +71,8 @@ struct pstore_info { | |||
70 | void *data; | 71 | void *data; |
71 | }; | 72 | }; |
72 | 73 | ||
74 | #define PSTORE_FLAGS_FRAGILE 1 | ||
75 | |||
73 | #ifdef CONFIG_PSTORE | 76 | #ifdef CONFIG_PSTORE |
74 | extern int pstore_register(struct pstore_info *); | 77 | extern int pstore_register(struct pstore_info *); |
75 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); | 78 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); |
diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f6f963..9e7db9e73cc1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h | |||
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); | |||
43 | * Architecture-specific implementations of sys_reboot commands. | 43 | * Architecture-specific implementations of sys_reboot commands. |
44 | */ | 44 | */ |
45 | 45 | ||
46 | extern void migrate_to_reboot_cpu(void); | ||
46 | extern void machine_restart(char *cmd); | 47 | extern void machine_restart(char *cmd); |
47 | extern void machine_halt(void); | 48 | extern void machine_halt(void); |
48 | extern void machine_power_off(void); | 49 | extern void machine_power_off(void); |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 939428ad25ac..8e3e66ac0a52 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void); | |||
24 | extern int rtnl_is_locked(void); | 24 | extern int rtnl_is_locked(void); |
25 | #ifdef CONFIG_PROVE_LOCKING | 25 | #ifdef CONFIG_PROVE_LOCKING |
26 | extern int lockdep_rtnl_is_held(void); | 26 | extern int lockdep_rtnl_is_held(void); |
27 | #else | ||
28 | static inline int lockdep_rtnl_is_held(void) | ||
29 | { | ||
30 | return 1; | ||
31 | } | ||
27 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 32 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
28 | 33 | ||
29 | /** | 34 | /** |
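The new stub returns 1 when CONFIG_PROVE_LOCKING is off, which keeps helpers such as rtnl_dereference(), which expands to rcu_dereference_protected(p, lockdep_rtnl_is_held()), building cleanly in both configurations. A hedged sketch of the usual pattern, with an illustrative RCU-protected pointer that is only written under RTNL:

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_cfg {
        int value;
        struct rcu_head rcu;
};

/* Illustrative: an RCU pointer whose updates are serialized by RTNL. */
static struct example_cfg __rcu *example_cfg_ptr;

static void example_update_cfg(struct example_cfg *new_cfg)
{
        struct example_cfg *old;

        ASSERT_RTNL();                          /* caller must hold RTNL */
        old = rtnl_dereference(example_cfg_ptr);/* lockdep-checked when
                                                   CONFIG_PROVE_LOCKING=y */
        rcu_assign_pointer(example_cfg_ptr, new_cfg);
        if (old)
                kfree_rcu(old, rcu);
}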
diff --git a/include/linux/sched.h b/include/linux/sched.h index 768b037dfacb..53f97eb8dbc7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -440,8 +440,6 @@ struct task_cputime { | |||
440 | .sum_exec_runtime = 0, \ | 440 | .sum_exec_runtime = 0, \ |
441 | } | 441 | } |
442 | 442 | ||
443 | #define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) | ||
444 | |||
445 | #ifdef CONFIG_PREEMPT_COUNT | 443 | #ifdef CONFIG_PREEMPT_COUNT |
446 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) | 444 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) |
447 | #else | 445 | #else |
@@ -932,7 +930,8 @@ struct pipe_inode_info; | |||
932 | struct uts_namespace; | 930 | struct uts_namespace; |
933 | 931 | ||
934 | struct load_weight { | 932 | struct load_weight { |
935 | unsigned long weight, inv_weight; | 933 | unsigned long weight; |
934 | u32 inv_weight; | ||
936 | }; | 935 | }; |
937 | 936 | ||
938 | struct sched_avg { | 937 | struct sched_avg { |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 30aa0dc60d75..9d55438bc4ad 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -47,6 +47,8 @@ extern int shmem_init(void); | |||
47 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); | 47 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); |
48 | extern struct file *shmem_file_setup(const char *name, | 48 | extern struct file *shmem_file_setup(const char *name, |
49 | loff_t size, unsigned long flags); | 49 | loff_t size, unsigned long flags); |
50 | extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, | ||
51 | unsigned long flags); | ||
50 | extern int shmem_zero_setup(struct vm_area_struct *); | 52 | extern int shmem_zero_setup(struct vm_area_struct *); |
51 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); | 53 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); |
52 | extern void shmem_unlock_mapping(struct address_space *mapping); | 54 | extern void shmem_unlock_mapping(struct address_space *mapping); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bec1cc7d5e3c..6f69b3f914fb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) | |||
1638 | skb->mac_header += offset; | 1638 | skb->mac_header += offset; |
1639 | } | 1639 | } |
1640 | 1640 | ||
1641 | static inline void skb_pop_mac_header(struct sk_buff *skb) | ||
1642 | { | ||
1643 | skb->mac_header = skb->network_header; | ||
1644 | } | ||
1645 | |||
1641 | static inline void skb_probe_transport_header(struct sk_buff *skb, | 1646 | static inline void skb_probe_transport_header(struct sk_buff *skb, |
1642 | const int offset_hint) | 1647 | const int offset_hint) |
1643 | { | 1648 | { |
@@ -2263,6 +2268,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, | |||
2263 | 2268 | ||
2264 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); | 2269 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); |
2265 | 2270 | ||
2271 | /** | ||
2272 | * pskb_trim_rcsum - trim received skb and update checksum | ||
2273 | * @skb: buffer to trim | ||
2274 | * @len: new length | ||
2275 | * | ||
2276 | * This is exactly the same as pskb_trim except that it ensures the | ||
2277 | * checksum of received packets are still valid after the operation. | ||
2278 | */ | ||
2279 | |||
2280 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | ||
2281 | { | ||
2282 | if (likely(len >= skb->len)) | ||
2283 | return 0; | ||
2284 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
2285 | skb->ip_summed = CHECKSUM_NONE; | ||
2286 | return __pskb_trim(skb, len); | ||
2287 | } | ||
2288 | |||
2266 | #define skb_queue_walk(queue, skb) \ | 2289 | #define skb_queue_walk(queue, skb) \ |
2267 | for (skb = (queue)->next; \ | 2290 | for (skb = (queue)->next; \ |
2268 | skb != (struct sk_buff *)(queue); \ | 2291 | skb != (struct sk_buff *)(queue); \ |
@@ -2360,27 +2383,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, | |||
2360 | __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, | 2383 | __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, |
2361 | __wsum csum); | 2384 | __wsum csum); |
2362 | 2385 | ||
2363 | /** | ||
2364 | * pskb_trim_rcsum - trim received skb and update checksum | ||
2365 | * @skb: buffer to trim | ||
2366 | * @len: new length | ||
2367 | * | ||
2368 | * This is exactly the same as pskb_trim except that it ensures the | ||
2369 | * checksum of received packets are still valid after the operation. | ||
2370 | */ | ||
2371 | |||
2372 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | ||
2373 | { | ||
2374 | if (likely(len >= skb->len)) | ||
2375 | return 0; | ||
2376 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
2377 | __wsum adj = skb_checksum(skb, len, skb->len - len, 0); | ||
2378 | |||
2379 | skb->csum = csum_sub(skb->csum, adj); | ||
2380 | } | ||
2381 | return __pskb_trim(skb, len); | ||
2382 | } | ||
2383 | |||
2384 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, | 2386 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, |
2385 | int len, void *buffer) | 2387 | int len, void *buffer) |
2386 | { | 2388 | { |
@@ -2529,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) | |||
2529 | * Ethernet MAC Drivers should call this function in their hard_xmit() | 2531 | * Ethernet MAC Drivers should call this function in their hard_xmit() |
2530 | * function immediately before giving the sk_buff to the MAC hardware. | 2532 | * function immediately before giving the sk_buff to the MAC hardware. |
2531 | * | 2533 | * |
2534 | * Specifically, one should make absolutely sure that this function is | ||
2535 | * called before TX completion of this packet can trigger. Otherwise | ||
2536 | * the packet could potentially already be freed. | ||
2537 | * | ||
2532 | * @skb: A socket buffer. | 2538 | * @skb: A socket buffer. |
2533 | */ | 2539 | */ |
2534 | static inline void skb_tx_timestamp(struct sk_buff *skb) | 2540 | static inline void skb_tx_timestamp(struct sk_buff *skb) |
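In the skbuff.h hunks above, pskb_trim_rcsum() is moved earlier in the header and its CHECKSUM_COMPLETE branch now downgrades to CHECKSUM_NONE instead of recomputing the checksum of the trimmed tail. Typical usage is unchanged; a hedged sketch of a receive handler trimming trailing padding to the length advertised in its protocol header (the handler name and error handling are illustrative):

#include <linux/skbuff.h>

/* Illustrative receive-path use: drop trailing padding past the length
 * advertised in the protocol header, keeping checksum state consistent.
 */
static int example_rcv(struct sk_buff *skb, unsigned int hdr_payload_len)
{
        if (pskb_trim_rcsum(skb, hdr_payload_len)) {
                kfree_skb(skb);
                return -ENOMEM;
        }
        /* ... continue parsing the trimmed packet ... */
        return 0;
}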
diff --git a/include/linux/usb.h b/include/linux/usb.h index 7454865ad148..512ab162832c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -1264,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *); | |||
1264 | * @sg: scatter gather buffer list, the buffer size of each element in | 1264 | * @sg: scatter gather buffer list, the buffer size of each element in |
1265 | * the list (except the last) must be divisible by the endpoint's | 1265 | * the list (except the last) must be divisible by the endpoint's |
1266 | * max packet size if no_sg_constraint isn't set in 'struct usb_bus' | 1266 | * max packet size if no_sg_constraint isn't set in 'struct usb_bus' |
1267 | * (FIXME: scatter-gather under xHCI is broken for periodic transfers. | ||
1268 | * Do not use urb->sg for interrupt endpoints for now, only bulk.) | ||
1267 | * @num_mapped_sgs: (internal) number of mapped sg entries | 1269 | * @num_mapped_sgs: (internal) number of mapped sg entries |
1268 | * @num_sgs: number of entries in the sg list | 1270 | * @num_sgs: number of entries in the sg list |
1269 | * @transfer_buffer_length: How big is transfer_buffer. The transfer may | 1271 | * @transfer_buffer_length: How big is transfer_buffer. The transfer may |
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 0c4d4ca370ec..eeb28329fa3c 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h | |||
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator) | |||
271 | #define WUSB_KEY_INDEX_TYPE_GTK 2 | 271 | #define WUSB_KEY_INDEX_TYPE_GTK 2 |
272 | #define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 | 272 | #define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 |
273 | #define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 | 273 | #define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 |
274 | /* bits 0-3 used for the key index. */ | ||
275 | #define WUSB_KEY_INDEX_MAX 15 | ||
274 | 276 | ||
275 | /* A CCM Nonce, defined in WUSB1.0[6.4.1] */ | 277 | /* A CCM Nonce, defined in WUSB1.0[6.4.1] */ |
276 | struct aes_ccm_nonce { | 278 | struct aes_ccm_nonce { |
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index bd8218b15009..941055e9d125 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
@@ -83,7 +83,7 @@ struct vb2_fileio_data; | |||
83 | struct vb2_mem_ops { | 83 | struct vb2_mem_ops { |
84 | void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); | 84 | void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); |
85 | void (*put)(void *buf_priv); | 85 | void (*put)(void *buf_priv); |
86 | struct dma_buf *(*get_dmabuf)(void *buf_priv); | 86 | struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); |
87 | 87 | ||
88 | void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, | 88 | void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, |
89 | unsigned long size, int write); | 89 | unsigned long size, int write); |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index eb198acaac1d..488316e339a1 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -110,7 +110,8 @@ struct frag_hdr { | |||
110 | __be32 identification; | 110 | __be32 identification; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | #define IP6_MF 0x0001 | 113 | #define IP6_MF 0x0001 |
114 | #define IP6_OFFSET 0xFFF8 | ||
114 | 115 | ||
115 | #include <net/sock.h> | 116 | #include <net/sock.h> |
116 | 117 | ||
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 31e2de7d57c5..c0f0a13ed818 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h | |||
@@ -142,7 +142,7 @@ | |||
142 | #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) | 142 | #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) |
143 | 143 | ||
144 | #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) | 144 | #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) |
145 | #define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) | 145 | #define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO) |
146 | 146 | ||
147 | /* FRMR information field macros */ | 147 | /* FRMR information field macros */ |
148 | 148 | ||
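The llc_pdu.h fix removes a side effect: the old PDU_GET_NEXT_Vr() incremented its argument with ++sn, silently advancing the caller's sequence variable whenever the macro was evaluated. A userspace sketch of the difference (LLC_2_SEQ_NBR_MODULO is redefined here only for the demonstration):

#include <stdio.h>

#define LLC_2_SEQ_NBR_MODULO    ((unsigned char)128)

/* Old form: mutates its argument. */
#define GET_NEXT_VR_OLD(sn)     (++sn & ~LLC_2_SEQ_NBR_MODULO)
/* New form: pure expression, argument left untouched. */
#define GET_NEXT_VR_NEW(sn)     (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)

int main(void)
{
        unsigned char a = 5, b = 5;
        unsigned int next_old, next_new;

        next_old = GET_NEXT_VR_OLD(a);
        printf("old: next=%u, a afterwards=%u\n", next_old, (unsigned)a);
        /* old: next=6, a afterwards=6  -- caller's state changed */

        next_new = GET_NEXT_VR_NEW(b);
        printf("new: next=%u, b afterwards=%u\n", next_new, (unsigned)b);
        /* new: next=6, b afterwards=5  -- caller decides when to assign */
        return 0;
}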
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ea0ca5f6e629..0a248b323d87 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1046,9 +1046,6 @@ struct sctp_outq { | |||
1046 | 1046 | ||
1047 | /* Corked? */ | 1047 | /* Corked? */ |
1048 | char cork; | 1048 | char cork; |
1049 | |||
1050 | /* Is this structure empty? */ | ||
1051 | char empty; | ||
1052 | }; | 1049 | }; |
1053 | 1050 | ||
1054 | void sctp_outq_init(struct sctp_association *, struct sctp_outq *); | 1051 | void sctp_outq_init(struct sctp_association *, struct sctp_outq *); |
@@ -1726,12 +1723,6 @@ struct sctp_association { | |||
1726 | /* How many duplicated TSNs have we seen? */ | 1723 | /* How many duplicated TSNs have we seen? */ |
1727 | int numduptsns; | 1724 | int numduptsns; |
1728 | 1725 | ||
1729 | /* Number of seconds of idle time before an association is closed. | ||
1730 | * In the association context, this is really used as a boolean | ||
1731 | * since the real timeout is stored in the timeouts array | ||
1732 | */ | ||
1733 | __u32 autoclose; | ||
1734 | |||
1735 | /* These are to support | 1726 | /* These are to support |
1736 | * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses | 1727 | * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses |
1737 | * and Enforcement of Flow and Message Limits" | 1728 | * and Enforcement of Flow and Message Limits" |
diff --git a/include/net/sock.h b/include/net/sock.h index e3a18ff0c38b..2ef3c3eca47a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1035,7 +1035,6 @@ enum cg_proto_flags { | |||
1035 | }; | 1035 | }; |
1036 | 1036 | ||
1037 | struct cg_proto { | 1037 | struct cg_proto { |
1038 | void (*enter_memory_pressure)(struct sock *sk); | ||
1039 | struct res_counter memory_allocated; /* Current allocated memory. */ | 1038 | struct res_counter memory_allocated; /* Current allocated memory. */ |
1040 | struct percpu_counter sockets_allocated; /* Current number of sockets. */ | 1039 | struct percpu_counter sockets_allocated; /* Current number of sockets. */ |
1041 | int memory_pressure; | 1040 | int memory_pressure; |
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk) | |||
1155 | struct proto *prot = sk->sk_prot; | 1154 | struct proto *prot = sk->sk_prot; |
1156 | 1155 | ||
1157 | for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) | 1156 | for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) |
1158 | if (cg_proto->memory_pressure) | 1157 | cg_proto->memory_pressure = 0; |
1159 | cg_proto->memory_pressure = 0; | ||
1160 | } | 1158 | } |
1161 | 1159 | ||
1162 | } | 1160 | } |
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk) | |||
1171 | struct proto *prot = sk->sk_prot; | 1169 | struct proto *prot = sk->sk_prot; |
1172 | 1170 | ||
1173 | for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) | 1171 | for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) |
1174 | cg_proto->enter_memory_pressure(sk); | 1172 | cg_proto->memory_pressure = 1; |
1175 | } | 1173 | } |
1176 | 1174 | ||
1177 | sk->sk_prot->enter_memory_pressure(sk); | 1175 | sk->sk_prot->enter_memory_pressure(sk); |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 979874c627ee..61e1935c91b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -978,7 +978,7 @@ struct ib_uobject { | |||
978 | }; | 978 | }; |
979 | 979 | ||
980 | struct ib_udata { | 980 | struct ib_udata { |
981 | void __user *inbuf; | 981 | const void __user *inbuf; |
982 | void __user *outbuf; | 982 | void __user *outbuf; |
983 | size_t inlen; | 983 | size_t inlen; |
984 | size_t outlen; | 984 | size_t outlen; |
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index af9983970417..5f73785f5977 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h | |||
@@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, | |||
108 | { | 108 | { |
109 | struct snd_sg_buf *sgbuf = dmab->private_data; | 109 | struct snd_sg_buf *sgbuf = dmab->private_data; |
110 | dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; | 110 | dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; |
111 | addr &= PAGE_MASK; | 111 | addr &= ~((dma_addr_t)PAGE_SIZE - 1); |
112 | return addr + offset % PAGE_SIZE; | 112 | return addr + offset % PAGE_SIZE; |
113 | } | 113 | } |
114 | 114 | ||
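The snd_sgbuf_get_addr() change avoids masking a dma_addr_t with PAGE_MASK, which has the width of unsigned long: on 32-bit configurations with 64-bit DMA addresses the old mask zeroed the upper address bits. A userspace sketch of the truncation, with stand-in typedefs for that configuration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

typedef uint32_t kernel_ulong_t;        /* 32-bit unsigned long config */
typedef uint64_t dma_addr_t;            /* 64-bit DMA addresses */

int main(void)
{
        dma_addr_t addr = 0x123456789000ULL | 0x234;    /* address above 4GB */

        kernel_ulong_t page_mask = ~(kernel_ulong_t)(PAGE_SIZE - 1);
        dma_addr_t broken = addr & page_mask;           /* high bits lost */
        dma_addr_t fixed  = addr & ~((dma_addr_t)PAGE_SIZE - 1);

        printf("broken=%llx\nfixed =%llx\n",
               (unsigned long long)broken, (unsigned long long)fixed);
        return 0;
}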
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6afa69..321301c0a643 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -517,10 +517,6 @@ struct se_node_acl { | |||
517 | u32 acl_index; | 517 | u32 acl_index; |
518 | #define MAX_ACL_TAG_SIZE 64 | 518 | #define MAX_ACL_TAG_SIZE 64 |
519 | char acl_tag[MAX_ACL_TAG_SIZE]; | 519 | char acl_tag[MAX_ACL_TAG_SIZE]; |
520 | u64 num_cmds; | ||
521 | u64 read_bytes; | ||
522 | u64 write_bytes; | ||
523 | spinlock_t stats_lock; | ||
524 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 520 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
525 | atomic_t acl_pr_ref_count; | 521 | atomic_t acl_pr_ref_count; |
526 | struct se_dev_entry **device_list; | 522 | struct se_dev_entry **device_list; |
@@ -624,6 +620,7 @@ struct se_dev_attrib { | |||
624 | u32 unmap_granularity; | 620 | u32 unmap_granularity; |
625 | u32 unmap_granularity_alignment; | 621 | u32 unmap_granularity_alignment; |
626 | u32 max_write_same_len; | 622 | u32 max_write_same_len; |
623 | u32 max_bytes_per_io; | ||
627 | struct se_device *da_dev; | 624 | struct se_device *da_dev; |
628 | struct config_group da_group; | 625 | struct config_group da_group; |
629 | }; | 626 | }; |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 2f3f7ea8c77b..fe421e8a431b 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
@@ -983,6 +983,8 @@ struct drm_radeon_cs { | |||
983 | #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 | 983 | #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 |
984 | /* CIK macrotile mode array */ | 984 | /* CIK macrotile mode array */ |
985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 | 985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 |
986 | /* query the number of render backends */ | ||
987 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 | ||
986 | 988 | ||
987 | 989 | ||
988 | struct drm_radeon_info { | 990 | struct drm_radeon_info { |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..f854ca4a1372 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -75,6 +75,7 @@ | |||
75 | #define DRM_VMW_PARAM_FIFO_CAPS 4 | 75 | #define DRM_VMW_PARAM_FIFO_CAPS 4 |
76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 | 76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 | 77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
78 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 | ||
78 | 79 | ||
79 | /** | 80 | /** |
80 | * struct drm_vmw_getparam_arg | 81 | * struct drm_vmw_getparam_arg |
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index a3726275876d..bd24470d24a2 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
@@ -464,7 +464,8 @@ struct input_keymap_entry { | |||
464 | #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ | 464 | #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ |
465 | #define KEY_DISPLAY_OFF 245 /* display device to off state */ | 465 | #define KEY_DISPLAY_OFF 245 /* display device to off state */ |
466 | 466 | ||
467 | #define KEY_WIMAX 246 | 467 | #define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */ |
468 | #define KEY_WIMAX KEY_WWAN | ||
468 | #define KEY_RFKILL 247 /* Key that controls all radios */ | 469 | #define KEY_RFKILL 247 /* Key that controls all radios */ |
469 | 470 | ||
470 | #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ | 471 | #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ |
@@ -719,6 +720,8 @@ struct input_keymap_entry { | |||
719 | #define BTN_DPAD_LEFT 0x222 | 720 | #define BTN_DPAD_LEFT 0x222 |
720 | #define BTN_DPAD_RIGHT 0x223 | 721 | #define BTN_DPAD_RIGHT 0x223 |
721 | 722 | ||
723 | #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ | ||
724 | |||
722 | #define BTN_TRIGGER_HAPPY 0x2c0 | 725 | #define BTN_TRIGGER_HAPPY 0x2c0 |
723 | #define BTN_TRIGGER_HAPPY1 0x2c0 | 726 | #define BTN_TRIGGER_HAPPY1 0x2c0 |
724 | #define BTN_TRIGGER_HAPPY2 0x2c1 | 727 | #define BTN_TRIGGER_HAPPY2 0x2c1 |
@@ -856,6 +859,7 @@ struct input_keymap_entry { | |||
856 | #define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ | 859 | #define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ |
857 | #define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ | 860 | #define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ |
858 | #define SW_LINEIN_INSERT 0x0d /* set = inserted */ | 861 | #define SW_LINEIN_INSERT 0x0d /* set = inserted */ |
862 | #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ | ||
859 | #define SW_MAX 0x0f | 863 | #define SW_MAX 0x0f |
860 | #define SW_CNT (SW_MAX+1) | 864 | #define SW_CNT (SW_MAX+1) |
861 | 865 | ||
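The input.h hunks above give code 246 the generic name KEY_WWAN and keep KEY_WIMAX as an alias, so existing userspace keeps compiling while new code can use the broader name. A minimal sketch of how the alias behaves (key values copied from the hunk; the helper name is made up for illustration):

    #include <stdio.h>

    #define KEY_WWAN  246            /* Wireless WAN (LTE, UMTS, GSM, ...) */
    #define KEY_WIMAX KEY_WWAN       /* legacy name, now an alias */

    static const char *keyname(int code)
    {
        /* One case covers both spellings, since they are the same value. */
        switch (code) {
        case KEY_WWAN: return "KEY_WWAN";
        default:       return "unknown";
        }
    }

    int main(void)
    {
        printf("%d -> %s\n", KEY_WIMAX, keyname(KEY_WIMAX));   /* 246 -> KEY_WWAN */
        return 0;
    }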
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h index 17e7d95e4f53..6eb40244e019 100644 --- a/include/uapi/linux/mic_common.h +++ b/include/uapi/linux/mic_common.h | |||
@@ -23,12 +23,7 @@ | |||
23 | 23 | ||
24 | #include <linux/virtio_ring.h> | 24 | #include <linux/virtio_ring.h> |
25 | 25 | ||
26 | #ifndef __KERNEL__ | 26 | #define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1)) |
27 | #define ALIGN(a, x) (((a) + (x) - 1) & ~((x) - 1)) | ||
28 | #define __aligned(x) __attribute__ ((aligned(x))) | ||
29 | #endif | ||
30 | |||
31 | #define mic_aligned_size(x) ALIGN(sizeof(x), 8) | ||
32 | 27 | ||
33 | /** | 28 | /** |
34 | * struct mic_device_desc: Virtio device information shared between the | 29 | * struct mic_device_desc: Virtio device information shared between the |
@@ -48,8 +43,8 @@ struct mic_device_desc { | |||
48 | __u8 feature_len; | 43 | __u8 feature_len; |
49 | __u8 config_len; | 44 | __u8 config_len; |
50 | __u8 status; | 45 | __u8 status; |
51 | __u64 config[0]; | 46 | __le64 config[0]; |
52 | } __aligned(8); | 47 | } __attribute__ ((aligned(8))); |
53 | 48 | ||
54 | /** | 49 | /** |
55 | * struct mic_device_ctrl: Per virtio device information in the device page | 50 | * struct mic_device_ctrl: Per virtio device information in the device page |
@@ -66,7 +61,7 @@ struct mic_device_desc { | |||
66 | * @h2c_vdev_db: The doorbell number to be used by host. Set by guest. | 61 | * @h2c_vdev_db: The doorbell number to be used by host. Set by guest. |
67 | */ | 62 | */ |
68 | struct mic_device_ctrl { | 63 | struct mic_device_ctrl { |
69 | __u64 vdev; | 64 | __le64 vdev; |
70 | __u8 config_change; | 65 | __u8 config_change; |
71 | __u8 vdev_reset; | 66 | __u8 vdev_reset; |
72 | __u8 guest_ack; | 67 | __u8 guest_ack; |
@@ -74,7 +69,7 @@ struct mic_device_ctrl { | |||
74 | __u8 used_address_updated; | 69 | __u8 used_address_updated; |
75 | __s8 c2h_vdev_db; | 70 | __s8 c2h_vdev_db; |
76 | __s8 h2c_vdev_db; | 71 | __s8 h2c_vdev_db; |
77 | } __aligned(8); | 72 | } __attribute__ ((aligned(8))); |
78 | 73 | ||
79 | /** | 74 | /** |
80 | * struct mic_bootparam: Virtio device independent information in device page | 75 | * struct mic_bootparam: Virtio device independent information in device page |
@@ -87,13 +82,13 @@ struct mic_device_ctrl { | |||
87 | * @shutdown_card: Set to 1 by the host when a card shutdown is initiated | 82 | * @shutdown_card: Set to 1 by the host when a card shutdown is initiated |
88 | */ | 83 | */ |
89 | struct mic_bootparam { | 84 | struct mic_bootparam { |
90 | __u32 magic; | 85 | __le32 magic; |
91 | __s8 c2h_shutdown_db; | 86 | __s8 c2h_shutdown_db; |
92 | __s8 h2c_shutdown_db; | 87 | __s8 h2c_shutdown_db; |
93 | __s8 h2c_config_db; | 88 | __s8 h2c_config_db; |
94 | __u8 shutdown_status; | 89 | __u8 shutdown_status; |
95 | __u8 shutdown_card; | 90 | __u8 shutdown_card; |
96 | } __aligned(8); | 91 | } __attribute__ ((aligned(8))); |
97 | 92 | ||
98 | /** | 93 | /** |
99 | * struct mic_device_page: High level representation of the device page | 94 | * struct mic_device_page: High level representation of the device page |
@@ -116,10 +111,10 @@ struct mic_device_page { | |||
116 | * @num: The number of entries in the virtio_ring | 111 | * @num: The number of entries in the virtio_ring |
117 | */ | 112 | */ |
118 | struct mic_vqconfig { | 113 | struct mic_vqconfig { |
119 | __u64 address; | 114 | __le64 address; |
120 | __u64 used_address; | 115 | __le64 used_address; |
121 | __u16 num; | 116 | __le16 num; |
122 | } __aligned(8); | 117 | } __attribute__ ((aligned(8))); |
123 | 118 | ||
124 | /* | 119 | /* |
125 | * The alignment to use between consumer and producer parts of vring. | 120 | * The alignment to use between consumer and producer parts of vring. |
@@ -154,7 +149,7 @@ struct mic_vqconfig { | |||
154 | */ | 149 | */ |
155 | struct _mic_vring_info { | 150 | struct _mic_vring_info { |
156 | __u16 avail_idx; | 151 | __u16 avail_idx; |
157 | int magic; | 152 | __le32 magic; |
158 | }; | 153 | }; |
159 | 154 | ||
160 | /** | 155 | /** |
@@ -173,15 +168,13 @@ struct mic_vring { | |||
173 | int len; | 168 | int len; |
174 | }; | 169 | }; |
175 | 170 | ||
176 | #define mic_aligned_desc_size(d) ALIGN(mic_desc_size(d), 8) | 171 | #define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8) |
177 | 172 | ||
178 | #ifndef INTEL_MIC_CARD | 173 | #ifndef INTEL_MIC_CARD |
179 | static inline unsigned mic_desc_size(const struct mic_device_desc *desc) | 174 | static inline unsigned mic_desc_size(const struct mic_device_desc *desc) |
180 | { | 175 | { |
181 | return mic_aligned_size(*desc) | 176 | return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig) |
182 | + desc->num_vq * mic_aligned_size(struct mic_vqconfig) | 177 | + desc->feature_len * 2 + desc->config_len; |
183 | + desc->feature_len * 2 | ||
184 | + desc->config_len; | ||
185 | } | 178 | } |
186 | 179 | ||
187 | static inline struct mic_vqconfig * | 180 | static inline struct mic_vqconfig * |
@@ -201,8 +194,7 @@ static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc) | |||
201 | } | 194 | } |
202 | static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) | 195 | static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) |
203 | { | 196 | { |
204 | return mic_aligned_desc_size(desc) + | 197 | return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); |
205 | mic_aligned_size(struct mic_device_ctrl); | ||
206 | } | 198 | } |
207 | #endif | 199 | #endif |
208 | 200 | ||
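The mic_common.h rewrite above drops the kernel-private ALIGN()/__aligned() fallbacks from the UAPI header and introduces a self-contained __mic_align() plus explicit __attribute__((aligned(8))) and little-endian field types. The rounding arithmetic is the usual power-of-two round-up; a small self-check (the macro name here is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as __mic_align(a, x): round a up to a multiple of the
     * power-of-two x. */
    #define my_align(a, x) (((a) + (x) - 1) & ~((uint64_t)(x) - 1))

    int main(void)
    {
        assert(my_align(0, 8)  == 0);
        assert(my_align(1, 8)  == 8);
        assert(my_align(8, 8)  == 8);
        assert(my_align(13, 8) == 16);
        return 0;
    }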
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6153ae..959d454f76a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h | |||
@@ -679,6 +679,7 @@ enum perf_event_type { | |||
679 | * | 679 | * |
680 | * { u64 weight; } && PERF_SAMPLE_WEIGHT | 680 | * { u64 weight; } && PERF_SAMPLE_WEIGHT |
681 | * { u64 data_src; } && PERF_SAMPLE_DATA_SRC | 681 | * { u64 data_src; } && PERF_SAMPLE_DATA_SRC |
682 | * { u64 transaction; } && PERF_SAMPLE_TRANSACTION | ||
682 | * }; | 683 | * }; |
683 | */ | 684 | */ |
684 | PERF_RECORD_SAMPLE = 9, | 685 | PERF_RECORD_SAMPLE = 9, |
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index d630163b9a2e..5759810e1c1b 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <sound/compress_params.h> | 30 | #include <sound/compress_params.h> |
31 | 31 | ||
32 | 32 | ||
33 | #define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) | 33 | #define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) |
34 | /** | 34 | /** |
35 | * struct snd_compressed_buffer: compressed buffer | 35 | * struct snd_compressed_buffer: compressed buffer |
36 | * @fragment_size: size of buffer fragment in bytes | 36 | * @fragment_size: size of buffer fragment in bytes |
@@ -67,8 +67,8 @@ struct snd_compr_params { | |||
67 | struct snd_compr_tstamp { | 67 | struct snd_compr_tstamp { |
68 | __u32 byte_offset; | 68 | __u32 byte_offset; |
69 | __u32 copied_total; | 69 | __u32 copied_total; |
70 | snd_pcm_uframes_t pcm_frames; | 70 | __u32 pcm_frames; |
71 | snd_pcm_uframes_t pcm_io_frames; | 71 | __u32 pcm_io_frames; |
72 | __u32 sampling_rate; | 72 | __u32 sampling_rate; |
73 | }; | 73 | }; |
74 | 74 | ||
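In the snd_compr_tstamp change above, the unsigned-long-sized snd_pcm_uframes_t fields become __u32, so the structure has the same layout for 32-bit and 64-bit userspace, and the protocol version is bumped to match. A sketch of how fixed-width members pin such a layout; the mirror struct below is illustrative, not the real UAPI definition:

    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    /* Illustrative mirror of the updated timestamp layout: only fixed-width
     * members, so 32-bit and 64-bit builds agree on size and offsets. */
    struct tstamp_abi {
        uint32_t byte_offset;
        uint32_t copied_total;
        uint32_t pcm_frames;
        uint32_t pcm_io_frames;
        uint32_t sampling_rate;
    };

    int main(void)
    {
        assert(sizeof(struct tstamp_abi) == 20);
        assert(offsetof(struct tstamp_abi, sampling_rate) == 16);
        return 0;
    }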
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e12099ef89..ae665ac59c36 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h | |||
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { | |||
146 | struct blkif_request_rw { | 146 | struct blkif_request_rw { |
147 | uint8_t nr_segments; /* number of segments */ | 147 | uint8_t nr_segments; /* number of segments */ |
148 | blkif_vdev_t handle; /* only for read/write requests */ | 148 | blkif_vdev_t handle; /* only for read/write requests */ |
149 | #ifdef CONFIG_X86_64 | 149 | #ifndef CONFIG_X86_32 |
150 | uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ | 150 | uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ |
151 | #endif | 151 | #endif |
152 | uint64_t id; /* private guest value, echoed in resp */ | 152 | uint64_t id; /* private guest value, echoed in resp */ |
@@ -163,7 +163,7 @@ struct blkif_request_discard { | |||
163 | uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ | 163 | uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ |
164 | #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ | 164 | #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ |
165 | blkif_vdev_t _pad1; /* only for read/write requests */ | 165 | blkif_vdev_t _pad1; /* only for read/write requests */ |
166 | #ifdef CONFIG_X86_64 | 166 | #ifndef CONFIG_X86_32 |
167 | uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ | 167 | uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ |
168 | #endif | 168 | #endif |
169 | uint64_t id; /* private guest value, echoed in resp */ | 169 | uint64_t id; /* private guest value, echoed in resp */ |
@@ -175,7 +175,7 @@ struct blkif_request_discard { | |||
175 | struct blkif_request_other { | 175 | struct blkif_request_other { |
176 | uint8_t _pad1; | 176 | uint8_t _pad1; |
177 | blkif_vdev_t _pad2; /* only for read/write requests */ | 177 | blkif_vdev_t _pad2; /* only for read/write requests */ |
178 | #ifdef CONFIG_X86_64 | 178 | #ifndef CONFIG_X86_32 |
179 | uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ | 179 | uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ |
180 | #endif | 180 | #endif |
181 | uint64_t id; /* private guest value, echoed in resp */ | 181 | uint64_t id; /* private guest value, echoed in resp */ |
@@ -184,7 +184,7 @@ struct blkif_request_other { | |||
184 | struct blkif_request_indirect { | 184 | struct blkif_request_indirect { |
185 | uint8_t indirect_op; | 185 | uint8_t indirect_op; |
186 | uint16_t nr_segments; | 186 | uint16_t nr_segments; |
187 | #ifdef CONFIG_X86_64 | 187 | #ifndef CONFIG_X86_32 |
188 | uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ | 188 | uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ |
189 | #endif | 189 | #endif |
190 | uint64_t id; | 190 | uint64_t id; |
@@ -192,7 +192,7 @@ struct blkif_request_indirect { | |||
192 | blkif_vdev_t handle; | 192 | blkif_vdev_t handle; |
193 | uint16_t _pad2; | 193 | uint16_t _pad2; |
194 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; | 194 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; |
195 | #ifdef CONFIG_X86_64 | 195 | #ifndef CONFIG_X86_32 |
196 | uint32_t _pad3; /* make it 64 byte aligned */ | 196 | uint32_t _pad3; /* make it 64 byte aligned */ |
197 | #else | 197 | #else |
198 | uint64_t _pad3; /* make it 64 byte aligned */ | 198 | uint64_t _pad3; /* make it 64 byte aligned */ |
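The blkif.h hunks switch the padding guards from #ifdef CONFIG_X86_64 to #ifndef CONFIG_X86_32, so every architecture except 32-bit x86 carries the explicit pad words and the 64-bit id field stays at the offset the comments assert. A reduced sketch of the idea, with a compile-time check standing in for the comment (struct and field names are made up):

    #include <stdint.h>
    #include <stddef.h>

    /* Reduced request header: the explicit pad keeps 'id' at offset 8 on the
     * usual ABIs instead of relying on each compiler's u64 alignment rules. */
    struct rw_req {
        uint8_t  nr_segments;
        uint16_t handle;
        uint32_t _pad1;          /* offsetof(struct rw_req, id) == 8 */
        uint64_t id;
    };

    _Static_assert(offsetof(struct rw_req, id) == 8, "shared ABI layout drifted");

    int main(void) { return 0; }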
diff --git a/init/Kconfig b/init/Kconfig index 79383d3aa5dc..4e5d96ab2034 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK | |||
809 | config ARCH_SUPPORTS_NUMA_BALANCING | 809 | config ARCH_SUPPORTS_NUMA_BALANCING |
810 | bool | 810 | bool |
811 | 811 | ||
812 | # | ||
813 | # For architectures that know their GCC __int128 support is sound | ||
814 | # | ||
815 | config ARCH_SUPPORTS_INT128 | ||
816 | bool | ||
817 | |||
812 | # For architectures that (ab)use NUMA to represent different memory regions | 818 | # For architectures that (ab)use NUMA to represent different memory regions |
813 | # all cpu-local but of different latencies, such as SuperH. | 819 | # all cpu-local but of different latencies, such as SuperH. |
814 | # | 820 | # |
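The new ARCH_SUPPORTS_INT128 symbol above lets architectures advertise that GCC's __int128 is usable, so generic code can pick a 128-bit path at build time. The sketch below shows the shape of such a guarded helper; it keys off the compiler's __SIZEOF_INT128__ macro rather than the Kconfig symbol, and the function name is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #ifdef __SIZEOF_INT128__
    /* 128-bit path: let the compiler emit a widening multiply. */
    static uint64_t mul_hi64(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }
    #else
    /* Fallback: accumulate 32x32->64 partial products by hand. */
    static uint64_t mul_hi64(uint64_t a, uint64_t b)
    {
        uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
        uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
        uint64_t mid  = a_hi * b_lo + ((a_lo * b_lo) >> 32);
        uint64_t mid2 = a_lo * b_hi + (uint32_t)mid;

        return a_hi * b_hi + (mid >> 32) + (mid2 >> 32);
    }
    #endif

    int main(void)
    {
        /* high half of (2^64 - 1) * 3 is 2 */
        printf("%" PRIu64 "\n", mul_hi64(~0ULL, 3));
        return 0;
    }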
diff --git a/kernel/.gitignore b/kernel/.gitignore index b3097bde4e9c..790d83c7d160 100644 --- a/kernel/.gitignore +++ b/kernel/.gitignore | |||
@@ -5,3 +5,4 @@ config_data.h | |||
5 | config_data.gz | 5 | config_data.gz |
6 | timeconst.h | 6 | timeconst.h |
7 | hz.bc | 7 | hz.bc |
8 | x509_certificate_list | ||
diff --git a/kernel/Makefile b/kernel/Makefile index bbaf7d59c1bb..bc010ee272b6 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE | |||
137 | ############################################################################### | 137 | ############################################################################### |
138 | ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) | 138 | ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) |
139 | X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) | 139 | X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) |
140 | X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 | 140 | X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509 |
141 | X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ | 141 | X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ |
142 | $(or $(realpath $(CERT)),$(CERT)))) | 142 | $(or $(realpath $(CERT)),$(CERT)))) |
143 | X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw)) | ||
143 | 144 | ||
144 | ifeq ($(X509_CERTIFICATES),) | 145 | ifeq ($(X509_CERTIFICATES),) |
145 | $(warning *** No X.509 certificates found ***) | 146 | $(warning *** No X.509 certificates found ***) |
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list | |||
164 | targets += $(obj)/.x509.list | 165 | targets += $(obj)/.x509.list |
165 | $(obj)/.x509.list: | 166 | $(obj)/.x509.list: |
166 | @echo $(X509_CERTIFICATES) >$@ | 167 | @echo $(X509_CERTIFICATES) >$@ |
168 | endif | ||
167 | 169 | ||
168 | clean-files := x509_certificate_list .x509.list | 170 | clean-files := x509_certificate_list .x509.list |
169 | endif | ||
170 | 171 | ||
171 | ifeq ($(CONFIG_MODULE_SIG),y) | 172 | ifeq ($(CONFIG_MODULE_SIG),y) |
172 | ############################################################################### | 173 | ############################################################################### |
diff --git a/kernel/bounds.c b/kernel/bounds.c index 5253204afdca..9fd4246b04b8 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c | |||
@@ -22,6 +22,6 @@ void foo(void) | |||
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); | 23 | DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); |
24 | #endif | 24 | #endif |
25 | DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); | 25 | DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); |
26 | /* End of constants */ | 26 | /* End of constants */ |
27 | } | 27 | } |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8b729c278b64..bc1dcabe9217 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
890 | struct cgroup *cgrp = dentry->d_fsdata; | 890 | struct cgroup *cgrp = dentry->d_fsdata; |
891 | 891 | ||
892 | BUG_ON(!(cgroup_is_dead(cgrp))); | 892 | BUG_ON(!(cgroup_is_dead(cgrp))); |
893 | |||
894 | /* | ||
895 | * XXX: cgrp->id is only used to look up css's. As cgroup | ||
896 | * and css's lifetimes will be decoupled, it should be made | ||
897 | * per-subsystem and moved to css->id so that lookups are | ||
898 | * successful until the target css is released. | ||
899 | */ | ||
900 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | ||
901 | cgrp->id = -1; | ||
902 | |||
893 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); | 903 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); |
894 | } else { | 904 | } else { |
895 | struct cfent *cfe = __d_cfe(dentry); | 905 | struct cfent *cfe = __d_cfe(dentry); |
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref) | |||
4268 | struct cgroup_subsys_state *css = | 4278 | struct cgroup_subsys_state *css = |
4269 | container_of(ref, struct cgroup_subsys_state, refcnt); | 4279 | container_of(ref, struct cgroup_subsys_state, refcnt); |
4270 | 4280 | ||
4281 | rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); | ||
4271 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 4282 | call_rcu(&css->rcu_head, css_free_rcu_fn); |
4272 | } | 4283 | } |
4273 | 4284 | ||
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
4426 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); | 4437 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); |
4427 | root->number_of_cgroups++; | 4438 | root->number_of_cgroups++; |
4428 | 4439 | ||
4429 | /* each css holds a ref to the cgroup's dentry and the parent css */ | ||
4430 | for_each_root_subsys(root, ss) { | ||
4431 | struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; | ||
4432 | |||
4433 | dget(dentry); | ||
4434 | css_get(css->parent); | ||
4435 | } | ||
4436 | |||
4437 | /* hold a ref to the parent's dentry */ | 4440 | /* hold a ref to the parent's dentry */ |
4438 | dget(parent->dentry); | 4441 | dget(parent->dentry); |
4439 | 4442 | ||
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
4445 | if (err) | 4448 | if (err) |
4446 | goto err_destroy; | 4449 | goto err_destroy; |
4447 | 4450 | ||
4451 | /* each css holds a ref to the cgroup's dentry and parent css */ | ||
4452 | dget(dentry); | ||
4453 | css_get(css->parent); | ||
4454 | |||
4455 | /* mark it consumed for error path */ | ||
4456 | css_ar[ss->subsys_id] = NULL; | ||
4457 | |||
4448 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && | 4458 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && |
4449 | parent->parent) { | 4459 | parent->parent) { |
4450 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", | 4460 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", |
@@ -4491,6 +4501,14 @@ err_free_cgrp: | |||
4491 | return err; | 4501 | return err; |
4492 | 4502 | ||
4493 | err_destroy: | 4503 | err_destroy: |
4504 | for_each_root_subsys(root, ss) { | ||
4505 | struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; | ||
4506 | |||
4507 | if (css) { | ||
4508 | percpu_ref_cancel_init(&css->refcnt); | ||
4509 | ss->css_free(css); | ||
4510 | } | ||
4511 | } | ||
4494 | cgroup_destroy_locked(cgrp); | 4512 | cgroup_destroy_locked(cgrp); |
4495 | mutex_unlock(&cgroup_mutex); | 4513 | mutex_unlock(&cgroup_mutex); |
4496 | mutex_unlock(&dentry->d_inode->i_mutex); | 4514 | mutex_unlock(&dentry->d_inode->i_mutex); |
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
4652 | * will be invoked to perform the rest of destruction once the | 4670 | * will be invoked to perform the rest of destruction once the |
4653 | * percpu refs of all css's are confirmed to be killed. | 4671 | * percpu refs of all css's are confirmed to be killed. |
4654 | */ | 4672 | */ |
4655 | for_each_root_subsys(cgrp->root, ss) | 4673 | for_each_root_subsys(cgrp->root, ss) { |
4656 | kill_css(cgroup_css(cgrp, ss)); | 4674 | struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); |
4675 | |||
4676 | if (css) | ||
4677 | kill_css(css); | ||
4678 | } | ||
4657 | 4679 | ||
4658 | /* | 4680 | /* |
4659 | * Mark @cgrp dead. This prevents further task migration and child | 4681 | * Mark @cgrp dead. This prevents further task migration and child |
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) | |||
4722 | /* delete this cgroup from parent->children */ | 4744 | /* delete this cgroup from parent->children */ |
4723 | list_del_rcu(&cgrp->sibling); | 4745 | list_del_rcu(&cgrp->sibling); |
4724 | 4746 | ||
4725 | /* | ||
4726 | * We should remove the cgroup object from idr before its grace | ||
4727 | * period starts, so we won't be looking up a cgroup while the | ||
4728 | * cgroup is being freed. | ||
4729 | */ | ||
4730 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | ||
4731 | cgrp->id = -1; | ||
4732 | |||
4733 | dput(d); | 4747 | dput(d); |
4734 | 4748 | ||
4735 | set_bit(CGRP_RELEASABLE, &parent->flags); | 4749 | set_bit(CGRP_RELEASABLE, &parent->flags); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 72348dc192c1..f5744010a8d2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event, | |||
1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
1397 | return; | 1397 | return; |
1398 | 1398 | ||
1399 | perf_pmu_disable(event->pmu); | ||
1400 | |||
1399 | event->state = PERF_EVENT_STATE_INACTIVE; | 1401 | event->state = PERF_EVENT_STATE_INACTIVE; |
1400 | if (event->pending_disable) { | 1402 | if (event->pending_disable) { |
1401 | event->pending_disable = 0; | 1403 | event->pending_disable = 0; |
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event, | |||
1412 | ctx->nr_freq--; | 1414 | ctx->nr_freq--; |
1413 | if (event->attr.exclusive || !cpuctx->active_oncpu) | 1415 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
1414 | cpuctx->exclusive = 0; | 1416 | cpuctx->exclusive = 0; |
1417 | |||
1418 | perf_pmu_enable(event->pmu); | ||
1415 | } | 1419 | } |
1416 | 1420 | ||
1417 | static void | 1421 | static void |
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event, | |||
1652 | struct perf_event_context *ctx) | 1656 | struct perf_event_context *ctx) |
1653 | { | 1657 | { |
1654 | u64 tstamp = perf_event_time(event); | 1658 | u64 tstamp = perf_event_time(event); |
1659 | int ret = 0; | ||
1655 | 1660 | ||
1656 | if (event->state <= PERF_EVENT_STATE_OFF) | 1661 | if (event->state <= PERF_EVENT_STATE_OFF) |
1657 | return 0; | 1662 | return 0; |
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event, | |||
1674 | */ | 1679 | */ |
1675 | smp_wmb(); | 1680 | smp_wmb(); |
1676 | 1681 | ||
1682 | perf_pmu_disable(event->pmu); | ||
1683 | |||
1677 | if (event->pmu->add(event, PERF_EF_START)) { | 1684 | if (event->pmu->add(event, PERF_EF_START)) { |
1678 | event->state = PERF_EVENT_STATE_INACTIVE; | 1685 | event->state = PERF_EVENT_STATE_INACTIVE; |
1679 | event->oncpu = -1; | 1686 | event->oncpu = -1; |
1680 | return -EAGAIN; | 1687 | ret = -EAGAIN; |
1688 | goto out; | ||
1681 | } | 1689 | } |
1682 | 1690 | ||
1683 | event->tstamp_running += tstamp - event->tstamp_stopped; | 1691 | event->tstamp_running += tstamp - event->tstamp_stopped; |
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event, | |||
1693 | if (event->attr.exclusive) | 1701 | if (event->attr.exclusive) |
1694 | cpuctx->exclusive = 1; | 1702 | cpuctx->exclusive = 1; |
1695 | 1703 | ||
1696 | return 0; | 1704 | out: |
1705 | perf_pmu_enable(event->pmu); | ||
1706 | |||
1707 | return ret; | ||
1697 | } | 1708 | } |
1698 | 1709 | ||
1699 | static int | 1710 | static int |
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
2743 | if (!event_filter_match(event)) | 2754 | if (!event_filter_match(event)) |
2744 | continue; | 2755 | continue; |
2745 | 2756 | ||
2757 | perf_pmu_disable(event->pmu); | ||
2758 | |||
2746 | hwc = &event->hw; | 2759 | hwc = &event->hw; |
2747 | 2760 | ||
2748 | if (hwc->interrupts == MAX_INTERRUPTS) { | 2761 | if (hwc->interrupts == MAX_INTERRUPTS) { |
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
2752 | } | 2765 | } |
2753 | 2766 | ||
2754 | if (!event->attr.freq || !event->attr.sample_freq) | 2767 | if (!event->attr.freq || !event->attr.sample_freq) |
2755 | continue; | 2768 | goto next; |
2756 | 2769 | ||
2757 | /* | 2770 | /* |
2758 | * stop the event and update event->count | 2771 | * stop the event and update event->count |
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
2774 | perf_adjust_period(event, period, delta, false); | 2787 | perf_adjust_period(event, period, delta, false); |
2775 | 2788 | ||
2776 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); | 2789 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
2790 | next: | ||
2791 | perf_pmu_enable(event->pmu); | ||
2777 | } | 2792 | } |
2778 | 2793 | ||
2779 | perf_pmu_enable(ctx->pmu); | 2794 | perf_pmu_enable(ctx->pmu); |
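The events/core.c hunks bracket each event's scheduling in/out and frequency adjustment with perf_pmu_disable(event->pmu)/perf_pmu_enable(event->pmu), rewriting early returns and continues as gotos so the enable is never skipped. The stand-alone sketch below shows only that control-flow pattern; the counters and names are invented for illustration:

    #include <stdio.h>

    /* Illustrative stand-ins for perf_pmu_disable()/perf_pmu_enable(). */
    static int disable_depth;
    static void pmu_disable(void) { disable_depth++; }
    static void pmu_enable(void)  { disable_depth--; }

    /* Every early exit funnels through one label so the enable always pairs
     * with the disable, mirroring the goto next/goto out rewrites above. */
    static int program_event(int should_skip, int add_fails)
    {
        int ret = 0;

        pmu_disable();

        if (should_skip)
            goto out;            /* used to be a bare continue/return */

        if (add_fails) {
            ret = -1;
            goto out;
        }

        /* ...reprogram the counter hardware here... */
    out:
        pmu_enable();
        return ret;
    }

    int main(void)
    {
        program_event(1, 0);
        program_event(0, 1);
        printf("unbalanced disables: %d\n", disable_depth);   /* prints 0 */
        return 0;
    }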
diff --git a/kernel/fork.c b/kernel/fork.c index 728d5be9548c..5721f0e3f2da 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
537 | spin_lock_init(&mm->page_table_lock); | 537 | spin_lock_init(&mm->page_table_lock); |
538 | mm_init_aio(mm); | 538 | mm_init_aio(mm); |
539 | mm_init_owner(mm, p); | 539 | mm_init_owner(mm, p); |
540 | clear_tlb_flush_pending(mm); | ||
540 | 541 | ||
541 | if (likely(!mm_alloc_pgd(mm))) { | 542 | if (likely(!mm_alloc_pgd(mm))) { |
542 | mm->def_flags = 0; | 543 | mm->def_flags = 0; |
diff --git a/kernel/freezer.c b/kernel/freezer.c index b462fa197517..aa6a8aadb911 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt); | |||
19 | bool pm_freezing; | 19 | bool pm_freezing; |
20 | bool pm_nosig_freezing; | 20 | bool pm_nosig_freezing; |
21 | 21 | ||
22 | /* | ||
23 | * Temporary export for the deadlock workaround in ata_scsi_hotplug(). | ||
24 | * Remove once the hack becomes unnecessary. | ||
25 | */ | ||
26 | EXPORT_SYMBOL_GPL(pm_freezing); | ||
27 | |||
22 | /* protects freezing and frozen transitions */ | 28 | /* protects freezing and frozen transitions */ |
23 | static DEFINE_SPINLOCK(freezer_lock); | 29 | static DEFINE_SPINLOCK(freezer_lock); |
24 | 30 | ||
diff --git a/kernel/futex.c b/kernel/futex.c index 80ba086f021d..f6ff0191ecf7 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
251 | return -EINVAL; | 251 | return -EINVAL; |
252 | address -= key->both.offset; | 252 | address -= key->both.offset; |
253 | 253 | ||
254 | if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) | ||
255 | return -EFAULT; | ||
256 | |||
254 | /* | 257 | /* |
255 | * PROCESS_PRIVATE futexes are fast. | 258 | * PROCESS_PRIVATE futexes are fast. |
256 | * As the mm cannot disappear under us and the 'key' only needs | 259 | * As the mm cannot disappear under us and the 'key' only needs |
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
259 | * but access_ok() should be faster than find_vma() | 262 | * but access_ok() should be faster than find_vma() |
260 | */ | 263 | */ |
261 | if (!fshared) { | 264 | if (!fshared) { |
262 | if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) | ||
263 | return -EFAULT; | ||
264 | key->private.mm = mm; | 265 | key->private.mm = mm; |
265 | key->private.address = address; | 266 | key->private.address = address; |
266 | get_futex_key_refs(key); | 267 | get_futex_key_refs(key); |
@@ -288,7 +289,7 @@ again: | |||
288 | put_page(page); | 289 | put_page(page); |
289 | /* serialize against __split_huge_page_splitting() */ | 290 | /* serialize against __split_huge_page_splitting() */ |
290 | local_irq_disable(); | 291 | local_irq_disable(); |
291 | if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) { | 292 | if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) { |
292 | page_head = compound_head(page); | 293 | page_head = compound_head(page); |
293 | /* | 294 | /* |
294 | * page_head is valid pointer but we must pin | 295 | * page_head is valid pointer but we must pin |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 490afc03627e..9c970167e402 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; | |||
47 | size_t vmcoreinfo_size; | 47 | size_t vmcoreinfo_size; |
48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); | 48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); |
49 | 49 | ||
50 | /* Flag to indicate we are going to kexec a new kernel */ | ||
51 | bool kexec_in_progress = false; | ||
52 | |||
50 | /* Location of the reserved area for the crash kernel */ | 53 | /* Location of the reserved area for the crash kernel */ |
51 | struct resource crashk_res = { | 54 | struct resource crashk_res = { |
52 | .name = "Crash kernel", | 55 | .name = "Crash kernel", |
@@ -1675,7 +1678,9 @@ int kernel_kexec(void) | |||
1675 | } else | 1678 | } else |
1676 | #endif | 1679 | #endif |
1677 | { | 1680 | { |
1681 | kexec_in_progress = true; | ||
1678 | kernel_restart_prepare(NULL); | 1682 | kernel_restart_prepare(NULL); |
1683 | migrate_to_reboot_cpu(); | ||
1679 | printk(KERN_EMERG "Starting new kernel\n"); | 1684 | printk(KERN_EMERG "Starting new kernel\n"); |
1680 | machine_shutdown(); | 1685 | machine_shutdown(); |
1681 | } | 1686 | } |
diff --git a/kernel/power/console.c b/kernel/power/console.c index 463aa6736751..eacb8bd8cab4 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c | |||
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev) | |||
81 | list_for_each_entry(tmp, &pm_vt_switch_list, head) { | 81 | list_for_each_entry(tmp, &pm_vt_switch_list, head) { |
82 | if (tmp->dev == dev) { | 82 | if (tmp->dev == dev) { |
83 | list_del(&tmp->head); | 83 | list_del(&tmp->head); |
84 | kfree(tmp); | ||
84 | break; | 85 | break; |
85 | } | 86 | } |
86 | } | 87 | } |
diff --git a/kernel/reboot.c b/kernel/reboot.c index f813b3474646..662c83fc16b7 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c | |||
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb) | |||
104 | } | 104 | } |
105 | EXPORT_SYMBOL(unregister_reboot_notifier); | 105 | EXPORT_SYMBOL(unregister_reboot_notifier); |
106 | 106 | ||
107 | static void migrate_to_reboot_cpu(void) | 107 | void migrate_to_reboot_cpu(void) |
108 | { | 108 | { |
109 | /* The boot cpu is always logical cpu 0 */ | 109 | /* The boot cpu is always logical cpu 0 */ |
110 | int cpu = reboot_cpu; | 110 | int cpu = reboot_cpu; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e85cda20ab2b..a88f4a485c5e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym); | |||
4902 | static void update_top_cache_domain(int cpu) | 4902 | static void update_top_cache_domain(int cpu) |
4903 | { | 4903 | { |
4904 | struct sched_domain *sd; | 4904 | struct sched_domain *sd; |
4905 | struct sched_domain *busy_sd = NULL; | ||
4905 | int id = cpu; | 4906 | int id = cpu; |
4906 | int size = 1; | 4907 | int size = 1; |
4907 | 4908 | ||
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu) | |||
4909 | if (sd) { | 4910 | if (sd) { |
4910 | id = cpumask_first(sched_domain_span(sd)); | 4911 | id = cpumask_first(sched_domain_span(sd)); |
4911 | size = cpumask_weight(sched_domain_span(sd)); | 4912 | size = cpumask_weight(sched_domain_span(sd)); |
4912 | sd = sd->parent; /* sd_busy */ | 4913 | busy_sd = sd->parent; /* sd_busy */ |
4913 | } | 4914 | } |
4914 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); | 4915 | rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); |
4915 | 4916 | ||
4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 4917 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
4917 | per_cpu(sd_llc_size, cpu) = size; | 4918 | per_cpu(sd_llc_size, cpu) = size; |
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
5112 | * die on a /0 trap. | 5113 | * die on a /0 trap. |
5113 | */ | 5114 | */ |
5114 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); | 5115 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); |
5116 | sg->sgp->power_orig = sg->sgp->power; | ||
5115 | 5117 | ||
5116 | /* | 5118 | /* |
5117 | * Make sure the first group of this domain contains the | 5119 | * Make sure the first group of this domain contains the |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fd773ade1a31..c7395d97e4cb 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -178,59 +178,61 @@ void sched_init_granularity(void) | |||
178 | update_sysctl(); | 178 | update_sysctl(); |
179 | } | 179 | } |
180 | 180 | ||
181 | #if BITS_PER_LONG == 32 | 181 | #define WMULT_CONST (~0U) |
182 | # define WMULT_CONST (~0UL) | ||
183 | #else | ||
184 | # define WMULT_CONST (1UL << 32) | ||
185 | #endif | ||
186 | |||
187 | #define WMULT_SHIFT 32 | 182 | #define WMULT_SHIFT 32 |
188 | 183 | ||
189 | /* | 184 | static void __update_inv_weight(struct load_weight *lw) |
190 | * Shift right and round: | 185 | { |
191 | */ | 186 | unsigned long w; |
192 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | 187 | |
188 | if (likely(lw->inv_weight)) | ||
189 | return; | ||
190 | |||
191 | w = scale_load_down(lw->weight); | ||
192 | |||
193 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | ||
194 | lw->inv_weight = 1; | ||
195 | else if (unlikely(!w)) | ||
196 | lw->inv_weight = WMULT_CONST; | ||
197 | else | ||
198 | lw->inv_weight = WMULT_CONST / w; | ||
199 | } | ||
193 | 200 | ||
194 | /* | 201 | /* |
195 | * delta *= weight / lw | 202 | * delta_exec * weight / lw.weight |
203 | * OR | ||
204 | * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT | ||
205 | * | ||
206 | * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case | ||
207 | * we're guaranteed shift stays positive because inv_weight is guaranteed to | ||
208 | * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. | ||
209 | * | ||
210 | * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus | ||
211 | * weight/lw.weight <= 1, and therefore our shift will also be positive. | ||
196 | */ | 212 | */ |
197 | static unsigned long | 213 | static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) |
198 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | ||
199 | struct load_weight *lw) | ||
200 | { | 214 | { |
201 | u64 tmp; | 215 | u64 fact = scale_load_down(weight); |
202 | 216 | int shift = WMULT_SHIFT; | |
203 | /* | ||
204 | * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched | ||
205 | * entities since MIN_SHARES = 2. Treat weight as 1 if less than | ||
206 | * 2^SCHED_LOAD_RESOLUTION. | ||
207 | */ | ||
208 | if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) | ||
209 | tmp = (u64)delta_exec * scale_load_down(weight); | ||
210 | else | ||
211 | tmp = (u64)delta_exec; | ||
212 | 217 | ||
213 | if (!lw->inv_weight) { | 218 | __update_inv_weight(lw); |
214 | unsigned long w = scale_load_down(lw->weight); | ||
215 | 219 | ||
216 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | 220 | if (unlikely(fact >> 32)) { |
217 | lw->inv_weight = 1; | 221 | while (fact >> 32) { |
218 | else if (unlikely(!w)) | 222 | fact >>= 1; |
219 | lw->inv_weight = WMULT_CONST; | 223 | shift--; |
220 | else | 224 | } |
221 | lw->inv_weight = WMULT_CONST / w; | ||
222 | } | 225 | } |
223 | 226 | ||
224 | /* | 227 | /* hint to use a 32x32->64 mul */ |
225 | * Check whether we'd overflow the 64-bit multiplication: | 228 | fact = (u64)(u32)fact * lw->inv_weight; |
226 | */ | 229 | |
227 | if (unlikely(tmp > WMULT_CONST)) | 230 | while (fact >> 32) { |
228 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, | 231 | fact >>= 1; |
229 | WMULT_SHIFT/2); | 232 | shift--; |
230 | else | 233 | } |
231 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); | ||
232 | 234 | ||
233 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 235 | return mul_u64_u32_shr(delta_exec, fact, shift); |
234 | } | 236 | } |
235 | 237 | ||
236 | 238 | ||
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) | |||
443 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 445 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
444 | 446 | ||
445 | static __always_inline | 447 | static __always_inline |
446 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); | 448 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); |
447 | 449 | ||
448 | /************************************************************** | 450 | /************************************************************** |
449 | * Scheduling class tree data structure manipulation methods: | 451 | * Scheduling class tree data structure manipulation methods: |
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write, | |||
612 | /* | 614 | /* |
613 | * delta /= w | 615 | * delta /= w |
614 | */ | 616 | */ |
615 | static inline unsigned long | 617 | static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) |
616 | calc_delta_fair(unsigned long delta, struct sched_entity *se) | ||
617 | { | 618 | { |
618 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 619 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
619 | delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); | 620 | delta = __calc_delta(delta, NICE_0_LOAD, &se->load); |
620 | 621 | ||
621 | return delta; | 622 | return delta; |
622 | } | 623 | } |
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
665 | update_load_add(&lw, se->load.weight); | 666 | update_load_add(&lw, se->load.weight); |
666 | load = &lw; | 667 | load = &lw; |
667 | } | 668 | } |
668 | slice = calc_delta_mine(slice, se->load.weight, load); | 669 | slice = __calc_delta(slice, se->load.weight, load); |
669 | } | 670 | } |
670 | return slice; | 671 | return slice; |
671 | } | 672 | } |
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p) | |||
703 | #endif | 704 | #endif |
704 | 705 | ||
705 | /* | 706 | /* |
706 | * Update the current task's runtime statistics. Skip current tasks that | 707 | * Update the current task's runtime statistics. |
707 | * are not in our scheduling class. | ||
708 | */ | 708 | */ |
709 | static inline void | ||
710 | __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, | ||
711 | unsigned long delta_exec) | ||
712 | { | ||
713 | unsigned long delta_exec_weighted; | ||
714 | |||
715 | schedstat_set(curr->statistics.exec_max, | ||
716 | max((u64)delta_exec, curr->statistics.exec_max)); | ||
717 | |||
718 | curr->sum_exec_runtime += delta_exec; | ||
719 | schedstat_add(cfs_rq, exec_clock, delta_exec); | ||
720 | delta_exec_weighted = calc_delta_fair(delta_exec, curr); | ||
721 | |||
722 | curr->vruntime += delta_exec_weighted; | ||
723 | update_min_vruntime(cfs_rq); | ||
724 | } | ||
725 | |||
726 | static void update_curr(struct cfs_rq *cfs_rq) | 709 | static void update_curr(struct cfs_rq *cfs_rq) |
727 | { | 710 | { |
728 | struct sched_entity *curr = cfs_rq->curr; | 711 | struct sched_entity *curr = cfs_rq->curr; |
729 | u64 now = rq_clock_task(rq_of(cfs_rq)); | 712 | u64 now = rq_clock_task(rq_of(cfs_rq)); |
730 | unsigned long delta_exec; | 713 | u64 delta_exec; |
731 | 714 | ||
732 | if (unlikely(!curr)) | 715 | if (unlikely(!curr)) |
733 | return; | 716 | return; |
734 | 717 | ||
735 | /* | 718 | delta_exec = now - curr->exec_start; |
736 | * Get the amount of time the current task was running | 719 | if (unlikely((s64)delta_exec <= 0)) |
737 | * since the last time we changed load (this cannot | ||
738 | * overflow on 32 bits): | ||
739 | */ | ||
740 | delta_exec = (unsigned long)(now - curr->exec_start); | ||
741 | if (!delta_exec) | ||
742 | return; | 720 | return; |
743 | 721 | ||
744 | __update_curr(cfs_rq, curr, delta_exec); | ||
745 | curr->exec_start = now; | 722 | curr->exec_start = now; |
746 | 723 | ||
724 | schedstat_set(curr->statistics.exec_max, | ||
725 | max(delta_exec, curr->statistics.exec_max)); | ||
726 | |||
727 | curr->sum_exec_runtime += delta_exec; | ||
728 | schedstat_add(cfs_rq, exec_clock, delta_exec); | ||
729 | |||
730 | curr->vruntime += calc_delta_fair(delta_exec, curr); | ||
731 | update_min_vruntime(cfs_rq); | ||
732 | |||
747 | if (entity_is_task(curr)) { | 733 | if (entity_is_task(curr)) { |
748 | struct task_struct *curtask = task_of(curr); | 734 | struct task_struct *curtask = task_of(curr); |
749 | 735 | ||
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work) | |||
1752 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) | 1738 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) |
1753 | continue; | 1739 | continue; |
1754 | 1740 | ||
1741 | /* | ||
1742 | * Skip inaccessible VMAs to avoid any confusion between | ||
1743 | * PROT_NONE and NUMA hinting ptes | ||
1744 | */ | ||
1745 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
1746 | continue; | ||
1747 | |||
1755 | do { | 1748 | do { |
1756 | start = max(start, vma->vm_start); | 1749 | start = max(start, vma->vm_start); |
1757 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); | 1750 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); |
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
3015 | } | 3008 | } |
3016 | } | 3009 | } |
3017 | 3010 | ||
3018 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 3011 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
3019 | unsigned long delta_exec) | ||
3020 | { | 3012 | { |
3021 | /* dock delta_exec before expiring quota (as it could span periods) */ | 3013 | /* dock delta_exec before expiring quota (as it could span periods) */ |
3022 | cfs_rq->runtime_remaining -= delta_exec; | 3014 | cfs_rq->runtime_remaining -= delta_exec; |
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | |||
3034 | } | 3026 | } |
3035 | 3027 | ||
3036 | static __always_inline | 3028 | static __always_inline |
3037 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) | 3029 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
3038 | { | 3030 | { |
3039 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) | 3031 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) |
3040 | return; | 3032 | return; |
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) | |||
3574 | return rq_clock_task(rq_of(cfs_rq)); | 3566 | return rq_clock_task(rq_of(cfs_rq)); |
3575 | } | 3567 | } |
3576 | 3568 | ||
3577 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | 3569 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} |
3578 | unsigned long delta_exec) {} | ||
3579 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3570 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
3580 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} | 3571 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} |
3581 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3572 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
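The sched/fair.c rewrite above replaces calc_delta_mine() with __calc_delta(), which computes delta_exec * weight / lw.weight entirely in fixed point: inv_weight approximates 2^32 / lw.weight, the product weight * inv_weight is normalised into 32 bits while the shift absorbs the dropped bits, and mul_u64_u32_shr() does the final 64x32 multiply and shift. A userspace re-creation of that arithmetic, using the compiler's 128-bit type in place of the kernel helper; all names here are stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #define WMULT_CONST  (~0U)      /* ~2^32, the fixed-point scale */
    #define WMULT_SHIFT  32

    /* Stand-in for the kernel's mul_u64_u32_shr(). */
    static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
    {
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
    }

    /* Same shape as __calc_delta(): delta * weight / lw_weight, done as
     * delta * (weight * inv_weight) >> shift, inv_weight ~= 2^32 / lw_weight. */
    static uint64_t calc_delta(uint64_t delta, uint64_t weight, uint64_t lw_weight)
    {
        uint32_t inv_weight = WMULT_CONST / lw_weight;
        uint64_t fact = weight;
        int shift = WMULT_SHIFT;

        /* Normalise 'fact' into 32 bits, giving the lost bits back via shift. */
        while (fact >> 32) {
            fact >>= 1;
            shift--;
        }

        fact = (uint64_t)(uint32_t)fact * inv_weight;

        while (fact >> 32) {
            fact >>= 1;
            shift--;
        }

        return mul_u64_u32_shr(delta, (uint32_t)fact, shift);
    }

    int main(void)
    {
        /* e.g. a weight-1024 entity on a runqueue of total weight 3072 gets
         * roughly a third of the elapsed nanoseconds. */
        printf("%" PRIu64 "\n", calc_delta(3000000, 1024, 3072));  /* ~1000000 */
        return 0;
    }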
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7d57275fc396..1c4065575fa2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) | |||
901 | { | 901 | { |
902 | struct rq *rq = rq_of_rt_rq(rt_rq); | 902 | struct rq *rq = rq_of_rt_rq(rt_rq); |
903 | 903 | ||
904 | #ifdef CONFIG_RT_GROUP_SCHED | ||
905 | /* | ||
906 | * Change rq's cpupri only if rt_rq is the top queue. | ||
907 | */ | ||
908 | if (&rq->rt != rt_rq) | ||
909 | return; | ||
910 | #endif | ||
904 | if (rq->online && prio < prev_prio) | 911 | if (rq->online && prio < prev_prio) |
905 | cpupri_set(&rq->rd->cpupri, rq->cpu, prio); | 912 | cpupri_set(&rq->rd->cpupri, rq->cpu, prio); |
906 | } | 913 | } |
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) | |||
910 | { | 917 | { |
911 | struct rq *rq = rq_of_rt_rq(rt_rq); | 918 | struct rq *rq = rq_of_rt_rq(rt_rq); |
912 | 919 | ||
920 | #ifdef CONFIG_RT_GROUP_SCHED | ||
921 | /* | ||
922 | * Change rq's cpupri only if rt_rq is the top queue. | ||
923 | */ | ||
924 | if (&rq->rt != rt_rq) | ||
925 | return; | ||
926 | #endif | ||
913 | if (rq->online && rt_rq->highest_prio.curr != prev_prio) | 927 | if (rq->online && rt_rq->highest_prio.curr != prev_prio) |
914 | cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); | 928 | cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); |
915 | } | 929 | } |
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S index 4aef390671cb..3e9868d47535 100644 --- a/kernel/system_certificates.S +++ b/kernel/system_certificates.S | |||
@@ -3,8 +3,18 @@ | |||
3 | 3 | ||
4 | __INITRODATA | 4 | __INITRODATA |
5 | 5 | ||
6 | .align 8 | ||
6 | .globl VMLINUX_SYMBOL(system_certificate_list) | 7 | .globl VMLINUX_SYMBOL(system_certificate_list) |
7 | VMLINUX_SYMBOL(system_certificate_list): | 8 | VMLINUX_SYMBOL(system_certificate_list): |
9 | __cert_list_start: | ||
8 | .incbin "kernel/x509_certificate_list" | 10 | .incbin "kernel/x509_certificate_list" |
9 | .globl VMLINUX_SYMBOL(system_certificate_list_end) | 11 | __cert_list_end: |
10 | VMLINUX_SYMBOL(system_certificate_list_end): | 12 | |
13 | .align 8 | ||
14 | .globl VMLINUX_SYMBOL(system_certificate_list_size) | ||
15 | VMLINUX_SYMBOL(system_certificate_list_size): | ||
16 | #ifdef CONFIG_64BIT | ||
17 | .quad __cert_list_end - __cert_list_start | ||
18 | #else | ||
19 | .long __cert_list_end - __cert_list_start | ||
20 | #endif | ||
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c index 564dd93430a2..52ebc70263f4 100644 --- a/kernel/system_keyring.c +++ b/kernel/system_keyring.c | |||
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring; | |||
22 | EXPORT_SYMBOL_GPL(system_trusted_keyring); | 22 | EXPORT_SYMBOL_GPL(system_trusted_keyring); |
23 | 23 | ||
24 | extern __initconst const u8 system_certificate_list[]; | 24 | extern __initconst const u8 system_certificate_list[]; |
25 | extern __initconst const u8 system_certificate_list_end[]; | 25 | extern __initconst const unsigned long system_certificate_list_size; |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Load the compiled-in keys | 28 | * Load the compiled-in keys |
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void) | |||
60 | 60 | ||
61 | pr_notice("Loading compiled-in X.509 certificates\n"); | 61 | pr_notice("Loading compiled-in X.509 certificates\n"); |
62 | 62 | ||
63 | end = system_certificate_list_end; | ||
64 | p = system_certificate_list; | 63 | p = system_certificate_list; |
64 | end = p + system_certificate_list_size; | ||
65 | while (p < end) { | 65 | while (p < end) { |
66 | /* Each cert begins with an ASN.1 SEQUENCE tag and must be more | 66 | /* Each cert begins with an ASN.1 SEQUENCE tag and must be more |
67 | * than 256 bytes in size. | 67 | * than 256 bytes in size. |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e9f9eaade2f..72a0f81dc5a8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void) | |||
775 | int cpu; | 775 | int cpu; |
776 | int ret = 0; | 776 | int ret = 0; |
777 | 777 | ||
778 | for_each_online_cpu(cpu) { | 778 | for_each_possible_cpu(cpu) { |
779 | ret = ftrace_profile_init_cpu(cpu); | 779 | ret = ftrace_profile_init_cpu(cpu); |
780 | if (ret) | 780 | if (ret) |
781 | break; | 781 | break; |
diff --git a/kernel/user.c b/kernel/user.c index a3a0dbfda329..c006131beb77 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = { | |||
51 | .owner = GLOBAL_ROOT_UID, | 51 | .owner = GLOBAL_ROOT_UID, |
52 | .group = GLOBAL_ROOT_GID, | 52 | .group = GLOBAL_ROOT_GID, |
53 | .proc_inum = PROC_USER_INIT_INO, | 53 | .proc_inum = PROC_USER_INIT_INO, |
54 | #ifdef CONFIG_KEYS_KERBEROS_CACHE | 54 | #ifdef CONFIG_PERSISTENT_KEYRINGS |
55 | .krb_cache_register_sem = | 55 | .persistent_keyring_register_sem = |
56 | __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), | 56 | __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem), |
57 | #endif | 57 | #endif |
58 | }; | 58 | }; |
59 | EXPORT_SYMBOL_GPL(init_user_ns); | 59 | EXPORT_SYMBOL_GPL(init_user_ns); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c66912be990f..b010eac595d2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2851,19 +2851,6 @@ already_gone: | |||
2851 | return false; | 2851 | return false; |
2852 | } | 2852 | } |
2853 | 2853 | ||
2854 | static bool __flush_work(struct work_struct *work) | ||
2855 | { | ||
2856 | struct wq_barrier barr; | ||
2857 | |||
2858 | if (start_flush_work(work, &barr)) { | ||
2859 | wait_for_completion(&barr.done); | ||
2860 | destroy_work_on_stack(&barr.work); | ||
2861 | return true; | ||
2862 | } else { | ||
2863 | return false; | ||
2864 | } | ||
2865 | } | ||
2866 | |||
2867 | /** | 2854 | /** |
2868 | * flush_work - wait for a work to finish executing the last queueing instance | 2855 | * flush_work - wait for a work to finish executing the last queueing instance |
2869 | * @work: the work to flush | 2856 | * @work: the work to flush |
@@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work) | |||
2877 | */ | 2864 | */ |
2878 | bool flush_work(struct work_struct *work) | 2865 | bool flush_work(struct work_struct *work) |
2879 | { | 2866 | { |
2867 | struct wq_barrier barr; | ||
2868 | |||
2880 | lock_map_acquire(&work->lockdep_map); | 2869 | lock_map_acquire(&work->lockdep_map); |
2881 | lock_map_release(&work->lockdep_map); | 2870 | lock_map_release(&work->lockdep_map); |
2882 | 2871 | ||
2883 | return __flush_work(work); | 2872 | if (start_flush_work(work, &barr)) { |
2873 | wait_for_completion(&barr.done); | ||
2874 | destroy_work_on_stack(&barr.work); | ||
2875 | return true; | ||
2876 | } else { | ||
2877 | return false; | ||
2878 | } | ||
2884 | } | 2879 | } |
2885 | EXPORT_SYMBOL_GPL(flush_work); | 2880 | EXPORT_SYMBOL_GPL(flush_work); |
2886 | 2881 | ||
@@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | |||
4832 | 4827 | ||
4833 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); | 4828 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); |
4834 | schedule_work_on(cpu, &wfc.work); | 4829 | schedule_work_on(cpu, &wfc.work); |
4835 | 4830 | flush_work(&wfc.work); | |
4836 | /* | ||
4837 | * The work item is on-stack and can't lead to deadlock through | ||
4838 | * flushing. Use __flush_work() to avoid spurious lockdep warnings | ||
4839 | * when work_on_cpu()s are nested. | ||
4840 | */ | ||
4841 | __flush_work(&wfc.work); | ||
4842 | |||
4843 | return wfc.ret; | 4831 | return wfc.ret; |
4844 | } | 4832 | } |
4845 | EXPORT_SYMBOL_GPL(work_on_cpu); | 4833 | EXPORT_SYMBOL_GPL(work_on_cpu); |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 17edeaf19180..1b6a44f1ec3e 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -759,8 +759,8 @@ all_leaves_cluster_together: | |||
759 | pr_devel("all leaves cluster together\n"); | 759 | pr_devel("all leaves cluster together\n"); |
760 | diff = INT_MAX; | 760 | diff = INT_MAX; |
761 | for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { | 761 | for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { |
762 | int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf), | 762 | int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]), |
763 | assoc_array_ptr_to_leaf(node->slots[i])); | 763 | index_key); |
764 | if (x < diff) { | 764 | if (x < diff) { |
765 | BUG_ON(x < 0); | 765 | BUG_ON(x < 0); |
766 | diff = x; | 766 | diff = x; |
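The lib/assoc_array.c fix above passes each existing leaf together with the index key being inserted to ops->diff_objects(), and the result is treated as a bit position (the BUG_ON(x < 0) insists the keys differ somewhere). As a rough illustration of a callback with that shape, byte-string keys, first differing bit, -1 on equality; this is a toy and is not tied to the kernel's actual key-chunk ordering:

    #include <stdio.h>

    /* Toy diff_objects(): index keys are 8-byte strings stored in the object;
     * return the first differing bit position, or -1 if they match. */
    struct obj { unsigned char key[8]; };

    static int my_diff_objects(const void *object, const void *index_key)
    {
        const unsigned char *a = ((const struct obj *)object)->key;
        const unsigned char *b = index_key;
        int i, bit;

        for (i = 0; i < 8; i++) {
            unsigned char x = a[i] ^ b[i];

            if (!x)
                continue;
            for (bit = 0; bit < 8; bit++)
                if (x & (1u << bit))
                    return i * 8 + bit;
        }
        return -1;
    }

    int main(void)
    {
        struct obj o = { .key = "abcdefg" };
        unsigned char k[8] = "abcdeff";

        printf("%d\n", my_diff_objects(&o, k));   /* 48: byte 6, bit 0 */
        return 0;
    }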
diff --git a/mm/Kconfig b/mm/Kconfig index eb69f352401d..723bbe04a0b0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -543,7 +543,7 @@ config ZSWAP | |||
543 | 543 | ||
544 | config MEM_SOFT_DIRTY | 544 | config MEM_SOFT_DIRTY |
545 | bool "Track memory changes" | 545 | bool "Track memory changes" |
546 | depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY | 546 | depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS |
547 | select PROC_PAGE_MONITOR | 547 | select PROC_PAGE_MONITOR |
548 | help | 548 | help |
549 | This option enables memory changes tracking by introducing a | 549 | This option enables memory changes tracking by introducing a |
diff --git a/mm/compaction.c b/mm/compaction.c index 805165bcd3dd..f58bcd016f43 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc, | |||
134 | bool migrate_scanner) | 134 | bool migrate_scanner) |
135 | { | 135 | { |
136 | struct zone *zone = cc->zone; | 136 | struct zone *zone = cc->zone; |
137 | |||
138 | if (cc->ignore_skip_hint) | ||
139 | return; | ||
140 | |||
137 | if (!page) | 141 | if (!page) |
138 | return; | 142 | return; |
139 | 143 | ||
diff --git a/mm/fremap.c b/mm/fremap.c index 5bff08147768..bbc4d660221a 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -208,9 +208,10 @@ get_write_lock: | |||
208 | if (mapping_cap_account_dirty(mapping)) { | 208 | if (mapping_cap_account_dirty(mapping)) { |
209 | unsigned long addr; | 209 | unsigned long addr; |
210 | struct file *file = get_file(vma->vm_file); | 210 | struct file *file = get_file(vma->vm_file); |
211 | /* mmap_region may free vma; grab the info now */ | ||
212 | vm_flags = vma->vm_flags; | ||
211 | 213 | ||
212 | addr = mmap_region(file, start, size, | 214 | addr = mmap_region(file, start, size, vm_flags, pgoff); |
213 | vma->vm_flags, pgoff); | ||
214 | fput(file); | 215 | fput(file); |
215 | if (IS_ERR_VALUE(addr)) { | 216 | if (IS_ERR_VALUE(addr)) { |
216 | err = addr; | 217 | err = addr; |
@@ -218,7 +219,7 @@ get_write_lock: | |||
218 | BUG_ON(addr != start); | 219 | BUG_ON(addr != start); |
219 | err = 0; | 220 | err = 0; |
220 | } | 221 | } |
221 | goto out; | 222 | goto out_freed; |
222 | } | 223 | } |
223 | mutex_lock(&mapping->i_mmap_mutex); | 224 | mutex_lock(&mapping->i_mmap_mutex); |
224 | flush_dcache_mmap_lock(mapping); | 225 | flush_dcache_mmap_lock(mapping); |
@@ -253,6 +254,7 @@ get_write_lock: | |||
253 | out: | 254 | out: |
254 | if (vma) | 255 | if (vma) |
255 | vm_flags = vma->vm_flags; | 256 | vm_flags = vma->vm_flags; |
257 | out_freed: | ||
256 | if (likely(!has_write_lock)) | 258 | if (likely(!has_write_lock)) |
257 | up_read(&mm->mmap_sem); | 259 | up_read(&mm->mmap_sem); |
258 | else | 260 | else |
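The fremap.c hunk above captures vma->vm_flags into a local before calling mmap_region(), which may free the vma, and then jumps past the code that would have dereferenced it again. The generic shape of that fix, shown on a toy structure with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct box { int flags; };

    /* may_free() stands in for a call like mmap_region() that can free the
     * object handed to it. */
    static int may_free(struct box *b)
    {
        int f = b->flags;
        free(b);
        return f;
    }

    int main(void)
    {
        struct box *b = malloc(sizeof(*b));
        if (!b)
            return 1;
        b->flags = 42;

        /* The fix: copy what is still needed *before* the call that may free it. */
        int flags = b->flags;
        int ret = may_free(b);

        /* Touching b->flags past this point would be a use-after-free. */
        printf("flags=%d ret=%d\n", flags, ret);
        return 0;
    }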
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bccd5a628ea6..95d1acb0f3d2 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -882,6 +882,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
882 | ret = 0; | 882 | ret = 0; |
883 | goto out_unlock; | 883 | goto out_unlock; |
884 | } | 884 | } |
885 | |||
885 | if (unlikely(pmd_trans_splitting(pmd))) { | 886 | if (unlikely(pmd_trans_splitting(pmd))) { |
886 | /* split huge page running from under us */ | 887 | /* split huge page running from under us */ |
887 | spin_unlock(src_ptl); | 888 | spin_unlock(src_ptl); |
@@ -1153,7 +1154,7 @@ alloc: | |||
1153 | new_page = NULL; | 1154 | new_page = NULL; |
1154 | 1155 | ||
1155 | if (unlikely(!new_page)) { | 1156 | if (unlikely(!new_page)) { |
1156 | if (is_huge_zero_pmd(orig_pmd)) { | 1157 | if (!page) { |
1157 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, | 1158 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, |
1158 | address, pmd, orig_pmd, haddr); | 1159 | address, pmd, orig_pmd, haddr); |
1159 | } else { | 1160 | } else { |
@@ -1180,7 +1181,7 @@ alloc: | |||
1180 | 1181 | ||
1181 | count_vm_event(THP_FAULT_ALLOC); | 1182 | count_vm_event(THP_FAULT_ALLOC); |
1182 | 1183 | ||
1183 | if (is_huge_zero_pmd(orig_pmd)) | 1184 | if (!page) |
1184 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); | 1185 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
1185 | else | 1186 | else |
1186 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); | 1187 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
@@ -1206,7 +1207,7 @@ alloc: | |||
1206 | page_add_new_anon_rmap(new_page, vma, haddr); | 1207 | page_add_new_anon_rmap(new_page, vma, haddr); |
1207 | set_pmd_at(mm, haddr, pmd, entry); | 1208 | set_pmd_at(mm, haddr, pmd, entry); |
1208 | update_mmu_cache_pmd(vma, address, pmd); | 1209 | update_mmu_cache_pmd(vma, address, pmd); |
1209 | if (is_huge_zero_pmd(orig_pmd)) { | 1210 | if (!page) { |
1210 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); | 1211 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
1211 | put_huge_zero_page(); | 1212 | put_huge_zero_page(); |
1212 | } else { | 1213 | } else { |
@@ -1243,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | |||
1243 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) | 1244 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) |
1244 | return ERR_PTR(-EFAULT); | 1245 | return ERR_PTR(-EFAULT); |
1245 | 1246 | ||
1247 | /* Full NUMA hinting faults to serialise migration in fault paths */ | ||
1248 | if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) | ||
1249 | goto out; | ||
1250 | |||
1246 | page = pmd_page(*pmd); | 1251 | page = pmd_page(*pmd); |
1247 | VM_BUG_ON(!PageHead(page)); | 1252 | VM_BUG_ON(!PageHead(page)); |
1248 | if (flags & FOLL_TOUCH) { | 1253 | if (flags & FOLL_TOUCH) { |
@@ -1295,6 +1300,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1295 | if (unlikely(!pmd_same(pmd, *pmdp))) | 1300 | if (unlikely(!pmd_same(pmd, *pmdp))) |
1296 | goto out_unlock; | 1301 | goto out_unlock; |
1297 | 1302 | ||
1303 | /* | ||
1304 | * If there are potential migrations, wait for completion and retry | ||
1305 | * without disrupting NUMA hinting information. Do not relock and | ||
1306 | * check_same as the page may no longer be mapped. | ||
1307 | */ | ||
1308 | if (unlikely(pmd_trans_migrating(*pmdp))) { | ||
1309 | spin_unlock(ptl); | ||
1310 | wait_migrate_huge_page(vma->anon_vma, pmdp); | ||
1311 | goto out; | ||
1312 | } | ||
1313 | |||
1298 | page = pmd_page(pmd); | 1314 | page = pmd_page(pmd); |
1299 | BUG_ON(is_huge_zero_page(page)); | 1315 | BUG_ON(is_huge_zero_page(page)); |
1300 | page_nid = page_to_nid(page); | 1316 | page_nid = page_to_nid(page); |
@@ -1323,23 +1339,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1323 | /* If the page was locked, there are no parallel migrations */ | 1339 | /* If the page was locked, there are no parallel migrations */ |
1324 | if (page_locked) | 1340 | if (page_locked) |
1325 | goto clear_pmdnuma; | 1341 | goto clear_pmdnuma; |
1342 | } | ||
1326 | 1343 | ||
1327 | /* | 1344 | /* Migration could have started since the pmd_trans_migrating check */ |
1328 | * Otherwise wait for potential migrations and retry. We do | 1345 | if (!page_locked) { |
1329 | * relock and check_same as the page may no longer be mapped. | ||
1330 | * As the fault is being retried, do not account for it. | ||
1331 | */ | ||
1332 | spin_unlock(ptl); | 1346 | spin_unlock(ptl); |
1333 | wait_on_page_locked(page); | 1347 | wait_on_page_locked(page); |
1334 | page_nid = -1; | 1348 | page_nid = -1; |
1335 | goto out; | 1349 | goto out; |
1336 | } | 1350 | } |
1337 | 1351 | ||
1338 | /* Page is misplaced, serialise migrations and parallel THP splits */ | 1352 | /* |
1353 | * Page is misplaced. Page lock serialises migrations. Acquire anon_vma | ||
1354 | * to serialise splits | ||
1355 | */ | ||
1339 | get_page(page); | 1356 | get_page(page); |
1340 | spin_unlock(ptl); | 1357 | spin_unlock(ptl); |
1341 | if (!page_locked) | ||
1342 | lock_page(page); | ||
1343 | anon_vma = page_lock_anon_vma_read(page); | 1358 | anon_vma = page_lock_anon_vma_read(page); |
1344 | 1359 | ||
1345 | /* Confirm the PMD did not change while page_table_lock was released */ | 1360 | /* Confirm the PMD did not change while page_table_lock was released */ |
@@ -1351,6 +1366,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1351 | goto out_unlock; | 1366 | goto out_unlock; |
1352 | } | 1367 | } |
1353 | 1368 | ||
1369 | /* Bail if we fail to protect against THP splits for any reason */ | ||
1370 | if (unlikely(!anon_vma)) { | ||
1371 | put_page(page); | ||
1372 | page_nid = -1; | ||
1373 | goto clear_pmdnuma; | ||
1374 | } | ||
1375 | |||
1354 | /* | 1376 | /* |
1355 | * Migrate the THP to the requested node, returns with page unlocked | 1377 | * Migrate the THP to the requested node, returns with page unlocked |
1356 | * and pmd_numa cleared. | 1378 | * and pmd_numa cleared. |
@@ -1481,8 +1503,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, | |||
1481 | pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); | 1503 | pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); |
1482 | VM_BUG_ON(!pmd_none(*new_pmd)); | 1504 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1483 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); | 1505 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
1484 | if (new_ptl != old_ptl) | 1506 | if (new_ptl != old_ptl) { |
1507 | pgtable_t pgtable; | ||
1508 | |||
1509 | /* | ||
1510 | * Move preallocated PTE page table if new_pmd is on | ||
1511 | * different PMD page table. | ||
1512 | */ | ||
1513 | pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); | ||
1514 | pgtable_trans_huge_deposit(mm, new_pmd, pgtable); | ||
1515 | |||
1485 | spin_unlock(new_ptl); | 1516 | spin_unlock(new_ptl); |
1517 | } | ||
1486 | spin_unlock(old_ptl); | 1518 | spin_unlock(old_ptl); |
1487 | } | 1519 | } |
1488 | out: | 1520 | out: |
@@ -1507,6 +1539,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1507 | ret = 1; | 1539 | ret = 1; |
1508 | if (!prot_numa) { | 1540 | if (!prot_numa) { |
1509 | entry = pmdp_get_and_clear(mm, addr, pmd); | 1541 | entry = pmdp_get_and_clear(mm, addr, pmd); |
1542 | if (pmd_numa(entry)) | ||
1543 | entry = pmd_mknonnuma(entry); | ||
1510 | entry = pmd_modify(entry, newprot); | 1544 | entry = pmd_modify(entry, newprot); |
1511 | ret = HPAGE_PMD_NR; | 1545 | ret = HPAGE_PMD_NR; |
1512 | BUG_ON(pmd_write(entry)); | 1546 | BUG_ON(pmd_write(entry)); |
@@ -1521,7 +1555,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1521 | */ | 1555 | */ |
1522 | if (!is_huge_zero_page(page) && | 1556 | if (!is_huge_zero_page(page) && |
1523 | !pmd_numa(*pmd)) { | 1557 | !pmd_numa(*pmd)) { |
1524 | entry = pmdp_get_and_clear(mm, addr, pmd); | 1558 | entry = *pmd; |
1525 | entry = pmd_mknuma(entry); | 1559 | entry = pmd_mknuma(entry); |
1526 | ret = HPAGE_PMD_NR; | 1560 | ret = HPAGE_PMD_NR; |
1527 | } | 1561 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f1a0ae6e11b8..7f1a356153c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -338,7 +338,7 @@ struct mem_cgroup { | |||
338 | static size_t memcg_size(void) | 338 | static size_t memcg_size(void) |
339 | { | 339 | { |
340 | return sizeof(struct mem_cgroup) + | 340 | return sizeof(struct mem_cgroup) + |
341 | nr_node_ids * sizeof(struct mem_cgroup_per_node); | 341 | nr_node_ids * sizeof(struct mem_cgroup_per_node *); |
342 | } | 342 | } |
343 | 343 | ||
344 | /* internal only representation about the status of kmem accounting. */ | 344 | /* internal only representation about the status of kmem accounting. */ |
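
The memcg_size() change above matters because the trailing nodeinfo array stores per-node pointers, not the per-node structures themselves. A minimal sketch of the difference, with made-up structure names and sizes:

    #include <stdio.h>
    #include <stdlib.h>

    struct per_node { char stats[128]; };     /* stand-in for a large per-node struct */

    struct cgroup {
        int id;
        struct per_node *nodeinfo[];          /* trailing array of POINTERS */
    };

    int main(void)
    {
        int nr_node_ids = 4;

        /* Wrong: sizes the trailing array as if it held the structures themselves. */
        size_t oversized = sizeof(struct cgroup) + nr_node_ids * sizeof(struct per_node);

        /* Right: each slot is only a pointer; the per-node structs live elsewhere. */
        size_t correct = sizeof(struct cgroup) + nr_node_ids * sizeof(struct per_node *);

        printf("oversized: %zu bytes, correct: %zu bytes\n", oversized, correct);

        struct cgroup *cg = calloc(1, correct);
        if (!cg)
            return 1;
        for (int n = 0; n < nr_node_ids; n++)
            cg->nodeinfo[n] = NULL;           /* pointers are populated separately */
        free(cg);
        return 0;
    }
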
@@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, | |||
2694 | goto bypass; | 2694 | goto bypass; |
2695 | 2695 | ||
2696 | if (unlikely(task_in_memcg_oom(current))) | 2696 | if (unlikely(task_in_memcg_oom(current))) |
2697 | goto bypass; | 2697 | goto nomem; |
2698 | |||
2699 | if (gfp_mask & __GFP_NOFAIL) | ||
2700 | oom = false; | ||
2698 | 2701 | ||
2699 | /* | 2702 | /* |
2700 | * We always charge the cgroup the mm_struct belongs to. | 2703 | * We always charge the cgroup the mm_struct belongs to. |
@@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
6352 | static void mem_cgroup_css_free(struct cgroup_subsys_state *css) | 6355 | static void mem_cgroup_css_free(struct cgroup_subsys_state *css) |
6353 | { | 6356 | { |
6354 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 6357 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
6358 | /* | ||
6359 | * XXX: css_offline() would be where we should reparent all | ||
6360 | * memory to prepare the cgroup for destruction. However, | ||
6361 | * memcg does not do css_tryget() and res_counter charging | ||
6362 | * under the same RCU lock region, which means that charging | ||
6363 | * could race with offlining. Offlining only happens to | ||
6364 | * cgroups with no tasks in them but charges can show up | ||
6365 | * without any tasks from the swapin path when the target | ||
6366 | * memcg is looked up from the swapout record and not from the | ||
6367 | * current task as it usually is. A race like this can leak | ||
6368 | * charges and put pages with stale cgroup pointers into | ||
6369 | * circulation: | ||
6370 | * | ||
6371 | * #0 #1 | ||
6372 | * lookup_swap_cgroup_id() | ||
6373 | * rcu_read_lock() | ||
6374 | * mem_cgroup_lookup() | ||
6375 | * css_tryget() | ||
6376 | * rcu_read_unlock() | ||
6377 | * disable css_tryget() | ||
6378 | * call_rcu() | ||
6379 | * offline_css() | ||
6380 | * reparent_charges() | ||
6381 | * res_counter_charge() | ||
6382 | * css_put() | ||
6383 | * css_free() | ||
6384 | * pc->mem_cgroup = dead memcg | ||
6385 | * add page to lru | ||
6386 | * | ||
6387 | * The bulk of the charges are still moved in offline_css() to | ||
6388 | * avoid pinning a lot of pages in case a long-term reference | ||
6389 | * like a swapout record is deferring the css_free() to long | ||
6390 | * after offlining. But this makes sure we catch any charges | ||
6391 | * made after offlining: | ||
6392 | */ | ||
6393 | mem_cgroup_reparent_charges(memcg); | ||
6355 | 6394 | ||
6356 | memcg_destroy_kmem(memcg); | 6395 | memcg_destroy_kmem(memcg); |
6357 | __mem_cgroup_free(memcg); | 6396 | __mem_cgroup_free(memcg); |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7c171602ba1..fabe55046c1d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
938 | BUG_ON(!PageHWPoison(p)); | 938 | BUG_ON(!PageHWPoison(p)); |
939 | return SWAP_FAIL; | 939 | return SWAP_FAIL; |
940 | } | 940 | } |
941 | /* | ||
942 | * We pinned the head page for hwpoison handling, | ||
943 | * now we split the thp and we are interested in | ||
944 | * the hwpoisoned raw page, so move the refcount | ||
945 | * to it. | ||
946 | */ | ||
947 | if (hpage != p) { | ||
948 | put_page(hpage); | ||
949 | get_page(p); | ||
950 | } | ||
941 | /* THP is split, so ppage should be the real poisoned page. */ | 951 | /* THP is split, so ppage should be the real poisoned page. */ |
942 | ppage = p; | 952 | ppage = p; |
943 | } | 953 | } |
@@ -1505,10 +1515,16 @@ static int soft_offline_huge_page(struct page *page, int flags) | |||
1505 | if (ret > 0) | 1515 | if (ret > 0) |
1506 | ret = -EIO; | 1516 | ret = -EIO; |
1507 | } else { | 1517 | } else { |
1508 | set_page_hwpoison_huge_page(hpage); | 1518 | /* overcommit hugetlb page will be freed to buddy */ |
1509 | dequeue_hwpoisoned_huge_page(hpage); | 1519 | if (PageHuge(page)) { |
1510 | atomic_long_add(1 << compound_order(hpage), | 1520 | set_page_hwpoison_huge_page(hpage); |
1511 | &num_poisoned_pages); | 1521 | dequeue_hwpoisoned_huge_page(hpage); |
1522 | atomic_long_add(1 << compound_order(hpage), | ||
1523 | &num_poisoned_pages); | ||
1524 | } else { | ||
1525 | SetPageHWPoison(page); | ||
1526 | atomic_long_inc(&num_poisoned_pages); | ||
1527 | } | ||
1512 | } | 1528 | } |
1513 | return ret; | 1529 | return ret; |
1514 | } | 1530 | } |
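
The put_page(hpage)/get_page(p) pair added above transfers the pin taken on the compound head to the raw poisoned subpage once the THP has been split. A toy sketch of that reference transfer (plain counters, not struct page):

    #include <stdio.h>

    /* Toy pages with plain reference counts; "pinned" means one extra reference. */
    struct toy_page { int refcount; };

    static void get_page_stub(struct toy_page *p) { p->refcount++; }
    static void put_page_stub(struct toy_page *p) { p->refcount--; }

    int main(void)
    {
        struct toy_page head = { .refcount = 1 };  /* compound head, base ref */
        struct toy_page raw  = { .refcount = 1 };  /* the subpage that is poisoned */

        get_page_stub(&head);        /* pin taken on the head before the split */

        /* After the split, the page of interest is the raw subpage, so move the
         * pin there, mirroring the put_page(hpage)/get_page(p) pair above. */
        put_page_stub(&head);
        get_page_stub(&raw);

        printf("head refs: %d, raw refs: %d\n", head.refcount, raw.refcount);
        return 0;
    }
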
diff --git a/mm/memory.c b/mm/memory.c index 5d9025f3b3e1..6768ce9e57d2 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src, | |||
4271 | } | 4271 | } |
4272 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | 4272 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
4273 | 4273 | ||
4274 | #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS | 4274 | #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS |
4275 | bool ptlock_alloc(struct page *page) | 4275 | bool ptlock_alloc(struct page *page) |
4276 | { | 4276 | { |
4277 | spinlock_t *ptl; | 4277 | spinlock_t *ptl; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eca4a3129129..0cd2c4d4e270 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * | |||
1197 | break; | 1197 | break; |
1198 | vma = vma->vm_next; | 1198 | vma = vma->vm_next; |
1199 | } | 1199 | } |
1200 | |||
1201 | if (PageHuge(page)) { | ||
1202 | if (vma) | ||
1203 | return alloc_huge_page_noerr(vma, address, 1); | ||
1204 | else | ||
1205 | return NULL; | ||
1206 | } | ||
1200 | /* | 1207 | /* |
1201 | * queue_pages_range() confirms that @page belongs to some vma, | 1208 | * if !vma, alloc_page_vma() will use task or system default policy |
1202 | * so vma shouldn't be NULL. | ||
1203 | */ | 1209 | */ |
1204 | BUG_ON(!vma); | ||
1205 | |||
1206 | if (PageHuge(page)) | ||
1207 | return alloc_huge_page_noerr(vma, address, 1); | ||
1208 | return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 1210 | return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
1209 | } | 1211 | } |
1210 | #else | 1212 | #else |
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len, | |||
1318 | if (nr_failed && (flags & MPOL_MF_STRICT)) | 1320 | if (nr_failed && (flags & MPOL_MF_STRICT)) |
1319 | err = -EIO; | 1321 | err = -EIO; |
1320 | } else | 1322 | } else |
1321 | putback_lru_pages(&pagelist); | 1323 | putback_movable_pages(&pagelist); |
1322 | 1324 | ||
1323 | up_write(&mm->mmap_sem); | 1325 | up_write(&mm->mmap_sem); |
1324 | mpol_out: | 1326 | mpol_out: |
diff --git a/mm/migrate.c b/mm/migrate.c index bb940045fe85..9194375b2307 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/hugetlb_cgroup.h> | 36 | #include <linux/hugetlb_cgroup.h> |
37 | #include <linux/gfp.h> | 37 | #include <linux/gfp.h> |
38 | #include <linux/balloon_compaction.h> | 38 | #include <linux/balloon_compaction.h> |
39 | #include <linux/mmu_notifier.h> | ||
39 | 40 | ||
40 | #include <asm/tlbflush.h> | 41 | #include <asm/tlbflush.h> |
41 | 42 | ||
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
316 | */ | 317 | */ |
317 | int migrate_page_move_mapping(struct address_space *mapping, | 318 | int migrate_page_move_mapping(struct address_space *mapping, |
318 | struct page *newpage, struct page *page, | 319 | struct page *newpage, struct page *page, |
319 | struct buffer_head *head, enum migrate_mode mode) | 320 | struct buffer_head *head, enum migrate_mode mode, |
321 | int extra_count) | ||
320 | { | 322 | { |
321 | int expected_count = 0; | 323 | int expected_count = 1 + extra_count; |
322 | void **pslot; | 324 | void **pslot; |
323 | 325 | ||
324 | if (!mapping) { | 326 | if (!mapping) { |
325 | /* Anonymous page without mapping */ | 327 | /* Anonymous page without mapping */ |
326 | if (page_count(page) != 1) | 328 | if (page_count(page) != expected_count) |
327 | return -EAGAIN; | 329 | return -EAGAIN; |
328 | return MIGRATEPAGE_SUCCESS; | 330 | return MIGRATEPAGE_SUCCESS; |
329 | } | 331 | } |
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
333 | pslot = radix_tree_lookup_slot(&mapping->page_tree, | 335 | pslot = radix_tree_lookup_slot(&mapping->page_tree, |
334 | page_index(page)); | 336 | page_index(page)); |
335 | 337 | ||
336 | expected_count = 2 + page_has_private(page); | 338 | expected_count += 1 + page_has_private(page); |
337 | if (page_count(page) != expected_count || | 339 | if (page_count(page) != expected_count || |
338 | radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { | 340 | radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { |
339 | spin_unlock_irq(&mapping->tree_lock); | 341 | spin_unlock_irq(&mapping->tree_lock); |
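
The expected_count bookkeeping above is easiest to read as plain arithmetic: one reference for the migration caller, any extra references the caller declares, plus the page-cache reference (and one more for private buffers) on mapped pages. A standalone sketch with the counts as integers, illustrative only and without any real struct page state or locking:

    #include <stdbool.h>
    #include <stdio.h>

    static bool refcount_allows_migration(int page_count, bool has_mapping,
                                          bool has_private, int extra_count)
    {
        int expected = 1 + extra_count;           /* caller's own reference(s) */

        if (!has_mapping)                         /* anonymous page without mapping */
            return page_count == expected;

        expected += 1 + (has_private ? 1 : 0);    /* page cache ref (+ buffers) */
        return page_count == expected;
    }

    int main(void)
    {
        /* Mapped page with buffer heads, no extra refs: exactly 3 expected. */
        printf("%d\n", refcount_allows_migration(3, true, true, 0));
        /* Someone else still holds a reference: the check must fail (-EAGAIN). */
        printf("%d\n", refcount_allows_migration(4, true, true, 0));
        return 0;
    }
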
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping, | |||
583 | 585 | ||
584 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ | 586 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ |
585 | 587 | ||
586 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); | 588 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); |
587 | 589 | ||
588 | if (rc != MIGRATEPAGE_SUCCESS) | 590 | if (rc != MIGRATEPAGE_SUCCESS) |
589 | return rc; | 591 | return rc; |
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping, | |||
610 | 612 | ||
611 | head = page_buffers(page); | 613 | head = page_buffers(page); |
612 | 614 | ||
613 | rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); | 615 | rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); |
614 | 616 | ||
615 | if (rc != MIGRATEPAGE_SUCCESS) | 617 | if (rc != MIGRATEPAGE_SUCCESS) |
616 | return rc; | 618 | return rc; |
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) | |||
1654 | return 1; | 1656 | return 1; |
1655 | } | 1657 | } |
1656 | 1658 | ||
1659 | bool pmd_trans_migrating(pmd_t pmd) | ||
1660 | { | ||
1661 | struct page *page = pmd_page(pmd); | ||
1662 | return PageLocked(page); | ||
1663 | } | ||
1664 | |||
1665 | void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) | ||
1666 | { | ||
1667 | struct page *page = pmd_page(*pmd); | ||
1668 | wait_on_page_locked(page); | ||
1669 | } | ||
1670 | |||
1657 | /* | 1671 | /* |
1658 | * Attempt to migrate a misplaced page to the specified destination | 1672 | * Attempt to migrate a misplaced page to the specified destination |
1659 | * node. Caller is expected to have an elevated reference count on | 1673 | * node. Caller is expected to have an elevated reference count on |
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1716 | struct page *page, int node) | 1730 | struct page *page, int node) |
1717 | { | 1731 | { |
1718 | spinlock_t *ptl; | 1732 | spinlock_t *ptl; |
1719 | unsigned long haddr = address & HPAGE_PMD_MASK; | ||
1720 | pg_data_t *pgdat = NODE_DATA(node); | 1733 | pg_data_t *pgdat = NODE_DATA(node); |
1721 | int isolated = 0; | 1734 | int isolated = 0; |
1722 | struct page *new_page = NULL; | 1735 | struct page *new_page = NULL; |
1723 | struct mem_cgroup *memcg = NULL; | 1736 | struct mem_cgroup *memcg = NULL; |
1724 | int page_lru = page_is_file_cache(page); | 1737 | int page_lru = page_is_file_cache(page); |
1738 | unsigned long mmun_start = address & HPAGE_PMD_MASK; | ||
1739 | unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; | ||
1740 | pmd_t orig_entry; | ||
1725 | 1741 | ||
1726 | /* | 1742 | /* |
1727 | * Rate-limit the amount of data that is being migrated to a node. | 1743 | * Rate-limit the amount of data that is being migrated to a node. |
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1744 | goto out_fail; | 1760 | goto out_fail; |
1745 | } | 1761 | } |
1746 | 1762 | ||
1763 | if (mm_tlb_flush_pending(mm)) | ||
1764 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
1765 | |||
1747 | /* Prepare a page as a migration target */ | 1766 | /* Prepare a page as a migration target */ |
1748 | __set_page_locked(new_page); | 1767 | __set_page_locked(new_page); |
1749 | SetPageSwapBacked(new_page); | 1768 | SetPageSwapBacked(new_page); |
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1755 | WARN_ON(PageLRU(new_page)); | 1774 | WARN_ON(PageLRU(new_page)); |
1756 | 1775 | ||
1757 | /* Recheck the target PMD */ | 1776 | /* Recheck the target PMD */ |
1777 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
1758 | ptl = pmd_lock(mm, pmd); | 1778 | ptl = pmd_lock(mm, pmd); |
1759 | if (unlikely(!pmd_same(*pmd, entry))) { | 1779 | if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { |
1780 | fail_putback: | ||
1760 | spin_unlock(ptl); | 1781 | spin_unlock(ptl); |
1782 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
1761 | 1783 | ||
1762 | /* Reverse changes made by migrate_page_copy() */ | 1784 | /* Reverse changes made by migrate_page_copy() */ |
1763 | if (TestClearPageActive(new_page)) | 1785 | if (TestClearPageActive(new_page)) |
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1774 | putback_lru_page(page); | 1796 | putback_lru_page(page); |
1775 | mod_zone_page_state(page_zone(page), | 1797 | mod_zone_page_state(page_zone(page), |
1776 | NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); | 1798 | NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); |
1777 | goto out_fail; | 1799 | |
1800 | goto out_unlock; | ||
1778 | } | 1801 | } |
1779 | 1802 | ||
1780 | /* | 1803 | /* |
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1786 | */ | 1809 | */ |
1787 | mem_cgroup_prepare_migration(page, new_page, &memcg); | 1810 | mem_cgroup_prepare_migration(page, new_page, &memcg); |
1788 | 1811 | ||
1812 | orig_entry = *pmd; | ||
1789 | entry = mk_pmd(new_page, vma->vm_page_prot); | 1813 | entry = mk_pmd(new_page, vma->vm_page_prot); |
1790 | entry = pmd_mknonnuma(entry); | ||
1791 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
1792 | entry = pmd_mkhuge(entry); | 1814 | entry = pmd_mkhuge(entry); |
1815 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
1793 | 1816 | ||
1794 | pmdp_clear_flush(vma, haddr, pmd); | 1817 | /* |
1795 | set_pmd_at(mm, haddr, pmd, entry); | 1818 | * Clear the old entry under pagetable lock and establish the new PTE. |
1796 | page_add_new_anon_rmap(new_page, vma, haddr); | 1819 | * Any parallel GUP will either observe the old page blocking on the |
1820 | * page lock, block on the page table lock or observe the new page. | ||
1821 | * The SetPageUptodate on the new page and page_add_new_anon_rmap | ||
1822 | * guarantee the copy is visible before the pagetable update. | ||
1823 | */ | ||
1824 | flush_cache_range(vma, mmun_start, mmun_end); | ||
1825 | page_add_new_anon_rmap(new_page, vma, mmun_start); | ||
1826 | pmdp_clear_flush(vma, mmun_start, pmd); | ||
1827 | set_pmd_at(mm, mmun_start, pmd, entry); | ||
1828 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
1797 | update_mmu_cache_pmd(vma, address, &entry); | 1829 | update_mmu_cache_pmd(vma, address, &entry); |
1830 | |||
1831 | if (page_count(page) != 2) { | ||
1832 | set_pmd_at(mm, mmun_start, pmd, orig_entry); | ||
1833 | flush_tlb_range(vma, mmun_start, mmun_end); | ||
1834 | update_mmu_cache_pmd(vma, address, &entry); | ||
1835 | page_remove_rmap(new_page); | ||
1836 | goto fail_putback; | ||
1837 | } | ||
1838 | |||
1798 | page_remove_rmap(page); | 1839 | page_remove_rmap(page); |
1840 | |||
1799 | /* | 1841 | /* |
1800 | * Finish the charge transaction under the page table lock to | 1842 | * Finish the charge transaction under the page table lock to |
1801 | * prevent split_huge_page() from dividing up the charge | 1843 | * prevent split_huge_page() from dividing up the charge |
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1803 | */ | 1845 | */ |
1804 | mem_cgroup_end_migration(memcg, page, new_page, true); | 1846 | mem_cgroup_end_migration(memcg, page, new_page, true); |
1805 | spin_unlock(ptl); | 1847 | spin_unlock(ptl); |
1848 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
1806 | 1849 | ||
1807 | unlock_page(new_page); | 1850 | unlock_page(new_page); |
1808 | unlock_page(page); | 1851 | unlock_page(page); |
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1820 | out_fail: | 1863 | out_fail: |
1821 | count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); | 1864 | count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); |
1822 | out_dropref: | 1865 | out_dropref: |
1823 | entry = pmd_mknonnuma(entry); | 1866 | ptl = pmd_lock(mm, pmd); |
1824 | set_pmd_at(mm, haddr, pmd, entry); | 1867 | if (pmd_same(*pmd, entry)) { |
1825 | update_mmu_cache_pmd(vma, address, &entry); | 1868 | entry = pmd_mknonnuma(entry); |
1869 | set_pmd_at(mm, mmun_start, pmd, entry); | ||
1870 | update_mmu_cache_pmd(vma, address, &entry); | ||
1871 | } | ||
1872 | spin_unlock(ptl); | ||
1826 | 1873 | ||
1874 | out_unlock: | ||
1827 | unlock_page(page); | 1875 | unlock_page(page); |
1828 | put_page(page); | 1876 | put_page(page); |
1829 | return 0; | 1877 | return 0; |
diff --git a/mm/mlock.c b/mm/mlock.c index d480cd6fc475..192e6eebe4f2 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page) | |||
133 | 133 | ||
134 | /** | 134 | /** |
135 | * munlock_vma_page - munlock a vma page | 135 | * munlock_vma_page - munlock a vma page |
136 | * @page - page to be unlocked | 136 | * @page - page to be unlocked, either a normal page or THP page head |
137 | * | ||
138 | * returns the size of the page as a page mask (0 for normal page, | ||
139 | * HPAGE_PMD_NR - 1 for THP head page) | ||
137 | * | 140 | * |
138 | * called from munlock()/munmap() path with page supposedly on the LRU. | 141 | * called from munlock()/munmap() path with page supposedly on the LRU. |
139 | * When we munlock a page, because the vma where we found the page is being | 142 | * When we munlock a page, because the vma where we found the page is being |
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page) | |||
148 | */ | 151 | */ |
149 | unsigned int munlock_vma_page(struct page *page) | 152 | unsigned int munlock_vma_page(struct page *page) |
150 | { | 153 | { |
151 | unsigned int page_mask = 0; | 154 | unsigned int nr_pages; |
152 | 155 | ||
153 | BUG_ON(!PageLocked(page)); | 156 | BUG_ON(!PageLocked(page)); |
154 | 157 | ||
155 | if (TestClearPageMlocked(page)) { | 158 | if (TestClearPageMlocked(page)) { |
156 | unsigned int nr_pages = hpage_nr_pages(page); | 159 | nr_pages = hpage_nr_pages(page); |
157 | mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); | 160 | mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); |
158 | page_mask = nr_pages - 1; | ||
159 | if (!isolate_lru_page(page)) | 161 | if (!isolate_lru_page(page)) |
160 | __munlock_isolated_page(page); | 162 | __munlock_isolated_page(page); |
161 | else | 163 | else |
162 | __munlock_isolation_failed(page); | 164 | __munlock_isolation_failed(page); |
165 | } else { | ||
166 | nr_pages = hpage_nr_pages(page); | ||
163 | } | 167 | } |
164 | 168 | ||
165 | return page_mask; | 169 | /* |
170 | * Regardless of the original PageMlocked flag, we determine nr_pages | ||
171 | * after touching the flag. This leaves a possible race with a THP page | ||
172 | * split, such that a whole THP page was munlocked, but nr_pages == 1. | ||
173 | * Returning a smaller mask due to that is OK, the worst that can | ||
174 | * happen is subsequent useless scanning of the former tail pages. | ||
175 | * The NR_MLOCK accounting can however become broken. | ||
176 | */ | ||
177 | return nr_pages - 1; | ||
166 | } | 178 | } |
167 | 179 | ||
168 | /** | 180 | /** |
@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
286 | { | 298 | { |
287 | int i; | 299 | int i; |
288 | int nr = pagevec_count(pvec); | 300 | int nr = pagevec_count(pvec); |
289 | int delta_munlocked = -nr; | 301 | int delta_munlocked; |
290 | struct pagevec pvec_putback; | 302 | struct pagevec pvec_putback; |
291 | int pgrescued = 0; | 303 | int pgrescued = 0; |
292 | 304 | ||
305 | pagevec_init(&pvec_putback, 0); | ||
306 | |||
293 | /* Phase 1: page isolation */ | 307 | /* Phase 1: page isolation */ |
294 | spin_lock_irq(&zone->lru_lock); | 308 | spin_lock_irq(&zone->lru_lock); |
295 | for (i = 0; i < nr; i++) { | 309 | for (i = 0; i < nr; i++) { |
@@ -318,18 +332,21 @@ skip_munlock: | |||
318 | /* | 332 | /* |
319 | * We won't be munlocking this page in the next phase | 333 | * We won't be munlocking this page in the next phase |
320 | * but we still need to release the follow_page_mask() | 334 | * but we still need to release the follow_page_mask() |
321 | * pin. | 335 | * pin. We cannot do it under lru_lock however. If it's |
336 | * the last pin, __page_cache_release would deadlock. | ||
322 | */ | 337 | */ |
338 | pagevec_add(&pvec_putback, pvec->pages[i]); | ||
323 | pvec->pages[i] = NULL; | 339 | pvec->pages[i] = NULL; |
324 | put_page(page); | ||
325 | delta_munlocked++; | ||
326 | } | 340 | } |
327 | } | 341 | } |
342 | delta_munlocked = -nr + pagevec_count(&pvec_putback); | ||
328 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); | 343 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); |
329 | spin_unlock_irq(&zone->lru_lock); | 344 | spin_unlock_irq(&zone->lru_lock); |
330 | 345 | ||
346 | /* Now we can release pins of pages that we are not munlocking */ | ||
347 | pagevec_release(&pvec_putback); | ||
348 | |||
331 | /* Phase 2: page munlock */ | 349 | /* Phase 2: page munlock */ |
332 | pagevec_init(&pvec_putback, 0); | ||
333 | for (i = 0; i < nr; i++) { | 350 | for (i = 0; i < nr; i++) { |
334 | struct page *page = pvec->pages[i]; | 351 | struct page *page = pvec->pages[i]; |
335 | 352 | ||
@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
440 | 457 | ||
441 | while (start < end) { | 458 | while (start < end) { |
442 | struct page *page = NULL; | 459 | struct page *page = NULL; |
443 | unsigned int page_mask, page_increm; | 460 | unsigned int page_mask; |
461 | unsigned long page_increm; | ||
444 | struct pagevec pvec; | 462 | struct pagevec pvec; |
445 | struct zone *zone; | 463 | struct zone *zone; |
446 | int zoneid; | 464 | int zoneid; |
@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
490 | goto next; | 508 | goto next; |
491 | } | 509 | } |
492 | } | 510 | } |
493 | page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); | 511 | /* It's a bug to munlock in the middle of a THP page */ |
512 | VM_BUG_ON((start >> PAGE_SHIFT) & page_mask); | ||
513 | page_increm = 1 + page_mask; | ||
494 | start += page_increm * PAGE_SIZE; | 514 | start += page_increm * PAGE_SIZE; |
495 | next: | 515 | next: |
496 | cond_resched(); | 516 | cond_resched(); |
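
The page_increm change above relies on munlock always starting at a THP head, which the new VM_BUG_ON asserts. A small sketch with made-up constants showing that, for an aligned start address, the old address-derived formula and the new mask-only formula agree:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define HPAGE_PMD_NR 512            /* e.g. 2MB THP with 4KB base pages */

    int main(void)
    {
        unsigned long start = 0x200000;             /* THP-aligned user address */
        unsigned int page_mask = HPAGE_PMD_NR - 1;  /* mask returned for a THP head */

        /* Old formula: recover the remaining page count from the address bits. */
        unsigned long old_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

        /* New formula: munlock starts at the THP head, so the mask alone suffices;
         * the VM_BUG_ON in the hunk above is exactly this alignment assertion. */
        assert(((start >> PAGE_SHIFT) & page_mask) == 0);
        unsigned long new_increm = 1 + page_mask;

        printf("old: %lu pages, new: %lu pages\n", old_increm, new_increm);
        return old_increm == new_increm ? 0 : 1;
    }
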
diff --git a/mm/mprotect.c b/mm/mprotect.c index 26667971c824..bb53a6591aea 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
52 | pte_t ptent; | 52 | pte_t ptent; |
53 | bool updated = false; | 53 | bool updated = false; |
54 | 54 | ||
55 | ptent = ptep_modify_prot_start(mm, addr, pte); | ||
56 | if (!prot_numa) { | 55 | if (!prot_numa) { |
56 | ptent = ptep_modify_prot_start(mm, addr, pte); | ||
57 | if (pte_numa(ptent)) | ||
58 | ptent = pte_mknonnuma(ptent); | ||
57 | ptent = pte_modify(ptent, newprot); | 59 | ptent = pte_modify(ptent, newprot); |
58 | updated = true; | 60 | updated = true; |
59 | } else { | 61 | } else { |
60 | struct page *page; | 62 | struct page *page; |
61 | 63 | ||
64 | ptent = *pte; | ||
62 | page = vm_normal_page(vma, addr, oldpte); | 65 | page = vm_normal_page(vma, addr, oldpte); |
63 | if (page) { | 66 | if (page) { |
64 | if (!pte_numa(oldpte)) { | 67 | if (!pte_numa(oldpte)) { |
65 | ptent = pte_mknuma(ptent); | 68 | ptent = pte_mknuma(ptent); |
69 | set_pte_at(mm, addr, pte, ptent); | ||
66 | updated = true; | 70 | updated = true; |
67 | } | 71 | } |
68 | } | 72 | } |
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
79 | 83 | ||
80 | if (updated) | 84 | if (updated) |
81 | pages++; | 85 | pages++; |
82 | ptep_modify_prot_commit(mm, addr, pte, ptent); | 86 | |
87 | /* Only !prot_numa always clears the pte */ | ||
88 | if (!prot_numa) | ||
89 | ptep_modify_prot_commit(mm, addr, pte, ptent); | ||
83 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { | 90 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { |
84 | swp_entry_t entry = pte_to_swp_entry(oldpte); | 91 | swp_entry_t entry = pte_to_swp_entry(oldpte); |
85 | 92 | ||
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, | |||
181 | BUG_ON(addr >= end); | 188 | BUG_ON(addr >= end); |
182 | pgd = pgd_offset(mm, addr); | 189 | pgd = pgd_offset(mm, addr); |
183 | flush_cache_range(vma, addr, end); | 190 | flush_cache_range(vma, addr, end); |
191 | set_tlb_flush_pending(mm); | ||
184 | do { | 192 | do { |
185 | next = pgd_addr_end(addr, end); | 193 | next = pgd_addr_end(addr, end); |
186 | if (pgd_none_or_clear_bad(pgd)) | 194 | if (pgd_none_or_clear_bad(pgd)) |
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, | |||
192 | /* Only flush the TLB if we actually modified any entries: */ | 200 | /* Only flush the TLB if we actually modified any entries: */ |
193 | if (pages) | 201 | if (pages) |
194 | flush_tlb_range(vma, start, end); | 202 | flush_tlb_range(vma, start, end); |
203 | clear_tlb_flush_pending(mm); | ||
195 | 204 | ||
196 | return pages; | 205 | return pages; |
197 | } | 206 | } |
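
The set_tlb_flush_pending()/clear_tlb_flush_pending() bracketing added above pairs with the mm_tlb_flush_pending() check in the migration hunk earlier in this series. A rough single-threaded sketch of that handshake; every name and the "TLB" itself are stand-ins, not kernel interfaces:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool tlb_flush_pending;
    static int stale_tlb_entries;             /* pretend TLB state */

    static void flush_tlb_range_stub(void)
    {
        stale_tlb_entries = 0;
        puts("flushed stale entries");
    }

    /* A racing reader (e.g. a migrator) flushes itself if a change is in flight. */
    static void migrate_stub(void)
    {
        if (atomic_load(&tlb_flush_pending))  /* mm_tlb_flush_pending() */
            flush_tlb_range_stub();
        printf("migrating, stale entries now: %d\n", stale_tlb_entries);
    }

    int main(void)
    {
        /* Simulate the racy interleaving in one thread: the protection changer
         * has set the flag and rewritten entries but has not flushed yet. */
        atomic_store(&tlb_flush_pending, true);   /* set_tlb_flush_pending() */
        stale_tlb_entries = 1;                    /* PTEs rewritten, TLB stale */

        migrate_stub();                           /* sees the flag, flushes itself */

        flush_tlb_range_stub();                   /* changer's own flush */
        atomic_store(&tlb_flush_pending, false);  /* clear_tlb_flush_pending() */
        return 0;
    }
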
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 580a5f075ed0..5248fe070aa4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist) | |||
1816 | 1816 | ||
1817 | static bool zone_local(struct zone *local_zone, struct zone *zone) | 1817 | static bool zone_local(struct zone *local_zone, struct zone *zone) |
1818 | { | 1818 | { |
1819 | return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; | 1819 | return local_zone->node == zone->node; |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) | 1822 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) |
@@ -1913,18 +1913,17 @@ zonelist_scan: | |||
1913 | * page was allocated in should have no effect on the | 1913 | * page was allocated in should have no effect on the |
1914 | * time the page has in memory before being reclaimed. | 1914 | * time the page has in memory before being reclaimed. |
1915 | * | 1915 | * |
1916 | * When zone_reclaim_mode is enabled, try to stay in | 1916 | * Try to stay in local zones in the fastpath. If |
1917 | * local zones in the fastpath. If that fails, the | 1917 | * that fails, the slowpath is entered, which will do |
1918 | * slowpath is entered, which will do another pass | 1918 | * another pass starting with the local zones, but |
1919 | * starting with the local zones, but ultimately fall | 1919 | * ultimately fall back to remote zones that do not |
1920 | * back to remote zones that do not partake in the | 1920 | * partake in the fairness round-robin cycle of this |
1921 | * fairness round-robin cycle of this zonelist. | 1921 | * zonelist. |
1922 | */ | 1922 | */ |
1923 | if (alloc_flags & ALLOC_WMARK_LOW) { | 1923 | if (alloc_flags & ALLOC_WMARK_LOW) { |
1924 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) | 1924 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) |
1925 | continue; | 1925 | continue; |
1926 | if (zone_reclaim_mode && | 1926 | if (!zone_local(preferred_zone, zone)) |
1927 | !zone_local(preferred_zone, zone)) | ||
1928 | continue; | 1927 | continue; |
1929 | } | 1928 | } |
1930 | /* | 1929 | /* |
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order, | |||
2390 | * thrash fairness information for zones that are not | 2389 | * thrash fairness information for zones that are not |
2391 | * actually part of this zonelist's round-robin cycle. | 2390 | * actually part of this zonelist's round-robin cycle. |
2392 | */ | 2391 | */ |
2393 | if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) | 2392 | if (!zone_local(preferred_zone, zone)) |
2394 | continue; | 2393 | continue; |
2395 | mod_zone_page_state(zone, NR_ALLOC_BATCH, | 2394 | mod_zone_page_state(zone, NR_ALLOC_BATCH, |
2396 | high_wmark_pages(zone) - | 2395 | high_wmark_pages(zone) - |
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index cbb38545d9d6..a8b919925934 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c | |||
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma, | |||
110 | pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, | 110 | pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, |
111 | pte_t *ptep) | 111 | pte_t *ptep) |
112 | { | 112 | { |
113 | struct mm_struct *mm = (vma)->vm_mm; | ||
113 | pte_t pte; | 114 | pte_t pte; |
114 | pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); | 115 | pte = ptep_get_and_clear(mm, address, ptep); |
115 | if (pte_accessible(pte)) | 116 | if (pte_accessible(mm, pte)) |
116 | flush_tlb_page(vma, address); | 117 | flush_tlb_page(vma, address); |
117 | return pte; | 118 | return pte; |
118 | } | 119 | } |
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) | |||
191 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | 192 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
192 | pmd_t *pmdp) | 193 | pmd_t *pmdp) |
193 | { | 194 | { |
195 | pmd_t entry = *pmdp; | ||
196 | if (pmd_numa(entry)) | ||
197 | entry = pmd_mknonnuma(entry); | ||
194 | set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); | 198 | set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); |
195 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); | 199 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
196 | } | 200 | } |
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm, | |||
600 | spinlock_t *ptl; | 600 | spinlock_t *ptl; |
601 | 601 | ||
602 | if (unlikely(PageHuge(page))) { | 602 | if (unlikely(PageHuge(page))) { |
603 | /* when pud is not present, pte will be NULL */ | ||
603 | pte = huge_pte_offset(mm, address); | 604 | pte = huge_pte_offset(mm, address); |
605 | if (!pte) | ||
606 | return NULL; | ||
607 | |||
604 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); | 608 | ptl = huge_pte_lockptr(page_hstate(page), mm, pte); |
605 | goto check; | 609 | goto check; |
606 | } | 610 | } |
diff --git a/mm/shmem.c b/mm/shmem.c index 8297623fcaed..902a14842b74 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = { | |||
2918 | .d_dname = simple_dname | 2918 | .d_dname = simple_dname |
2919 | }; | 2919 | }; |
2920 | 2920 | ||
2921 | /** | 2921 | static struct file *__shmem_file_setup(const char *name, loff_t size, |
2922 | * shmem_file_setup - get an unlinked file living in tmpfs | 2922 | unsigned long flags, unsigned int i_flags) |
2923 | * @name: name for dentry (to be seen in /proc/<pid>/maps | ||
2924 | * @size: size to be set for the file | ||
2925 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size | ||
2926 | */ | ||
2927 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) | ||
2928 | { | 2923 | { |
2929 | struct file *res; | 2924 | struct file *res; |
2930 | struct inode *inode; | 2925 | struct inode *inode; |
@@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags | |||
2957 | if (!inode) | 2952 | if (!inode) |
2958 | goto put_dentry; | 2953 | goto put_dentry; |
2959 | 2954 | ||
2955 | inode->i_flags |= i_flags; | ||
2960 | d_instantiate(path.dentry, inode); | 2956 | d_instantiate(path.dentry, inode); |
2961 | inode->i_size = size; | 2957 | inode->i_size = size; |
2962 | clear_nlink(inode); /* It is unlinked */ | 2958 | clear_nlink(inode); /* It is unlinked */ |
@@ -2977,6 +2973,32 @@ put_memory: | |||
2977 | shmem_unacct_size(flags, size); | 2973 | shmem_unacct_size(flags, size); |
2978 | return res; | 2974 | return res; |
2979 | } | 2975 | } |
2976 | |||
2977 | /** | ||
2978 | * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be | ||
2979 | * kernel internal. There will be NO LSM permission checks against the | ||
2980 | * underlying inode. So users of this interface must do LSM checks at a | ||
2981 | * higher layer. The one user is the big_key implementation. LSM checks | ||
2982 | * are provided at the key level rather than the inode level. | ||
2983 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | ||
2984 | * @size: size to be set for the file | ||
2985 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size | ||
2986 | */ | ||
2987 | struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) | ||
2988 | { | ||
2989 | return __shmem_file_setup(name, size, flags, S_PRIVATE); | ||
2990 | } | ||
2991 | |||
2992 | /** | ||
2993 | * shmem_file_setup - get an unlinked file living in tmpfs | ||
2994 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | ||
2995 | * @size: size to be set for the file | ||
2996 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size | ||
2997 | */ | ||
2998 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) | ||
2999 | { | ||
3000 | return __shmem_file_setup(name, size, flags, 0); | ||
3001 | } | ||
2980 | EXPORT_SYMBOL_GPL(shmem_file_setup); | 3002 | EXPORT_SYMBOL_GPL(shmem_file_setup); |
2981 | 3003 | ||
2982 | /** | 3004 | /** |
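
The shmem change above factors the shared setup into a private helper that takes the extra inode flags, leaving shmem_file_setup() and the new shmem_kernel_file_setup() as thin wrappers. A generic sketch of that wrapper-over-helper shape, with toy types standing in for the shmem structures:

    #include <stdio.h>

    #define S_PRIVATE 0x1   /* stand-in for the "kernel-internal inode" flag */

    struct file_stub { unsigned int i_flags; long size; };

    /* One private helper carries the shared logic and the extra flag argument. */
    static struct file_stub setup(long size, unsigned int i_flags)
    {
        struct file_stub f = { .i_flags = i_flags, .size = size };
        return f;
    }

    /* Kernel-internal variant: callers must do their own permission checks. */
    static struct file_stub kernel_file_setup(long size)
    {
        return setup(size, S_PRIVATE);
    }

    /* Regular variant keeps the old behaviour: no special inode flags. */
    static struct file_stub file_setup(long size)
    {
        return setup(size, 0);
    }

    int main(void)
    {
        struct file_stub a = kernel_file_setup(4096);
        struct file_stub b = file_setup(4096);
        printf("internal flags=%#x, regular flags=%#x\n", a.i_flags, b.i_flags);
        return 0;
    }
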
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 762896ebfcf5..47c908f1f626 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = { | |||
530 | .parse = eth_header_parse, | 530 | .parse = eth_header_parse, |
531 | }; | 531 | }; |
532 | 532 | ||
533 | static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, | ||
534 | unsigned short type, | ||
535 | const void *daddr, const void *saddr, | ||
536 | unsigned int len) | ||
537 | { | ||
538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | ||
539 | struct net_device *real_dev = vlan->real_dev; | ||
540 | |||
541 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); | ||
542 | } | ||
543 | |||
544 | static const struct header_ops vlan_passthru_header_ops = { | ||
545 | .create = vlan_passthru_hard_header, | ||
546 | .rebuild = dev_rebuild_header, | ||
547 | .parse = eth_header_parse, | ||
548 | }; | ||
549 | |||
533 | static struct device_type vlan_type = { | 550 | static struct device_type vlan_type = { |
534 | .name = "vlan", | 551 | .name = "vlan", |
535 | }; | 552 | }; |
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev) | |||
573 | 590 | ||
574 | dev->needed_headroom = real_dev->needed_headroom; | 591 | dev->needed_headroom = real_dev->needed_headroom; |
575 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { | 592 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { |
576 | dev->header_ops = real_dev->header_ops; | 593 | dev->header_ops = &vlan_passthru_header_ops; |
577 | dev->hard_header_len = real_dev->hard_header_len; | 594 | dev->hard_header_len = real_dev->hard_header_len; |
578 | } else { | 595 | } else { |
579 | dev->header_ops = &vlan_header_ops; | 596 | dev->header_ops = &vlan_header_ops; |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a2b480a90872..b9c8a6eedf45 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) | |||
307 | hard_iface->bat_iv.ogm_buff = ogm_buff; | 307 | hard_iface->bat_iv.ogm_buff = ogm_buff; |
308 | 308 | ||
309 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; | 309 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; |
310 | batadv_ogm_packet->header.packet_type = BATADV_IV_OGM; | 310 | batadv_ogm_packet->packet_type = BATADV_IV_OGM; |
311 | batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION; | 311 | batadv_ogm_packet->version = BATADV_COMPAT_VERSION; |
312 | batadv_ogm_packet->header.ttl = 2; | 312 | batadv_ogm_packet->ttl = 2; |
313 | batadv_ogm_packet->flags = BATADV_NO_FLAGS; | 313 | batadv_ogm_packet->flags = BATADV_NO_FLAGS; |
314 | batadv_ogm_packet->reserved = 0; | 314 | batadv_ogm_packet->reserved = 0; |
315 | batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; | 315 | batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; |
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) | |||
346 | 346 | ||
347 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; | 347 | batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; |
348 | batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; | 348 | batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; |
349 | batadv_ogm_packet->header.ttl = BATADV_TTL; | 349 | batadv_ogm_packet->ttl = BATADV_TTL; |
350 | } | 350 | } |
351 | 351 | ||
352 | /* when do we schedule our own ogm to be sent */ | 352 | /* when do we schedule our own ogm to be sent */ |
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, | |||
435 | fwd_str, (packet_num > 0 ? "aggregated " : ""), | 435 | fwd_str, (packet_num > 0 ? "aggregated " : ""), |
436 | batadv_ogm_packet->orig, | 436 | batadv_ogm_packet->orig, |
437 | ntohl(batadv_ogm_packet->seqno), | 437 | ntohl(batadv_ogm_packet->seqno), |
438 | batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl, | 438 | batadv_ogm_packet->tq, batadv_ogm_packet->ttl, |
439 | (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? | 439 | (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? |
440 | "on" : "off"), | 440 | "on" : "off"), |
441 | hard_iface->net_dev->name, | 441 | hard_iface->net_dev->name, |
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) | |||
491 | /* multihomed peer assumed | 491 | /* multihomed peer assumed |
492 | * non-primary OGMs are only broadcasted on their interface | 492 | * non-primary OGMs are only broadcasted on their interface |
493 | */ | 493 | */ |
494 | if ((directlink && (batadv_ogm_packet->header.ttl == 1)) || | 494 | if ((directlink && (batadv_ogm_packet->ttl == 1)) || |
495 | (forw_packet->own && (forw_packet->if_incoming != primary_if))) { | 495 | (forw_packet->own && (forw_packet->if_incoming != primary_if))) { |
496 | /* FIXME: what about aggregated packets ? */ | 496 | /* FIXME: what about aggregated packets ? */ |
497 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 497 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) | |||
499 | (forw_packet->own ? "Sending own" : "Forwarding"), | 499 | (forw_packet->own ? "Sending own" : "Forwarding"), |
500 | batadv_ogm_packet->orig, | 500 | batadv_ogm_packet->orig, |
501 | ntohl(batadv_ogm_packet->seqno), | 501 | ntohl(batadv_ogm_packet->seqno), |
502 | batadv_ogm_packet->header.ttl, | 502 | batadv_ogm_packet->ttl, |
503 | forw_packet->if_incoming->net_dev->name, | 503 | forw_packet->if_incoming->net_dev->name, |
504 | forw_packet->if_incoming->net_dev->dev_addr); | 504 | forw_packet->if_incoming->net_dev->dev_addr); |
505 | 505 | ||
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, | |||
572 | */ | 572 | */ |
573 | if ((!directlink) && | 573 | if ((!directlink) && |
574 | (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && | 574 | (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && |
575 | (batadv_ogm_packet->header.ttl != 1) && | 575 | (batadv_ogm_packet->ttl != 1) && |
576 | 576 | ||
577 | /* own packets originating non-primary | 577 | /* own packets originating non-primary |
578 | * interfaces leave only that interface | 578 | * interfaces leave only that interface |
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, | |||
587 | * interface only - we still can aggregate | 587 | * interface only - we still can aggregate |
588 | */ | 588 | */ |
589 | if ((directlink) && | 589 | if ((directlink) && |
590 | (new_bat_ogm_packet->header.ttl == 1) && | 590 | (new_bat_ogm_packet->ttl == 1) && |
591 | (forw_packet->if_incoming == if_incoming) && | 591 | (forw_packet->if_incoming == if_incoming) && |
592 | 592 | ||
593 | /* packets from direct neighbors or | 593 | /* packets from direct neighbors or |
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
778 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 778 | struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
779 | uint16_t tvlv_len; | 779 | uint16_t tvlv_len; |
780 | 780 | ||
781 | if (batadv_ogm_packet->header.ttl <= 1) { | 781 | if (batadv_ogm_packet->ttl <= 1) { |
782 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); | 782 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); |
783 | return; | 783 | return; |
784 | } | 784 | } |
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
798 | 798 | ||
799 | tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); | 799 | tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); |
800 | 800 | ||
801 | batadv_ogm_packet->header.ttl--; | 801 | batadv_ogm_packet->ttl--; |
802 | memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); | 802 | memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); |
803 | 803 | ||
804 | /* apply hop penalty */ | 804 | /* apply hop penalty */ |
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, | |||
807 | 807 | ||
808 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 808 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
809 | "Forwarding packet: tq: %i, ttl: %i\n", | 809 | "Forwarding packet: tq: %i, ttl: %i\n", |
810 | batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl); | 810 | batadv_ogm_packet->tq, batadv_ogm_packet->ttl); |
811 | 811 | ||
812 | /* switch of primaries first hop flag when forwarding */ | 812 | /* switch of primaries first hop flag when forwarding */ |
813 | batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; | 813 | batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; |
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
972 | spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); | 972 | spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); |
973 | 973 | ||
974 | if (dup_status == BATADV_NO_DUP) { | 974 | if (dup_status == BATADV_NO_DUP) { |
975 | orig_node->last_ttl = batadv_ogm_packet->header.ttl; | 975 | orig_node->last_ttl = batadv_ogm_packet->ttl; |
976 | neigh_node->last_ttl = batadv_ogm_packet->header.ttl; | 976 | neigh_node->last_ttl = batadv_ogm_packet->ttl; |
977 | } | 977 | } |
978 | 978 | ||
979 | batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); | 979 | batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); |
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
1247 | * packet in an aggregation. Here we expect that the padding | 1247 | * packet in an aggregation. Here we expect that the padding |
1248 | * is always zero (or not 0x01) | 1248 | * is always zero (or not 0x01) |
1249 | */ | 1249 | */ |
1250 | if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM) | 1250 | if (batadv_ogm_packet->packet_type != BATADV_IV_OGM) |
1251 | return; | 1251 | return; |
1252 | 1252 | ||
1253 | /* could be changed by schedule_own_packet() */ | 1253 | /* could be changed by schedule_own_packet() */ |
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
1267 | if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, | 1267 | if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, |
1268 | batadv_ogm_packet->prev_sender, | 1268 | batadv_ogm_packet->prev_sender, |
1269 | ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, | 1269 | ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, |
1270 | batadv_ogm_packet->header.ttl, | 1270 | batadv_ogm_packet->ttl, |
1271 | batadv_ogm_packet->header.version, has_directlink_flag); | 1271 | batadv_ogm_packet->version, has_directlink_flag); |
1272 | 1272 | ||
1273 | rcu_read_lock(); | 1273 | rcu_read_lock(); |
1274 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { | 1274 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { |
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr, | |||
1433 | * seqno and similar ttl as the non-duplicate | 1433 | * seqno and similar ttl as the non-duplicate |
1434 | */ | 1434 | */ |
1435 | sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); | 1435 | sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); |
1436 | similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; | 1436 | similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl; |
1437 | if (is_bidirect && ((dup_status == BATADV_NO_DUP) || | 1437 | if (is_bidirect && ((dup_status == BATADV_NO_DUP) || |
1438 | (sameseq && similar_ttl))) | 1438 | (sameseq && similar_ttl))) |
1439 | batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, | 1439 | batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, |
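
The header.packet_type -> packet_type (and similar) renames above reflect the shared batadv header sub-struct being unrolled into each packet structure, so the common fields are addressed directly. A toy illustration of what that flattening looks like at the struct level (field values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Before: shared fields lived in a nested sub-struct. */
    struct ogm_packet_old {
        struct { uint8_t packet_type, version, ttl; } header;
        uint8_t flags;
    };

    /* After: the same fields sit directly in the packet struct, which is what
     * the renamed accesses in the hunks above spell out. */
    struct ogm_packet_new {
        uint8_t packet_type;
        uint8_t version;
        uint8_t ttl;
        uint8_t flags;
    };

    int main(void)
    {
        struct ogm_packet_old o = { .header = { 1, 15, 2 }, .flags = 0 };
        struct ogm_packet_new n = { .packet_type = 1, .version = 15, .ttl = 2 };

        o.header.ttl--;   /* old spelling */
        n.ttl--;          /* new spelling */

        printf("old ttl %d, new ttl %d\n", o.header.ttl, n.ttl);
        return 0;
    }
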
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 6c8c3934bd7b..b316a4cb6f14 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
349 | 349 | ||
350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
351 | 351 | ||
352 | switch (unicast_4addr_packet->u.header.packet_type) { | 352 | switch (unicast_4addr_packet->u.packet_type) { |
353 | case BATADV_UNICAST: | 353 | case BATADV_UNICAST: |
354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
355 | "* encapsulated within a UNICAST packet\n"); | 355 | "* encapsulated within a UNICAST packet\n"); |
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
374 | break; | 374 | break; |
375 | default: | 375 | default: |
376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", | 376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", |
377 | unicast_4addr_packet->u.header.packet_type); | 377 | unicast_4addr_packet->u.packet_type); |
378 | } | 378 | } |
379 | break; | 379 | break; |
380 | case BATADV_BCAST: | 380 | case BATADV_BCAST: |
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
387 | default: | 387 | default: |
388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
389 | "* encapsulated within an unknown packet type (0x%x)\n", | 389 | "* encapsulated within an unknown packet type (0x%x)\n", |
390 | unicast_4addr_packet->u.header.packet_type); | 390 | unicast_4addr_packet->u.packet_type); |
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 271d321b3a04..6ddb6145ffb5 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb, | |||
355 | batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, | 355 | batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, |
356 | skb->len + ETH_HLEN); | 356 | skb->len + ETH_HLEN); |
357 | 357 | ||
358 | packet->header.ttl--; | 358 | packet->ttl--; |
359 | batadv_send_skb_packet(skb, neigh_node->if_incoming, | 359 | batadv_send_skb_packet(skb, neigh_node->if_incoming, |
360 | neigh_node->addr); | 360 | neigh_node->addr); |
361 | ret = true; | 361 | ret = true; |
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb, | |||
444 | goto out_err; | 444 | goto out_err; |
445 | 445 | ||
446 | /* Create one header to be copied to all fragments */ | 446 | /* Create one header to be copied to all fragments */ |
447 | frag_header.header.packet_type = BATADV_UNICAST_FRAG; | 447 | frag_header.packet_type = BATADV_UNICAST_FRAG; |
448 | frag_header.header.version = BATADV_COMPAT_VERSION; | 448 | frag_header.version = BATADV_COMPAT_VERSION; |
449 | frag_header.header.ttl = BATADV_TTL; | 449 | frag_header.ttl = BATADV_TTL; |
450 | frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); | 450 | frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); |
451 | frag_header.reserved = 0; | 451 | frag_header.reserved = 0; |
452 | frag_header.no = 0; | 452 | frag_header.no = 0; |
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 29ae4efe3543..130cc3217e2b 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c | |||
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, | |||
194 | goto free_skb; | 194 | goto free_skb; |
195 | } | 195 | } |
196 | 196 | ||
197 | if (icmp_header->header.packet_type != BATADV_ICMP) { | 197 | if (icmp_header->packet_type != BATADV_ICMP) { |
198 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 198 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
199 | "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); | 199 | "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); |
200 | len = -EINVAL; | 200 | len = -EINVAL; |
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, | |||
243 | 243 | ||
244 | icmp_header->uid = socket_client->index; | 244 | icmp_header->uid = socket_client->index; |
245 | 245 | ||
246 | if (icmp_header->header.version != BATADV_COMPAT_VERSION) { | 246 | if (icmp_header->version != BATADV_COMPAT_VERSION) { |
247 | icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; | 247 | icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; |
248 | icmp_header->header.version = BATADV_COMPAT_VERSION; | 248 | icmp_header->version = BATADV_COMPAT_VERSION; |
249 | batadv_socket_add_packet(socket_client, icmp_header, | 249 | batadv_socket_add_packet(socket_client, icmp_header, |
250 | packet_len); | 250 | packet_len); |
251 | goto free_skb; | 251 | goto free_skb; |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index c51a5e568f0a..1511f64a6cea 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
383 | 383 | ||
384 | batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; | 384 | batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; |
385 | 385 | ||
386 | if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { | 386 | if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) { |
387 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 387 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
388 | "Drop packet: incompatible batman version (%i)\n", | 388 | "Drop packet: incompatible batman version (%i)\n", |
389 | batadv_ogm_packet->header.version); | 389 | batadv_ogm_packet->version); |
390 | goto err_free; | 390 | goto err_free; |
391 | } | 391 | } |
392 | 392 | ||
393 | /* all receive handlers return whether they received or reused | 393 | /* all receive handlers return whether they received or reused |
394 | * the supplied skb. if not, we have to free the skb. | 394 | * the supplied skb. if not, we have to free the skb. |
395 | */ | 395 | */ |
396 | idx = batadv_ogm_packet->header.packet_type; | 396 | idx = batadv_ogm_packet->packet_type; |
397 | ret = (*batadv_rx_handler[idx])(skb, hard_iface); | 397 | ret = (*batadv_rx_handler[idx])(skb, hard_iface); |
398 | 398 | ||
399 | if (ret == NET_RX_DROP) | 399 | if (ret == NET_RX_DROP) |
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void) | |||
426 | BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); | 426 | BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); |
427 | BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); | 427 | BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); |
428 | BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); | 428 | BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); |
429 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4); | 429 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4); |
430 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4); | 430 | BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4); |
431 | 431 | ||
432 | /* broadcast packet */ | 432 | /* broadcast packet */ |
433 | batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; | 433 | batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; |
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src, | |||
1119 | skb_reserve(skb, ETH_HLEN); | 1119 | skb_reserve(skb, ETH_HLEN); |
1120 | tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); | 1120 | tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); |
1121 | unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; | 1121 | unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; |
1122 | unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV; | 1122 | unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV; |
1123 | unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION; | 1123 | unicast_tvlv_packet->version = BATADV_COMPAT_VERSION; |
1124 | unicast_tvlv_packet->header.ttl = BATADV_TTL; | 1124 | unicast_tvlv_packet->ttl = BATADV_TTL; |
1125 | unicast_tvlv_packet->reserved = 0; | 1125 | unicast_tvlv_packet->reserved = 0; |
1126 | unicast_tvlv_packet->tvlv_len = htons(tvlv_len); | 1126 | unicast_tvlv_packet->tvlv_len = htons(tvlv_len); |
1127 | unicast_tvlv_packet->align = 0; | 1127 | unicast_tvlv_packet->align = 0; |
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 351e199bc0af..511d7e1eea38 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv, | |||
722 | { | 722 | { |
723 | if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) | 723 | if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) |
724 | return false; | 724 | return false; |
725 | if (orig_node->last_ttl != ogm_packet->header.ttl + 1) | 725 | if (orig_node->last_ttl != ogm_packet->ttl + 1) |
726 | return false; | 726 | return false; |
727 | if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) | 727 | if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) |
728 | return false; | 728 | return false; |
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv, | |||
1082 | coded_packet = (struct batadv_coded_packet *)skb_dest->data; | 1082 | coded_packet = (struct batadv_coded_packet *)skb_dest->data; |
1083 | skb_reset_mac_header(skb_dest); | 1083 | skb_reset_mac_header(skb_dest); |
1084 | 1084 | ||
1085 | coded_packet->header.packet_type = BATADV_CODED; | 1085 | coded_packet->packet_type = BATADV_CODED; |
1086 | coded_packet->header.version = BATADV_COMPAT_VERSION; | 1086 | coded_packet->version = BATADV_COMPAT_VERSION; |
1087 | coded_packet->header.ttl = packet1->header.ttl; | 1087 | coded_packet->ttl = packet1->ttl; |
1088 | 1088 | ||
1089 | /* Info about first unicast packet */ | 1089 | /* Info about first unicast packet */ |
1090 | memcpy(coded_packet->first_source, first_source, ETH_ALEN); | 1090 | memcpy(coded_packet->first_source, first_source, ETH_ALEN); |
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv, | |||
1097 | memcpy(coded_packet->second_source, second_source, ETH_ALEN); | 1097 | memcpy(coded_packet->second_source, second_source, ETH_ALEN); |
1098 | memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); | 1098 | memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); |
1099 | coded_packet->second_crc = packet_id2; | 1099 | coded_packet->second_crc = packet_id2; |
1100 | coded_packet->second_ttl = packet2->header.ttl; | 1100 | coded_packet->second_ttl = packet2->ttl; |
1101 | coded_packet->second_ttvn = packet2->ttvn; | 1101 | coded_packet->second_ttvn = packet2->ttvn; |
1102 | coded_packet->coded_len = htons(coding_len); | 1102 | coded_packet->coded_len = htons(coding_len); |
1103 | 1103 | ||
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb, | |||
1452 | /* We only handle unicast packets */ | 1452 | /* We only handle unicast packets */ |
1453 | payload = skb_network_header(skb); | 1453 | payload = skb_network_header(skb); |
1454 | packet = (struct batadv_unicast_packet *)payload; | 1454 | packet = (struct batadv_unicast_packet *)payload; |
1455 | if (packet->header.packet_type != BATADV_UNICAST) | 1455 | if (packet->packet_type != BATADV_UNICAST) |
1456 | goto out; | 1456 | goto out; |
1457 | 1457 | ||
1458 | /* Try to find a coding opportunity and send the skb if one is found */ | 1458 | /* Try to find a coding opportunity and send the skb if one is found */ |
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv, | |||
1505 | /* Check for supported packet type */ | 1505 | /* Check for supported packet type */ |
1506 | payload = skb_network_header(skb); | 1506 | payload = skb_network_header(skb); |
1507 | packet = (struct batadv_unicast_packet *)payload; | 1507 | packet = (struct batadv_unicast_packet *)payload; |
1508 | if (packet->header.packet_type != BATADV_UNICAST) | 1508 | if (packet->packet_type != BATADV_UNICAST) |
1509 | goto out; | 1509 | goto out; |
1510 | 1510 | ||
1511 | /* Find existing nc_path or create a new */ | 1511 | /* Find existing nc_path or create a new */ |
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
1623 | ttvn = coded_packet_tmp.second_ttvn; | 1623 | ttvn = coded_packet_tmp.second_ttvn; |
1624 | } else { | 1624 | } else { |
1625 | orig_dest = coded_packet_tmp.first_orig_dest; | 1625 | orig_dest = coded_packet_tmp.first_orig_dest; |
1626 | ttl = coded_packet_tmp.header.ttl; | 1626 | ttl = coded_packet_tmp.ttl; |
1627 | ttvn = coded_packet_tmp.first_ttvn; | 1627 | ttvn = coded_packet_tmp.first_ttvn; |
1628 | } | 1628 | } |
1629 | 1629 | ||
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
1648 | 1648 | ||
1649 | /* Create decoded unicast packet */ | 1649 | /* Create decoded unicast packet */ |
1650 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 1650 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
1651 | unicast_packet->header.packet_type = BATADV_UNICAST; | 1651 | unicast_packet->packet_type = BATADV_UNICAST; |
1652 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | 1652 | unicast_packet->version = BATADV_COMPAT_VERSION; |
1653 | unicast_packet->header.ttl = ttl; | 1653 | unicast_packet->ttl = ttl; |
1654 | memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); | 1654 | memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); |
1655 | unicast_packet->ttvn = ttvn; | 1655 | unicast_packet->ttvn = ttvn; |
1656 | 1656 | ||
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 207459b62966..2dd8f2422550 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h | |||
@@ -155,6 +155,7 @@ enum batadv_tvlv_type { | |||
155 | BATADV_TVLV_ROAM = 0x05, | 155 | BATADV_TVLV_ROAM = 0x05, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | #pragma pack(2) | ||
158 | /* the destination hardware field in the ARP frame is used to | 159 | /* the destination hardware field in the ARP frame is used to |
159 | * transport the claim type and the group id | 160 | * transport the claim type and the group id |
160 | */ | 161 | */ |
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst { | |||
163 | uint8_t type; /* bla_claimframe */ | 164 | uint8_t type; /* bla_claimframe */ |
164 | __be16 group; /* group id */ | 165 | __be16 group; /* group id */ |
165 | }; | 166 | }; |
166 | 167 | #pragma pack() | |
167 | struct batadv_header { | ||
168 | uint8_t packet_type; | ||
169 | uint8_t version; /* batman version field */ | ||
170 | uint8_t ttl; | ||
171 | /* the parent struct has to add a byte after the header to make | ||
172 | * everything 4 bytes aligned again | ||
173 | */ | ||
174 | }; | ||
175 | 168 | ||
176 | /** | 169 | /** |
177 | * struct batadv_ogm_packet - ogm (routing protocol) packet | 170 | * struct batadv_ogm_packet - ogm (routing protocol) packet |
178 | * @header: common batman packet header | 171 | * @packet_type: batman-adv packet type, part of the general header |
172 | * @version: batman-adv protocol version, part of the general header | ||
173 | * @ttl: time to live for this packet, part of the general header | ||
179 | * @flags: contains routing relevant flags - see enum batadv_iv_flags | 174 | * @flags: contains routing relevant flags - see enum batadv_iv_flags |
180 | * @tvlv_len: length of tvlv data following the ogm header | 175 | * @tvlv_len: length of tvlv data following the ogm header |
181 | */ | 176 | */ |
182 | struct batadv_ogm_packet { | 177 | struct batadv_ogm_packet { |
183 | struct batadv_header header; | 178 | uint8_t packet_type; |
179 | uint8_t version; | ||
180 | uint8_t ttl; | ||
184 | uint8_t flags; | 181 | uint8_t flags; |
185 | __be32 seqno; | 182 | __be32 seqno; |
186 | uint8_t orig[ETH_ALEN]; | 183 | uint8_t orig[ETH_ALEN]; |
@@ -196,29 +193,51 @@ struct batadv_ogm_packet { | |||
196 | #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) | 193 | #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) |
197 | 194 | ||
198 | /** | 195 | /** |
199 | * batadv_icmp_header - common ICMP header | 196 | * batadv_icmp_header - common members among all the ICMP packets |
200 | * @header: common batman header | 197 | * @packet_type: batman-adv packet type, part of the general header |
198 | * @version: batman-adv protocol version, part of the general header | ||
199 | * @ttl: time to live for this packet, part of the general header | ||
201 | * @msg_type: ICMP packet type | 200 | * @msg_type: ICMP packet type |
202 | * @dst: address of the destination node | 201 | * @dst: address of the destination node |
203 | * @orig: address of the source node | 202 | * @orig: address of the source node |
204 | * @uid: local ICMP socket identifier | 203 | * @uid: local ICMP socket identifier |
204 | * @align: not used - useful for alignment purposes only | ||
205 | * | ||
206 | * This structure is used for ICMP packets parsing only and it is never sent | ||
207 | * over the wire. The alignment field at the end is there to ensure that | ||
208 | * members are padded the same way as they are in real packets. | ||
205 | */ | 209 | */ |
206 | struct batadv_icmp_header { | 210 | struct batadv_icmp_header { |
207 | struct batadv_header header; | 211 | uint8_t packet_type; |
212 | uint8_t version; | ||
213 | uint8_t ttl; | ||
208 | uint8_t msg_type; /* see ICMP message types above */ | 214 | uint8_t msg_type; /* see ICMP message types above */ |
209 | uint8_t dst[ETH_ALEN]; | 215 | uint8_t dst[ETH_ALEN]; |
210 | uint8_t orig[ETH_ALEN]; | 216 | uint8_t orig[ETH_ALEN]; |
211 | uint8_t uid; | 217 | uint8_t uid; |
218 | uint8_t align[3]; | ||
212 | }; | 219 | }; |
213 | 220 | ||
214 | /** | 221 | /** |
215 | * batadv_icmp_packet - ICMP packet | 222 | * batadv_icmp_packet - ICMP packet |
216 | * @icmph: common ICMP header | 223 | * @packet_type: batman-adv packet type, part of the general header |
224 | * @version: batman-adv protocol version, part of the general header | ||
225 | * @ttl: time to live for this packet, part of the general header | ||
226 | * @msg_type: ICMP packet type | ||
227 | * @dst: address of the destination node | ||
228 | * @orig: address of the source node | ||
229 | * @uid: local ICMP socket identifier | ||
217 | * @reserved: not used - useful for alignment | 230 | * @reserved: not used - useful for alignment |
218 | * @seqno: ICMP sequence number | 231 | * @seqno: ICMP sequence number |
219 | */ | 232 | */ |
220 | struct batadv_icmp_packet { | 233 | struct batadv_icmp_packet { |
221 | struct batadv_icmp_header icmph; | 234 | uint8_t packet_type; |
235 | uint8_t version; | ||
236 | uint8_t ttl; | ||
237 | uint8_t msg_type; /* see ICMP message types above */ | ||
238 | uint8_t dst[ETH_ALEN]; | ||
239 | uint8_t orig[ETH_ALEN]; | ||
240 | uint8_t uid; | ||
222 | uint8_t reserved; | 241 | uint8_t reserved; |
223 | __be16 seqno; | 242 | __be16 seqno; |
224 | }; | 243 | }; |
@@ -227,13 +246,25 @@ struct batadv_icmp_packet { | |||
227 | 246 | ||
228 | /** | 247 | /** |
229 | * batadv_icmp_packet_rr - ICMP RouteRecord packet | 248 | * batadv_icmp_packet_rr - ICMP RouteRecord packet |
230 | * @icmph: common ICMP header | 249 | * @packet_type: batman-adv packet type, part of the general header |
250 | * @version: batman-adv protocol version, part of the general header | ||
251 | * @ttl: time to live for this packet, part of the general header | ||
252 | * @msg_type: ICMP packet type | ||
253 | * @dst: address of the destination node | ||
254 | * @orig: address of the source node | ||
255 | * @uid: local ICMP socket identifier | ||
231 | * @rr_cur: number of entries in the rr array | 256 | * @rr_cur: number of entries in the rr array |
232 | * @seqno: ICMP sequence number | 257 | * @seqno: ICMP sequence number |
233 | * @rr: route record array | 258 | * @rr: route record array |
234 | */ | 259 | */ |
235 | struct batadv_icmp_packet_rr { | 260 | struct batadv_icmp_packet_rr { |
236 | struct batadv_icmp_header icmph; | 261 | uint8_t packet_type; |
262 | uint8_t version; | ||
263 | uint8_t ttl; | ||
264 | uint8_t msg_type; /* see ICMP message types above */ | ||
265 | uint8_t dst[ETH_ALEN]; | ||
266 | uint8_t orig[ETH_ALEN]; | ||
267 | uint8_t uid; | ||
237 | uint8_t rr_cur; | 268 | uint8_t rr_cur; |
238 | __be16 seqno; | 269 | __be16 seqno; |
239 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; | 270 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; |
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr { | |||
253 | */ | 284 | */ |
254 | #pragma pack(2) | 285 | #pragma pack(2) |
255 | 286 | ||
287 | /** | ||
288 | * struct batadv_unicast_packet - unicast packet for network payload | ||
289 | * @packet_type: batman-adv packet type, part of the general header | ||
290 | * @version: batman-adv protocol version, part of the general header | ||
291 | * @ttl: time to live for this packet, part of the general header | ||
292 | * @ttvn: translation table version number | ||
293 | * @dest: originator destination of the unicast packet | ||
294 | */ | ||
256 | struct batadv_unicast_packet { | 295 | struct batadv_unicast_packet { |
257 | struct batadv_header header; | 296 | uint8_t packet_type; |
297 | uint8_t version; | ||
298 | uint8_t ttl; | ||
258 | uint8_t ttvn; /* destination translation table version number */ | 299 | uint8_t ttvn; /* destination translation table version number */ |
259 | uint8_t dest[ETH_ALEN]; | 300 | uint8_t dest[ETH_ALEN]; |
260 | /* "4 bytes boundary + 2 bytes" long to make the payload after the | 301 | /* "4 bytes boundary + 2 bytes" long to make the payload after the |
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet { | |||
280 | 321 | ||
281 | /** | 322 | /** |
282 | * struct batadv_frag_packet - fragmented packet | 323 | * struct batadv_frag_packet - fragmented packet |
283 | * @header: common batman packet header with type, compatversion, and ttl | 324 | * @packet_type: batman-adv packet type, part of the general header |
325 | * @version: batman-adv protocol version, part of the general header | ||
326 | * @ttl: time to live for this packet, part of the general header | ||
284 | * @dest: final destination used when routing fragments | 327 | * @dest: final destination used when routing fragments |
285 | * @orig: originator of the fragment used when merging the packet | 328 | * @orig: originator of the fragment used when merging the packet |
286 | * @no: fragment number within this sequence | 329 | * @no: fragment number within this sequence |
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet { | |||
289 | * @total_size: size of the merged packet | 332 | * @total_size: size of the merged packet |
290 | */ | 333 | */ |
291 | struct batadv_frag_packet { | 334 | struct batadv_frag_packet { |
292 | struct batadv_header header; | 335 | uint8_t packet_type; |
336 | uint8_t version; /* batman version field */ | ||
337 | uint8_t ttl; | ||
293 | #if defined(__BIG_ENDIAN_BITFIELD) | 338 | #if defined(__BIG_ENDIAN_BITFIELD) |
294 | uint8_t no:4; | 339 | uint8_t no:4; |
295 | uint8_t reserved:4; | 340 | uint8_t reserved:4; |
@@ -305,8 +350,19 @@ struct batadv_frag_packet { | |||
305 | __be16 total_size; | 350 | __be16 total_size; |
306 | }; | 351 | }; |
307 | 352 | ||
353 | /** | ||
354 | * struct batadv_bcast_packet - broadcast packet for network payload | ||
355 | * @packet_type: batman-adv packet type, part of the general header | ||
356 | * @version: batman-adv protocol version, part of the general header | ||
357 | * @ttl: time to live for this packet, part of the general header | ||
358 | * @reserved: reserved byte for alignment | ||
359 | * @seqno: sequence identification | ||
360 | * @orig: originator of the broadcast packet | ||
361 | */ | ||
308 | struct batadv_bcast_packet { | 362 | struct batadv_bcast_packet { |
309 | struct batadv_header header; | 363 | uint8_t packet_type; |
364 | uint8_t version; /* batman version field */ | ||
365 | uint8_t ttl; | ||
310 | uint8_t reserved; | 366 | uint8_t reserved; |
311 | __be32 seqno; | 367 | __be32 seqno; |
312 | uint8_t orig[ETH_ALEN]; | 368 | uint8_t orig[ETH_ALEN]; |
@@ -315,11 +371,11 @@ struct batadv_bcast_packet { | |||
315 | */ | 371 | */ |
316 | }; | 372 | }; |
317 | 373 | ||
318 | #pragma pack() | ||
319 | |||
320 | /** | 374 | /** |
321 | * struct batadv_coded_packet - network coded packet | 375 | * struct batadv_coded_packet - network coded packet |
322 | * @header: common batman packet header and ttl of first included packet | 376 | * @packet_type: batman-adv packet type, part of the general header |
377 | * @version: batman-adv protocol version, part of the general header | ||
378 | * @ttl: time to live for this packet, part of the general header | ||
323 | * @reserved: Align following fields to 2-byte boundaries | 379 | * @reserved: Align following fields to 2-byte boundaries |
324 | * @first_source: original source of first included packet | 380 | * @first_source: original source of first included packet |
325 | * @first_orig_dest: original destination of first included packet | 381 | * @first_orig_dest: original destination of first included packet |
@@ -334,7 +390,9 @@ struct batadv_bcast_packet { | |||
334 | * @coded_len: length of network coded part of the payload | 390 | * @coded_len: length of network coded part of the payload |
335 | */ | 391 | */ |
336 | struct batadv_coded_packet { | 392 | struct batadv_coded_packet { |
337 | struct batadv_header header; | 393 | uint8_t packet_type; |
394 | uint8_t version; /* batman version field */ | ||
395 | uint8_t ttl; | ||
338 | uint8_t first_ttvn; | 396 | uint8_t first_ttvn; |
339 | /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ | 397 | /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ |
340 | uint8_t first_source[ETH_ALEN]; | 398 | uint8_t first_source[ETH_ALEN]; |
@@ -349,9 +407,13 @@ struct batadv_coded_packet { | |||
349 | __be16 coded_len; | 407 | __be16 coded_len; |
350 | }; | 408 | }; |
351 | 409 | ||
410 | #pragma pack() | ||
411 | |||
352 | /** | 412 | /** |
353 | * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload | 413 | * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload |
354 | * @header: common batman packet header | 414 | * @packet_type: batman-adv packet type, part of the general header |
415 | * @version: batman-adv protocol version, part of the general header | ||
416 | * @ttl: time to live for this packet, part of the general header | ||
355 | * @reserved: reserved field (for packet alignment) | 417 | * @reserved: reserved field (for packet alignment) |
356 | * @src: address of the source | 418 | * @src: address of the source |
357 | * @dst: address of the destination | 419 | * @dst: address of the destination |
@@ -359,7 +421,9 @@ struct batadv_coded_packet { | |||
359 | * @align: 2 bytes to align the header to a 4 byte boundary | 421 | * @align: 2 bytes to align the header to a 4 byte boundary |
360 | */ | 422 | */ |
361 | struct batadv_unicast_tvlv_packet { | 423 | struct batadv_unicast_tvlv_packet { |
362 | struct batadv_header header; | 424 | uint8_t packet_type; |
425 | uint8_t version; /* batman version field */ | ||
426 | uint8_t ttl; | ||
363 | uint8_t reserved; | 427 | uint8_t reserved; |
364 | uint8_t dst[ETH_ALEN]; | 428 | uint8_t dst[ETH_ALEN]; |
365 | uint8_t src[ETH_ALEN]; | 429 | uint8_t src[ETH_ALEN]; |
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data { | |||
420 | * struct batadv_tvlv_tt_change - translation table diff data | 484 | * struct batadv_tvlv_tt_change - translation table diff data |
421 | * @flags: status indicators concerning the non-mesh client (see | 485 | * @flags: status indicators concerning the non-mesh client (see |
422 | * batadv_tt_client_flags) | 486 | * batadv_tt_client_flags) |
423 | * @reserved: reserved field | 487 | * @reserved: reserved field - useful for alignment purposes only |
424 | * @addr: mac address of non-mesh client that triggered this tt change | 488 | * @addr: mac address of non-mesh client that triggered this tt change |
425 | * @vid: VLAN identifier | 489 | * @vid: VLAN identifier |
426 | */ | 490 | */ |
427 | struct batadv_tvlv_tt_change { | 491 | struct batadv_tvlv_tt_change { |
428 | uint8_t flags; | 492 | uint8_t flags; |
429 | uint8_t reserved; | 493 | uint8_t reserved[3]; |
430 | uint8_t addr[ETH_ALEN]; | 494 | uint8_t addr[ETH_ALEN]; |
431 | __be16 vid; | 495 | __be16 vid; |
432 | }; | 496 | }; |
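The packet.h hunks above drop struct batadv_header and open-code packet_type, version and ttl in every packet struct, extending the #pragma pack(2) region so the on-wire layout stays densely packed; the BUILD_BUG_ON() checks updated in the main.c hunk still require the destination field to start at byte 4. A minimal userspace sketch of that layout invariant, using a cut-down stand-in struct rather than the kernel definitions:

/* Sketch only: a reduced stand-in for the flattened batadv unicast
 * header, not the kernel struct. It demonstrates the invariant the
 * BUILD_BUG_ON() checks in main.c enforce: with the three general
 * header bytes plus ttvn packed tightly, dest starts at offset 4.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#pragma pack(2)
struct sketch_unicast_packet {
	uint8_t packet_type;
	uint8_t version;
	uint8_t ttl;
	uint8_t ttvn;
	uint8_t dest[6];		/* ETH_ALEN */
};
#pragma pack()

int main(void)
{
	static_assert(offsetof(struct sketch_unicast_packet, dest) == 4,
		      "dest must follow the 4 leading header bytes");
	return 0;
}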
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index d4114d775ad6..46278bfb8fdb 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv, | |||
308 | memcpy(icmph->dst, icmph->orig, ETH_ALEN); | 308 | memcpy(icmph->dst, icmph->orig, ETH_ALEN); |
309 | memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); | 309 | memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); |
310 | icmph->msg_type = BATADV_ECHO_REPLY; | 310 | icmph->msg_type = BATADV_ECHO_REPLY; |
311 | icmph->header.ttl = BATADV_TTL; | 311 | icmph->ttl = BATADV_TTL; |
312 | 312 | ||
313 | res = batadv_send_skb_to_orig(skb, orig_node, NULL); | 313 | res = batadv_send_skb_to_orig(skb, orig_node, NULL); |
314 | if (res != NET_XMIT_DROP) | 314 | if (res != NET_XMIT_DROP) |
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
338 | icmp_packet = (struct batadv_icmp_packet *)skb->data; | 338 | icmp_packet = (struct batadv_icmp_packet *)skb->data; |
339 | 339 | ||
340 | /* send TTL exceeded if packet is an echo request (traceroute) */ | 340 | /* send TTL exceeded if packet is an echo request (traceroute) */ |
341 | if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) { | 341 | if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) { |
342 | pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", | 342 | pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", |
343 | icmp_packet->icmph.orig, icmp_packet->icmph.dst); | 343 | icmp_packet->orig, icmp_packet->dst); |
344 | goto out; | 344 | goto out; |
345 | } | 345 | } |
346 | 346 | ||
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
349 | goto out; | 349 | goto out; |
350 | 350 | ||
351 | /* get routing information */ | 351 | /* get routing information */ |
352 | orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig); | 352 | orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig); |
353 | if (!orig_node) | 353 | if (!orig_node) |
354 | goto out; | 354 | goto out; |
355 | 355 | ||
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
359 | 359 | ||
360 | icmp_packet = (struct batadv_icmp_packet *)skb->data; | 360 | icmp_packet = (struct batadv_icmp_packet *)skb->data; |
361 | 361 | ||
362 | memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN); | 362 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); |
363 | memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr, | 363 | memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, |
364 | ETH_ALEN); | 364 | ETH_ALEN); |
365 | icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED; | 365 | icmp_packet->msg_type = BATADV_TTL_EXCEEDED; |
366 | icmp_packet->icmph.header.ttl = BATADV_TTL; | 366 | icmp_packet->ttl = BATADV_TTL; |
367 | 367 | ||
368 | if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) | 368 | if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) |
369 | ret = NET_RX_SUCCESS; | 369 | ret = NET_RX_SUCCESS; |
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
434 | return batadv_recv_my_icmp_packet(bat_priv, skb); | 434 | return batadv_recv_my_icmp_packet(bat_priv, skb); |
435 | 435 | ||
436 | /* TTL exceeded */ | 436 | /* TTL exceeded */ |
437 | if (icmph->header.ttl < 2) | 437 | if (icmph->ttl < 2) |
438 | return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); | 438 | return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); |
439 | 439 | ||
440 | /* get routing information */ | 440 | /* get routing information */ |
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
449 | icmph = (struct batadv_icmp_header *)skb->data; | 449 | icmph = (struct batadv_icmp_header *)skb->data; |
450 | 450 | ||
451 | /* decrement ttl */ | 451 | /* decrement ttl */ |
452 | icmph->header.ttl--; | 452 | icmph->ttl--; |
453 | 453 | ||
454 | /* route it */ | 454 | /* route it */ |
455 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) | 455 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) |
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, | |||
709 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 709 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
710 | 710 | ||
711 | /* TTL exceeded */ | 711 | /* TTL exceeded */ |
712 | if (unicast_packet->header.ttl < 2) { | 712 | if (unicast_packet->ttl < 2) { |
713 | pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", | 713 | pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", |
714 | ethhdr->h_source, unicast_packet->dest); | 714 | ethhdr->h_source, unicast_packet->dest); |
715 | goto out; | 715 | goto out; |
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, | |||
727 | 727 | ||
728 | /* decrement ttl */ | 728 | /* decrement ttl */ |
729 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 729 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
730 | unicast_packet->header.ttl--; | 730 | unicast_packet->ttl--; |
731 | 731 | ||
732 | switch (unicast_packet->header.packet_type) { | 732 | switch (unicast_packet->packet_type) { |
733 | case BATADV_UNICAST_4ADDR: | 733 | case BATADV_UNICAST_4ADDR: |
734 | hdr_len = sizeof(struct batadv_unicast_4addr_packet); | 734 | hdr_len = sizeof(struct batadv_unicast_4addr_packet); |
735 | break; | 735 | break; |
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
970 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 970 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
971 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 971 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
972 | 972 | ||
973 | is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR; | 973 | is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; |
974 | /* the caller function should have already pulled 2 bytes */ | 974 | /* the caller function should have already pulled 2 bytes */ |
975 | if (is4addr) | 975 | if (is4addr) |
976 | hdr_size = sizeof(*unicast_4addr_packet); | 976 | hdr_size = sizeof(*unicast_4addr_packet); |
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
1160 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) | 1160 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) |
1161 | goto out; | 1161 | goto out; |
1162 | 1162 | ||
1163 | if (bcast_packet->header.ttl < 2) | 1163 | if (bcast_packet->ttl < 2) |
1164 | goto out; | 1164 | goto out; |
1165 | 1165 | ||
1166 | orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); | 1166 | orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index c83be5ebaa28..fba4dcfcfac2 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size, | |||
161 | return false; | 161 | return false; |
162 | 162 | ||
163 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 163 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
164 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | 164 | unicast_packet->version = BATADV_COMPAT_VERSION; |
165 | /* batman packet type: unicast */ | 165 | /* batman packet type: unicast */ |
166 | unicast_packet->header.packet_type = BATADV_UNICAST; | 166 | unicast_packet->packet_type = BATADV_UNICAST; |
167 | /* set unicast ttl */ | 167 | /* set unicast ttl */ |
168 | unicast_packet->header.ttl = BATADV_TTL; | 168 | unicast_packet->ttl = BATADV_TTL; |
169 | /* copy the destination for faster routing */ | 169 | /* copy the destination for faster routing */ |
170 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); | 170 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); |
171 | /* set the destination tt version number */ | 171 | /* set the destination tt version number */ |
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, | |||
221 | goto out; | 221 | goto out; |
222 | 222 | ||
223 | uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 223 | uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
224 | uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR; | 224 | uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR; |
225 | memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); | 225 | memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); |
226 | uc_4addr_packet->subtype = packet_subtype; | 226 | uc_4addr_packet->subtype = packet_subtype; |
227 | uc_4addr_packet->reserved = 0; | 227 | uc_4addr_packet->reserved = 0; |
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, | |||
436 | 436 | ||
437 | /* as we have a copy now, it is safe to decrease the TTL */ | 437 | /* as we have a copy now, it is safe to decrease the TTL */ |
438 | bcast_packet = (struct batadv_bcast_packet *)newskb->data; | 438 | bcast_packet = (struct batadv_bcast_packet *)newskb->data; |
439 | bcast_packet->header.ttl--; | 439 | bcast_packet->ttl--; |
440 | 440 | ||
441 | skb_reset_mac_header(newskb); | 441 | skb_reset_mac_header(newskb); |
442 | 442 | ||
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 36f050876f82..a8f99d1486c0 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
264 | goto dropped; | 264 | goto dropped; |
265 | 265 | ||
266 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | 266 | bcast_packet = (struct batadv_bcast_packet *)skb->data; |
267 | bcast_packet->header.version = BATADV_COMPAT_VERSION; | 267 | bcast_packet->version = BATADV_COMPAT_VERSION; |
268 | bcast_packet->header.ttl = BATADV_TTL; | 268 | bcast_packet->ttl = BATADV_TTL; |
269 | 269 | ||
270 | /* batman packet type: broadcast */ | 270 | /* batman packet type: broadcast */ |
271 | bcast_packet->header.packet_type = BATADV_BCAST; | 271 | bcast_packet->packet_type = BATADV_BCAST; |
272 | bcast_packet->reserved = 0; | 272 | bcast_packet->reserved = 0; |
273 | 273 | ||
274 | /* hw address of first interface is the orig mac because only | 274 | /* hw address of first interface is the orig mac because only |
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
328 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, | 328 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, |
329 | int hdr_size, struct batadv_orig_node *orig_node) | 329 | int hdr_size, struct batadv_orig_node *orig_node) |
330 | { | 330 | { |
331 | struct batadv_header *batadv_header = (struct batadv_header *)skb->data; | 331 | struct batadv_bcast_packet *batadv_bcast_packet; |
332 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 332 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
333 | __be16 ethertype = htons(ETH_P_BATMAN); | 333 | __be16 ethertype = htons(ETH_P_BATMAN); |
334 | struct vlan_ethhdr *vhdr; | 334 | struct vlan_ethhdr *vhdr; |
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
336 | unsigned short vid; | 336 | unsigned short vid; |
337 | bool is_bcast; | 337 | bool is_bcast; |
338 | 338 | ||
339 | is_bcast = (batadv_header->packet_type == BATADV_BCAST); | 339 | batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; |
340 | is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); | ||
340 | 341 | ||
341 | /* check if enough space is available for pulling, and pull */ | 342 | /* check if enough space is available for pulling, and pull */ |
342 | if (!pskb_may_pull(skb, hdr_size)) | 343 | if (!pskb_may_pull(skb, hdr_size)) |
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
345 | skb_pull_rcsum(skb, hdr_size); | 346 | skb_pull_rcsum(skb, hdr_size); |
346 | skb_reset_mac_header(skb); | 347 | skb_reset_mac_header(skb); |
347 | 348 | ||
348 | vid = batadv_get_vid(skb, hdr_size); | 349 | /* clean the netfilter state now that the batman-adv header has been |
350 | * removed | ||
351 | */ | ||
352 | nf_reset(skb); | ||
353 | |||
354 | vid = batadv_get_vid(skb, 0); | ||
349 | ethhdr = eth_hdr(skb); | 355 | ethhdr = eth_hdr(skb); |
350 | 356 | ||
351 | switch (ntohs(ethhdr->h_proto)) { | 357 | switch (ntohs(ethhdr->h_proto)) { |
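With batadv_header gone, batadv_interface_rx() above reads the packet type through a batadv_bcast_packet cast, resets the netfilter state once the batman-adv header has been pulled, and looks up the VLAN ID at offset 0 instead of hdr_size because skb->data then already points at the inner Ethernet frame. A small userspace model of that offset change (fake_skb and fake_pull are made-up stand-ins for the skb helpers):

/* Toy model, not kernel code: shows why the offset passed to the VLAN
 * lookup drops from hdr_size to 0 once the encapsulating header has
 * been pulled - the data pointer already sits on the inner frame.
 */
#include <stdio.h>

struct fake_skb {
	unsigned char *data;		/* current start of packet data */
};

static void fake_pull(struct fake_skb *skb, int len)
{
	skb->data += len;		/* models skb_pull_rcsum(skb, hdr_size) */
}

int main(void)
{
	unsigned char frame[64] = { 0 };
	struct fake_skb skb = { .data = frame };
	int hdr_size = 10;		/* e.g. a batman-adv unicast header */

	unsigned char *inner_before = skb.data + hdr_size;
	fake_pull(&skb, hdr_size);
	unsigned char *inner_after = skb.data;	/* offset 0 after the pull */

	printf("same inner frame: %s\n",
	       inner_before == inner_after ? "yes" : "no");
	return 0;
}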
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4add57d4857f..ff625fedbc5e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, | |||
333 | return; | 333 | return; |
334 | 334 | ||
335 | tt_change_node->change.flags = flags; | 335 | tt_change_node->change.flags = flags; |
336 | tt_change_node->change.reserved = 0; | 336 | memset(tt_change_node->change.reserved, 0, |
337 | sizeof(tt_change_node->change.reserved)); | ||
337 | memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); | 338 | memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); |
338 | tt_change_node->change.vid = htons(common->vid); | 339 | tt_change_node->change.vid = htons(common->vid); |
339 | 340 | ||
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, | |||
2221 | ETH_ALEN); | 2222 | ETH_ALEN); |
2222 | tt_change->flags = tt_common_entry->flags; | 2223 | tt_change->flags = tt_common_entry->flags; |
2223 | tt_change->vid = htons(tt_common_entry->vid); | 2224 | tt_change->vid = htons(tt_common_entry->vid); |
2224 | tt_change->reserved = 0; | 2225 | memset(tt_change->reserved, 0, |
2226 | sizeof(tt_change->reserved)); | ||
2225 | 2227 | ||
2226 | tt_num_entries++; | 2228 | tt_num_entries++; |
2227 | tt_change++; | 2229 | tt_change++; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 6a6c8bb4fd72..7552f9e3089c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); | 940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); |
941 | skb_pull(skb, 1); | 941 | skb_pull(skb, 1); |
942 | 942 | ||
943 | if (hci_pi(sk)->channel == HCI_CHANNEL_RAW && | 943 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { |
944 | bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | 944 | /* No permission check is needed for user channel |
945 | * since that gets enforced when binding the socket. | ||
946 | * | ||
947 | * However check that the packet type is valid. | ||
948 | */ | ||
949 | if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
950 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
951 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
952 | err = -EINVAL; | ||
953 | goto drop; | ||
954 | } | ||
955 | |||
956 | skb_queue_tail(&hdev->raw_q, skb); | ||
957 | queue_work(hdev->workqueue, &hdev->tx_work); | ||
958 | } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | ||
945 | u16 opcode = get_unaligned_le16(skb->data); | 959 | u16 opcode = get_unaligned_le16(skb->data); |
946 | u16 ogf = hci_opcode_ogf(opcode); | 960 | u16 ogf = hci_opcode_ogf(opcode); |
947 | u16 ocf = hci_opcode_ocf(opcode); | 961 | u16 ocf = hci_opcode_ocf(opcode); |
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
972 | goto drop; | 986 | goto drop; |
973 | } | 987 | } |
974 | 988 | ||
975 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER && | ||
976 | bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
977 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
978 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
979 | err = -EINVAL; | ||
980 | goto drop; | ||
981 | } | ||
982 | |||
983 | skb_queue_tail(&hdev->raw_q, skb); | 989 | skb_queue_tail(&hdev->raw_q, skb); |
984 | queue_work(hdev->workqueue, &hdev->tx_work); | 990 | queue_work(hdev->workqueue, &hdev->tx_work); |
985 | } | 991 | } |
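The hci_sock.c rework above lets HCI_CHANNEL_USER writes skip the raw-channel permission checks while still restricting which packet indicators may be injected. A minimal sketch of that check as a standalone predicate, with the indicator values assumed from the HCI specification rather than taken from the kernel headers:

/* Sketch: the user-channel packet type check, pulled out on its own.
 * The numeric values are assumed HCI packet indicators; the kernel
 * uses the HCI_COMMAND_PKT / HCI_ACLDATA_PKT / HCI_SCODATA_PKT macros.
 */
#include <stdbool.h>
#include <stdint.h>

enum {
	SKETCH_HCI_COMMAND_PKT = 0x01,
	SKETCH_HCI_ACLDATA_PKT = 0x02,
	SKETCH_HCI_SCODATA_PKT = 0x03,
};

static bool user_channel_pkt_type_ok(uint8_t pkt_type)
{
	/* Anything other than command, ACL data or SCO data frames is
	 * rejected with -EINVAL before it reaches the raw queue.
	 */
	return pkt_type == SKETCH_HCI_COMMAND_PKT ||
	       pkt_type == SKETCH_HCI_ACLDATA_PKT ||
	       pkt_type == SKETCH_HCI_SCODATA_PKT;
}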
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 4c214b2b88ef..ef66365b7354 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) | |||
1998 | u32 old; | 1998 | u32 old; |
1999 | struct net_bridge_mdb_htable *mdb; | 1999 | struct net_bridge_mdb_htable *mdb; |
2000 | 2000 | ||
2001 | spin_lock(&br->multicast_lock); | 2001 | spin_lock_bh(&br->multicast_lock); |
2002 | if (!netif_running(br->dev)) | 2002 | if (!netif_running(br->dev)) |
2003 | goto unlock; | 2003 | goto unlock; |
2004 | 2004 | ||
@@ -2030,7 +2030,7 @@ rollback: | |||
2030 | } | 2030 | } |
2031 | 2031 | ||
2032 | unlock: | 2032 | unlock: |
2033 | spin_unlock(&br->multicast_lock); | 2033 | spin_unlock_bh(&br->multicast_lock); |
2034 | 2034 | ||
2035 | return err; | 2035 | return err; |
2036 | } | 2036 | } |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 229d820bdf0b..045d56eaeca2 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br, | |||
426 | int br_handle_frame_finish(struct sk_buff *skb); | 426 | int br_handle_frame_finish(struct sk_buff *skb); |
427 | rx_handler_result_t br_handle_frame(struct sk_buff **pskb); | 427 | rx_handler_result_t br_handle_frame(struct sk_buff **pskb); |
428 | 428 | ||
429 | static inline bool br_rx_handler_check_rcu(const struct net_device *dev) | ||
430 | { | ||
431 | return rcu_dereference(dev->rx_handler) == br_handle_frame; | ||
432 | } | ||
433 | |||
434 | static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev) | ||
435 | { | ||
436 | return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL; | ||
437 | } | ||
438 | |||
429 | /* br_ioctl.c */ | 439 | /* br_ioctl.c */ |
430 | int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 440 | int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
431 | int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, | 441 | int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 8660ea3be705..bdb459d21ad8 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, | |||
153 | if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) | 153 | if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) |
154 | goto err; | 154 | goto err; |
155 | 155 | ||
156 | p = br_port_get_rcu(dev); | 156 | p = br_port_get_check_rcu(dev); |
157 | if (!p) | 157 | if (!p) |
158 | goto err; | 158 | goto err; |
159 | 159 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index ba3b7ea5ebb3..0ce469e5ec80 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb, | |||
2539 | } | 2539 | } |
2540 | 2540 | ||
2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2542 | struct netdev_queue *txq, void *accel_priv) | 2542 | struct netdev_queue *txq) |
2543 | { | 2543 | { |
2544 | const struct net_device_ops *ops = dev->netdev_ops; | 2544 | const struct net_device_ops *ops = dev->netdev_ops; |
2545 | int rc = NETDEV_TX_OK; | 2545 | int rc = NETDEV_TX_OK; |
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
2605 | dev_queue_xmit_nit(skb, dev); | 2605 | dev_queue_xmit_nit(skb, dev); |
2606 | 2606 | ||
2607 | skb_len = skb->len; | 2607 | skb_len = skb->len; |
2608 | if (accel_priv) | ||
2609 | rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv); | ||
2610 | else | ||
2611 | rc = ops->ndo_start_xmit(skb, dev); | 2608 | rc = ops->ndo_start_xmit(skb, dev); |
2612 | 2609 | ||
2613 | trace_net_dev_xmit(skb, rc, dev, skb_len); | 2610 | trace_net_dev_xmit(skb, rc, dev, skb_len); |
2614 | if (rc == NETDEV_TX_OK && txq) | 2611 | if (rc == NETDEV_TX_OK) |
2615 | txq_trans_update(txq); | 2612 | txq_trans_update(txq); |
2616 | return rc; | 2613 | return rc; |
2617 | } | 2614 | } |
@@ -2627,10 +2624,7 @@ gso: | |||
2627 | dev_queue_xmit_nit(nskb, dev); | 2624 | dev_queue_xmit_nit(nskb, dev); |
2628 | 2625 | ||
2629 | skb_len = nskb->len; | 2626 | skb_len = nskb->len; |
2630 | if (accel_priv) | 2627 | rc = ops->ndo_start_xmit(nskb, dev); |
2631 | rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv); | ||
2632 | else | ||
2633 | rc = ops->ndo_start_xmit(nskb, dev); | ||
2634 | trace_net_dev_xmit(nskb, rc, dev, skb_len); | 2628 | trace_net_dev_xmit(nskb, rc, dev, skb_len); |
2635 | if (unlikely(rc != NETDEV_TX_OK)) { | 2629 | if (unlikely(rc != NETDEV_TX_OK)) { |
2636 | if (rc & ~NETDEV_TX_MASK) | 2630 | if (rc & ~NETDEV_TX_MASK) |
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit); | |||
2811 | * the BH enable code must have IRQs enabled so that it will not deadlock. | 2805 | * the BH enable code must have IRQs enabled so that it will not deadlock. |
2812 | * --BLG | 2806 | * --BLG |
2813 | */ | 2807 | */ |
2814 | int dev_queue_xmit(struct sk_buff *skb) | 2808 | int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) |
2815 | { | 2809 | { |
2816 | struct net_device *dev = skb->dev; | 2810 | struct net_device *dev = skb->dev; |
2817 | struct netdev_queue *txq; | 2811 | struct netdev_queue *txq; |
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
2827 | 2821 | ||
2828 | skb_update_prio(skb); | 2822 | skb_update_prio(skb); |
2829 | 2823 | ||
2830 | txq = netdev_pick_tx(dev, skb); | 2824 | txq = netdev_pick_tx(dev, skb, accel_priv); |
2831 | q = rcu_dereference_bh(txq->qdisc); | 2825 | q = rcu_dereference_bh(txq->qdisc); |
2832 | 2826 | ||
2833 | #ifdef CONFIG_NET_CLS_ACT | 2827 | #ifdef CONFIG_NET_CLS_ACT |
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
2863 | 2857 | ||
2864 | if (!netif_xmit_stopped(txq)) { | 2858 | if (!netif_xmit_stopped(txq)) { |
2865 | __this_cpu_inc(xmit_recursion); | 2859 | __this_cpu_inc(xmit_recursion); |
2866 | rc = dev_hard_start_xmit(skb, dev, txq, NULL); | 2860 | rc = dev_hard_start_xmit(skb, dev, txq); |
2867 | __this_cpu_dec(xmit_recursion); | 2861 | __this_cpu_dec(xmit_recursion); |
2868 | if (dev_xmit_complete(rc)) { | 2862 | if (dev_xmit_complete(rc)) { |
2869 | HARD_TX_UNLOCK(dev, txq); | 2863 | HARD_TX_UNLOCK(dev, txq); |
@@ -2892,8 +2886,19 @@ out: | |||
2892 | rcu_read_unlock_bh(); | 2886 | rcu_read_unlock_bh(); |
2893 | return rc; | 2887 | return rc; |
2894 | } | 2888 | } |
2889 | |||
2890 | int dev_queue_xmit(struct sk_buff *skb) | ||
2891 | { | ||
2892 | return __dev_queue_xmit(skb, NULL); | ||
2893 | } | ||
2895 | EXPORT_SYMBOL(dev_queue_xmit); | 2894 | EXPORT_SYMBOL(dev_queue_xmit); |
2896 | 2895 | ||
2896 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) | ||
2897 | { | ||
2898 | return __dev_queue_xmit(skb, accel_priv); | ||
2899 | } | ||
2900 | EXPORT_SYMBOL(dev_queue_xmit_accel); | ||
2901 | |||
2897 | 2902 | ||
2898 | /*======================================================================= | 2903 | /*======================================================================= |
2899 | Receiver routines | 2904 | Receiver routines |
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, | |||
4500 | { | 4505 | { |
4501 | struct netdev_adjacent *upper; | 4506 | struct netdev_adjacent *upper; |
4502 | 4507 | ||
4503 | WARN_ON_ONCE(!rcu_read_lock_held()); | 4508 | WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); |
4504 | 4509 | ||
4505 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); | 4510 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); |
4506 | 4511 | ||
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 95897183226e..e70301eb7a4a 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = { | |||
64 | .hdrsize = 0, | 64 | .hdrsize = 0, |
65 | .name = "NET_DM", | 65 | .name = "NET_DM", |
66 | .version = 2, | 66 | .version = 2, |
67 | .maxattr = NET_DM_CMD_MAX, | ||
68 | }; | 67 | }; |
69 | 68 | ||
70 | static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); | 69 | static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d6ef17322500..2fc5beaf5783 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | |||
395 | EXPORT_SYMBOL(__netdev_pick_tx); | 395 | EXPORT_SYMBOL(__netdev_pick_tx); |
396 | 396 | ||
397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
398 | struct sk_buff *skb) | 398 | struct sk_buff *skb, |
399 | void *accel_priv) | ||
399 | { | 400 | { |
400 | int queue_index = 0; | 401 | int queue_index = 0; |
401 | 402 | ||
402 | if (dev->real_num_tx_queues != 1) { | 403 | if (dev->real_num_tx_queues != 1) { |
403 | const struct net_device_ops *ops = dev->netdev_ops; | 404 | const struct net_device_ops *ops = dev->netdev_ops; |
404 | if (ops->ndo_select_queue) | 405 | if (ops->ndo_select_queue) |
405 | queue_index = ops->ndo_select_queue(dev, skb); | 406 | queue_index = ops->ndo_select_queue(dev, skb, |
407 | accel_priv); | ||
406 | else | 408 | else |
407 | queue_index = __netdev_pick_tx(dev, skb); | 409 | queue_index = __netdev_pick_tx(dev, skb); |
408 | queue_index = dev_cap_txqueue(dev, queue_index); | 410 | |
411 | if (!accel_priv) | ||
412 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
409 | } | 413 | } |
410 | 414 | ||
411 | skb_set_queue_mapping(skb, queue_index); | 415 | skb_set_queue_mapping(skb, queue_index); |
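With netdev_pick_tx() now forwarding accel_priv, the ndo_select_queue driver hook gains a third parameter and, for accelerated frames, the chosen index is no longer clamped by dev_cap_txqueue(). A minimal sketch of a driver callback under the new signature; the driver and its queue policy are hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical .ndo_select_queue implementation, illustrating only the
 * new three-argument signature. A real driver would map accel_priv to
 * the tx queues it reserved for that forwarding handle.
 */
static u16 sketch_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv)
{
	if (accel_priv)
		return 0;	/* placeholder: first queue of the offload set */

	return __netdev_pick_tx(dev, skb);
}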
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca15f32821fb..932c6d7cf666 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1161 | neigh->parms->reachable_time : | 1161 | neigh->parms->reachable_time : |
1162 | 0))); | 1162 | 0))); |
1163 | neigh->nud_state = new; | 1163 | neigh->nud_state = new; |
1164 | notify = 1; | ||
1164 | } | 1165 | } |
1165 | 1166 | ||
1166 | if (lladdr != neigh->ha) { | 1167 | if (lladdr != neigh->ha) { |
@@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb) | |||
1274 | 1275 | ||
1275 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, | 1276 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, |
1276 | skb->len) < 0 && | 1277 | skb->len) < 0 && |
1277 | dev->header_ops->rebuild(skb)) | 1278 | dev_rebuild_header(skb)) |
1278 | return 0; | 1279 | return 0; |
1279 | 1280 | ||
1280 | return dev_queue_xmit(skb); | 1281 | return dev_queue_xmit(skb); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8f971990677c..19fe9c717ced 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
376 | struct netdev_queue *txq; | 376 | struct netdev_queue *txq; |
377 | 377 | ||
378 | txq = netdev_pick_tx(dev, skb); | 378 | txq = netdev_pick_tx(dev, skb, NULL); |
379 | 379 | ||
380 | /* try until next clock tick */ | 380 | /* try until next clock tick */ |
381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
386 | !vlan_hw_offload_capable(netif_skb_features(skb), | 386 | !vlan_hw_offload_capable(netif_skb_features(skb), |
387 | skb->vlan_proto)) { | 387 | skb->vlan_proto)) { |
388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); | 388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); |
389 | if (unlikely(!skb)) | 389 | if (unlikely(!skb)) { |
390 | break; | 390 | /* This is actually a packet drop, but we |
391 | * don't want the code at the end of this | ||
392 | * function to try and re-queue a NULL skb. | ||
393 | */ | ||
394 | status = NETDEV_TX_OK; | ||
395 | goto unlock_txq; | ||
396 | } | ||
391 | skb->vlan_tci = 0; | 397 | skb->vlan_tci = 0; |
392 | } | 398 | } |
393 | 399 | ||
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
395 | if (status == NETDEV_TX_OK) | 401 | if (status == NETDEV_TX_OK) |
396 | txq_trans_update(txq); | 402 | txq_trans_update(txq); |
397 | } | 403 | } |
404 | unlock_txq: | ||
398 | __netif_tx_unlock(txq); | 405 | __netif_tx_unlock(txq); |
399 | 406 | ||
400 | if (status == NETDEV_TX_OK) | 407 | if (status == NETDEV_TX_OK) |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2718fed53d8c..06e72d3cdf60 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) | |||
3584 | skb->tstamp.tv64 = 0; | 3584 | skb->tstamp.tv64 = 0; |
3585 | skb->pkt_type = PACKET_HOST; | 3585 | skb->pkt_type = PACKET_HOST; |
3586 | skb->skb_iif = 0; | 3586 | skb->skb_iif = 0; |
3587 | skb->local_df = 0; | ||
3587 | skb_dst_drop(skb); | 3588 | skb_dst_drop(skb); |
3588 | skb->mark = 0; | 3589 | skb->mark = 0; |
3589 | secpath_reset(skb); | 3590 | secpath_reset(skb); |
diff --git a/net/core/sock.c b/net/core/sock.c index ab20ed9b0f31..5393b4b719d7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -882,7 +882,7 @@ set_rcvbuf: | |||
882 | 882 | ||
883 | case SO_PEEK_OFF: | 883 | case SO_PEEK_OFF: |
884 | if (sock->ops->set_peek_off) | 884 | if (sock->ops->set_peek_off) |
885 | sock->ops->set_peek_off(sk, val); | 885 | ret = sock->ops->set_peek_off(sk, val); |
886 | else | 886 | else |
887 | ret = -EOPNOTSUPP; | 887 | ret = -EOPNOTSUPP; |
888 | break; | 888 | break; |
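The sock.c hunk makes SO_PEEK_OFF honest: the return value of the protocol's set_peek_off() handler is now propagated, so an offset the protocol rejects fails the setsockopt() call instead of being silently accepted. Seen from userspace only the error path changes; a small usage sketch (assumes Linux headers that define SO_PEEK_OFF):

        #include <stdio.h>
        #include <sys/socket.h>

        static int set_peek_offset(int fd, int off)
        {
                /* With the fix, a rejected offset surfaces here as -1/errno
                 * rather than a silent success. */
                if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0) {
                        perror("setsockopt(SO_PEEK_OFF)");
                        return -1;
                }
                return 0;
        }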
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4ac71ff7c2e4..2b90a786e475 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -851,7 +851,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
851 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 851 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
852 | if (flowlabel == NULL) | 852 | if (flowlabel == NULL) |
853 | return -EINVAL; | 853 | return -EINVAL; |
854 | usin->sin6_addr = flowlabel->dst; | ||
855 | fl6_sock_release(flowlabel); | 854 | fl6_sock_release(flowlabel); |
856 | } | 855 | } |
857 | } | 856 | } |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 4c6bdf97a657..595ddf0459db 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = { | |||
152 | .llseek = noop_llseek, | 152 | .llseek = noop_llseek, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static __init int setup_jprobe(void) | ||
156 | { | ||
157 | int ret = register_jprobe(&dccp_send_probe); | ||
158 | |||
159 | if (ret) { | ||
160 | request_module("dccp"); | ||
161 | ret = register_jprobe(&dccp_send_probe); | ||
162 | } | ||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | static __init int dccpprobe_init(void) | 155 | static __init int dccpprobe_init(void) |
167 | { | 156 | { |
168 | int ret = -ENOMEM; | 157 | int ret = -ENOMEM; |
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void) | |||
174 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) | 163 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) |
175 | goto err0; | 164 | goto err0; |
176 | 165 | ||
177 | ret = setup_jprobe(); | 166 | ret = register_jprobe(&dccp_send_probe); |
167 | if (ret) { | ||
168 | ret = request_module("dccp"); | ||
169 | if (!ret) | ||
170 | ret = register_jprobe(&dccp_send_probe); | ||
171 | } | ||
172 | |||
178 | if (ret) | 173 | if (ret) |
179 | goto err1; | 174 | goto err1; |
180 | 175 | ||
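The dccp probe rework drops the setup_jprobe() wrapper and, more importantly, checks the return value of request_module() before the second register_jprobe() attempt, so a failed module load propagates as an error instead of being masked by the retry. A condensed sketch of the try / load-dependency / retry shape, with hypothetical try_attach()/load_dep() stand-ins for register_jprobe()/request_module():

        #include <errno.h>

        static int try_attach(void) { return -ENOENT; }  /* stand-in, always "missing" */
        static int load_dep(void)   { return 0; }        /* stand-in, always succeeds */

        static int attach_with_fallback(void)
        {
                int ret = try_attach();

                if (ret) {
                        ret = load_dep();       /* only retry if the dependency loaded */
                        if (!ret)
                                ret = try_attach();
                }
                return ret;                     /* 0 on success, negative error otherwise */
        }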
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 459e200c08a4..a2d2456a557a 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c | |||
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb, | |||
547 | hc06_ptr += 3; | 547 | hc06_ptr += 3; |
548 | } else { | 548 | } else { |
549 | /* compress nothing */ | 549 | /* compress nothing */ |
550 | memcpy(hc06_ptr, &hdr, 4); | 550 | memcpy(hc06_ptr, hdr, 4); |
551 | /* replace the top byte with new ECN | DSCP format */ | 551 | /* replace the top byte with new ECN | DSCP format */ |
552 | *hc06_ptr = tmp; | 552 | *hc06_ptr = tmp; |
553 | hc06_ptr += 4; | 553 | hc06_ptr += 4; |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 523be38e37de..f2e15738534d 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -104,7 +104,10 @@ errout: | |||
104 | static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) | 104 | static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) |
105 | { | 105 | { |
106 | struct fib_result *result = (struct fib_result *) arg->result; | 106 | struct fib_result *result = (struct fib_result *) arg->result; |
107 | struct net_device *dev = result->fi->fib_dev; | 107 | struct net_device *dev = NULL; |
108 | |||
109 | if (result->fi) | ||
110 | dev = result->fi->fib_dev; | ||
108 | 111 | ||
109 | /* do not accept result if the route does | 112 | /* do not accept result if the route does |
110 | * not meet the required prefix length | 113 | * not meet the required prefix length |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index e5d436188464..2cd02f32f99f 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
28 | netdev_features_t enc_features; | 28 | netdev_features_t enc_features; |
29 | int ghl = GRE_HEADER_SECTION; | 29 | int ghl = GRE_HEADER_SECTION; |
30 | struct gre_base_hdr *greh; | 30 | struct gre_base_hdr *greh; |
31 | u16 mac_offset = skb->mac_header; | ||
31 | int mac_len = skb->mac_len; | 32 | int mac_len = skb->mac_len; |
32 | __be16 protocol = skb->protocol; | 33 | __be16 protocol = skb->protocol; |
33 | int tnl_hlen; | 34 | int tnl_hlen; |
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
58 | } else | 59 | } else |
59 | csum = false; | 60 | csum = false; |
60 | 61 | ||
62 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
63 | goto out; | ||
64 | |||
61 | /* setup inner skb. */ | 65 | /* setup inner skb. */ |
62 | skb->protocol = greh->protocol; | 66 | skb->protocol = greh->protocol; |
63 | skb->encapsulation = 0; | 67 | skb->encapsulation = 0; |
64 | 68 | ||
65 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
66 | goto out; | ||
67 | |||
68 | __skb_pull(skb, ghl); | 69 | __skb_pull(skb, ghl); |
69 | skb_reset_mac_header(skb); | 70 | skb_reset_mac_header(skb); |
70 | skb_set_network_header(skb, skb_inner_network_offset(skb)); | 71 | skb_set_network_header(skb, skb_inner_network_offset(skb)); |
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
73 | /* segment inner packet. */ | 74 | /* segment inner packet. */ |
74 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 75 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
75 | segs = skb_mac_gso_segment(skb, enc_features); | 76 | segs = skb_mac_gso_segment(skb, enc_features); |
76 | if (!segs || IS_ERR(segs)) | 77 | if (!segs || IS_ERR(segs)) { |
78 | skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); | ||
77 | goto out; | 79 | goto out; |
80 | } | ||
78 | 81 | ||
79 | skb = segs; | 82 | skb = segs; |
80 | tnl_hlen = skb_tnl_header_len(skb); | 83 | tnl_hlen = skb_tnl_header_len(skb); |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 56a964a553d2..a0f52dac8940 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
106 | 106 | ||
107 | r->id.idiag_sport = inet->inet_sport; | 107 | r->id.idiag_sport = inet->inet_sport; |
108 | r->id.idiag_dport = inet->inet_dport; | 108 | r->id.idiag_dport = inet->inet_dport; |
109 | |||
110 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
111 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
112 | |||
109 | r->id.idiag_src[0] = inet->inet_rcv_saddr; | 113 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
110 | r->id.idiag_dst[0] = inet->inet_daddr; | 114 | r->id.idiag_dst[0] = inet->inet_daddr; |
111 | 115 | ||
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | |||
240 | 244 | ||
241 | r->idiag_family = tw->tw_family; | 245 | r->idiag_family = tw->tw_family; |
242 | r->idiag_retrans = 0; | 246 | r->idiag_retrans = 0; |
247 | |||
243 | r->id.idiag_if = tw->tw_bound_dev_if; | 248 | r->id.idiag_if = tw->tw_bound_dev_if; |
244 | sock_diag_save_cookie(tw, r->id.idiag_cookie); | 249 | sock_diag_save_cookie(tw, r->id.idiag_cookie); |
250 | |||
245 | r->id.idiag_sport = tw->tw_sport; | 251 | r->id.idiag_sport = tw->tw_sport; |
246 | r->id.idiag_dport = tw->tw_dport; | 252 | r->id.idiag_dport = tw->tw_dport; |
253 | |||
254 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
255 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
256 | |||
247 | r->id.idiag_src[0] = tw->tw_rcv_saddr; | 257 | r->id.idiag_src[0] = tw->tw_rcv_saddr; |
248 | r->id.idiag_dst[0] = tw->tw_daddr; | 258 | r->id.idiag_dst[0] = tw->tw_daddr; |
259 | |||
249 | r->idiag_state = tw->tw_substate; | 260 | r->idiag_state = tw->tw_substate; |
250 | r->idiag_timer = 3; | 261 | r->idiag_timer = 3; |
251 | r->idiag_expires = jiffies_to_msecs(tmo); | 262 | r->idiag_expires = jiffies_to_msecs(tmo); |
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
726 | 737 | ||
727 | r->id.idiag_sport = inet->inet_sport; | 738 | r->id.idiag_sport = inet->inet_sport; |
728 | r->id.idiag_dport = ireq->ir_rmt_port; | 739 | r->id.idiag_dport = ireq->ir_rmt_port; |
740 | |||
741 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
742 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
743 | |||
729 | r->id.idiag_src[0] = ireq->ir_loc_addr; | 744 | r->id.idiag_src[0] = ireq->ir_loc_addr; |
730 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; | 745 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; |
746 | |||
731 | r->idiag_expires = jiffies_to_msecs(tmo); | 747 | r->idiag_expires = jiffies_to_msecs(tmo); |
732 | r->idiag_rqueue = 0; | 748 | r->idiag_rqueue = 0; |
733 | r->idiag_wqueue = 0; | 749 | r->idiag_wqueue = 0; |
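All three inet_diag hunks fix the same pattern: idiag_src and idiag_dst are 4 x __u32 fields sized for IPv6 addresses, but the IPv4 paths only wrote the first word, so the remaining 12 bytes of stale kernel memory went out to userspace in the netlink reply. Zeroing the whole field before the partial assignment closes the leak. A minimal sketch of the pattern, with a hypothetical reply struct:

        #include <stdint.h>
        #include <string.h>

        struct diag_reply {
                uint32_t src[4];        /* sized for IPv6, IPv4 fills src[0] only */
                uint32_t dst[4];
        };

        static void fill_v4(struct diag_reply *r, uint32_t saddr, uint32_t daddr)
        {
                /* Zero the full arrays first so the unused 12 bytes never carry
                 * stale memory out to the reader. */
                memset(r->src, 0, sizeof(r->src));
                memset(r->dst, 0, sizeof(r->dst));
                r->src[0] = saddr;
                r->dst[0] = daddr;
        }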
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d7aea4c5b940..e560ef34cf4b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) | |||
217 | iph->saddr, iph->daddr, tpi->key); | 217 | iph->saddr, iph->daddr, tpi->key); |
218 | 218 | ||
219 | if (tunnel) { | 219 | if (tunnel) { |
220 | skb_pop_mac_header(skb); | ||
220 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); | 221 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); |
221 | return PACKET_RCVD; | 222 | return PACKET_RCVD; |
222 | } | 223 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 912402752f2f..df184616493f 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk, | |||
828 | 828 | ||
829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { | 829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { |
830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, | 830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
831 | mtu-exthdrlen); | 831 | mtu - (opt ? opt->optlen : 0)); |
832 | return -EMSGSIZE; | 832 | return -EMSGSIZE; |
833 | } | 833 | } |
834 | 834 | ||
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, | |||
1151 | mtu : 0xFFFF; | 1151 | mtu : 0xFFFF; |
1152 | 1152 | ||
1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { | 1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { |
1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu); | 1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
1155 | mtu - (opt ? opt->optlen : 0)); | ||
1155 | return -EMSGSIZE; | 1156 | return -EMSGSIZE; |
1156 | } | 1157 | } |
1157 | 1158 | ||
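Both ip_output.c hunks adjust the MTU value reported alongside EMSGSIZE: the error now carries mtu minus the IP option length (rather than mtu - exthdrlen, or the raw mtu in ip_append_page), which is the datagram size the caller can actually retry with once the stack re-adds its options. Illustrative numbers:

        reported = mtu - optlen = 1500 - 8 = 1492

so an application that resizes its next send to the reported value still fits on a 1500-byte link carrying 8 bytes of IP options, whereas the old value did not account for the options at all.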
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f13bd91d9a56..a313c3fbeb46 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) | |||
423 | static struct xt_target synproxy_tg4_reg __read_mostly = { | 423 | static struct xt_target synproxy_tg4_reg __read_mostly = { |
424 | .name = "SYNPROXY", | 424 | .name = "SYNPROXY", |
425 | .family = NFPROTO_IPV4, | 425 | .family = NFPROTO_IPV4, |
426 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
426 | .target = synproxy_tg4, | 427 | .target = synproxy_tg4, |
427 | .targetsize = sizeof(struct xt_synproxy_info), | 428 | .targetsize = sizeof(struct xt_synproxy_info), |
428 | .checkentry = synproxy_tg4_check, | 429 | .checkentry = synproxy_tg4_check, |
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index fff5ba1a33b7..4a5e94ac314a 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c | |||
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
72 | { | 72 | { |
73 | const struct nft_reject *priv = nft_expr_priv(expr); | 73 | const struct nft_reject *priv = nft_expr_priv(expr); |
74 | 74 | ||
75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) | 75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) |
76 | goto nla_put_failure; | 76 | goto nla_put_failure; |
77 | 77 | ||
78 | switch (priv->type) { | 78 | switch (priv->type) { |
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 269a89ecd2f4..f7e522c558ba 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c | |||
@@ -6,13 +6,6 @@ | |||
6 | #include <linux/memcontrol.h> | 6 | #include <linux/memcontrol.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | 8 | ||
9 | static void memcg_tcp_enter_memory_pressure(struct sock *sk) | ||
10 | { | ||
11 | if (sk->sk_cgrp->memory_pressure) | ||
12 | sk->sk_cgrp->memory_pressure = 1; | ||
13 | } | ||
14 | EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); | ||
15 | |||
16 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | 9 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) |
17 | { | 10 | { |
18 | /* | 11 | /* |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 44f6a20fa29d..a7e4729e974b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | |||
560 | __be16 sport, __be16 dport, | 560 | __be16 sport, __be16 dport, |
561 | struct udp_table *udptable) | 561 | struct udp_table *udptable) |
562 | { | 562 | { |
563 | struct sock *sk; | ||
564 | const struct iphdr *iph = ip_hdr(skb); | 563 | const struct iphdr *iph = ip_hdr(skb); |
565 | 564 | ||
566 | if (unlikely(sk = skb_steal_sock(skb))) | 565 | return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, |
567 | return sk; | 566 | iph->daddr, dport, inet_iif(skb), |
568 | else | 567 | udptable); |
569 | return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, | ||
570 | iph->daddr, dport, inet_iif(skb), | ||
571 | udptable); | ||
572 | } | 568 | } |
573 | 569 | ||
574 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, | 570 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, |
@@ -1603,12 +1599,16 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
1603 | kfree_skb(skb1); | 1599 | kfree_skb(skb1); |
1604 | } | 1600 | } |
1605 | 1601 | ||
1606 | static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | 1602 | /* For TCP sockets, sk_rx_dst is protected by socket lock |
1603 | * For UDP, we use xchg() to guard against concurrent changes. | ||
1604 | */ | ||
1605 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | ||
1607 | { | 1606 | { |
1608 | struct dst_entry *dst = skb_dst(skb); | 1607 | struct dst_entry *old; |
1609 | 1608 | ||
1610 | dst_hold(dst); | 1609 | dst_hold(dst); |
1611 | sk->sk_rx_dst = dst; | 1610 | old = xchg(&sk->sk_rx_dst, dst); |
1611 | dst_release(old); | ||
1612 | } | 1612 | } |
1613 | 1613 | ||
1614 | /* | 1614 | /* |
@@ -1739,15 +1739,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1739 | if (udp4_csum_init(skb, uh, proto)) | 1739 | if (udp4_csum_init(skb, uh, proto)) |
1740 | goto csum_error; | 1740 | goto csum_error; |
1741 | 1741 | ||
1742 | if (skb->sk) { | 1742 | sk = skb_steal_sock(skb); |
1743 | if (sk) { | ||
1744 | struct dst_entry *dst = skb_dst(skb); | ||
1743 | int ret; | 1745 | int ret; |
1744 | sk = skb->sk; | ||
1745 | 1746 | ||
1746 | if (unlikely(sk->sk_rx_dst == NULL)) | 1747 | if (unlikely(sk->sk_rx_dst != dst)) |
1747 | udp_sk_rx_dst_set(sk, skb); | 1748 | udp_sk_rx_dst_set(sk, dst); |
1748 | 1749 | ||
1749 | ret = udp_queue_rcv_skb(sk, skb); | 1750 | ret = udp_queue_rcv_skb(sk, skb); |
1750 | 1751 | sock_put(sk); | |
1751 | /* a return value > 0 means to resubmit the input, but | 1752 | /* a return value > 0 means to resubmit the input, but |
1752 | * it wants the return to be -protocol, or 0 | 1753 | * it wants the return to be -protocol, or 0 |
1753 | */ | 1754 | */ |
@@ -1913,17 +1914,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, | |||
1913 | 1914 | ||
1914 | void udp_v4_early_demux(struct sk_buff *skb) | 1915 | void udp_v4_early_demux(struct sk_buff *skb) |
1915 | { | 1916 | { |
1916 | const struct iphdr *iph = ip_hdr(skb); | 1917 | struct net *net = dev_net(skb->dev); |
1917 | const struct udphdr *uh = udp_hdr(skb); | 1918 | const struct iphdr *iph; |
1919 | const struct udphdr *uh; | ||
1918 | struct sock *sk; | 1920 | struct sock *sk; |
1919 | struct dst_entry *dst; | 1921 | struct dst_entry *dst; |
1920 | struct net *net = dev_net(skb->dev); | ||
1921 | int dif = skb->dev->ifindex; | 1922 | int dif = skb->dev->ifindex; |
1922 | 1923 | ||
1923 | /* validate the packet */ | 1924 | /* validate the packet */ |
1924 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) | 1925 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) |
1925 | return; | 1926 | return; |
1926 | 1927 | ||
1928 | iph = ip_hdr(skb); | ||
1929 | uh = udp_hdr(skb); | ||
1930 | |||
1927 | if (skb->pkt_type == PACKET_BROADCAST || | 1931 | if (skb->pkt_type == PACKET_BROADCAST || |
1928 | skb->pkt_type == PACKET_MULTICAST) | 1932 | skb->pkt_type == PACKET_MULTICAST) |
1929 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, | 1933 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, |
@@ -2474,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
2474 | netdev_features_t features) | 2478 | netdev_features_t features) |
2475 | { | 2479 | { |
2476 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 2480 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
2481 | u16 mac_offset = skb->mac_header; | ||
2477 | int mac_len = skb->mac_len; | 2482 | int mac_len = skb->mac_len; |
2478 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); | 2483 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); |
2479 | __be16 protocol = skb->protocol; | 2484 | __be16 protocol = skb->protocol; |
@@ -2493,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
2493 | /* segment inner packet. */ | 2498 | /* segment inner packet. */ |
2494 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 2499 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
2495 | segs = skb_mac_gso_segment(skb, enc_features); | 2500 | segs = skb_mac_gso_segment(skb, enc_features); |
2496 | if (!segs || IS_ERR(segs)) | 2501 | if (!segs || IS_ERR(segs)) { |
2502 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, | ||
2503 | mac_len); | ||
2497 | goto out; | 2504 | goto out; |
2505 | } | ||
2498 | 2506 | ||
2499 | outer_hlen = skb_tnl_header_len(skb); | 2507 | outer_hlen = skb_tnl_header_len(skb); |
2500 | skb = segs; | 2508 | skb = segs; |
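Several things change in this udp.c section. __udp4_lib_rcv() now uses skb_steal_sock(), which hands over the socket reference (hence the added sock_put()), and refreshes the cached route whenever sk_rx_dst no longer matches the skb's dst instead of only when it was NULL. udp_sk_rx_dst_set() itself swaps the new entry in with xchg() and releases the old one because, as the new comment notes, UDP holds no socket lock on this path the way TCP does. The early-demux hunk likewise re-reads the IP/UDP header pointers only after pskb_may_pull(), which may reallocate the header area. A sketch of the swap-and-release idea in portable C11 atomics (illustrative only, not the kernel helpers, with a trivially simplified dst_put()):

        #include <stdatomic.h>
        #include <stdlib.h>

        struct dst { int id; };

        static void dst_put(struct dst *d) { free(d); }  /* free(NULL) is a no-op */

        struct cache { _Atomic(struct dst *) rx_dst; };

        static void cache_set(struct cache *c, struct dst *fresh)
        {
                /* Publish the new entry and release exactly the one that was
                 * there before, even if two CPUs race on this function. */
                struct dst *old = atomic_exchange(&c->rx_dst, fresh);
                dst_put(old);
        }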
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 83206de2bc76..79c62bdcd3c5 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
41 | { | 41 | { |
42 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 42 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
43 | unsigned int mss; | 43 | unsigned int mss; |
44 | int offset; | ||
45 | __wsum csum; | ||
46 | |||
47 | if (skb->encapsulation && | ||
48 | skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) { | ||
49 | segs = skb_udp_tunnel_segment(skb, features); | ||
50 | goto out; | ||
51 | } | ||
44 | 52 | ||
45 | mss = skb_shinfo(skb)->gso_size; | 53 | mss = skb_shinfo(skb)->gso_size; |
46 | if (unlikely(skb->len <= mss)) | 54 | if (unlikely(skb->len <= mss)) |
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
63 | goto out; | 71 | goto out; |
64 | } | 72 | } |
65 | 73 | ||
74 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
75 | * HW cannot do checksum of UDP packets sent as multiple | ||
76 | * IP fragments. | ||
77 | */ | ||
78 | offset = skb_checksum_start_offset(skb); | ||
79 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
80 | offset += skb->csum_offset; | ||
81 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
82 | skb->ip_summed = CHECKSUM_NONE; | ||
83 | |||
66 | /* Fragment the skb. IP headers of the fragments are updated in | 84 | /* Fragment the skb. IP headers of the fragments are updated in |
67 | * inet_gso_segment() | 85 | * inet_gso_segment() |
68 | */ | 86 | */ |
69 | if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) | 87 | segs = skb_segment(skb, features); |
70 | segs = skb_udp_tunnel_segment(skb, features); | ||
71 | else { | ||
72 | int offset; | ||
73 | __wsum csum; | ||
74 | |||
75 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
76 | * HW cannot do checksum of UDP packets sent as multiple | ||
77 | * IP fragments. | ||
78 | */ | ||
79 | offset = skb_checksum_start_offset(skb); | ||
80 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
81 | offset += skb->csum_offset; | ||
82 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
83 | skb->ip_summed = CHECKSUM_NONE; | ||
84 | |||
85 | segs = skb_segment(skb, features); | ||
86 | } | ||
87 | out: | 88 | out: |
88 | return segs; | 89 | return segs; |
89 | } | 90 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 12c97d8aa6bb..abe46a4228ce 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1672 | { | 1672 | { |
1673 | struct in6_addr addr; | 1673 | struct in6_addr addr; |
1674 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1674 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1675 | return; | 1675 | return; |
1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1677 | if (ipv6_addr_any(&addr)) | 1677 | if (ipv6_addr_any(&addr)) |
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
1683 | { | 1683 | { |
1684 | struct in6_addr addr; | 1684 | struct in6_addr addr; |
1685 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1685 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1686 | return; | 1686 | return; |
1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1688 | if (ipv6_addr_any(&addr)) | 1688 | if (ipv6_addr_any(&addr)) |
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
2509 | struct inet6_ifaddr *ifp; | 2509 | struct inet6_ifaddr *ifp; |
2510 | 2510 | ||
2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, | 2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, |
2512 | scope, IFA_F_PERMANENT, 0, 0); | 2512 | scope, IFA_F_PERMANENT, |
2513 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
2513 | if (!IS_ERR(ifp)) { | 2514 | if (!IS_ERR(ifp)) { |
2514 | spin_lock_bh(&ifp->lock); | 2515 | spin_lock_bh(&ifp->lock); |
2515 | ifp->flags &= ~IFA_F_TENTATIVE; | 2516 | ifp->flags &= ~IFA_F_TENTATIVE; |
@@ -2613,7 +2614,7 @@ static void init_loopback(struct net_device *dev) | |||
2613 | if (sp_ifa->rt) | 2614 | if (sp_ifa->rt) |
2614 | continue; | 2615 | continue; |
2615 | 2616 | ||
2616 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); | 2617 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false); |
2617 | 2618 | ||
2618 | /* Failure cases are ignored */ | 2619 | /* Failure cases are ignored */ |
2619 | if (!IS_ERR(sp_rt)) { | 2620 | if (!IS_ERR(sp_rt)) { |
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr | |||
2637 | #endif | 2638 | #endif |
2638 | 2639 | ||
2639 | 2640 | ||
2640 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0); | 2641 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, |
2642 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
2641 | if (!IS_ERR(ifp)) { | 2643 | if (!IS_ERR(ifp)) { |
2642 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); | 2644 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); |
2643 | addrconf_dad_start(ifp); | 2645 | addrconf_dad_start(ifp); |
@@ -3456,7 +3458,12 @@ restart: | |||
3456 | &inet6_addr_lst[i], addr_lst) { | 3458 | &inet6_addr_lst[i], addr_lst) { |
3457 | unsigned long age; | 3459 | unsigned long age; |
3458 | 3460 | ||
3459 | if (ifp->flags & IFA_F_PERMANENT) | 3461 | /* When setting preferred_lft to a value not zero or |
3462 | * infinity, while valid_lft is infinity | ||
3463 | * IFA_F_PERMANENT has a non-infinity life time. | ||
3464 | */ | ||
3465 | if ((ifp->flags & IFA_F_PERMANENT) && | ||
3466 | (ifp->prefered_lft == INFINITY_LIFE_TIME)) | ||
3460 | continue; | 3467 | continue; |
3461 | 3468 | ||
3462 | spin_lock(&ifp->lock); | 3469 | spin_lock(&ifp->lock); |
@@ -3481,7 +3488,8 @@ restart: | |||
3481 | ifp->flags |= IFA_F_DEPRECATED; | 3488 | ifp->flags |= IFA_F_DEPRECATED; |
3482 | } | 3489 | } |
3483 | 3490 | ||
3484 | if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)) | 3491 | if ((ifp->valid_lft != INFINITY_LIFE_TIME) && |
3492 | (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))) | ||
3485 | next = ifp->tstamp + ifp->valid_lft * HZ; | 3493 | next = ifp->tstamp + ifp->valid_lft * HZ; |
3486 | 3494 | ||
3487 | spin_unlock(&ifp->lock); | 3495 | spin_unlock(&ifp->lock); |
@@ -3761,7 +3769,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, | |||
3761 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), | 3769 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), |
3762 | ifa->idev->dev->ifindex); | 3770 | ifa->idev->dev->ifindex); |
3763 | 3771 | ||
3764 | if (!(ifa->flags&IFA_F_PERMANENT)) { | 3772 | if (!((ifa->flags&IFA_F_PERMANENT) && |
3773 | (ifa->prefered_lft == INFINITY_LIFE_TIME))) { | ||
3765 | preferred = ifa->prefered_lft; | 3774 | preferred = ifa->prefered_lft; |
3766 | valid = ifa->valid_lft; | 3775 | valid = ifa->valid_lft; |
3767 | if (preferred != INFINITY_LIFE_TIME) { | 3776 | if (preferred != INFINITY_LIFE_TIME) { |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8dfe1f4d3c1a..93b1aa34c432 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
73 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 73 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
74 | if (flowlabel == NULL) | 74 | if (flowlabel == NULL) |
75 | return -EINVAL; | 75 | return -EINVAL; |
76 | usin->sin6_addr = flowlabel->dst; | ||
77 | } | 76 | } |
78 | } | 77 | } |
79 | 78 | ||
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index e27591635f92..3fd0a578329e 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -122,7 +122,11 @@ out: | |||
122 | static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) | 122 | static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) |
123 | { | 123 | { |
124 | struct rt6_info *rt = (struct rt6_info *) arg->result; | 124 | struct rt6_info *rt = (struct rt6_info *) arg->result; |
125 | struct net_device *dev = rt->rt6i_idev->dev; | 125 | struct net_device *dev = NULL; |
126 | |||
127 | if (rt->rt6i_idev) | ||
128 | dev = rt->rt6i_idev->dev; | ||
129 | |||
126 | /* do not accept result if the route does | 130 | /* do not accept result if the route does |
127 | * not meet the required prefix length | 131 | * not meet the required prefix length |
128 | */ | 132 | */ |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4acdb63495db..e6f931997996 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1193 | 1193 | ||
1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + | 1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + |
1195 | (opt ? opt->opt_nflen : 0); | 1195 | (opt ? opt->opt_nflen : 0); |
1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); | 1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - |
1197 | sizeof(struct frag_hdr); | ||
1197 | 1198 | ||
1198 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { | 1199 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { |
1199 | if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { | 1200 | unsigned int maxnonfragsize, headersize; |
1200 | ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); | 1201 | |
1202 | headersize = sizeof(struct ipv6hdr) + | ||
1203 | (opt ? opt->tot_len : 0) + | ||
1204 | (dst_allfrag(&rt->dst) ? | ||
1205 | sizeof(struct frag_hdr) : 0) + | ||
1206 | rt->rt6i_nfheader_len; | ||
1207 | |||
1208 | maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ? | ||
1209 | mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN; | ||
1210 | |||
1211 | /* dontfrag active */ | ||
1212 | if ((cork->length + length > mtu - headersize) && dontfrag && | ||
1213 | (sk->sk_protocol == IPPROTO_UDP || | ||
1214 | sk->sk_protocol == IPPROTO_RAW)) { | ||
1215 | ipv6_local_rxpmtu(sk, fl6, mtu - headersize + | ||
1216 | sizeof(struct ipv6hdr)); | ||
1217 | goto emsgsize; | ||
1218 | } | ||
1219 | |||
1220 | if (cork->length + length > maxnonfragsize - headersize) { | ||
1221 | emsgsize: | ||
1222 | ipv6_local_error(sk, EMSGSIZE, fl6, | ||
1223 | mtu - headersize + | ||
1224 | sizeof(struct ipv6hdr)); | ||
1201 | return -EMSGSIZE; | 1225 | return -EMSGSIZE; |
1202 | } | 1226 | } |
1203 | } | 1227 | } |
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1222 | * --yoshfuji | 1246 | * --yoshfuji |
1223 | */ | 1247 | */ |
1224 | 1248 | ||
1225 | if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || | ||
1226 | sk->sk_protocol == IPPROTO_RAW)) { | ||
1227 | ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); | ||
1228 | return -EMSGSIZE; | ||
1229 | } | ||
1230 | |||
1231 | skb = skb_peek_tail(&sk->sk_write_queue); | 1249 | skb = skb_peek_tail(&sk->sk_write_queue); |
1232 | cork->length += length; | 1250 | cork->length += length; |
1233 | if (((length > mtu) || | 1251 | if (((length > mtu) || |
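The reworked ip6_append_data() check computes an explicit headersize - the base IPv6 header, any IPv6 options (opt->tot_len), a fragment header when dst_allfrag() forces one, plus rt6i_nfheader_len - and compares cork->length + length against maxnonfragsize - headersize, where maxnonfragsize is the MTU with PMTU discovery on and 40 + 65535 otherwise. The EMSGSIZE / RXPMTU value handed back is mtu - headersize + sizeof(struct ipv6hdr). Illustrative numbers, assuming an 8-byte destination options header and no other extras:

        headersize      = 40 (ipv6hdr) + 8 (options) = 48
        payload ceiling = 1500 - 48                  = 1452   (PMTU discovery on, mtu 1500)
        reported value  = 1500 - 48 + 40             = 1492

The old code reported mtu - exthdrlen, which ignored the options, and the separate dontfrag check it replaces only looked at the current send rather than the cork's accumulated length.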
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d6062325db08..7881965a8248 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -103,16 +103,25 @@ struct ip6_tnl_net { | |||
103 | 103 | ||
104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) | 104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) |
105 | { | 105 | { |
106 | struct pcpu_tstats sum = { 0 }; | 106 | struct pcpu_tstats tmp, sum = { 0 }; |
107 | int i; | 107 | int i; |
108 | 108 | ||
109 | for_each_possible_cpu(i) { | 109 | for_each_possible_cpu(i) { |
110 | unsigned int start; | ||
110 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | 111 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); |
111 | 112 | ||
112 | sum.rx_packets += tstats->rx_packets; | 113 | do { |
113 | sum.rx_bytes += tstats->rx_bytes; | 114 | start = u64_stats_fetch_begin_bh(&tstats->syncp); |
114 | sum.tx_packets += tstats->tx_packets; | 115 | tmp.rx_packets = tstats->rx_packets; |
115 | sum.tx_bytes += tstats->tx_bytes; | 116 | tmp.rx_bytes = tstats->rx_bytes; |
117 | tmp.tx_packets = tstats->tx_packets; | ||
118 | tmp.tx_bytes = tstats->tx_bytes; | ||
119 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); | ||
120 | |||
121 | sum.rx_packets += tmp.rx_packets; | ||
122 | sum.rx_bytes += tmp.rx_bytes; | ||
123 | sum.tx_packets += tmp.tx_packets; | ||
124 | sum.tx_bytes += tmp.tx_bytes; | ||
116 | } | 125 | } |
117 | dev->stats.rx_packets = sum.rx_packets; | 126 | dev->stats.rx_packets = sum.rx_packets; |
118 | dev->stats.rx_bytes = sum.rx_bytes; | 127 | dev->stats.rx_bytes = sum.rx_bytes; |
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
824 | } | 833 | } |
825 | 834 | ||
826 | tstats = this_cpu_ptr(t->dev->tstats); | 835 | tstats = this_cpu_ptr(t->dev->tstats); |
836 | u64_stats_update_begin(&tstats->syncp); | ||
827 | tstats->rx_packets++; | 837 | tstats->rx_packets++; |
828 | tstats->rx_bytes += skb->len; | 838 | tstats->rx_bytes += skb->len; |
839 | u64_stats_update_end(&tstats->syncp); | ||
829 | 840 | ||
830 | netif_rx(skb); | 841 | netif_rx(skb); |
831 | 842 | ||
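The tunnel stats hunks here (and the matching ones in ip6_vti.c and sit.c below) bracket the per-CPU counter updates with u64_stats_update_begin()/u64_stats_update_end() and read them back through the fetch_begin/fetch_retry pair, so the 64-bit counters cannot be observed half-updated on 32-bit hosts; the local tmp copy keeps a torn snapshot from ever being folded into sum. Condensed from the reader side of the hunk above (kernel-style fragment, not a standalone program):

        do {
                start = u64_stats_fetch_begin_bh(&tstats->syncp);
                tmp.rx_packets = tstats->rx_packets;
                tmp.rx_bytes   = tstats->rx_bytes;
        } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

        sum.rx_packets += tmp.rx_packets;
        sum.rx_bytes   += tmp.rx_bytes;

The ip6_vti.c change additionally calls u64_stats_init() on every per-CPU syncp at device init, which the seqcount-based 32-bit implementation requires before first use.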
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed94ba61dda0..7b42d5ef868d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -75,26 +75,6 @@ struct vti6_net { | |||
75 | struct ip6_tnl __rcu **tnls[2]; | 75 | struct ip6_tnl __rcu **tnls[2]; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static struct net_device_stats *vti6_get_stats(struct net_device *dev) | ||
79 | { | ||
80 | struct pcpu_tstats sum = { 0 }; | ||
81 | int i; | ||
82 | |||
83 | for_each_possible_cpu(i) { | ||
84 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | ||
85 | |||
86 | sum.rx_packets += tstats->rx_packets; | ||
87 | sum.rx_bytes += tstats->rx_bytes; | ||
88 | sum.tx_packets += tstats->tx_packets; | ||
89 | sum.tx_bytes += tstats->tx_bytes; | ||
90 | } | ||
91 | dev->stats.rx_packets = sum.rx_packets; | ||
92 | dev->stats.rx_bytes = sum.rx_bytes; | ||
93 | dev->stats.tx_packets = sum.tx_packets; | ||
94 | dev->stats.tx_bytes = sum.tx_bytes; | ||
95 | return &dev->stats; | ||
96 | } | ||
97 | |||
98 | #define for_each_vti6_tunnel_rcu(start) \ | 78 | #define for_each_vti6_tunnel_rcu(start) \ |
99 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | 79 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) |
100 | 80 | ||
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb) | |||
331 | } | 311 | } |
332 | 312 | ||
333 | tstats = this_cpu_ptr(t->dev->tstats); | 313 | tstats = this_cpu_ptr(t->dev->tstats); |
314 | u64_stats_update_begin(&tstats->syncp); | ||
334 | tstats->rx_packets++; | 315 | tstats->rx_packets++; |
335 | tstats->rx_bytes += skb->len; | 316 | tstats->rx_bytes += skb->len; |
317 | u64_stats_update_end(&tstats->syncp); | ||
336 | 318 | ||
337 | skb->mark = 0; | 319 | skb->mark = 0; |
338 | secpath_reset(skb); | 320 | secpath_reset(skb); |
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = { | |||
716 | .ndo_start_xmit = vti6_tnl_xmit, | 698 | .ndo_start_xmit = vti6_tnl_xmit, |
717 | .ndo_do_ioctl = vti6_ioctl, | 699 | .ndo_do_ioctl = vti6_ioctl, |
718 | .ndo_change_mtu = vti6_change_mtu, | 700 | .ndo_change_mtu = vti6_change_mtu, |
719 | .ndo_get_stats = vti6_get_stats, | 701 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
720 | }; | 702 | }; |
721 | 703 | ||
722 | /** | 704 | /** |
@@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev) | |||
750 | static inline int vti6_dev_init_gen(struct net_device *dev) | 732 | static inline int vti6_dev_init_gen(struct net_device *dev) |
751 | { | 733 | { |
752 | struct ip6_tnl *t = netdev_priv(dev); | 734 | struct ip6_tnl *t = netdev_priv(dev); |
735 | int i; | ||
753 | 736 | ||
754 | t->dev = dev; | 737 | t->dev = dev; |
755 | t->net = dev_net(dev); | 738 | t->net = dev_net(dev); |
756 | dev->tstats = alloc_percpu(struct pcpu_tstats); | 739 | dev->tstats = alloc_percpu(struct pcpu_tstats); |
757 | if (!dev->tstats) | 740 | if (!dev->tstats) |
758 | return -ENOMEM; | 741 | return -ENOMEM; |
742 | for_each_possible_cpu(i) { | ||
743 | struct pcpu_tstats *stats; | ||
744 | stats = per_cpu_ptr(dev->tstats, i); | ||
745 | u64_stats_init(&stats->syncp); | ||
746 | } | ||
759 | return 0; | 747 | return 0; |
760 | } | 748 | } |
761 | 749 | ||
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3512177deb4d..300865171394 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1277,6 +1277,9 @@ skip_linkparms: | |||
1277 | ri->prefix_len == 0) | 1277 | ri->prefix_len == 0) |
1278 | continue; | 1278 | continue; |
1279 | #endif | 1279 | #endif |
1280 | if (ri->prefix_len == 0 && | ||
1281 | !in6_dev->cnf.accept_ra_defrtr) | ||
1282 | continue; | ||
1280 | if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) | 1283 | if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) |
1281 | continue; | 1284 | continue; |
1282 | rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, | 1285 | rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index f78f41aca8e9..a0d17270117c 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) | |||
446 | static struct xt_target synproxy_tg6_reg __read_mostly = { | 446 | static struct xt_target synproxy_tg6_reg __read_mostly = { |
447 | .name = "SYNPROXY", | 447 | .name = "SYNPROXY", |
448 | .family = NFPROTO_IPV6, | 448 | .family = NFPROTO_IPV6, |
449 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
449 | .target = synproxy_tg6, | 450 | .target = synproxy_tg6, |
450 | .targetsize = sizeof(struct xt_synproxy_info), | 451 | .targetsize = sizeof(struct xt_synproxy_info), |
451 | .checkentry = synproxy_tg6_check, | 452 | .checkentry = synproxy_tg6_check, |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 7fb4e14c467f..b6bb87e55805 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -792,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
792 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 792 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
793 | if (flowlabel == NULL) | 793 | if (flowlabel == NULL) |
794 | return -EINVAL; | 794 | return -EINVAL; |
795 | daddr = &flowlabel->dst; | ||
796 | } | 795 | } |
797 | } | 796 | } |
798 | 797 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7faa9d5e1503..4b4944c3e4c4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops); | |||
84 | 84 | ||
85 | static int ip6_pkt_discard(struct sk_buff *skb); | 85 | static int ip6_pkt_discard(struct sk_buff *skb); |
86 | static int ip6_pkt_discard_out(struct sk_buff *skb); | 86 | static int ip6_pkt_discard_out(struct sk_buff *skb); |
87 | static int ip6_pkt_prohibit(struct sk_buff *skb); | ||
88 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); | ||
87 | static void ip6_link_failure(struct sk_buff *skb); | 89 | static void ip6_link_failure(struct sk_buff *skb); |
88 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | 90 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
89 | struct sk_buff *skb, u32 mtu); | 91 | struct sk_buff *skb, u32 mtu); |
@@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = { | |||
234 | 236 | ||
235 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 237 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
236 | 238 | ||
237 | static int ip6_pkt_prohibit(struct sk_buff *skb); | ||
238 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); | ||
239 | |||
240 | static const struct rt6_info ip6_prohibit_entry_template = { | 239 | static const struct rt6_info ip6_prohibit_entry_template = { |
241 | .dst = { | 240 | .dst = { |
242 | .__refcnt = ATOMIC_INIT(1), | 241 | .__refcnt = ATOMIC_INIT(1), |
@@ -1565,21 +1564,24 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1565 | goto out; | 1564 | goto out; |
1566 | } | 1565 | } |
1567 | } | 1566 | } |
1568 | rt->dst.output = ip6_pkt_discard_out; | ||
1569 | rt->dst.input = ip6_pkt_discard; | ||
1570 | rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; | 1567 | rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; |
1571 | switch (cfg->fc_type) { | 1568 | switch (cfg->fc_type) { |
1572 | case RTN_BLACKHOLE: | 1569 | case RTN_BLACKHOLE: |
1573 | rt->dst.error = -EINVAL; | 1570 | rt->dst.error = -EINVAL; |
1571 | rt->dst.output = dst_discard; | ||
1572 | rt->dst.input = dst_discard; | ||
1574 | break; | 1573 | break; |
1575 | case RTN_PROHIBIT: | 1574 | case RTN_PROHIBIT: |
1576 | rt->dst.error = -EACCES; | 1575 | rt->dst.error = -EACCES; |
1576 | rt->dst.output = ip6_pkt_prohibit_out; | ||
1577 | rt->dst.input = ip6_pkt_prohibit; | ||
1577 | break; | 1578 | break; |
1578 | case RTN_THROW: | 1579 | case RTN_THROW: |
1579 | rt->dst.error = -EAGAIN; | ||
1580 | break; | ||
1581 | default: | 1580 | default: |
1582 | rt->dst.error = -ENETUNREACH; | 1581 | rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN |
1582 | : -ENETUNREACH; | ||
1583 | rt->dst.output = ip6_pkt_discard_out; | ||
1584 | rt->dst.input = ip6_pkt_discard; | ||
1583 | break; | 1585 | break; |
1584 | } | 1586 | } |
1585 | goto install_route; | 1587 | goto install_route; |
@@ -1903,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, | |||
1903 | else | 1905 | else |
1904 | rt->rt6i_gateway = *dest; | 1906 | rt->rt6i_gateway = *dest; |
1905 | rt->rt6i_flags = ort->rt6i_flags; | 1907 | rt->rt6i_flags = ort->rt6i_flags; |
1906 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == | 1908 | rt6_set_from(rt, ort); |
1907 | (RTF_DEFAULT | RTF_ADDRCONF)) | ||
1908 | rt6_set_from(rt, ort); | ||
1909 | rt->rt6i_metric = 0; | 1909 | rt->rt6i_metric = 0; |
1910 | 1910 | ||
1911 | #ifdef CONFIG_IPV6_SUBTREES | 1911 | #ifdef CONFIG_IPV6_SUBTREES |
@@ -2144,8 +2144,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb) | |||
2144 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); | 2144 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); |
2145 | } | 2145 | } |
2146 | 2146 | ||
2147 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
2148 | |||
2149 | static int ip6_pkt_prohibit(struct sk_buff *skb) | 2147 | static int ip6_pkt_prohibit(struct sk_buff *skb) |
2150 | { | 2148 | { |
2151 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); | 2149 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); |
@@ -2157,8 +2155,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb) | |||
2157 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); | 2155 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); |
2158 | } | 2156 | } |
2159 | 2157 | ||
2160 | #endif | ||
2161 | |||
2162 | /* | 2158 | /* |
2163 | * Allocate a dst for local (unicast / anycast) address. | 2159 | * Allocate a dst for local (unicast / anycast) address. |
2164 | */ | 2160 | */ |
@@ -2168,12 +2164,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2168 | bool anycast) | 2164 | bool anycast) |
2169 | { | 2165 | { |
2170 | struct net *net = dev_net(idev->dev); | 2166 | struct net *net = dev_net(idev->dev); |
2171 | struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); | 2167 | struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, |
2172 | 2168 | DST_NOCOUNT, NULL); | |
2173 | if (!rt) { | 2169 | if (!rt) |
2174 | net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n"); | ||
2175 | return ERR_PTR(-ENOMEM); | 2170 | return ERR_PTR(-ENOMEM); |
2176 | } | ||
2177 | 2171 | ||
2178 | in6_dev_hold(idev); | 2172 | in6_dev_hold(idev); |
2179 | 2173 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 366fbba3359a..d3005b34476a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
702 | } | 702 | } |
703 | 703 | ||
704 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 704 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
705 | u64_stats_update_begin(&tstats->syncp); | ||
705 | tstats->rx_packets++; | 706 | tstats->rx_packets++; |
706 | tstats->rx_bytes += skb->len; | 707 | tstats->rx_bytes += skb->len; |
708 | u64_stats_update_end(&tstats->syncp); | ||
707 | 709 | ||
708 | netif_rx(skb); | 710 | netif_rx(skb); |
709 | 711 | ||
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
924 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | 926 | if (tunnel->parms.iph.daddr && skb_dst(skb)) |
925 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | 927 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
926 | 928 | ||
927 | if (skb->len > mtu) { | 929 | if (skb->len > mtu && !skb_is_gso(skb)) { |
928 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 930 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
929 | ip_rt_put(rt); | 931 | ip_rt_put(rt); |
930 | goto tx_error; | 932 | goto tx_error; |
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
966 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 968 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
967 | 969 | ||
968 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); | 970 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); |
969 | if (IS_ERR(skb)) | 971 | if (IS_ERR(skb)) { |
972 | ip_rt_put(rt); | ||
970 | goto out; | 973 | goto out; |
974 | } | ||
971 | 975 | ||
972 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, | 976 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, |
973 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 977 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0740f93a114a..f67033b4bb66 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
156 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 156 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
157 | if (flowlabel == NULL) | 157 | if (flowlabel == NULL) |
158 | return -EINVAL; | 158 | return -EINVAL; |
159 | usin->sin6_addr = flowlabel->dst; | ||
160 | fl6_sock_release(flowlabel); | 159 | fl6_sock_release(flowlabel); |
161 | } | 160 | } |
162 | } | 161 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index bcd5699313c3..089c741a3992 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1140,7 +1140,6 @@ do_udp_sendmsg: | |||
1140 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 1140 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
1141 | if (flowlabel == NULL) | 1141 | if (flowlabel == NULL) |
1142 | return -EINVAL; | 1142 | return -EINVAL; |
1143 | daddr = &flowlabel->dst; | ||
1144 | } | 1143 | } |
1145 | } | 1144 | } |
1146 | 1145 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index d9b437e55007..bb6e206ea70b 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
528 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); | 528 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
529 | if (flowlabel == NULL) | 529 | if (flowlabel == NULL) |
530 | return -EINVAL; | 530 | return -EINVAL; |
531 | daddr = &flowlabel->dst; | ||
532 | } | 531 | } |
533 | } | 532 | } |
534 | 533 | ||
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7b01b9f5846c..c71b699eb555 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
715 | unsigned long cpu_flags; | 715 | unsigned long cpu_flags; |
716 | size_t copied = 0; | 716 | size_t copied = 0; |
717 | u32 peek_seq = 0; | 717 | u32 peek_seq = 0; |
718 | u32 *seq; | 718 | u32 *seq, skb_len; |
719 | unsigned long used; | 719 | unsigned long used; |
720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
721 | long timeo; | 721 | long timeo; |
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
812 | } | 812 | } |
813 | continue; | 813 | continue; |
814 | found_ok_skb: | 814 | found_ok_skb: |
815 | skb_len = skb->len; | ||
815 | /* Ok so how much can we use? */ | 816 | /* Ok so how much can we use? */ |
816 | used = skb->len - offset; | 817 | used = skb->len - offset; |
817 | if (len < used) | 818 | if (len < used) |
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
844 | } | 845 | } |
845 | 846 | ||
846 | /* Partial read */ | 847 | /* Partial read */ |
847 | if (used + offset < skb->len) | 848 | if (used + offset < skb_len) |
848 | continue; | 849 | continue; |
849 | } while (len > 0); | 850 | } while (len > 0); |
850 | 851 | ||
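The af_llc fix caches skb->len into the new skb_len local before the buffer can be consumed: by the time the partial-read test runs at the bottom of the loop, the skb may already have been unlinked and freed, so the old "used + offset < skb->len" comparison read freed memory. Comparing against the cached length is equivalent while the skb is alive and safe once it is not. A small sketch of the cache-before-release pattern with a hypothetical pkt type:

        #include <stdlib.h>

        struct pkt { size_t len; unsigned char *data; };

        /* Hypothetical consume(): frees the packet once it has been fully read. */
        static void consume(struct pkt *p, size_t consumed)
        {
                if (consumed >= p->len) {
                        free(p->data);
                        free(p);
                }
        }

        /* Takes up to want bytes starting at offset; sets *more when a partial
         * read should continue. Returns the number of bytes taken. */
        static size_t take(struct pkt *p, size_t offset, size_t want, int *more)
        {
                size_t pkt_len = p->len;        /* cache before p can be freed */
                size_t used = pkt_len - offset;

                if (want < used)
                        used = want;

                consume(p, offset + used);      /* p may be freed from here on */

                *more = offset + used < pkt_len;        /* safe: uses the cached value */
                return used;
        }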
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 95667b088c5b..364ce0c5962f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1368,7 +1368,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
1368 | changed |= | 1368 | changed |= |
1369 | ieee80211_mps_set_sta_local_pm(sta, | 1369 | ieee80211_mps_set_sta_local_pm(sta, |
1370 | params->local_pm); | 1370 | params->local_pm); |
1371 | ieee80211_bss_info_change_notify(sdata, changed); | 1371 | ieee80211_mbss_info_change_notify(sdata, changed); |
1372 | #endif | 1372 | #endif |
1373 | } | 1373 | } |
1374 | 1374 | ||
@@ -2488,8 +2488,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2488 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2488 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2489 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2489 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2490 | 2490 | ||
2491 | if (sdata->vif.type != NL80211_IFTYPE_STATION && | 2491 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
2492 | sdata->vif.type != NL80211_IFTYPE_MESH_POINT) | ||
2493 | return -EOPNOTSUPP; | 2492 | return -EOPNOTSUPP; |
2494 | 2493 | ||
2495 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | 2494 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) |
@@ -3120,9 +3119,17 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, | |||
3120 | params->chandef.chan->band) | 3119 | params->chandef.chan->band) |
3121 | return -EINVAL; | 3120 | return -EINVAL; |
3122 | 3121 | ||
3122 | ifmsh->chsw_init = true; | ||
3123 | if (!ifmsh->pre_value) | ||
3124 | ifmsh->pre_value = 1; | ||
3125 | else | ||
3126 | ifmsh->pre_value++; | ||
3127 | |||
3123 | err = ieee80211_mesh_csa_beacon(sdata, params, true); | 3128 | err = ieee80211_mesh_csa_beacon(sdata, params, true); |
3124 | if (err < 0) | 3129 | if (err < 0) { |
3130 | ifmsh->chsw_init = false; | ||
3125 | return err; | 3131 | return err; |
3132 | } | ||
3126 | break; | 3133 | break; |
3127 | #endif | 3134 | #endif |
3128 | default: | 3135 | default: |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 531be040b9ae..27a39de89679 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -823,6 +823,10 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
823 | if (err) | 823 | if (err) |
824 | return false; | 824 | return false; |
825 | 825 | ||
826 | /* channel switch is not supported, disconnect */ | ||
827 | if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) | ||
828 | goto disconnect; | ||
829 | |||
826 | params.count = csa_ie.count; | 830 | params.count = csa_ie.count; |
827 | params.chandef = csa_ie.chandef; | 831 | params.chandef = csa_ie.chandef; |
828 | 832 | ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 29dc505be125..4aea4e791113 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1228,6 +1228,7 @@ struct ieee80211_csa_ie { | |||
1228 | u8 mode; | 1228 | u8 mode; |
1229 | u8 count; | 1229 | u8 count; |
1230 | u8 ttl; | 1230 | u8 ttl; |
1231 | u16 pre_value; | ||
1231 | }; | 1232 | }; |
1232 | 1233 | ||
1233 | /* Parsed Information Elements */ | 1234 | /* Parsed Information Elements */ |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index ff101ea1d9ae..a0757913046e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev) | |||
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, | 1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, |
1064 | struct sk_buff *skb) | 1064 | struct sk_buff *skb, |
1065 | void *accel_priv) | ||
1065 | { | 1066 | { |
1066 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); | 1067 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); |
1067 | } | 1068 | } |
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { | |||
1078 | }; | 1079 | }; |
1079 | 1080 | ||
1080 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, | 1081 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, |
1081 | struct sk_buff *skb) | 1082 | struct sk_buff *skb, |
1083 | void *accel_priv) | ||
1082 | { | 1084 | { |
1083 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1085 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1084 | struct ieee80211_local *local = sdata->local; | 1086 | struct ieee80211_local *local = sdata->local; |
@@ -1325,7 +1327,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
1325 | sdata->vif.bss_conf.bssid = NULL; | 1327 | sdata->vif.bss_conf.bssid = NULL; |
1326 | break; | 1328 | break; |
1327 | case NL80211_IFTYPE_AP_VLAN: | 1329 | case NL80211_IFTYPE_AP_VLAN: |
1328 | break; | ||
1329 | case NL80211_IFTYPE_P2P_DEVICE: | 1330 | case NL80211_IFTYPE_P2P_DEVICE: |
1330 | sdata->vif.bss_conf.bssid = sdata->vif.addr; | 1331 | sdata->vif.bss_conf.bssid = sdata->vif.addr; |
1331 | break; | 1332 | break; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 21d5d44444d0..7d1c3ac48ed9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -940,6 +940,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
940 | wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", | 940 | wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", |
941 | result); | 941 | result); |
942 | 942 | ||
943 | local->hw.conf.flags = IEEE80211_CONF_IDLE; | ||
944 | |||
943 | ieee80211_led_init(local); | 945 | ieee80211_led_init(local); |
944 | 946 | ||
945 | rtnl_lock(); | 947 | rtnl_lock(); |
@@ -1047,6 +1049,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1047 | 1049 | ||
1048 | cancel_work_sync(&local->restart_work); | 1050 | cancel_work_sync(&local->restart_work); |
1049 | cancel_work_sync(&local->reconfig_filter); | 1051 | cancel_work_sync(&local->reconfig_filter); |
1052 | flush_work(&local->sched_scan_stopped_work); | ||
1050 | 1053 | ||
1051 | ieee80211_clear_tx_pending(local); | 1054 | ieee80211_clear_tx_pending(local); |
1052 | rate_control_deinitialize(local); | 1055 | rate_control_deinitialize(local); |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 896fe3bd599e..ba105257d03f 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -943,14 +943,19 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, | |||
943 | params.chandef.chan->center_freq); | 943 | params.chandef.chan->center_freq); |
944 | 944 | ||
945 | params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; | 945 | params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT; |
946 | if (beacon) | 946 | if (beacon) { |
947 | ifmsh->chsw_ttl = csa_ie.ttl - 1; | 947 | ifmsh->chsw_ttl = csa_ie.ttl - 1; |
948 | else | 948 | if (ifmsh->pre_value >= csa_ie.pre_value) |
949 | ifmsh->chsw_ttl = 0; | 949 | return false; |
950 | ifmsh->pre_value = csa_ie.pre_value; | ||
951 | } | ||
950 | 952 | ||
951 | if (ifmsh->chsw_ttl > 0) | 953 | if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) { |
952 | if (ieee80211_mesh_csa_beacon(sdata, ¶ms, false) < 0) | 954 | if (ieee80211_mesh_csa_beacon(sdata, ¶ms, false) < 0) |
953 | return false; | 955 | return false; |
956 | } else { | ||
957 | return false; | ||
958 | } | ||
954 | 959 | ||
955 | sdata->csa_radar_required = params.radar_required; | 960 | sdata->csa_radar_required = params.radar_required; |
956 | 961 | ||
@@ -1163,7 +1168,6 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1163 | offset_ttl = (len < 42) ? 7 : 10; | 1168 | offset_ttl = (len < 42) ? 7 : 10; |
1164 | *(pos + offset_ttl) -= 1; | 1169 | *(pos + offset_ttl) -= 1; |
1165 | *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; | 1170 | *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; |
1166 | sdata->u.mesh.chsw_ttl = *(pos + offset_ttl); | ||
1167 | 1171 | ||
1168 | memcpy(mgmt_fwd, mgmt, len); | 1172 | memcpy(mgmt_fwd, mgmt, len); |
1169 | eth_broadcast_addr(mgmt_fwd->da); | 1173 | eth_broadcast_addr(mgmt_fwd->da); |
@@ -1182,7 +1186,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1182 | u16 pre_value; | 1186 | u16 pre_value; |
1183 | bool fwd_csa = true; | 1187 | bool fwd_csa = true; |
1184 | size_t baselen; | 1188 | size_t baselen; |
1185 | u8 *pos, ttl; | 1189 | u8 *pos; |
1186 | 1190 | ||
1187 | if (mgmt->u.action.u.measurement.action_code != | 1191 | if (mgmt->u.action.u.measurement.action_code != |
1188 | WLAN_ACTION_SPCT_CHL_SWITCH) | 1192 | WLAN_ACTION_SPCT_CHL_SWITCH) |
@@ -1193,8 +1197,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1193 | u.action.u.chan_switch.variable); | 1197 | u.action.u.chan_switch.variable); |
1194 | ieee802_11_parse_elems(pos, len - baselen, false, &elems); | 1198 | ieee802_11_parse_elems(pos, len - baselen, false, &elems); |
1195 | 1199 | ||
1196 | ttl = elems.mesh_chansw_params_ie->mesh_ttl; | 1200 | ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; |
1197 | if (!--ttl) | 1201 | if (!--ifmsh->chsw_ttl) |
1198 | fwd_csa = false; | 1202 | fwd_csa = false; |
1199 | 1203 | ||
1200 | pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); | 1204 | pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); |
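Reviewer note on the mesh.c hunks above: acceptance of a beaconed channel-switch announcement is now tied to a strictly increasing precedence value and to a TTL budget checked against the configured mesh TTL. A minimal userspace sketch of that acceptance rule, with hypothetical types standing in for the mac80211 structures (not the kernel implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: st mirrors ifmsh->pre_value / ifmsh->chsw_ttl,
     * mesh_ttl_limit stands in for mshcfg.dot11MeshTTL. */
    struct csa_state {
            uint16_t pre_value;
            uint8_t  chsw_ttl;
    };

    static bool accept_csa(struct csa_state *st, uint16_t pre_value,
                           uint8_t ttl, uint8_t mesh_ttl_limit, bool beacon)
    {
            if (beacon) {
                    st->chsw_ttl = ttl - 1;
                    /* only a strictly newer precedence value is accepted */
                    if (st->pre_value >= pre_value)
                            return false;
                    st->pre_value = pre_value;
            }
            /* the remaining TTL must stay within the configured bound */
            return st->chsw_ttl < mesh_ttl_limit;
    }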
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d7504ab61a34..b3a3ce316656 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1910,6 +1910,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, | |||
1910 | if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) | 1910 | if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) |
1911 | already = true; | 1911 | already = true; |
1912 | 1912 | ||
1913 | ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; | ||
1914 | |||
1913 | mutex_unlock(&sdata->local->mtx); | 1915 | mutex_unlock(&sdata->local->mtx); |
1914 | 1916 | ||
1915 | if (already) | 1917 | if (already) |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 5d60779a0c1b..4096ff6cc24f 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -226,7 +226,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) | |||
226 | nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); | 226 | nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); |
227 | 227 | ||
228 | nsecs += minstrel_mcs_groups[group].duration[rate]; | 228 | nsecs += minstrel_mcs_groups[group].duration[rate]; |
229 | tp = 1000000 * ((mr->probability * 1000) / nsecs); | 229 | tp = 1000000 * ((prob * 1000) / nsecs); |
230 | 230 | ||
231 | mr->cur_tp = MINSTREL_TRUNC(tp); | 231 | mr->cur_tp = MINSTREL_TRUNC(tp); |
232 | } | 232 | } |
@@ -277,13 +277,15 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
277 | if (!(mg->supported & BIT(i))) | 277 | if (!(mg->supported & BIT(i))) |
278 | continue; | 278 | continue; |
279 | 279 | ||
280 | index = MCS_GROUP_RATES * group + i; | ||
281 | |||
280 | /* initialize rates selections starting indexes */ | 282 | /* initialize rates selections starting indexes */ |
281 | if (!mg_rates_valid) { | 283 | if (!mg_rates_valid) { |
282 | mg->max_tp_rate = mg->max_tp_rate2 = | 284 | mg->max_tp_rate = mg->max_tp_rate2 = |
283 | mg->max_prob_rate = i; | 285 | mg->max_prob_rate = i; |
284 | if (!mi_rates_valid) { | 286 | if (!mi_rates_valid) { |
285 | mi->max_tp_rate = mi->max_tp_rate2 = | 287 | mi->max_tp_rate = mi->max_tp_rate2 = |
286 | mi->max_prob_rate = i; | 288 | mi->max_prob_rate = index; |
287 | mi_rates_valid = true; | 289 | mi_rates_valid = true; |
288 | } | 290 | } |
289 | mg_rates_valid = true; | 291 | mg_rates_valid = true; |
@@ -291,7 +293,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
291 | 293 | ||
292 | mr = &mg->rates[i]; | 294 | mr = &mg->rates[i]; |
293 | mr->retry_updated = false; | 295 | mr->retry_updated = false; |
294 | index = MCS_GROUP_RATES * group + i; | ||
295 | minstrel_calc_rate_ewma(mr); | 296 | minstrel_calc_rate_ewma(mr); |
296 | minstrel_ht_calc_tp(mi, group, i); | 297 | minstrel_ht_calc_tp(mi, group, i); |
297 | 298 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index caecef870c0e..2b0debb0422b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
911 | u16 sc; | 911 | u16 sc; |
912 | u8 tid, ack_policy; | 912 | u8 tid, ack_policy; |
913 | 913 | ||
914 | if (!ieee80211_is_data_qos(hdr->frame_control)) | 914 | if (!ieee80211_is_data_qos(hdr->frame_control) || |
915 | is_multicast_ether_addr(hdr->addr1)) | ||
915 | goto dont_reorder; | 916 | goto dont_reorder; |
916 | 917 | ||
917 | /* | 918 | /* |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 5ad66a83ef7f..bcc4833d7542 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -1088,6 +1088,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) | |||
1088 | 1088 | ||
1089 | trace_api_sched_scan_stopped(local); | 1089 | trace_api_sched_scan_stopped(local); |
1090 | 1090 | ||
1091 | ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work); | 1091 | schedule_work(&local->sched_scan_stopped_work); |
1092 | } | 1092 | } |
1093 | EXPORT_SYMBOL(ieee80211_sched_scan_stopped); | 1093 | EXPORT_SYMBOL(ieee80211_sched_scan_stopped); |
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index a40da20b32e0..6ab009070084 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
@@ -78,6 +78,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
78 | if (elems->mesh_chansw_params_ie) { | 78 | if (elems->mesh_chansw_params_ie) { |
79 | csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; | 79 | csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl; |
80 | csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; | 80 | csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags; |
81 | csa_ie->pre_value = le16_to_cpu( | ||
82 | elems->mesh_chansw_params_ie->mesh_pre_value); | ||
81 | } | 83 | } |
82 | 84 | ||
83 | new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); | 85 | new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c558b246ef00..ca7fa7f0613d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
463 | { | 463 | { |
464 | struct sta_info *sta = tx->sta; | 464 | struct sta_info *sta = tx->sta; |
465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
466 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
467 | struct ieee80211_local *local = tx->local; | 466 | struct ieee80211_local *local = tx->local; |
468 | 467 | ||
469 | if (unlikely(!sta)) | 468 | if (unlikely(!sta)) |
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
474 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { | 473 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { |
475 | int ac = skb_get_queue_mapping(tx->skb); | 474 | int ac = skb_get_queue_mapping(tx->skb); |
476 | 475 | ||
477 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
478 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
479 | !ieee80211_is_deauth(hdr->frame_control) && | ||
480 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
481 | !ieee80211_is_action(hdr->frame_control)) { | ||
482 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
483 | return TX_CONTINUE; | ||
484 | } | ||
485 | |||
486 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", | 476 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", |
487 | sta->sta.addr, sta->sta.aid, ac); | 477 | sta->sta.addr, sta->sta.aid, ac); |
488 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 478 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
525 | static ieee80211_tx_result debug_noinline | 515 | static ieee80211_tx_result debug_noinline |
526 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | 516 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
527 | { | 517 | { |
518 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
519 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
520 | |||
528 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) | 521 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
529 | return TX_CONTINUE; | 522 | return TX_CONTINUE; |
530 | 523 | ||
524 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
525 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
526 | !ieee80211_is_deauth(hdr->frame_control) && | ||
527 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
528 | !ieee80211_is_action(hdr->frame_control)) { | ||
529 | if (tx->flags & IEEE80211_TX_UNICAST) | ||
530 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
531 | return TX_CONTINUE; | ||
532 | } | ||
533 | |||
531 | if (tx->flags & IEEE80211_TX_UNICAST) | 534 | if (tx->flags & IEEE80211_TX_UNICAST) |
532 | return ieee80211_tx_h_unicast_ps_buf(tx); | 535 | return ieee80211_tx_h_unicast_ps_buf(tx); |
533 | else | 536 | else |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 592a18171f95..9f9b9bd3fd44 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -2278,17 +2278,15 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) | |||
2278 | { | 2278 | { |
2279 | struct ieee80211_local *local = | 2279 | struct ieee80211_local *local = |
2280 | container_of(work, struct ieee80211_local, radar_detected_work); | 2280 | container_of(work, struct ieee80211_local, radar_detected_work); |
2281 | struct cfg80211_chan_def chandef; | 2281 | struct cfg80211_chan_def chandef = local->hw.conf.chandef; |
2282 | 2282 | ||
2283 | ieee80211_dfs_cac_cancel(local); | 2283 | ieee80211_dfs_cac_cancel(local); |
2284 | 2284 | ||
2285 | if (local->use_chanctx) | 2285 | if (local->use_chanctx) |
2286 | /* currently not handled */ | 2286 | /* currently not handled */ |
2287 | WARN_ON(1); | 2287 | WARN_ON(1); |
2288 | else { | 2288 | else |
2289 | chandef = local->hw.conf.chandef; | ||
2290 | cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); | 2289 | cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); |
2291 | } | ||
2292 | } | 2290 | } |
2293 | 2291 | ||
2294 | void ieee80211_radar_detected(struct ieee80211_hw *hw) | 2292 | void ieee80211_radar_detected(struct ieee80211_hw *hw) |
@@ -2459,14 +2457,9 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, | |||
2459 | WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; | 2457 | WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; |
2460 | put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ | 2458 | put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */ |
2461 | pos += 2; | 2459 | pos += 2; |
2462 | if (!ifmsh->pre_value) | ||
2463 | ifmsh->pre_value = 1; | ||
2464 | else | ||
2465 | ifmsh->pre_value++; | ||
2466 | pre_value = cpu_to_le16(ifmsh->pre_value); | 2460 | pre_value = cpu_to_le16(ifmsh->pre_value); |
2467 | memcpy(pos, &pre_value, 2); /* Precedence Value */ | 2461 | memcpy(pos, &pre_value, 2); /* Precedence Value */ |
2468 | pos += 2; | 2462 | pos += 2; |
2469 | ifmsh->chsw_init = true; | ||
2470 | } | 2463 | } |
2471 | 2464 | ||
2472 | ieee80211_tx_skb(sdata, skb); | 2465 | ieee80211_tx_skb(sdata, skb); |
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 2bc2dec20b00..6226803fc490 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c | |||
@@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1, | |||
59 | u32 *multi) | 59 | u32 *multi) |
60 | { | 60 | { |
61 | return ip1->ipcmp == ip2->ipcmp && | 61 | return ip1->ipcmp == ip2->ipcmp && |
62 | ip2->ccmp == ip2->ccmp; | 62 | ip1->ccmp == ip2->ccmp; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline int | 65 | static inline int |
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index c8beafd401aa..5a355a46d1dc 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <net/ip_vs.h> | 63 | #include <net/ip_vs.h> |
64 | #include <net/netfilter/nf_conntrack_core.h> | 64 | #include <net/netfilter/nf_conntrack_core.h> |
65 | #include <net/netfilter/nf_conntrack_expect.h> | 65 | #include <net/netfilter/nf_conntrack_expect.h> |
66 | #include <net/netfilter/nf_conntrack_seqadj.h> | ||
66 | #include <net/netfilter/nf_conntrack_helper.h> | 67 | #include <net/netfilter/nf_conntrack_helper.h> |
67 | #include <net/netfilter/nf_conntrack_zones.h> | 68 | #include <net/netfilter/nf_conntrack_zones.h> |
68 | 69 | ||
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) | |||
97 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | 98 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) |
98 | return; | 99 | return; |
99 | 100 | ||
101 | /* Applications may adjust TCP seqs */ | ||
102 | if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && | ||
103 | !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct)) | ||
104 | return; | ||
105 | |||
100 | /* | 106 | /* |
101 | * The connection is not yet in the hashtable, so we update it. | 107 | * The connection is not yet in the hashtable, so we update it. |
102 | * CIP->VIP will remain the same, so leave the tuple in | 108 | * CIP->VIP will remain the same, so leave the tuple in |
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index 17c1bcb182c6..f6e2ae91a80b 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c | |||
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, | |||
36 | if (off == 0) | 36 | if (off == 0) |
37 | return 0; | 37 | return 0; |
38 | 38 | ||
39 | if (unlikely(!seqadj)) { | ||
40 | WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n"); | ||
41 | return 0; | ||
42 | } | ||
43 | |||
39 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); | 44 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); |
40 | 45 | ||
41 | spin_lock_bh(&ct->lock); | 46 | spin_lock_bh(&ct->lock); |
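Reviewer note on the nf_conntrack_seqadj.c hunk above: when the seqadj extension was never attached, the adjustment is skipped with a one-time warning instead of dereferencing a NULL pointer. A minimal userspace sketch of that guard pattern, using a static flag in place of the kernel's WARN_ONCE machinery (names are hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    /* Sketch only: warn the first time the precondition is violated,
     * then keep returning without doing the adjustment. */
    static int seqadj_set(const void *seqadj_ext, long off)
    {
            static bool warned;

            if (off == 0)
                    return 0;
            if (!seqadj_ext) {
                    if (!warned) {
                            warned = true;
                            fprintf(stderr, "missing seqadj extension setup\n");
                    }
                    return 0;
            }
            /* ... record the offset and mark the connection ... */
            return 1;
    }

    int main(void)
    {
            seqadj_set(NULL, 42);   /* warns once */
            seqadj_set(NULL, 42);   /* silent */
            return 0;
    }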
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index 902fb0a6b38a..7a394df0deb7 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c | |||
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net) | |||
97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) | 97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) |
98 | { | 98 | { |
99 | nf_conntrack_tstamp_fini_sysctl(net); | 99 | nf_conntrack_tstamp_fini_sysctl(net); |
100 | nf_ct_extend_unregister(&tstamp_extend); | ||
101 | } | 100 | } |
102 | 101 | ||
103 | int nf_conntrack_tstamp_init(void) | 102 | int nf_conntrack_tstamp_init(void) |
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index f02b3605823e..1fb2258c3535 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c | |||
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb, | |||
34 | struct nf_conntrack_expect *exp) | 34 | struct nf_conntrack_expect *exp) |
35 | { | 35 | { |
36 | char buffer[sizeof("4294967296 65635")]; | 36 | char buffer[sizeof("4294967296 65635")]; |
37 | struct nf_conn *ct = exp->master; | ||
38 | union nf_inet_addr newaddr; | ||
37 | u_int16_t port; | 39 | u_int16_t port; |
38 | unsigned int ret; | 40 | unsigned int ret; |
39 | 41 | ||
40 | /* Reply comes from server. */ | 42 | /* Reply comes from server. */ |
43 | newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; | ||
44 | |||
41 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 45 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
42 | exp->dir = IP_CT_DIR_REPLY; | 46 | exp->dir = IP_CT_DIR_REPLY; |
43 | exp->expectfn = nf_nat_follow_master; | 47 | exp->expectfn = nf_nat_follow_master; |
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb, | |||
57 | } | 61 | } |
58 | 62 | ||
59 | if (port == 0) { | 63 | if (port == 0) { |
60 | nf_ct_helper_log(skb, exp->master, "all ports in use"); | 64 | nf_ct_helper_log(skb, ct, "all ports in use"); |
61 | return NF_DROP; | 65 | return NF_DROP; |
62 | } | 66 | } |
63 | 67 | ||
64 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, | 68 | /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 |
65 | protoff, matchoff, matchlen, buffer, | 69 | * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 |
66 | strlen(buffer)); | 70 | * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 |
71 | * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 | ||
72 | * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 | ||
73 | * | ||
74 | * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, | ||
75 | * 255.255.255.255==4294967296, 10 digits) | ||
76 | * P: bound port (min 1 d, max 5d (65635)) | ||
77 | * F: filename (min 1 d ) | ||
78 | * S: size (min 1 d ) | ||
79 | * 0x01, \n: terminators | ||
80 | */ | ||
81 | /* AAA = "us", ie. where server normally talks to. */ | ||
82 | snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); | ||
83 | pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", | ||
84 | buffer, &newaddr.ip, port); | ||
85 | |||
86 | ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, | ||
87 | matchlen, buffer, strlen(buffer)); | ||
67 | if (ret != NF_ACCEPT) { | 88 | if (ret != NF_ACCEPT) { |
68 | nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); | 89 | nf_ct_helper_log(skb, ct, "cannot mangle packet"); |
69 | nf_ct_unexpect_related(exp); | 90 | nf_ct_unexpect_related(exp); |
70 | } | 91 | } |
92 | |||
71 | return ret; | 93 | return ret; |
72 | } | 94 | } |
73 | 95 | ||
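Reviewer note on the nf_nat_irc hunk above: the DCC payload is rewritten as the decimal host-order IPv4 address followed by the port, which is why the buffer is sized for a 10-digit address plus a 5-digit port. A standalone sketch of that encoding (the address and port values are arbitrary examples):

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Sketch only: mirrors the "address port" string built above. */
            char buffer[sizeof("4294967296 65635")];
            struct in_addr addr;
            unsigned int port = 59148;

            inet_pton(AF_INET, "192.168.1.10", &addr);
            snprintf(buffer, sizeof(buffer), "%u %u",
                     (unsigned int)ntohl(addr.s_addr), port);
            printf("%s\n", buffer);     /* "3232235786 59148" */
            return 0;
    }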
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index dcddc49c0e08..71a9f49a768b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
312 | int err, i = 0; | 312 | int err, i = 0; |
313 | 313 | ||
314 | list_for_each_entry(chain, &table->chains, list) { | 314 | list_for_each_entry(chain, &table->chains, list) { |
315 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
316 | continue; | ||
317 | |||
315 | err = nf_register_hook(&nft_base_chain(chain)->ops); | 318 | err = nf_register_hook(&nft_base_chain(chain)->ops); |
316 | if (err < 0) | 319 | if (err < 0) |
317 | goto err; | 320 | goto err; |
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
321 | return 0; | 324 | return 0; |
322 | err: | 325 | err: |
323 | list_for_each_entry(chain, &table->chains, list) { | 326 | list_for_each_entry(chain, &table->chains, list) { |
327 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
328 | continue; | ||
329 | |||
324 | if (i-- <= 0) | 330 | if (i-- <= 0) |
325 | break; | 331 | break; |
326 | 332 | ||
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table) | |||
333 | { | 339 | { |
334 | struct nft_chain *chain; | 340 | struct nft_chain *chain; |
335 | 341 | ||
336 | list_for_each_entry(chain, &table->chains, list) | 342 | list_for_each_entry(chain, &table->chains, list) { |
337 | nf_unregister_hook(&nft_base_chain(chain)->ops); | 343 | if (chain->flags & NFT_BASE_CHAIN) |
344 | nf_unregister_hook(&nft_base_chain(chain)->ops); | ||
345 | } | ||
338 | 346 | ||
339 | return 0; | 347 | return 0; |
340 | } | 348 | } |
@@ -1717,6 +1725,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule) | |||
1717 | return -ENOENT; | 1725 | return -ENOENT; |
1718 | } | 1726 | } |
1719 | 1727 | ||
1728 | static int nf_table_delrule_by_chain(struct nft_ctx *ctx) | ||
1729 | { | ||
1730 | struct nft_rule *rule; | ||
1731 | int err; | ||
1732 | |||
1733 | list_for_each_entry(rule, &ctx->chain->rules, list) { | ||
1734 | err = nf_tables_delrule_one(ctx, rule); | ||
1735 | if (err < 0) | ||
1736 | return err; | ||
1737 | } | ||
1738 | return 0; | ||
1739 | } | ||
1740 | |||
1720 | static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | 1741 | static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, |
1721 | const struct nlmsghdr *nlh, | 1742 | const struct nlmsghdr *nlh, |
1722 | const struct nlattr * const nla[]) | 1743 | const struct nlattr * const nla[]) |
@@ -1725,8 +1746,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | |||
1725 | const struct nft_af_info *afi; | 1746 | const struct nft_af_info *afi; |
1726 | struct net *net = sock_net(skb->sk); | 1747 | struct net *net = sock_net(skb->sk); |
1727 | const struct nft_table *table; | 1748 | const struct nft_table *table; |
1728 | struct nft_chain *chain; | 1749 | struct nft_chain *chain = NULL; |
1729 | struct nft_rule *rule, *tmp; | 1750 | struct nft_rule *rule; |
1730 | int family = nfmsg->nfgen_family, err = 0; | 1751 | int family = nfmsg->nfgen_family, err = 0; |
1731 | struct nft_ctx ctx; | 1752 | struct nft_ctx ctx; |
1732 | 1753 | ||
@@ -1738,22 +1759,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | |||
1738 | if (IS_ERR(table)) | 1759 | if (IS_ERR(table)) |
1739 | return PTR_ERR(table); | 1760 | return PTR_ERR(table); |
1740 | 1761 | ||
1741 | chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); | 1762 | if (nla[NFTA_RULE_CHAIN]) { |
1742 | if (IS_ERR(chain)) | 1763 | chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); |
1743 | return PTR_ERR(chain); | 1764 | if (IS_ERR(chain)) |
1765 | return PTR_ERR(chain); | ||
1766 | } | ||
1744 | 1767 | ||
1745 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1768 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); |
1746 | 1769 | ||
1747 | if (nla[NFTA_RULE_HANDLE]) { | 1770 | if (chain) { |
1748 | rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); | 1771 | if (nla[NFTA_RULE_HANDLE]) { |
1749 | if (IS_ERR(rule)) | 1772 | rule = nf_tables_rule_lookup(chain, |
1750 | return PTR_ERR(rule); | 1773 | nla[NFTA_RULE_HANDLE]); |
1774 | if (IS_ERR(rule)) | ||
1775 | return PTR_ERR(rule); | ||
1751 | 1776 | ||
1752 | err = nf_tables_delrule_one(&ctx, rule); | ||
1753 | } else { | ||
1754 | /* Remove all rules in this chain */ | ||
1755 | list_for_each_entry_safe(rule, tmp, &chain->rules, list) { | ||
1756 | err = nf_tables_delrule_one(&ctx, rule); | 1777 | err = nf_tables_delrule_one(&ctx, rule); |
1778 | } else { | ||
1779 | err = nf_table_delrule_by_chain(&ctx); | ||
1780 | } | ||
1781 | } else { | ||
1782 | list_for_each_entry(chain, &table->chains, list) { | ||
1783 | ctx.chain = chain; | ||
1784 | err = nf_table_delrule_by_chain(&ctx); | ||
1757 | if (err < 0) | 1785 | if (err < 0) |
1758 | break; | 1786 | break; |
1759 | } | 1787 | } |
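Reviewer note on the delrule hunk above: the chain attribute becomes optional — with a handle one rule is deleted, with only a chain that chain is flushed, and with neither every chain in the table is flushed through the new nf_table_delrule_by_chain() helper. A simplified sketch of that dispatch, with hypothetical array-based types in place of the nftables structures:

    #include <stddef.h>

    /* Sketch only: a table flattened to arrays, to show the three paths. */
    struct chain { int *rules; size_t n_rules; };
    struct table { struct chain *chains; size_t n_chains; };

    static int del_rule(struct chain *c, size_t idx)
    {
            c->rules[idx] = -1;     /* mark as deleted */
            return 0;
    }

    static int del_all_in_chain(struct chain *c)
    {
            for (size_t i = 0; i < c->n_rules; i++)
                    if (del_rule(c, i) < 0)
                            return -1;
            return 0;
    }

    static int delrule(struct table *t, struct chain *chain, const size_t *handle)
    {
            if (chain)
                    return handle ? del_rule(chain, *handle)
                                  : del_all_in_chain(chain);

            /* no chain given: flush every chain in the table */
            for (size_t i = 0; i < t->n_chains; i++)
                    if (del_all_in_chain(&t->chains[i]) < 0)
                            return -1;
            return 0;
    }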
@@ -2078,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, | |||
2078 | struct netlink_callback *cb) | 2106 | struct netlink_callback *cb) |
2079 | { | 2107 | { |
2080 | const struct nft_set *set; | 2108 | const struct nft_set *set; |
2081 | unsigned int idx = 0, s_idx = cb->args[0]; | 2109 | unsigned int idx, s_idx = cb->args[0]; |
2082 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; | 2110 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; |
2083 | 2111 | ||
2084 | if (cb->args[1]) | 2112 | if (cb->args[1]) |
2085 | return skb->len; | 2113 | return skb->len; |
2086 | 2114 | ||
2087 | list_for_each_entry(table, &ctx->afi->tables, list) { | 2115 | list_for_each_entry(table, &ctx->afi->tables, list) { |
2088 | if (cur_table && cur_table != table) | 2116 | if (cur_table) { |
2089 | continue; | 2117 | if (cur_table != table) |
2118 | continue; | ||
2090 | 2119 | ||
2120 | cur_table = NULL; | ||
2121 | } | ||
2091 | ctx->table = table; | 2122 | ctx->table = table; |
2123 | idx = 0; | ||
2092 | list_for_each_entry(set, &ctx->table->sets, list) { | 2124 | list_for_each_entry(set, &ctx->table->sets, list) { |
2093 | if (idx < s_idx) | 2125 | if (idx < s_idx) |
2094 | goto cont; | 2126 | goto cont; |
@@ -2350,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, | |||
2350 | enum nft_registers dreg; | 2382 | enum nft_registers dreg; |
2351 | 2383 | ||
2352 | dreg = nft_type_to_reg(set->dtype); | 2384 | dreg = nft_type_to_reg(set->dtype); |
2353 | return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype); | 2385 | return nft_validate_data_load(ctx, dreg, &elem->data, |
2386 | set->dtype == NFT_DATA_VERDICT ? | ||
2387 | NFT_DATA_VERDICT : NFT_DATA_VALUE); | ||
2354 | } | 2388 | } |
2355 | 2389 | ||
2356 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 2390 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 3c4b69e5fe17..a155d19a225e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net) | |||
1053 | #ifdef CONFIG_PROC_FS | 1053 | #ifdef CONFIG_PROC_FS |
1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); | 1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); |
1055 | #endif | 1055 | #endif |
1056 | nf_log_unset(net, &nfulnl_logger); | ||
1056 | } | 1057 | } |
1057 | 1058 | ||
1058 | static struct pernet_operations nfnl_log_net_ops = { | 1059 | static struct pernet_operations nfnl_log_net_ops = { |
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 8e0bb75e7c51..55c939f5371f 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr, | |||
31 | { | 31 | { |
32 | struct nft_exthdr *priv = nft_expr_priv(expr); | 32 | struct nft_exthdr *priv = nft_expr_priv(expr); |
33 | struct nft_data *dest = &data[priv->dreg]; | 33 | struct nft_data *dest = &data[priv->dreg]; |
34 | unsigned int offset; | 34 | unsigned int offset = 0; |
35 | int err; | 35 | int err; |
36 | 36 | ||
37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); | 37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9ff035c71403..a3910fc2122b 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong) | |||
325 | add_timer(&ht->timer); | 325 | add_timer(&ht->timer); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void htable_destroy(struct xt_hashlimit_htable *hinfo) | 328 | static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) |
329 | { | 329 | { |
330 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); | 330 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); |
331 | struct proc_dir_entry *parent; | 331 | struct proc_dir_entry *parent; |
332 | 332 | ||
333 | del_timer_sync(&hinfo->timer); | ||
334 | |||
335 | if (hinfo->family == NFPROTO_IPV4) | 333 | if (hinfo->family == NFPROTO_IPV4) |
336 | parent = hashlimit_net->ipt_hashlimit; | 334 | parent = hashlimit_net->ipt_hashlimit; |
337 | else | 335 | else |
338 | parent = hashlimit_net->ip6t_hashlimit; | 336 | parent = hashlimit_net->ip6t_hashlimit; |
339 | 337 | ||
340 | if(parent != NULL) | 338 | if (parent != NULL) |
341 | remove_proc_entry(hinfo->name, parent); | 339 | remove_proc_entry(hinfo->name, parent); |
340 | } | ||
342 | 341 | ||
342 | static void htable_destroy(struct xt_hashlimit_htable *hinfo) | ||
343 | { | ||
344 | del_timer_sync(&hinfo->timer); | ||
345 | htable_remove_proc_entry(hinfo); | ||
343 | htable_selective_cleanup(hinfo, select_all); | 346 | htable_selective_cleanup(hinfo, select_all); |
344 | kfree(hinfo->name); | 347 | kfree(hinfo->name); |
345 | vfree(hinfo); | 348 | vfree(hinfo); |
@@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net) | |||
883 | static void __net_exit hashlimit_proc_net_exit(struct net *net) | 886 | static void __net_exit hashlimit_proc_net_exit(struct net *net) |
884 | { | 887 | { |
885 | struct xt_hashlimit_htable *hinfo; | 888 | struct xt_hashlimit_htable *hinfo; |
886 | struct proc_dir_entry *pde; | ||
887 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); | 889 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); |
888 | 890 | ||
889 | /* recent_net_exit() is called before recent_mt_destroy(). Make sure | 891 | /* hashlimit_net_exit() is called before hashlimit_mt_destroy(). |
890 | * that the parent xt_recent proc entry is is empty before trying to | 892 | * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc |
891 | * remove it. | 893 | * entries is empty before trying to remove it. |
892 | */ | 894 | */ |
893 | mutex_lock(&hashlimit_mutex); | 895 | mutex_lock(&hashlimit_mutex); |
894 | pde = hashlimit_net->ipt_hashlimit; | ||
895 | if (pde == NULL) | ||
896 | pde = hashlimit_net->ip6t_hashlimit; | ||
897 | |||
898 | hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) | 896 | hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) |
899 | remove_proc_entry(hinfo->name, pde); | 897 | htable_remove_proc_entry(hinfo); |
900 | |||
901 | hashlimit_net->ipt_hashlimit = NULL; | 898 | hashlimit_net->ipt_hashlimit = NULL; |
902 | hashlimit_net->ip6t_hashlimit = NULL; | 899 | hashlimit_net->ip6t_hashlimit = NULL; |
903 | mutex_unlock(&hashlimit_mutex); | 900 | mutex_unlock(&hashlimit_mutex); |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 872529105abc..83b9927e7d19 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, | |||
384 | { | 384 | { |
385 | dev->dep_link_up = true; | 385 | dev->dep_link_up = true; |
386 | 386 | ||
387 | if (!dev->active_target) { | 387 | if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { |
388 | struct nfc_target *target; | 388 | struct nfc_target *target; |
389 | 389 | ||
390 | target = nfc_find_target(dev, target_idx); | 390 | target = nfc_find_target(dev, target_idx); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ba2548bd85bf..88cfbc189558 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -237,6 +237,30 @@ struct packet_skb_cb { | |||
237 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po); | 237 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po); |
238 | static void __fanout_link(struct sock *sk, struct packet_sock *po); | 238 | static void __fanout_link(struct sock *sk, struct packet_sock *po); |
239 | 239 | ||
240 | static struct net_device *packet_cached_dev_get(struct packet_sock *po) | ||
241 | { | ||
242 | struct net_device *dev; | ||
243 | |||
244 | rcu_read_lock(); | ||
245 | dev = rcu_dereference(po->cached_dev); | ||
246 | if (likely(dev)) | ||
247 | dev_hold(dev); | ||
248 | rcu_read_unlock(); | ||
249 | |||
250 | return dev; | ||
251 | } | ||
252 | |||
253 | static void packet_cached_dev_assign(struct packet_sock *po, | ||
254 | struct net_device *dev) | ||
255 | { | ||
256 | rcu_assign_pointer(po->cached_dev, dev); | ||
257 | } | ||
258 | |||
259 | static void packet_cached_dev_reset(struct packet_sock *po) | ||
260 | { | ||
261 | RCU_INIT_POINTER(po->cached_dev, NULL); | ||
262 | } | ||
263 | |||
240 | /* register_prot_hook must be invoked with the po->bind_lock held, | 264 | /* register_prot_hook must be invoked with the po->bind_lock held, |
241 | * or from a context in which asynchronous accesses to the packet | 265 | * or from a context in which asynchronous accesses to the packet |
242 | * socket is not possible (packet_create()). | 266 | * socket is not possible (packet_create()). |
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk) | |||
246 | struct packet_sock *po = pkt_sk(sk); | 270 | struct packet_sock *po = pkt_sk(sk); |
247 | 271 | ||
248 | if (!po->running) { | 272 | if (!po->running) { |
249 | if (po->fanout) { | 273 | if (po->fanout) |
250 | __fanout_link(sk, po); | 274 | __fanout_link(sk, po); |
251 | } else { | 275 | else |
252 | dev_add_pack(&po->prot_hook); | 276 | dev_add_pack(&po->prot_hook); |
253 | rcu_assign_pointer(po->cached_dev, po->prot_hook.dev); | ||
254 | } | ||
255 | 277 | ||
256 | sock_hold(sk); | 278 | sock_hold(sk); |
257 | po->running = 1; | 279 | po->running = 1; |
@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync) | |||
270 | struct packet_sock *po = pkt_sk(sk); | 292 | struct packet_sock *po = pkt_sk(sk); |
271 | 293 | ||
272 | po->running = 0; | 294 | po->running = 0; |
273 | if (po->fanout) { | 295 | |
296 | if (po->fanout) | ||
274 | __fanout_unlink(sk, po); | 297 | __fanout_unlink(sk, po); |
275 | } else { | 298 | else |
276 | __dev_remove_pack(&po->prot_hook); | 299 | __dev_remove_pack(&po->prot_hook); |
277 | RCU_INIT_POINTER(po->cached_dev, NULL); | ||
278 | } | ||
279 | 300 | ||
280 | __sock_put(sk); | 301 | __sock_put(sk); |
281 | 302 | ||
@@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
2059 | return tp_len; | 2080 | return tp_len; |
2060 | } | 2081 | } |
2061 | 2082 | ||
2062 | static struct net_device *packet_cached_dev_get(struct packet_sock *po) | ||
2063 | { | ||
2064 | struct net_device *dev; | ||
2065 | |||
2066 | rcu_read_lock(); | ||
2067 | dev = rcu_dereference(po->cached_dev); | ||
2068 | if (dev) | ||
2069 | dev_hold(dev); | ||
2070 | rcu_read_unlock(); | ||
2071 | |||
2072 | return dev; | ||
2073 | } | ||
2074 | |||
2075 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | 2083 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) |
2076 | { | 2084 | { |
2077 | struct sk_buff *skb; | 2085 | struct sk_buff *skb; |
@@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
2088 | 2096 | ||
2089 | mutex_lock(&po->pg_vec_lock); | 2097 | mutex_lock(&po->pg_vec_lock); |
2090 | 2098 | ||
2091 | if (saddr == NULL) { | 2099 | if (likely(saddr == NULL)) { |
2092 | dev = packet_cached_dev_get(po); | 2100 | dev = packet_cached_dev_get(po); |
2093 | proto = po->num; | 2101 | proto = po->num; |
2094 | addr = NULL; | 2102 | addr = NULL; |
@@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock, | |||
2242 | * Get and verify the address. | 2250 | * Get and verify the address. |
2243 | */ | 2251 | */ |
2244 | 2252 | ||
2245 | if (saddr == NULL) { | 2253 | if (likely(saddr == NULL)) { |
2246 | dev = packet_cached_dev_get(po); | 2254 | dev = packet_cached_dev_get(po); |
2247 | proto = po->num; | 2255 | proto = po->num; |
2248 | addr = NULL; | 2256 | addr = NULL; |
@@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock) | |||
2451 | 2459 | ||
2452 | spin_lock(&po->bind_lock); | 2460 | spin_lock(&po->bind_lock); |
2453 | unregister_prot_hook(sk, false); | 2461 | unregister_prot_hook(sk, false); |
2462 | packet_cached_dev_reset(po); | ||
2463 | |||
2454 | if (po->prot_hook.dev) { | 2464 | if (po->prot_hook.dev) { |
2455 | dev_put(po->prot_hook.dev); | 2465 | dev_put(po->prot_hook.dev); |
2456 | po->prot_hook.dev = NULL; | 2466 | po->prot_hook.dev = NULL; |
@@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc | |||
2506 | 2516 | ||
2507 | spin_lock(&po->bind_lock); | 2517 | spin_lock(&po->bind_lock); |
2508 | unregister_prot_hook(sk, true); | 2518 | unregister_prot_hook(sk, true); |
2519 | |||
2509 | po->num = protocol; | 2520 | po->num = protocol; |
2510 | po->prot_hook.type = protocol; | 2521 | po->prot_hook.type = protocol; |
2511 | if (po->prot_hook.dev) | 2522 | if (po->prot_hook.dev) |
2512 | dev_put(po->prot_hook.dev); | 2523 | dev_put(po->prot_hook.dev); |
2513 | po->prot_hook.dev = dev; | ||
2514 | 2524 | ||
2525 | po->prot_hook.dev = dev; | ||
2515 | po->ifindex = dev ? dev->ifindex : 0; | 2526 | po->ifindex = dev ? dev->ifindex : 0; |
2516 | 2527 | ||
2528 | packet_cached_dev_assign(po, dev); | ||
2529 | |||
2517 | if (protocol == 0) | 2530 | if (protocol == 0) |
2518 | goto out_unlock; | 2531 | goto out_unlock; |
2519 | 2532 | ||
@@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, | |||
2626 | po = pkt_sk(sk); | 2639 | po = pkt_sk(sk); |
2627 | sk->sk_family = PF_PACKET; | 2640 | sk->sk_family = PF_PACKET; |
2628 | po->num = proto; | 2641 | po->num = proto; |
2629 | RCU_INIT_POINTER(po->cached_dev, NULL); | 2642 | |
2643 | packet_cached_dev_reset(po); | ||
2630 | 2644 | ||
2631 | sk->sk_destruct = packet_sock_destruct; | 2645 | sk->sk_destruct = packet_sock_destruct; |
2632 | sk_refcnt_debug_inc(sk); | 2646 | sk_refcnt_debug_inc(sk); |
@@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this, | |||
3337 | sk->sk_error_report(sk); | 3351 | sk->sk_error_report(sk); |
3338 | } | 3352 | } |
3339 | if (msg == NETDEV_UNREGISTER) { | 3353 | if (msg == NETDEV_UNREGISTER) { |
3354 | packet_cached_dev_reset(po); | ||
3340 | po->ifindex = -1; | 3355 | po->ifindex = -1; |
3341 | if (po->prot_hook.dev) | 3356 | if (po->prot_hook.dev) |
3342 | dev_put(po->prot_hook.dev); | 3357 | dev_put(po->prot_hook.dev); |
diff --git a/net/rds/ib.c b/net/rds/ib.c index b4c8b0022fee..ba2dffeff608 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr) | |||
338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); | 338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); |
339 | /* due to this, we will claim to support iWARP devices unless we | 339 | /* due to this, we will claim to support iWARP devices unless we |
340 | check node_type. */ | 340 | check node_type. */ |
341 | if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) | 341 | if (ret || !cm_id->device || |
342 | cm_id->device->node_type != RDMA_NODE_IB_CA) | ||
342 | ret = -EADDRNOTAVAIL; | 343 | ret = -EADDRNOTAVAIL; |
343 | 344 | ||
344 | rdsdebug("addr %pI4 ret %d node type %d\n", | 345 | rdsdebug("addr %pI4 ret %d node type %d\n", |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index e59094981175..37be6e226d1b 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
552 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | 552 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { |
553 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 553 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
554 | scat = &rm->data.op_sg[sg]; | 554 | scat = &rm->data.op_sg[sg]; |
555 | ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 555 | ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length); |
556 | ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); | 556 | return sizeof(struct rds_header) + ret; |
557 | return ret; | ||
558 | } | 557 | } |
559 | 558 | ||
560 | /* FIXME we may overallocate here */ | 559 | /* FIXME we may overallocate here */ |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 33af77246bfe..62ced6516c58 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1253 | 1253 | ||
1254 | if (msg->msg_name) { | 1254 | if (msg->msg_name) { |
1255 | struct sockaddr_rose *srose; | 1255 | struct sockaddr_rose *srose; |
1256 | struct full_sockaddr_rose *full_srose = msg->msg_name; | ||
1256 | 1257 | ||
1257 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); | 1258 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); |
1258 | srose = msg->msg_name; | 1259 | srose = msg->msg_name; |
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1260 | srose->srose_addr = rose->dest_addr; | 1261 | srose->srose_addr = rose->dest_addr; |
1261 | srose->srose_call = rose->dest_call; | 1262 | srose->srose_call = rose->dest_call; |
1262 | srose->srose_ndigis = rose->dest_ndigis; | 1263 | srose->srose_ndigis = rose->dest_ndigis; |
1263 | if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { | 1264 | for (n = 0 ; n < rose->dest_ndigis ; n++) |
1264 | struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; | 1265 | full_srose->srose_digis[n] = rose->dest_digis[n]; |
1265 | for (n = 0 ; n < rose->dest_ndigis ; n++) | 1266 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); |
1266 | full_srose->srose_digis[n] = rose->dest_digis[n]; | ||
1267 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); | ||
1268 | } else { | ||
1269 | if (rose->dest_ndigis >= 1) { | ||
1270 | srose->srose_ndigis = 1; | ||
1271 | srose->srose_digi = rose->dest_digis[0]; | ||
1272 | } | ||
1273 | msg->msg_namelen = sizeof(struct sockaddr_rose); | ||
1274 | } | ||
1275 | } | 1267 | } |
1276 | 1268 | ||
1277 | skb_free_datagram(sk, skb); | 1269 | skb_free_datagram(sk, skb); |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index fd7072827a40..69cb848e8345 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act) | |||
270 | { | 270 | { |
271 | struct tc_action_ops *a, **ap; | 271 | struct tc_action_ops *a, **ap; |
272 | 272 | ||
273 | /* Must supply act, dump, cleanup and init */ | ||
274 | if (!act->act || !act->dump || !act->cleanup || !act->init) | ||
275 | return -EINVAL; | ||
276 | |||
277 | /* Supply defaults */ | ||
278 | if (!act->lookup) | ||
279 | act->lookup = tcf_hash_search; | ||
280 | if (!act->walk) | ||
281 | act->walk = tcf_generic_walker; | ||
282 | |||
273 | write_lock(&act_mod_lock); | 283 | write_lock(&act_mod_lock); |
274 | for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { | 284 | for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { |
275 | if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { | 285 | if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { |
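Reviewer note on the act_api.c hunk above: validation is centralized at registration time — an action must supply act, dump, cleanup and init, while lookup and walk fall back to the generic helpers, which is why the per-action .lookup/.walk assignments and NULL checks are dropped in the files that follow. A generic sketch of that register-time pattern (struct and function names are hypothetical):

    #include <errno.h>
    #include <stddef.h>

    struct action_ops {
            int (*act)(void);
            int (*dump)(void);
            int (*cleanup)(void);
            int (*init)(void);
            int (*lookup)(void);   /* optional, defaulted below */
            int (*walk)(void);     /* optional, defaulted below */
    };

    static int generic_lookup(void) { return 0; }
    static int generic_walk(void)   { return 0; }

    /* Sketch only: reject incomplete ops, fill in optional callbacks so
     * later call sites never need a NULL check. */
    static int register_action(struct action_ops *ops)
    {
            if (!ops->act || !ops->dump || !ops->cleanup || !ops->init)
                    return -EINVAL;
            if (!ops->lookup)
                    ops->lookup = generic_lookup;
            if (!ops->walk)
                    ops->walk = generic_walk;
            /* ... link ops into the action list here ... */
            return 0;
    }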
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act, | |||
381 | } | 391 | } |
382 | while ((a = act) != NULL) { | 392 | while ((a = act) != NULL) { |
383 | repeat: | 393 | repeat: |
384 | if (a->ops && a->ops->act) { | 394 | if (a->ops) { |
385 | ret = a->ops->act(skb, a, res); | 395 | ret = a->ops->act(skb, a, res); |
386 | if (TC_MUNGED & skb->tc_verd) { | 396 | if (TC_MUNGED & skb->tc_verd) { |
387 | /* copied already, allow trampling */ | 397 | /* copied already, allow trampling */ |
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind) | |||
405 | struct tc_action *a; | 415 | struct tc_action *a; |
406 | 416 | ||
407 | for (a = act; a; a = act) { | 417 | for (a = act; a; a = act) { |
408 | if (a->ops && a->ops->cleanup) { | 418 | if (a->ops) { |
409 | if (a->ops->cleanup(a, bind) == ACT_P_DELETED) | 419 | if (a->ops->cleanup(a, bind) == ACT_P_DELETED) |
410 | module_put(a->ops->owner); | 420 | module_put(a->ops->owner); |
411 | act = act->next; | 421 | act = act->next; |
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
424 | { | 434 | { |
425 | int err = -EINVAL; | 435 | int err = -EINVAL; |
426 | 436 | ||
427 | if (a->ops == NULL || a->ops->dump == NULL) | 437 | if (a->ops == NULL) |
428 | return err; | 438 | return err; |
429 | return a->ops->dump(skb, a, bind, ref); | 439 | return a->ops->dump(skb, a, bind, ref); |
430 | } | 440 | } |
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
436 | unsigned char *b = skb_tail_pointer(skb); | 446 | unsigned char *b = skb_tail_pointer(skb); |
437 | struct nlattr *nest; | 447 | struct nlattr *nest; |
438 | 448 | ||
439 | if (a->ops == NULL || a->ops->dump == NULL) | 449 | if (a->ops == NULL) |
440 | return err; | 450 | return err; |
441 | 451 | ||
442 | if (nla_put_string(skb, TCA_KIND, a->ops->kind)) | 452 | if (nla_put_string(skb, TCA_KIND, a->ops->kind)) |
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid) | |||
723 | a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); | 733 | a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); |
724 | if (a->ops == NULL) | 734 | if (a->ops == NULL) |
725 | goto err_free; | 735 | goto err_free; |
726 | if (a->ops->lookup == NULL) | ||
727 | goto err_mod; | ||
728 | err = -ENOENT; | 736 | err = -ENOENT; |
729 | if (a->ops->lookup(a, index) == 0) | 737 | if (a->ops->lookup(a, index) == 0) |
730 | goto err_mod; | 738 | goto err_mod; |
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1084 | memset(&a, 0, sizeof(struct tc_action)); | 1092 | memset(&a, 0, sizeof(struct tc_action)); |
1085 | a.ops = a_o; | 1093 | a.ops = a_o; |
1086 | 1094 | ||
1087 | if (a_o->walk == NULL) { | ||
1088 | WARN(1, "tc_dump_action: %s !capable of dumping table\n", | ||
1089 | a_o->kind); | ||
1090 | goto out_module_put; | ||
1091 | } | ||
1092 | |||
1093 | nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, | 1095 | nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, |
1094 | cb->nlh->nlmsg_type, sizeof(*t), 0); | 1096 | cb->nlh->nlmsg_type, sizeof(*t), 0); |
1095 | if (!nlh) | 1097 | if (!nlh) |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 3a4c0caa1f7d..11fe1a416433 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est, | |||
77 | &csum_idx_gen, &csum_hash_info); | 77 | &csum_idx_gen, &csum_hash_info); |
78 | if (IS_ERR(pc)) | 78 | if (IS_ERR(pc)) |
79 | return PTR_ERR(pc); | 79 | return PTR_ERR(pc); |
80 | p = to_tcf_csum(pc); | ||
81 | ret = ACT_P_CREATED; | 80 | ret = ACT_P_CREATED; |
82 | } else { | 81 | } else { |
83 | p = to_tcf_csum(pc); | 82 | if (bind)/* dont override defaults */ |
84 | if (!ovr) { | 83 | return 0; |
85 | tcf_hash_release(pc, bind, &csum_hash_info); | 84 | tcf_hash_release(pc, bind, &csum_hash_info); |
85 | if (!ovr) | ||
86 | return -EEXIST; | 86 | return -EEXIST; |
87 | } | ||
88 | } | 87 | } |
89 | 88 | ||
89 | p = to_tcf_csum(pc); | ||
90 | spin_lock_bh(&p->tcf_lock); | 90 | spin_lock_bh(&p->tcf_lock); |
91 | p->tcf_action = parm->action; | 91 | p->tcf_action = parm->action; |
92 | p->update_flags = parm->update_flags; | 92 | p->update_flags = parm->update_flags; |
@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = { | |||
585 | .act = tcf_csum, | 585 | .act = tcf_csum, |
586 | .dump = tcf_csum_dump, | 586 | .dump = tcf_csum_dump, |
587 | .cleanup = tcf_csum_cleanup, | 587 | .cleanup = tcf_csum_cleanup, |
588 | .lookup = tcf_hash_search, | ||
589 | .init = tcf_csum_init, | 588 | .init = tcf_csum_init, |
590 | .walk = tcf_generic_walker | ||
591 | }; | 589 | }; |
592 | 590 | ||
593 | MODULE_DESCRIPTION("Checksum updating actions"); | 591 | MODULE_DESCRIPTION("Checksum updating actions"); |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index fd2b3cff5fa2..eb9ba60ebab4 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, | |||
102 | return PTR_ERR(pc); | 102 | return PTR_ERR(pc); |
103 | ret = ACT_P_CREATED; | 103 | ret = ACT_P_CREATED; |
104 | } else { | 104 | } else { |
105 | if (!ovr) { | 105 | if (bind)/* dont override defaults */ |
106 | tcf_hash_release(pc, bind, &gact_hash_info); | 106 | return 0; |
107 | tcf_hash_release(pc, bind, &gact_hash_info); | ||
108 | if (!ovr) | ||
107 | return -EEXIST; | 109 | return -EEXIST; |
108 | } | ||
109 | } | 110 | } |
110 | 111 | ||
111 | gact = to_gact(pc); | 112 | gact = to_gact(pc); |
@@ -206,9 +207,7 @@ static struct tc_action_ops act_gact_ops = { | |||
206 | .act = tcf_gact, | 207 | .act = tcf_gact, |
207 | .dump = tcf_gact_dump, | 208 | .dump = tcf_gact_dump, |
208 | .cleanup = tcf_gact_cleanup, | 209 | .cleanup = tcf_gact_cleanup, |
209 | .lookup = tcf_hash_search, | ||
210 | .init = tcf_gact_init, | 210 | .init = tcf_gact_init, |
211 | .walk = tcf_generic_walker | ||
212 | }; | 211 | }; |
213 | 212 | ||
214 | MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); | 213 | MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60d88b6b9560..dcbfe8ce04a6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
141 | return PTR_ERR(pc); | 141 | return PTR_ERR(pc); |
142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
143 | } else { | 143 | } else { |
144 | if (!ovr) { | 144 | if (bind)/* dont override defaults */ |
145 | tcf_ipt_release(to_ipt(pc), bind); | 145 | return 0; |
146 | tcf_ipt_release(to_ipt(pc), bind); | ||
147 | |||
148 | if (!ovr) | ||
146 | return -EEXIST; | 149 | return -EEXIST; |
147 | } | ||
148 | } | 150 | } |
149 | ipt = to_ipt(pc); | 151 | ipt = to_ipt(pc); |
150 | 152 | ||
@@ -298,9 +300,7 @@ static struct tc_action_ops act_ipt_ops = { | |||
298 | .act = tcf_ipt, | 300 | .act = tcf_ipt, |
299 | .dump = tcf_ipt_dump, | 301 | .dump = tcf_ipt_dump, |
300 | .cleanup = tcf_ipt_cleanup, | 302 | .cleanup = tcf_ipt_cleanup, |
301 | .lookup = tcf_hash_search, | ||
302 | .init = tcf_ipt_init, | 303 | .init = tcf_ipt_init, |
303 | .walk = tcf_generic_walker | ||
304 | }; | 304 | }; |
305 | 305 | ||
306 | static struct tc_action_ops act_xt_ops = { | 306 | static struct tc_action_ops act_xt_ops = { |
@@ -312,9 +312,7 @@ static struct tc_action_ops act_xt_ops = { | |||
312 | .act = tcf_ipt, | 312 | .act = tcf_ipt, |
313 | .dump = tcf_ipt_dump, | 313 | .dump = tcf_ipt_dump, |
314 | .cleanup = tcf_ipt_cleanup, | 314 | .cleanup = tcf_ipt_cleanup, |
315 | .lookup = tcf_hash_search, | ||
316 | .init = tcf_ipt_init, | 315 | .init = tcf_ipt_init, |
317 | .walk = tcf_generic_walker | ||
318 | }; | 316 | }; |
319 | 317 | ||
320 | MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); | 318 | MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 977c10e0631b..252378121ce7 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = { | |||
271 | .act = tcf_mirred, | 271 | .act = tcf_mirred, |
272 | .dump = tcf_mirred_dump, | 272 | .dump = tcf_mirred_dump, |
273 | .cleanup = tcf_mirred_cleanup, | 273 | .cleanup = tcf_mirred_cleanup, |
274 | .lookup = tcf_hash_search, | ||
275 | .init = tcf_mirred_init, | 274 | .init = tcf_mirred_init, |
276 | .walk = tcf_generic_walker | ||
277 | }; | 275 | }; |
278 | 276 | ||
279 | MODULE_AUTHOR("Jamal Hadi Salim(2002)"); | 277 | MODULE_AUTHOR("Jamal Hadi Salim(2002)"); |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 876f0ef29694..76869538d028 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
70 | &nat_idx_gen, &nat_hash_info); | 70 | &nat_idx_gen, &nat_hash_info); |
71 | if (IS_ERR(pc)) | 71 | if (IS_ERR(pc)) |
72 | return PTR_ERR(pc); | 72 | return PTR_ERR(pc); |
73 | p = to_tcf_nat(pc); | ||
74 | ret = ACT_P_CREATED; | 73 | ret = ACT_P_CREATED; |
75 | } else { | 74 | } else { |
76 | p = to_tcf_nat(pc); | 75 | if (bind) |
77 | if (!ovr) { | 76 | return 0; |
78 | tcf_hash_release(pc, bind, &nat_hash_info); | 77 | tcf_hash_release(pc, bind, &nat_hash_info); |
78 | if (!ovr) | ||
79 | return -EEXIST; | 79 | return -EEXIST; |
80 | } | ||
81 | } | 80 | } |
81 | p = to_tcf_nat(pc); | ||
82 | 82 | ||
83 | spin_lock_bh(&p->tcf_lock); | 83 | spin_lock_bh(&p->tcf_lock); |
84 | p->old_addr = parm->old_addr; | 84 | p->old_addr = parm->old_addr; |
@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = { | |||
308 | .act = tcf_nat, | 308 | .act = tcf_nat, |
309 | .dump = tcf_nat_dump, | 309 | .dump = tcf_nat_dump, |
310 | .cleanup = tcf_nat_cleanup, | 310 | .cleanup = tcf_nat_cleanup, |
311 | .lookup = tcf_hash_search, | ||
312 | .init = tcf_nat_init, | 311 | .init = tcf_nat_init, |
313 | .walk = tcf_generic_walker | ||
314 | }; | 312 | }; |
315 | 313 | ||
316 | MODULE_DESCRIPTION("Stateless NAT actions"); | 314 | MODULE_DESCRIPTION("Stateless NAT actions"); |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 7ed78c9e505c..7aa2dcd989f8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
84 | ret = ACT_P_CREATED; | 84 | ret = ACT_P_CREATED; |
85 | } else { | 85 | } else { |
86 | p = to_pedit(pc); | 86 | p = to_pedit(pc); |
87 | if (!ovr) { | 87 | tcf_hash_release(pc, bind, &pedit_hash_info); |
88 | tcf_hash_release(pc, bind, &pedit_hash_info); | 88 | if (bind) |
89 | return 0; | ||
90 | if (!ovr) | ||
89 | return -EEXIST; | 91 | return -EEXIST; |
90 | } | 92 | |
91 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { | 93 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { |
92 | keys = kmalloc(ksize, GFP_KERNEL); | 94 | keys = kmalloc(ksize, GFP_KERNEL); |
93 | if (keys == NULL) | 95 | if (keys == NULL) |
@@ -243,9 +245,7 @@ static struct tc_action_ops act_pedit_ops = { | |||
243 | .act = tcf_pedit, | 245 | .act = tcf_pedit, |
244 | .dump = tcf_pedit_dump, | 246 | .dump = tcf_pedit_dump, |
245 | .cleanup = tcf_pedit_cleanup, | 247 | .cleanup = tcf_pedit_cleanup, |
246 | .lookup = tcf_hash_search, | ||
247 | .init = tcf_pedit_init, | 248 | .init = tcf_pedit_init, |
248 | .walk = tcf_generic_walker | ||
249 | }; | 249 | }; |
250 | 250 | ||
251 | MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); | 251 | MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 272d8e924cf6..ef246d87e68b 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
177 | if (bind) { | 177 | if (bind) { |
178 | police->tcf_bindcnt += 1; | 178 | police->tcf_bindcnt += 1; |
179 | police->tcf_refcnt += 1; | 179 | police->tcf_refcnt += 1; |
180 | return 0; | ||
180 | } | 181 | } |
181 | if (ovr) | 182 | if (ovr) |
182 | goto override; | 183 | goto override; |
183 | return ret; | 184 | /* not replacing */ |
185 | return -EEXIST; | ||
184 | } | 186 | } |
185 | } | 187 | } |
186 | 188 | ||
@@ -407,7 +409,6 @@ static struct tc_action_ops act_police_ops = { | |||
407 | .act = tcf_act_police, | 409 | .act = tcf_act_police, |
408 | .dump = tcf_act_police_dump, | 410 | .dump = tcf_act_police_dump, |
409 | .cleanup = tcf_act_police_cleanup, | 411 | .cleanup = tcf_act_police_cleanup, |
410 | .lookup = tcf_hash_search, | ||
411 | .init = tcf_act_police_locate, | 412 | .init = tcf_act_police_locate, |
412 | .walk = tcf_act_police_walker | 413 | .walk = tcf_act_police_walker |
413 | }; | 414 | }; |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 7725eb4ab756..f7b45ab85388 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, | |||
142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
143 | } else { | 143 | } else { |
144 | d = to_defact(pc); | 144 | d = to_defact(pc); |
145 | if (!ovr) { | 145 | |
146 | tcf_simp_release(d, bind); | 146 | if (bind) |
147 | return 0; | ||
148 | tcf_simp_release(d, bind); | ||
149 | if (!ovr) | ||
147 | return -EEXIST; | 150 | return -EEXIST; |
148 | } | 151 | |
149 | reset_policy(d, defdata, parm); | 152 | reset_policy(d, defdata, parm); |
150 | } | 153 | } |
151 | 154 | ||
@@ -201,7 +204,6 @@ static struct tc_action_ops act_simp_ops = { | |||
201 | .dump = tcf_simp_dump, | 204 | .dump = tcf_simp_dump, |
202 | .cleanup = tcf_simp_cleanup, | 205 | .cleanup = tcf_simp_cleanup, |
203 | .init = tcf_simp_init, | 206 | .init = tcf_simp_init, |
204 | .walk = tcf_generic_walker, | ||
205 | }; | 207 | }; |
206 | 208 | ||
207 | MODULE_AUTHOR("Jamal Hadi Salim(2005)"); | 209 | MODULE_AUTHOR("Jamal Hadi Salim(2005)"); |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index cb4221171f93..8fe9d25c3008 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
120 | ret = ACT_P_CREATED; | 120 | ret = ACT_P_CREATED; |
121 | } else { | 121 | } else { |
122 | d = to_skbedit(pc); | 122 | d = to_skbedit(pc); |
123 | if (!ovr) { | 123 | if (bind) |
124 | tcf_hash_release(pc, bind, &skbedit_hash_info); | 124 | return 0; |
125 | tcf_hash_release(pc, bind, &skbedit_hash_info); | ||
126 | if (!ovr) | ||
125 | return -EEXIST; | 127 | return -EEXIST; |
126 | } | ||
127 | } | 128 | } |
128 | 129 | ||
129 | spin_lock_bh(&d->tcf_lock); | 130 | spin_lock_bh(&d->tcf_lock); |
@@ -203,7 +204,6 @@ static struct tc_action_ops act_skbedit_ops = { | |||
203 | .dump = tcf_skbedit_dump, | 204 | .dump = tcf_skbedit_dump, |
204 | .cleanup = tcf_skbedit_cleanup, | 205 | .cleanup = tcf_skbedit_cleanup, |
205 | .init = tcf_skbedit_init, | 206 | .init = tcf_skbedit_init, |
206 | .walk = tcf_generic_walker, | ||
207 | }; | 207 | }; |
208 | 208 | ||
209 | MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); | 209 | MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 922a09406ba7..7fc899a943a8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
126 | 126 | ||
127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
128 | if (!netif_xmit_frozen_or_stopped(txq)) | 128 | if (!netif_xmit_frozen_or_stopped(txq)) |
129 | ret = dev_hard_start_xmit(skb, dev, txq, NULL); | 129 | ret = dev_hard_start_xmit(skb, dev, txq); |
130 | 130 | ||
131 | HARD_TX_UNLOCK(dev, txq); | 131 | HARD_TX_UNLOCK(dev, txq); |
132 | 132 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 0e1e38b40025..717b2108f852 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1477,11 +1477,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1477 | sch_tree_lock(sch); | 1477 | sch_tree_lock(sch); |
1478 | } | 1478 | } |
1479 | 1479 | ||
1480 | rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; | ||
1481 | |||
1482 | ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; | ||
1483 | |||
1484 | psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); | ||
1485 | psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); | ||
1486 | |||
1480 | /* it used to be a nasty bug here, we have to check that node | 1487 | /* it used to be a nasty bug here, we have to check that node |
1481 | * is really leaf before changing cl->un.leaf ! | 1488 | * is really leaf before changing cl->un.leaf ! |
1482 | */ | 1489 | */ |
1483 | if (!cl->level) { | 1490 | if (!cl->level) { |
1484 | cl->quantum = hopt->rate.rate / q->rate2quantum; | 1491 | u64 quantum = cl->rate.rate_bytes_ps; |
1492 | |||
1493 | do_div(quantum, q->rate2quantum); | ||
1494 | cl->quantum = min_t(u64, quantum, INT_MAX); | ||
1495 | |||
1485 | if (!hopt->quantum && cl->quantum < 1000) { | 1496 | if (!hopt->quantum && cl->quantum < 1000) { |
1486 | pr_warning( | 1497 | pr_warning( |
1487 | "HTB: quantum of class %X is small. Consider r2q change.\n", | 1498 | "HTB: quantum of class %X is small. Consider r2q change.\n", |
@@ -1500,13 +1511,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1500 | cl->prio = TC_HTB_NUMPRIO - 1; | 1511 | cl->prio = TC_HTB_NUMPRIO - 1; |
1501 | } | 1512 | } |
1502 | 1513 | ||
1503 | rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; | ||
1504 | |||
1505 | ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; | ||
1506 | |||
1507 | psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); | ||
1508 | psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); | ||
1509 | |||
1510 | cl->buffer = PSCHED_TICKS2NS(hopt->buffer); | 1514 | cl->buffer = PSCHED_TICKS2NS(hopt->buffer); |
1511 | cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); | 1515 | cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); |
1512 | 1516 | ||
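
The reordered hunk above also switches the quantum computation to the precomputed 64-bit cl->rate.rate_bytes_ps, so class rates past 32 bits no longer overflow the old hopt->rate.rate division. A rough userspace equivalent of that arithmetic, with do_div() written as a plain 64-bit divide (field names only mirror the hunk):

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

/* Sketch of the new HTB quantum math: 64-bit divide, then clamp to INT_MAX. */
static int htb_quantum(uint64_t rate_bytes_ps, uint32_t rate2quantum)
{
	uint64_t quantum = rate_bytes_ps / rate2quantum;    /* do_div() in the kernel */

	return quantum > INT_MAX ? INT_MAX : (int)quantum;  /* min_t(u64, quantum, INT_MAX) */
}

int main(void)
{
	/* 40 Gbit/s is 5e9 bytes/s; with the default r2q of 10 the quantum stays sane */
	printf("%d\n", htb_quantum(5000000000ULL, 10));
	return 0;
}
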
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index a6090051c5db..887e672f9d7d 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -118,6 +118,32 @@ struct tbf_sched_data { | |||
118 | }; | 118 | }; |
119 | 119 | ||
120 | 120 | ||
121 | /* Time to Length, convert time in ns to length in bytes | ||
122 | * to determine how many bytes can be sent in a given time. | ||
123 | */ | ||
124 | static u64 psched_ns_t2l(const struct psched_ratecfg *r, | ||
125 | u64 time_in_ns) | ||
126 | { | ||
127 | /* The formula is : | ||
128 | * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC | ||
129 | */ | ||
130 | u64 len = time_in_ns * r->rate_bytes_ps; | ||
131 | |||
132 | do_div(len, NSEC_PER_SEC); | ||
133 | |||
134 | if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) { | ||
135 | do_div(len, 53); | ||
136 | len = len * 48; | ||
137 | } | ||
138 | |||
139 | if (len > r->overhead) | ||
140 | len -= r->overhead; | ||
141 | else | ||
142 | len = 0; | ||
143 | |||
144 | return len; | ||
145 | } | ||
146 | |||
121 | /* | 147 | /* |
122 | * Return length of individual segments of a gso packet, | 148 | * Return length of individual segments of a gso packet, |
123 | * including all headers (MAC, IP, TCP/UDP) | 149 | * including all headers (MAC, IP, TCP/UDP) |
@@ -289,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
289 | struct tbf_sched_data *q = qdisc_priv(sch); | 315 | struct tbf_sched_data *q = qdisc_priv(sch); |
290 | struct nlattr *tb[TCA_TBF_MAX + 1]; | 316 | struct nlattr *tb[TCA_TBF_MAX + 1]; |
291 | struct tc_tbf_qopt *qopt; | 317 | struct tc_tbf_qopt *qopt; |
292 | struct qdisc_rate_table *rtab = NULL; | ||
293 | struct qdisc_rate_table *ptab = NULL; | ||
294 | struct Qdisc *child = NULL; | 318 | struct Qdisc *child = NULL; |
295 | int max_size, n; | 319 | struct psched_ratecfg rate; |
320 | struct psched_ratecfg peak; | ||
321 | u64 max_size; | ||
322 | s64 buffer, mtu; | ||
296 | u64 rate64 = 0, prate64 = 0; | 323 | u64 rate64 = 0, prate64 = 0; |
297 | 324 | ||
298 | err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); | 325 | err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); |
@@ -304,38 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
304 | goto done; | 331 | goto done; |
305 | 332 | ||
306 | qopt = nla_data(tb[TCA_TBF_PARMS]); | 333 | qopt = nla_data(tb[TCA_TBF_PARMS]); |
307 | rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); | 334 | if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE) |
308 | if (rtab == NULL) | 335 | qdisc_put_rtab(qdisc_get_rtab(&qopt->rate, |
309 | goto done; | 336 | tb[TCA_TBF_RTAB])); |
310 | |||
311 | if (qopt->peakrate.rate) { | ||
312 | if (qopt->peakrate.rate > qopt->rate.rate) | ||
313 | ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); | ||
314 | if (ptab == NULL) | ||
315 | goto done; | ||
316 | } | ||
317 | |||
318 | for (n = 0; n < 256; n++) | ||
319 | if (rtab->data[n] > qopt->buffer) | ||
320 | break; | ||
321 | max_size = (n << qopt->rate.cell_log) - 1; | ||
322 | if (ptab) { | ||
323 | int size; | ||
324 | |||
325 | for (n = 0; n < 256; n++) | ||
326 | if (ptab->data[n] > qopt->mtu) | ||
327 | break; | ||
328 | size = (n << qopt->peakrate.cell_log) - 1; | ||
329 | if (size < max_size) | ||
330 | max_size = size; | ||
331 | } | ||
332 | if (max_size < 0) | ||
333 | goto done; | ||
334 | 337 | ||
335 | if (max_size < psched_mtu(qdisc_dev(sch))) | 338 | if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE) |
336 | pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n", | 339 | qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, |
337 | max_size, qdisc_dev(sch)->name, | 340 | tb[TCA_TBF_PTAB])); |
338 | psched_mtu(qdisc_dev(sch))); | ||
339 | 341 | ||
340 | if (q->qdisc != &noop_qdisc) { | 342 | if (q->qdisc != &noop_qdisc) { |
341 | err = fifo_set_limit(q->qdisc, qopt->limit); | 343 | err = fifo_set_limit(q->qdisc, qopt->limit); |
@@ -349,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
349 | } | 351 | } |
350 | } | 352 | } |
351 | 353 | ||
354 | buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); | ||
355 | mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); | ||
356 | |||
357 | if (tb[TCA_TBF_RATE64]) | ||
358 | rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); | ||
359 | psched_ratecfg_precompute(&rate, &qopt->rate, rate64); | ||
360 | |||
361 | max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U); | ||
362 | |||
363 | if (qopt->peakrate.rate) { | ||
364 | if (tb[TCA_TBF_PRATE64]) | ||
365 | prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); | ||
366 | psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64); | ||
367 | if (peak.rate_bytes_ps <= rate.rate_bytes_ps) { | ||
368 | pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n", | ||
369 | peak.rate_bytes_ps, rate.rate_bytes_ps); | ||
370 | err = -EINVAL; | ||
371 | goto done; | ||
372 | } | ||
373 | |||
374 | max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu)); | ||
375 | } | ||
376 | |||
377 | if (max_size < psched_mtu(qdisc_dev(sch))) | ||
378 | pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n", | ||
379 | max_size, qdisc_dev(sch)->name, | ||
380 | psched_mtu(qdisc_dev(sch))); | ||
381 | |||
382 | if (!max_size) { | ||
383 | err = -EINVAL; | ||
384 | goto done; | ||
385 | } | ||
386 | |||
352 | sch_tree_lock(sch); | 387 | sch_tree_lock(sch); |
353 | if (child) { | 388 | if (child) { |
354 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); | 389 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); |
@@ -362,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
362 | q->tokens = q->buffer; | 397 | q->tokens = q->buffer; |
363 | q->ptokens = q->mtu; | 398 | q->ptokens = q->mtu; |
364 | 399 | ||
365 | if (tb[TCA_TBF_RATE64]) | 400 | memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg)); |
366 | rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); | 401 | if (qopt->peakrate.rate) { |
367 | psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64); | 402 | memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); |
368 | if (ptab) { | ||
369 | if (tb[TCA_TBF_PRATE64]) | ||
370 | prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); | ||
371 | psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64); | ||
372 | q->peak_present = true; | 403 | q->peak_present = true; |
373 | } else { | 404 | } else { |
374 | q->peak_present = false; | 405 | q->peak_present = false; |
@@ -377,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
377 | sch_tree_unlock(sch); | 408 | sch_tree_unlock(sch); |
378 | err = 0; | 409 | err = 0; |
379 | done: | 410 | done: |
380 | if (rtab) | ||
381 | qdisc_put_rtab(rtab); | ||
382 | if (ptab) | ||
383 | qdisc_put_rtab(ptab); | ||
384 | return err; | 411 | return err; |
385 | } | 412 | } |
386 | 413 | ||
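
The psched_ns_t2l() helper added above is what lets tbf_change() drop the rate tables entirely: burst and mtu are turned into byte limits straight from the configured rates, then sanity-checked (peak must exceed rate, max_size must be non-zero). A standalone sketch of the conversion formula from the hunk, using illustrative names rather than the kernel symbols:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Sketch: bytes transmittable in time_ns at rate_bytes_ps, with the 48/53
 * ATM cell rescale and per-packet overhead handling from the hunk above. */
static uint64_t ns_to_len(uint64_t time_ns, uint64_t rate_bytes_ps,
			  int atm_linklayer, uint64_t overhead)
{
	uint64_t len = time_ns * rate_bytes_ps / NSEC_PER_SEC;

	if (atm_linklayer)
		len = len / 53 * 48;	/* 48 payload bytes per 53-byte ATM cell */

	return len > overhead ? len - overhead : 0;
}

int main(void)
{
	/* 10 ms of buffer at 125 MB/s (1 Gbit/s) is about 1.25 MB of burst */
	printf("%llu\n", (unsigned long long)ns_to_len(10000000ULL, 125000000ULL, 0, 0));
	return 0;
}
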
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 68a27f9796d2..31ed008c8e13 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -154,8 +154,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
154 | 154 | ||
155 | asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; | 155 | asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; |
156 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; | 156 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; |
157 | asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = | 157 | asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; |
158 | min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ; | ||
159 | 158 | ||
160 | /* Initializes the timers */ | 159 | /* Initializes the timers */ |
161 | for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) | 160 | for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) |
@@ -291,8 +290,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
291 | asoc->peer.ipv6_address = 1; | 290 | asoc->peer.ipv6_address = 1; |
292 | INIT_LIST_HEAD(&asoc->asocs); | 291 | INIT_LIST_HEAD(&asoc->asocs); |
293 | 292 | ||
294 | asoc->autoclose = sp->autoclose; | ||
295 | |||
296 | asoc->default_stream = sp->default_stream; | 293 | asoc->default_stream = sp->default_stream; |
297 | asoc->default_ppid = sp->default_ppid; | 294 | asoc->default_ppid = sp->default_ppid; |
298 | asoc->default_flags = sp->default_flags; | 295 | asoc->default_flags = sp->default_flags; |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 0e2644d0a773..0fb140f8f088 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -581,7 +581,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
581 | unsigned long timeout; | 581 | unsigned long timeout; |
582 | 582 | ||
583 | /* Restart the AUTOCLOSE timer when sending data. */ | 583 | /* Restart the AUTOCLOSE timer when sending data. */ |
584 | if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { | 584 | if (sctp_state(asoc, ESTABLISHED) && |
585 | asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { | ||
585 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | 586 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; |
586 | timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | 587 | timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; |
587 | 588 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index f51ba985a36e..59268f6e2c36 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | |||
208 | INIT_LIST_HEAD(&q->retransmit); | 208 | INIT_LIST_HEAD(&q->retransmit); |
209 | INIT_LIST_HEAD(&q->sacked); | 209 | INIT_LIST_HEAD(&q->sacked); |
210 | INIT_LIST_HEAD(&q->abandoned); | 210 | INIT_LIST_HEAD(&q->abandoned); |
211 | |||
212 | q->empty = 1; | ||
213 | } | 211 | } |
214 | 212 | ||
215 | /* Free the outqueue structure and any related pending chunks. | 213 | /* Free the outqueue structure and any related pending chunks. |
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
332 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); | 330 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); |
333 | else | 331 | else |
334 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); | 332 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); |
335 | q->empty = 0; | ||
336 | break; | 333 | break; |
337 | } | 334 | } |
338 | } else { | 335 | } else { |
@@ -654,7 +651,6 @@ redo: | |||
654 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) | 651 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) |
655 | chunk->fast_retransmit = SCTP_DONT_FRTX; | 652 | chunk->fast_retransmit = SCTP_DONT_FRTX; |
656 | 653 | ||
657 | q->empty = 0; | ||
658 | q->asoc->stats.rtxchunks++; | 654 | q->asoc->stats.rtxchunks++; |
659 | break; | 655 | break; |
660 | } | 656 | } |
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
1065 | 1061 | ||
1066 | sctp_transport_reset_timers(transport); | 1062 | sctp_transport_reset_timers(transport); |
1067 | 1063 | ||
1068 | q->empty = 0; | ||
1069 | |||
1070 | /* Only let one DATA chunk get bundled with a | 1064 | /* Only let one DATA chunk get bundled with a |
1071 | * COOKIE-ECHO chunk. | 1065 | * COOKIE-ECHO chunk. |
1072 | */ | 1066 | */ |
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
1275 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, | 1269 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, |
1276 | asoc->adv_peer_ack_point); | 1270 | asoc->adv_peer_ack_point); |
1277 | 1271 | ||
1278 | /* See if all chunks are acked. | 1272 | return sctp_outq_is_empty(q); |
1279 | * Make sure the empty queue handler will get run later. | ||
1280 | */ | ||
1281 | q->empty = (list_empty(&q->out_chunk_list) && | ||
1282 | list_empty(&q->retransmit)); | ||
1283 | if (!q->empty) | ||
1284 | goto finish; | ||
1285 | |||
1286 | list_for_each_entry(transport, transport_list, transports) { | ||
1287 | q->empty = q->empty && list_empty(&transport->transmitted); | ||
1288 | if (!q->empty) | ||
1289 | goto finish; | ||
1290 | } | ||
1291 | |||
1292 | pr_debug("%s: sack queue is empty\n", __func__); | ||
1293 | finish: | ||
1294 | return q->empty; | ||
1295 | } | 1273 | } |
1296 | 1274 | ||
1297 | /* Is the outqueue empty? */ | 1275 | /* Is the outqueue empty? |
1276 | * The queue is empty when we have no pending data, no in-flight data | ||
1277 | * and no pending retransmissions. | ||
1278 | */ | ||
1298 | int sctp_outq_is_empty(const struct sctp_outq *q) | 1279 | int sctp_outq_is_empty(const struct sctp_outq *q) |
1299 | { | 1280 | { |
1300 | return q->empty; | 1281 | return q->out_qlen == 0 && q->outstanding_bytes == 0 && |
1282 | list_empty(&q->retransmit); | ||
1301 | } | 1283 | } |
1302 | 1284 | ||
1303 | /******************************************************************** | 1285 | /******************************************************************** |
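
With the q->empty flag removed, emptiness is now computed on demand from state the outqueue already tracks. The replacement predicate boils down to three conditions, sketched here with plain parameters standing in for the queue fields:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the new sctp_outq_is_empty() logic: nothing queued, nothing
 * in flight, nothing waiting for retransmission. */
static bool outq_is_empty(unsigned int out_qlen, unsigned int outstanding_bytes,
			  bool retransmit_list_empty)
{
	return out_qlen == 0 && outstanding_bytes == 0 && retransmit_list_empty;
}

int main(void)
{
	printf("%d\n", outq_is_empty(0, 0, true));     /* 1: empty */
	printf("%d\n", outq_is_empty(0, 1400, true));  /* 0: data still in flight */
	return 0;
}
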
diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 53c452efb40b..5e68b94ee640 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/sctp/sctp.h> | 38 | #include <net/sctp/sctp.h> |
39 | #include <net/sctp/sm.h> | 39 | #include <net/sctp/sm.h> |
40 | 40 | ||
41 | MODULE_SOFTDEP("pre: sctp"); | ||
41 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); | 42 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); |
42 | MODULE_DESCRIPTION("SCTP snooper"); | 43 | MODULE_DESCRIPTION("SCTP snooper"); |
43 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = { | |||
182 | .entry = jsctp_sf_eat_sack, | 183 | .entry = jsctp_sf_eat_sack, |
183 | }; | 184 | }; |
184 | 185 | ||
186 | static __init int sctp_setup_jprobe(void) | ||
187 | { | ||
188 | int ret = register_jprobe(&sctp_recv_probe); | ||
189 | |||
190 | if (ret) { | ||
191 | if (request_module("sctp")) | ||
192 | goto out; | ||
193 | ret = register_jprobe(&sctp_recv_probe); | ||
194 | } | ||
195 | |||
196 | out: | ||
197 | return ret; | ||
198 | } | ||
199 | |||
185 | static __init int sctpprobe_init(void) | 200 | static __init int sctpprobe_init(void) |
186 | { | 201 | { |
187 | int ret = -ENOMEM; | 202 | int ret = -ENOMEM; |
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void) | |||
202 | &sctpprobe_fops)) | 217 | &sctpprobe_fops)) |
203 | goto free_kfifo; | 218 | goto free_kfifo; |
204 | 219 | ||
205 | ret = register_jprobe(&sctp_recv_probe); | 220 | ret = sctp_setup_jprobe(); |
206 | if (ret) | 221 | if (ret) |
207 | goto remove_proc; | 222 | goto remove_proc; |
208 | 223 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index dfe3f36ff2aa..a26065be7289 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -820,7 +820,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
820 | SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); | 820 | SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); |
821 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); | 821 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); |
822 | 822 | ||
823 | if (new_asoc->autoclose) | 823 | if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) |
824 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | 824 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, |
825 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 825 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
826 | 826 | ||
@@ -908,7 +908,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, | |||
908 | SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); | 908 | SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); |
909 | SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); | 909 | SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); |
910 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); | 910 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); |
911 | if (asoc->autoclose) | 911 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) |
912 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | 912 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, |
913 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 913 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
914 | 914 | ||
@@ -2970,7 +2970,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, | |||
2970 | if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) | 2970 | if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) |
2971 | force = SCTP_FORCE(); | 2971 | force = SCTP_FORCE(); |
2972 | 2972 | ||
2973 | if (asoc->autoclose) { | 2973 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { |
2974 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | 2974 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
2975 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 2975 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
2976 | } | 2976 | } |
@@ -3878,7 +3878,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, | |||
3878 | SCTP_CHUNK(chunk)); | 3878 | SCTP_CHUNK(chunk)); |
3879 | 3879 | ||
3880 | /* Count this as receiving DATA. */ | 3880 | /* Count this as receiving DATA. */ |
3881 | if (asoc->autoclose) { | 3881 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { |
3882 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | 3882 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
3883 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 3883 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
3884 | } | 3884 | } |
@@ -5267,7 +5267,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( | |||
5267 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | 5267 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
5268 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | 5268 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); |
5269 | 5269 | ||
5270 | if (asoc->autoclose) | 5270 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) |
5271 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 5271 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
5272 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 5272 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
5273 | 5273 | ||
@@ -5346,7 +5346,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( | |||
5346 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | 5346 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
5347 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | 5347 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); |
5348 | 5348 | ||
5349 | if (asoc->autoclose) | 5349 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) |
5350 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 5350 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
5351 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 5351 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
5352 | 5352 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 72046b9729a8..42b709c95cf3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2196,6 +2196,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, | |||
2196 | unsigned int optlen) | 2196 | unsigned int optlen) |
2197 | { | 2197 | { |
2198 | struct sctp_sock *sp = sctp_sk(sk); | 2198 | struct sctp_sock *sp = sctp_sk(sk); |
2199 | struct net *net = sock_net(sk); | ||
2199 | 2200 | ||
2200 | /* Applicable to UDP-style socket only */ | 2201 | /* Applicable to UDP-style socket only */ |
2201 | if (sctp_style(sk, TCP)) | 2202 | if (sctp_style(sk, TCP)) |
@@ -2205,6 +2206,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, | |||
2205 | if (copy_from_user(&sp->autoclose, optval, optlen)) | 2206 | if (copy_from_user(&sp->autoclose, optval, optlen)) |
2206 | return -EFAULT; | 2207 | return -EFAULT; |
2207 | 2208 | ||
2209 | if (sp->autoclose > net->sctp.max_autoclose) | ||
2210 | sp->autoclose = net->sctp.max_autoclose; | ||
2211 | |||
2208 | return 0; | 2212 | return 0; |
2209 | } | 2213 | } |
2210 | 2214 | ||
@@ -2811,6 +2815,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne | |||
2811 | { | 2815 | { |
2812 | struct sctp_rtoinfo rtoinfo; | 2816 | struct sctp_rtoinfo rtoinfo; |
2813 | struct sctp_association *asoc; | 2817 | struct sctp_association *asoc; |
2818 | unsigned long rto_min, rto_max; | ||
2819 | struct sctp_sock *sp = sctp_sk(sk); | ||
2814 | 2820 | ||
2815 | if (optlen != sizeof (struct sctp_rtoinfo)) | 2821 | if (optlen != sizeof (struct sctp_rtoinfo)) |
2816 | return -EINVAL; | 2822 | return -EINVAL; |
@@ -2824,26 +2830,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne | |||
2824 | if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) | 2830 | if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) |
2825 | return -EINVAL; | 2831 | return -EINVAL; |
2826 | 2832 | ||
2833 | rto_max = rtoinfo.srto_max; | ||
2834 | rto_min = rtoinfo.srto_min; | ||
2835 | |||
2836 | if (rto_max) | ||
2837 | rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; | ||
2838 | else | ||
2839 | rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; | ||
2840 | |||
2841 | if (rto_min) | ||
2842 | rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; | ||
2843 | else | ||
2844 | rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; | ||
2845 | |||
2846 | if (rto_min > rto_max) | ||
2847 | return -EINVAL; | ||
2848 | |||
2827 | if (asoc) { | 2849 | if (asoc) { |
2828 | if (rtoinfo.srto_initial != 0) | 2850 | if (rtoinfo.srto_initial != 0) |
2829 | asoc->rto_initial = | 2851 | asoc->rto_initial = |
2830 | msecs_to_jiffies(rtoinfo.srto_initial); | 2852 | msecs_to_jiffies(rtoinfo.srto_initial); |
2831 | if (rtoinfo.srto_max != 0) | 2853 | asoc->rto_max = rto_max; |
2832 | asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); | 2854 | asoc->rto_min = rto_min; |
2833 | if (rtoinfo.srto_min != 0) | ||
2834 | asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min); | ||
2835 | } else { | 2855 | } else { |
2836 | /* If there is no association or the association-id = 0 | 2856 | /* If there is no association or the association-id = 0 |
2837 | * set the values to the endpoint. | 2857 | * set the values to the endpoint. |
2838 | */ | 2858 | */ |
2839 | struct sctp_sock *sp = sctp_sk(sk); | ||
2840 | |||
2841 | if (rtoinfo.srto_initial != 0) | 2859 | if (rtoinfo.srto_initial != 0) |
2842 | sp->rtoinfo.srto_initial = rtoinfo.srto_initial; | 2860 | sp->rtoinfo.srto_initial = rtoinfo.srto_initial; |
2843 | if (rtoinfo.srto_max != 0) | 2861 | sp->rtoinfo.srto_max = rto_max; |
2844 | sp->rtoinfo.srto_max = rtoinfo.srto_max; | 2862 | sp->rtoinfo.srto_min = rto_min; |
2845 | if (rtoinfo.srto_min != 0) | ||
2846 | sp->rtoinfo.srto_min = rtoinfo.srto_min; | ||
2847 | } | 2863 | } |
2848 | 2864 | ||
2849 | return 0; | 2865 | return 0; |
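
The SCTP_RTOINFO hunk above derives effective minimum and maximum RTO values up front, treating zero as "keep the current setting" and rejecting a minimum above the maximum before anything is written back. A small sketch of that validation (illustrative names; the kernel converts to jiffies first and returns -EINVAL where this returns -1):

#include <stdio.h>

/* Sketch of the added rto_min/rto_max consistency check. */
static int rto_check(unsigned long cur_min, unsigned long cur_max,
		     unsigned long req_min, unsigned long req_max)
{
	unsigned long rto_min = req_min ? req_min : cur_min;  /* 0 keeps the old value */
	unsigned long rto_max = req_max ? req_max : cur_max;

	return rto_min > rto_max ? -1 : 0;
}

int main(void)
{
	printf("%d\n", rto_check(1000, 60000, 0, 500));  /* -1: new max below current min */
	printf("%d\n", rto_check(1000, 60000, 200, 0));  /* 0: new min still below current max */
	return 0;
}
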
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 6b36561a1b3b..b0565afb61c7 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -56,11 +56,16 @@ extern long sysctl_sctp_mem[3]; | |||
56 | extern int sysctl_sctp_rmem[3]; | 56 | extern int sysctl_sctp_rmem[3]; |
57 | extern int sysctl_sctp_wmem[3]; | 57 | extern int sysctl_sctp_wmem[3]; |
58 | 58 | ||
59 | static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, | 59 | static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, |
60 | int write, | 60 | void __user *buffer, size_t *lenp, |
61 | loff_t *ppos); | ||
62 | static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, | ||
63 | void __user *buffer, size_t *lenp, | ||
64 | loff_t *ppos); | ||
65 | static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | ||
61 | void __user *buffer, size_t *lenp, | 66 | void __user *buffer, size_t *lenp, |
62 | |||
63 | loff_t *ppos); | 67 | loff_t *ppos); |
68 | |||
64 | static struct ctl_table sctp_table[] = { | 69 | static struct ctl_table sctp_table[] = { |
65 | { | 70 | { |
66 | .procname = "sctp_mem", | 71 | .procname = "sctp_mem", |
@@ -102,17 +107,17 @@ static struct ctl_table sctp_net_table[] = { | |||
102 | .data = &init_net.sctp.rto_min, | 107 | .data = &init_net.sctp.rto_min, |
103 | .maxlen = sizeof(unsigned int), | 108 | .maxlen = sizeof(unsigned int), |
104 | .mode = 0644, | 109 | .mode = 0644, |
105 | .proc_handler = proc_dointvec_minmax, | 110 | .proc_handler = proc_sctp_do_rto_min, |
106 | .extra1 = &one, | 111 | .extra1 = &one, |
107 | .extra2 = &timer_max | 112 | .extra2 = &init_net.sctp.rto_max |
108 | }, | 113 | }, |
109 | { | 114 | { |
110 | .procname = "rto_max", | 115 | .procname = "rto_max", |
111 | .data = &init_net.sctp.rto_max, | 116 | .data = &init_net.sctp.rto_max, |
112 | .maxlen = sizeof(unsigned int), | 117 | .maxlen = sizeof(unsigned int), |
113 | .mode = 0644, | 118 | .mode = 0644, |
114 | .proc_handler = proc_dointvec_minmax, | 119 | .proc_handler = proc_sctp_do_rto_max, |
115 | .extra1 = &one, | 120 | .extra1 = &init_net.sctp.rto_min, |
116 | .extra2 = &timer_max | 121 | .extra2 = &timer_max |
117 | }, | 122 | }, |
118 | { | 123 | { |
@@ -294,8 +299,7 @@ static struct ctl_table sctp_net_table[] = { | |||
294 | { /* sentinel */ } | 299 | { /* sentinel */ } |
295 | }; | 300 | }; |
296 | 301 | ||
297 | static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, | 302 | static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, |
298 | int write, | ||
299 | void __user *buffer, size_t *lenp, | 303 | void __user *buffer, size_t *lenp, |
300 | loff_t *ppos) | 304 | loff_t *ppos) |
301 | { | 305 | { |
@@ -342,6 +346,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, | |||
342 | return ret; | 346 | return ret; |
343 | } | 347 | } |
344 | 348 | ||
349 | static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, | ||
350 | void __user *buffer, size_t *lenp, | ||
351 | loff_t *ppos) | ||
352 | { | ||
353 | struct net *net = current->nsproxy->net_ns; | ||
354 | int new_value; | ||
355 | struct ctl_table tbl; | ||
356 | unsigned int min = *(unsigned int *) ctl->extra1; | ||
357 | unsigned int max = *(unsigned int *) ctl->extra2; | ||
358 | int ret; | ||
359 | |||
360 | memset(&tbl, 0, sizeof(struct ctl_table)); | ||
361 | tbl.maxlen = sizeof(unsigned int); | ||
362 | |||
363 | if (write) | ||
364 | tbl.data = &new_value; | ||
365 | else | ||
366 | tbl.data = &net->sctp.rto_min; | ||
367 | ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); | ||
368 | if (write) { | ||
369 | if (ret || new_value > max || new_value < min) | ||
370 | return -EINVAL; | ||
371 | net->sctp.rto_min = new_value; | ||
372 | } | ||
373 | return ret; | ||
374 | } | ||
375 | |||
376 | static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | ||
377 | void __user *buffer, size_t *lenp, | ||
378 | loff_t *ppos) | ||
379 | { | ||
380 | struct net *net = current->nsproxy->net_ns; | ||
381 | int new_value; | ||
382 | struct ctl_table tbl; | ||
383 | unsigned int min = *(unsigned int *) ctl->extra1; | ||
384 | unsigned int max = *(unsigned int *) ctl->extra2; | ||
385 | int ret; | ||
386 | |||
387 | memset(&tbl, 0, sizeof(struct ctl_table)); | ||
388 | tbl.maxlen = sizeof(unsigned int); | ||
389 | |||
390 | if (write) | ||
391 | tbl.data = &new_value; | ||
392 | else | ||
393 | tbl.data = &net->sctp.rto_max; | ||
394 | ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); | ||
395 | if (write) { | ||
396 | if (ret || new_value > max || new_value < min) | ||
397 | return -EINVAL; | ||
398 | net->sctp.rto_max = new_value; | ||
399 | } | ||
400 | return ret; | ||
401 | } | ||
402 | |||
345 | int sctp_sysctl_net_register(struct net *net) | 403 | int sctp_sysctl_net_register(struct net *net) |
346 | { | 404 | { |
347 | struct ctl_table *table; | 405 | struct ctl_table *table; |
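
Both new handlers above follow the same shape: reads go straight to the per-net field, while writes are parsed into a scratch value and only published if they fall inside the bounds taken from the opposite knob (extra1/extra2). A hedged outline of that write path, with proc_dointvec() and the netns plumbing abstracted away:

#include <stdio.h>

/* Sketch of the write path shared by proc_sctp_do_rto_min()/_max():
 * range-check the parsed value against [min, max] before storing it. */
static int store_checked(unsigned int *target, unsigned int new_value,
			 unsigned int min, unsigned int max)
{
	if (new_value < min || new_value > max)
		return -1;			/* kernel: -EINVAL */
	*target = new_value;
	return 0;
}

int main(void)
{
	unsigned int rto_min = 1000, rto_max = 60000;

	/* rto_min may not be raised above the current rto_max */
	printf("%d\n", store_checked(&rto_min, 70000, 1, rto_max));  /* -1 */
	printf("%d\n", store_checked(&rto_min, 200, 1, rto_max));    /* 0 */
	return 0;
}
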
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index e332efb124cc..efc46ffed1fd 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -573,7 +573,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t) | |||
573 | u32 old_cwnd = t->cwnd; | 573 | u32 old_cwnd = t->cwnd; |
574 | u32 max_burst_bytes; | 574 | u32 max_burst_bytes; |
575 | 575 | ||
576 | if (t->burst_limited) | 576 | if (t->burst_limited || asoc->max_burst == 0) |
577 | return; | 577 | return; |
578 | 578 | ||
579 | max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); | 579 | max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); |
diff --git a/net/tipc/core.c b/net/tipc/core.c index fd4eeeaa972a..c6d3f75a9e1b 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -113,7 +113,6 @@ err: | |||
113 | static void tipc_core_stop(void) | 113 | static void tipc_core_stop(void) |
114 | { | 114 | { |
115 | tipc_netlink_stop(); | 115 | tipc_netlink_stop(); |
116 | tipc_handler_stop(); | ||
117 | tipc_cfg_stop(); | 116 | tipc_cfg_stop(); |
118 | tipc_subscr_stop(); | 117 | tipc_subscr_stop(); |
119 | tipc_nametbl_stop(); | 118 | tipc_nametbl_stop(); |
@@ -146,9 +145,10 @@ static int tipc_core_start(void) | |||
146 | res = tipc_subscr_start(); | 145 | res = tipc_subscr_start(); |
147 | if (!res) | 146 | if (!res) |
148 | res = tipc_cfg_init(); | 147 | res = tipc_cfg_init(); |
149 | if (res) | 148 | if (res) { |
149 | tipc_handler_stop(); | ||
150 | tipc_core_stop(); | 150 | tipc_core_stop(); |
151 | 151 | } | |
152 | return res; | 152 | return res; |
153 | } | 153 | } |
154 | 154 | ||
@@ -178,6 +178,7 @@ static int __init tipc_init(void) | |||
178 | 178 | ||
179 | static void __exit tipc_exit(void) | 179 | static void __exit tipc_exit(void) |
180 | { | 180 | { |
181 | tipc_handler_stop(); | ||
181 | tipc_core_stop_net(); | 182 | tipc_core_stop_net(); |
182 | tipc_core_stop(); | 183 | tipc_core_stop(); |
183 | pr_info("Deactivated\n"); | 184 | pr_info("Deactivated\n"); |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index b36f0fcd9bdf..e4bc8a296744 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
@@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument) | |||
56 | { | 56 | { |
57 | struct queue_item *item; | 57 | struct queue_item *item; |
58 | 58 | ||
59 | spin_lock_bh(&qitem_lock); | ||
59 | if (!handler_enabled) { | 60 | if (!handler_enabled) { |
60 | pr_err("Signal request ignored by handler\n"); | 61 | pr_err("Signal request ignored by handler\n"); |
62 | spin_unlock_bh(&qitem_lock); | ||
61 | return -ENOPROTOOPT; | 63 | return -ENOPROTOOPT; |
62 | } | 64 | } |
63 | 65 | ||
64 | spin_lock_bh(&qitem_lock); | ||
65 | item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); | 66 | item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC); |
66 | if (!item) { | 67 | if (!item) { |
67 | pr_err("Signal queue out of memory\n"); | 68 | pr_err("Signal queue out of memory\n"); |
@@ -112,10 +113,14 @@ void tipc_handler_stop(void) | |||
112 | struct list_head *l, *n; | 113 | struct list_head *l, *n; |
113 | struct queue_item *item; | 114 | struct queue_item *item; |
114 | 115 | ||
115 | if (!handler_enabled) | 116 | spin_lock_bh(&qitem_lock); |
117 | if (!handler_enabled) { | ||
118 | spin_unlock_bh(&qitem_lock); | ||
116 | return; | 119 | return; |
117 | 120 | } | |
118 | handler_enabled = 0; | 121 | handler_enabled = 0; |
122 | spin_unlock_bh(&qitem_lock); | ||
123 | |||
119 | tasklet_kill(&tipc_tasklet); | 124 | tasklet_kill(&tipc_tasklet); |
120 | 125 | ||
121 | spin_lock_bh(&qitem_lock); | 126 | spin_lock_bh(&qitem_lock); |
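
The handler.c hunks move both the handler_enabled test and its clearing under qitem_lock, so tipc_k_signal() can no longer race with tipc_handler_stop() and queue work onto a dying handler. A pthread-based sketch of the same check-under-lock idiom (the TIPC names are mirrored only for readability):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t qitem_lock = PTHREAD_MUTEX_INITIALIZER;
static int handler_enabled = 1;

/* Sketch: the enabled flag is only read or cleared while holding the
 * queue lock, so enqueue and shutdown cannot race. */
static int signal_sketch(void)
{
	pthread_mutex_lock(&qitem_lock);
	if (!handler_enabled) {
		pthread_mutex_unlock(&qitem_lock);
		return -ENOPROTOOPT;
	}
	/* ... allocate and queue the work item here ... */
	pthread_mutex_unlock(&qitem_lock);
	return 0;
}

static void stop_sketch(void)
{
	pthread_mutex_lock(&qitem_lock);
	handler_enabled = 0;
	pthread_mutex_unlock(&qitem_lock);
	/* ... kill the tasklet and free queued items afterwards ... */
}

int main(void)
{
	signal_sketch();	/* succeeds: handler still enabled */
	stop_sketch();
	signal_sketch();	/* now refused with -ENOPROTOOPT */
	return 0;
}
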
diff --git a/net/tipc/link.c b/net/tipc/link.c index 69cd9bf3f561..13b987745820 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) | |||
1498 | int type; | 1498 | int type; |
1499 | 1499 | ||
1500 | head = head->next; | 1500 | head = head->next; |
1501 | buf->next = NULL; | ||
1501 | 1502 | ||
1502 | /* Ensure bearer is still enabled */ | 1503 | /* Ensure bearer is still enabled */ |
1503 | if (unlikely(!b_ptr->active)) | 1504 | if (unlikely(!b_ptr->active)) |
diff --git a/net/tipc/port.c b/net/tipc/port.c index c081a7632302..d43f3182b1d4 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk, | |||
251 | return p_ptr; | 251 | return p_ptr; |
252 | } | 252 | } |
253 | 253 | ||
254 | int tipc_deleteport(u32 ref) | 254 | int tipc_deleteport(struct tipc_port *p_ptr) |
255 | { | 255 | { |
256 | struct tipc_port *p_ptr; | ||
257 | struct sk_buff *buf = NULL; | 256 | struct sk_buff *buf = NULL; |
258 | 257 | ||
259 | tipc_withdraw(ref, 0, NULL); | 258 | tipc_withdraw(p_ptr, 0, NULL); |
260 | p_ptr = tipc_port_lock(ref); | ||
261 | if (!p_ptr) | ||
262 | return -EINVAL; | ||
263 | 259 | ||
264 | tipc_ref_discard(ref); | 260 | spin_lock_bh(p_ptr->lock); |
265 | tipc_port_unlock(p_ptr); | 261 | tipc_ref_discard(p_ptr->ref); |
262 | spin_unlock_bh(p_ptr->lock); | ||
266 | 263 | ||
267 | k_cancel_timer(&p_ptr->timer); | 264 | k_cancel_timer(&p_ptr->timer); |
268 | if (p_ptr->connected) { | 265 | if (p_ptr->connected) { |
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp) | |||
704 | } | 701 | } |
705 | 702 | ||
706 | 703 | ||
707 | int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 704 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
705 | struct tipc_name_seq const *seq) | ||
708 | { | 706 | { |
709 | struct tipc_port *p_ptr; | ||
710 | struct publication *publ; | 707 | struct publication *publ; |
711 | u32 key; | 708 | u32 key; |
712 | int res = -EINVAL; | ||
713 | 709 | ||
714 | p_ptr = tipc_port_lock(ref); | 710 | if (p_ptr->connected) |
715 | if (!p_ptr) | ||
716 | return -EINVAL; | 711 | return -EINVAL; |
712 | key = p_ptr->ref + p_ptr->pub_count + 1; | ||
713 | if (key == p_ptr->ref) | ||
714 | return -EADDRINUSE; | ||
717 | 715 | ||
718 | if (p_ptr->connected) | ||
719 | goto exit; | ||
720 | key = ref + p_ptr->pub_count + 1; | ||
721 | if (key == ref) { | ||
722 | res = -EADDRINUSE; | ||
723 | goto exit; | ||
724 | } | ||
725 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, | 716 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, |
726 | scope, p_ptr->ref, key); | 717 | scope, p_ptr->ref, key); |
727 | if (publ) { | 718 | if (publ) { |
728 | list_add(&publ->pport_list, &p_ptr->publications); | 719 | list_add(&publ->pport_list, &p_ptr->publications); |
729 | p_ptr->pub_count++; | 720 | p_ptr->pub_count++; |
730 | p_ptr->published = 1; | 721 | p_ptr->published = 1; |
731 | res = 0; | 722 | return 0; |
732 | } | 723 | } |
733 | exit: | 724 | return -EINVAL; |
734 | tipc_port_unlock(p_ptr); | ||
735 | return res; | ||
736 | } | 725 | } |
737 | 726 | ||
738 | int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 727 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
728 | struct tipc_name_seq const *seq) | ||
739 | { | 729 | { |
740 | struct tipc_port *p_ptr; | ||
741 | struct publication *publ; | 730 | struct publication *publ; |
742 | struct publication *tpubl; | 731 | struct publication *tpubl; |
743 | int res = -EINVAL; | 732 | int res = -EINVAL; |
744 | 733 | ||
745 | p_ptr = tipc_port_lock(ref); | ||
746 | if (!p_ptr) | ||
747 | return -EINVAL; | ||
748 | if (!seq) { | 734 | if (!seq) { |
749 | list_for_each_entry_safe(publ, tpubl, | 735 | list_for_each_entry_safe(publ, tpubl, |
750 | &p_ptr->publications, pport_list) { | 736 | &p_ptr->publications, pport_list) { |
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
771 | } | 757 | } |
772 | if (list_empty(&p_ptr->publications)) | 758 | if (list_empty(&p_ptr->publications)) |
773 | p_ptr->published = 0; | 759 | p_ptr->published = 0; |
774 | tipc_port_unlock(p_ptr); | ||
775 | return res; | 760 | return res; |
776 | } | 761 | } |
777 | 762 | ||
diff --git a/net/tipc/port.h b/net/tipc/port.h index 912253597343..34f12bd4074e 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err); | |||
116 | 116 | ||
117 | void tipc_acknowledge(u32 port_ref, u32 ack); | 117 | void tipc_acknowledge(u32 port_ref, u32 ack); |
118 | 118 | ||
119 | int tipc_deleteport(u32 portref); | 119 | int tipc_deleteport(struct tipc_port *p_ptr); |
120 | 120 | ||
121 | int tipc_portimportance(u32 portref, unsigned int *importance); | 121 | int tipc_portimportance(u32 portref, unsigned int *importance); |
122 | int tipc_set_portimportance(u32 portref, unsigned int importance); | 122 | int tipc_set_portimportance(u32 portref, unsigned int importance); |
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable); | |||
127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); | 127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); |
128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); | 128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); |
129 | 129 | ||
130 | int tipc_publish(u32 portref, unsigned int scope, | 130 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
131 | struct tipc_name_seq const *name_seq); | 131 | struct tipc_name_seq const *name_seq); |
132 | int tipc_withdraw(u32 portref, unsigned int scope, | 132 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
133 | struct tipc_name_seq const *name_seq); | 133 | struct tipc_name_seq const *name_seq); |
134 | 134 | ||
135 | int tipc_connect(u32 portref, struct tipc_portid const *port); | 135 | int tipc_connect(u32 portref, struct tipc_portid const *port); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3b61851bb927..e741416d1d24 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -354,7 +354,7 @@ static int release(struct socket *sock) | |||
354 | * Delete TIPC port; this ensures no more messages are queued | 354 | * Delete TIPC port; this ensures no more messages are queued |
355 | * (also disconnects an active connection & sends a 'FIN-' to peer) | 355 | * (also disconnects an active connection & sends a 'FIN-' to peer) |
356 | */ | 356 | */ |
357 | res = tipc_deleteport(tport->ref); | 357 | res = tipc_deleteport(tport); |
358 | 358 | ||
359 | /* Discard any remaining (connection-based) messages in receive queue */ | 359 | /* Discard any remaining (connection-based) messages in receive queue */ |
360 | __skb_queue_purge(&sk->sk_receive_queue); | 360 | __skb_queue_purge(&sk->sk_receive_queue); |
@@ -386,30 +386,46 @@ static int release(struct socket *sock) | |||
386 | */ | 386 | */ |
387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | 387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) |
388 | { | 388 | { |
389 | struct sock *sk = sock->sk; | ||
389 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 390 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
390 | u32 portref = tipc_sk_port(sock->sk)->ref; | 391 | struct tipc_port *tport = tipc_sk_port(sock->sk); |
392 | int res = -EINVAL; | ||
391 | 393 | ||
392 | if (unlikely(!uaddr_len)) | 394 | lock_sock(sk); |
393 | return tipc_withdraw(portref, 0, NULL); | 395 | if (unlikely(!uaddr_len)) { |
396 | res = tipc_withdraw(tport, 0, NULL); | ||
397 | goto exit; | ||
398 | } | ||
394 | 399 | ||
395 | if (uaddr_len < sizeof(struct sockaddr_tipc)) | 400 | if (uaddr_len < sizeof(struct sockaddr_tipc)) { |
396 | return -EINVAL; | 401 | res = -EINVAL; |
397 | if (addr->family != AF_TIPC) | 402 | goto exit; |
398 | return -EAFNOSUPPORT; | 403 | } |
404 | if (addr->family != AF_TIPC) { | ||
405 | res = -EAFNOSUPPORT; | ||
406 | goto exit; | ||
407 | } | ||
399 | 408 | ||
400 | if (addr->addrtype == TIPC_ADDR_NAME) | 409 | if (addr->addrtype == TIPC_ADDR_NAME) |
401 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; | 410 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; |
402 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) | 411 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { |
403 | return -EAFNOSUPPORT; | 412 | res = -EAFNOSUPPORT; |
413 | goto exit; | ||
414 | } | ||
404 | 415 | ||
405 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && | 416 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && |
406 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && | 417 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && |
407 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) | 418 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) { |
408 | return -EACCES; | 419 | res = -EACCES; |
420 | goto exit; | ||
421 | } | ||
409 | 422 | ||
410 | return (addr->scope > 0) ? | 423 | res = (addr->scope > 0) ? |
411 | tipc_publish(portref, addr->scope, &addr->addr.nameseq) : | 424 | tipc_publish(tport, addr->scope, &addr->addr.nameseq) : |
412 | tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); | 425 | tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq); |
426 | exit: | ||
427 | release_sock(sk); | ||
428 | return res; | ||
413 | } | 429 | } |
414 | 430 | ||
415 | /** | 431 | /** |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 01625ccc3ae6..a427623ee574 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, | |||
530 | static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, | 530 | static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, |
531 | struct msghdr *, size_t, int); | 531 | struct msghdr *, size_t, int); |
532 | 532 | ||
533 | static void unix_set_peek_off(struct sock *sk, int val) | 533 | static int unix_set_peek_off(struct sock *sk, int val) |
534 | { | 534 | { |
535 | struct unix_sock *u = unix_sk(sk); | 535 | struct unix_sock *u = unix_sk(sk); |
536 | 536 | ||
537 | mutex_lock(&u->readlock); | 537 | if (mutex_lock_interruptible(&u->readlock)) |
538 | return -EINTR; | ||
539 | |||
538 | sk->sk_peek_off = val; | 540 | sk->sk_peek_off = val; |
539 | mutex_unlock(&u->readlock); | 541 | mutex_unlock(&u->readlock); |
542 | |||
543 | return 0; | ||
540 | } | 544 | } |
541 | 545 | ||
542 | 546 | ||
@@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock) | |||
714 | int err; | 718 | int err; |
715 | unsigned int retries = 0; | 719 | unsigned int retries = 0; |
716 | 720 | ||
717 | mutex_lock(&u->readlock); | 721 | err = mutex_lock_interruptible(&u->readlock); |
722 | if (err) | ||
723 | return err; | ||
718 | 724 | ||
719 | err = 0; | 725 | err = 0; |
720 | if (u->addr) | 726 | if (u->addr) |
@@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
873 | goto out; | 879 | goto out; |
874 | addr_len = err; | 880 | addr_len = err; |
875 | 881 | ||
876 | mutex_lock(&u->readlock); | 882 | err = mutex_lock_interruptible(&u->readlock); |
883 | if (err) | ||
884 | goto out; | ||
877 | 885 | ||
878 | err = -EINVAL; | 886 | err = -EINVAL; |
879 | if (u->addr) | 887 | if (u->addr) |
diff --git a/net/wireless/core.c b/net/wireless/core.c index aff959e5a1b3..52b865fb7351 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -451,6 +451,15 @@ int wiphy_register(struct wiphy *wiphy) | |||
451 | int i; | 451 | int i; |
452 | u16 ifmodes = wiphy->interface_modes; | 452 | u16 ifmodes = wiphy->interface_modes; |
453 | 453 | ||
454 | /* support for 5/10 MHz is broken due to nl80211 API mess - disable */ | ||
455 | wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ; | ||
456 | |||
457 | /* | ||
458 | * There are major locking problems in nl80211/mac80211 for CSA, | ||
459 | * disable for all drivers until this has been reworked. | ||
460 | */ | ||
461 | wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH; | ||
462 | |||
454 | #ifdef CONFIG_PM | 463 | #ifdef CONFIG_PM |
455 | if (WARN_ON(wiphy->wowlan && | 464 | if (WARN_ON(wiphy->wowlan && |
456 | (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && | 465 | (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 9d797df56649..89737ee2669a 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -262,7 +262,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
262 | 262 | ||
263 | /* try to find an IBSS channel if none requested ... */ | 263 | /* try to find an IBSS channel if none requested ... */ |
264 | if (!wdev->wext.ibss.chandef.chan) { | 264 | if (!wdev->wext.ibss.chandef.chan) { |
265 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; | 265 | struct ieee80211_channel *new_chan = NULL; |
266 | 266 | ||
267 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 267 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
268 | struct ieee80211_supported_band *sband; | 268 | struct ieee80211_supported_band *sband; |
@@ -278,18 +278,19 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
278 | continue; | 278 | continue; |
279 | if (chan->flags & IEEE80211_CHAN_DISABLED) | 279 | if (chan->flags & IEEE80211_CHAN_DISABLED) |
280 | continue; | 280 | continue; |
281 | wdev->wext.ibss.chandef.chan = chan; | 281 | new_chan = chan; |
282 | wdev->wext.ibss.chandef.center_freq1 = | ||
283 | chan->center_freq; | ||
284 | break; | 282 | break; |
285 | } | 283 | } |
286 | 284 | ||
287 | if (wdev->wext.ibss.chandef.chan) | 285 | if (new_chan) |
288 | break; | 286 | break; |
289 | } | 287 | } |
290 | 288 | ||
291 | if (!wdev->wext.ibss.chandef.chan) | 289 | if (!new_chan) |
292 | return -EINVAL; | 290 | return -EINVAL; |
291 | |||
292 | cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan, | ||
293 | NL80211_CHAN_NO_HT); | ||
293 | } | 294 | } |
294 | 295 | ||
295 | /* don't join -- SSID is not there */ | 296 | /* don't join -- SSID is not there */ |
@@ -363,9 +364,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | |||
363 | return err; | 364 | return err; |
364 | 365 | ||
365 | if (chan) { | 366 | if (chan) { |
366 | wdev->wext.ibss.chandef.chan = chan; | 367 | cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan, |
367 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; | 368 | NL80211_CHAN_NO_HT); |
368 | wdev->wext.ibss.chandef.center_freq1 = freq; | ||
369 | wdev->wext.ibss.channel_fixed = true; | 369 | wdev->wext.ibss.channel_fixed = true; |
370 | } else { | 370 | } else { |
371 | /* cfg80211_ibss_wext_join will pick one if needed */ | 371 | /* cfg80211_ibss_wext_join will pick one if needed */ |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a1eb21073176..138dc3bb8b67 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2687,7 +2687,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2687 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 2687 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
2688 | NL80211_CMD_NEW_KEY); | 2688 | NL80211_CMD_NEW_KEY); |
2689 | if (!hdr) | 2689 | if (!hdr) |
2690 | return -ENOBUFS; | 2690 | goto nla_put_failure; |
2691 | 2691 | ||
2692 | cookie.msg = msg; | 2692 | cookie.msg = msg; |
2693 | cookie.idx = key_idx; | 2693 | cookie.idx = key_idx; |
@@ -5349,6 +5349,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
5349 | err = -EINVAL; | 5349 | err = -EINVAL; |
5350 | goto out_free; | 5350 | goto out_free; |
5351 | } | 5351 | } |
5352 | |||
5353 | if (!wiphy->bands[band]) | ||
5354 | continue; | ||
5355 | |||
5352 | err = ieee80211_get_ratemask(wiphy->bands[band], | 5356 | err = ieee80211_get_ratemask(wiphy->bands[band], |
5353 | nla_data(attr), | 5357 | nla_data(attr), |
5354 | nla_len(attr), | 5358 | nla_len(attr), |
@@ -9633,8 +9637,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
9633 | nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) | 9637 | nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) |
9634 | goto nla_put_failure; | 9638 | goto nla_put_failure; |
9635 | 9639 | ||
9636 | if (req->flags) | 9640 | if (req->flags && |
9637 | nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags); | 9641 | nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) |
9642 | goto nla_put_failure; | ||
9638 | 9643 | ||
9639 | return 0; | 9644 | return 0; |
9640 | nla_put_failure: | 9645 | nla_put_failure: |
@@ -11093,6 +11098,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, | |||
11093 | struct nlattr *reasons; | 11098 | struct nlattr *reasons; |
11094 | 11099 | ||
11095 | reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); | 11100 | reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); |
11101 | if (!reasons) | ||
11102 | goto free_msg; | ||
11096 | 11103 | ||
11097 | if (wakeup->disconnect && | 11104 | if (wakeup->disconnect && |
11098 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) | 11105 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) |
@@ -11118,16 +11125,18 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, | |||
11118 | wakeup->pattern_idx)) | 11125 | wakeup->pattern_idx)) |
11119 | goto free_msg; | 11126 | goto free_msg; |
11120 | 11127 | ||
11121 | if (wakeup->tcp_match) | 11128 | if (wakeup->tcp_match && |
11122 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH); | 11129 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH)) |
11130 | goto free_msg; | ||
11123 | 11131 | ||
11124 | if (wakeup->tcp_connlost) | 11132 | if (wakeup->tcp_connlost && |
11125 | nla_put_flag(msg, | 11133 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST)) |
11126 | NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST); | 11134 | goto free_msg; |
11127 | 11135 | ||
11128 | if (wakeup->tcp_nomoretokens) | 11136 | if (wakeup->tcp_nomoretokens && |
11129 | nla_put_flag(msg, | 11137 | nla_put_flag(msg, |
11130 | NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS); | 11138 | NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS)) |
11139 | goto free_msg; | ||
11131 | 11140 | ||
11132 | if (wakeup->packet) { | 11141 | if (wakeup->packet) { |
11133 | u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; | 11142 | u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; |
@@ -11263,24 +11272,29 @@ void cfg80211_ft_event(struct net_device *netdev, | |||
11263 | return; | 11272 | return; |
11264 | 11273 | ||
11265 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); | 11274 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); |
11266 | if (!hdr) { | 11275 | if (!hdr) |
11267 | nlmsg_free(msg); | 11276 | goto out; |
11268 | return; | ||
11269 | } | ||
11270 | 11277 | ||
11271 | nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | 11278 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || |
11272 | nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | 11279 | nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || |
11273 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap); | 11280 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap)) |
11274 | if (ft_event->ies) | 11281 | goto out; |
11275 | nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies); | 11282 | |
11276 | if (ft_event->ric_ies) | 11283 | if (ft_event->ies && |
11277 | nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, | 11284 | nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies)) |
11278 | ft_event->ric_ies); | 11285 | goto out; |
11286 | if (ft_event->ric_ies && | ||
11287 | nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, | ||
11288 | ft_event->ric_ies)) | ||
11289 | goto out; | ||
11279 | 11290 | ||
11280 | genlmsg_end(msg, hdr); | 11291 | genlmsg_end(msg, hdr); |
11281 | 11292 | ||
11282 | genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, | 11293 | genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, |
11283 | NL80211_MCGRP_MLME, GFP_KERNEL); | 11294 | NL80211_MCGRP_MLME, GFP_KERNEL); |
11295 | return; | ||
11296 | out: | ||
11297 | nlmsg_free(msg); | ||
11284 | } | 11298 | } |
11285 | EXPORT_SYMBOL(cfg80211_ft_event); | 11299 | EXPORT_SYMBOL(cfg80211_ft_event); |
11286 | 11300 | ||
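
Each nl80211 hunk above applies the same discipline: every nla_put*() result is checked, and any failure unwinds to a single label that frees the partially built message instead of broadcasting a truncated one. A generic build-or-free sketch of that shape, with a stub attribute writer standing in for the netlink helpers:

#include <stdlib.h>

struct msg { int nattrs; };                    /* stand-in for the netlink sk_buff */

static int put_attr(struct msg *m, int value)  /* stand-in for nla_put_u32() etc. */
{
	m->nattrs += value != 0;
	return 0;                              /* non-zero would mean "no room" */
}

/* Sketch: any failed put unwinds through one free label; only a fully
 * built message is finalized and sent. */
static int build_event(void)
{
	struct msg *m = calloc(1, sizeof(*m));

	if (!m)
		return -1;
	if (put_attr(m, 1) || put_attr(m, 2))
		goto out;
	/* genlmsg_end() + multicast would happen here */
	free(m);
	return 0;
out:
	free(m);
	return -1;
}

int main(void)
{
	return build_event();
}
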
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index a271c27fac77..722da616438c 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c | |||
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init( | |||
124 | /* find payload start allowing for extended bitmap(s) */ | 124 | /* find payload start allowing for extended bitmap(s) */ |
125 | 125 | ||
126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { | 126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { |
127 | if ((unsigned long)iterator->_arg - | ||
128 | (unsigned long)iterator->_rtheader + sizeof(uint32_t) > | ||
129 | (unsigned long)iterator->_max_length) | ||
130 | return -EINVAL; | ||
127 | while (get_unaligned_le32(iterator->_arg) & | 131 | while (get_unaligned_le32(iterator->_arg) & |
128 | (1 << IEEE80211_RADIOTAP_EXT)) { | 132 | (1 << IEEE80211_RADIOTAP_EXT)) { |
129 | iterator->_arg += sizeof(uint32_t); | 133 | iterator->_arg += sizeof(uint32_t); |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 65f800890d70..d3c5bd7c6b51 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
632 | } | 632 | } |
633 | #endif | 633 | #endif |
634 | 634 | ||
635 | if (!bss && (status == WLAN_STATUS_SUCCESS)) { | ||
636 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | ||
637 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
638 | wdev->ssid, wdev->ssid_len, | ||
639 | WLAN_CAPABILITY_ESS, | ||
640 | WLAN_CAPABILITY_ESS); | ||
641 | if (bss) | ||
642 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
643 | } | ||
644 | |||
635 | if (wdev->current_bss) { | 645 | if (wdev->current_bss) { |
636 | cfg80211_unhold_bss(wdev->current_bss); | 646 | cfg80211_unhold_bss(wdev->current_bss); |
637 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 647 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
649 | return; | 659 | return; |
650 | } | 660 | } |
651 | 661 | ||
652 | if (!bss) { | 662 | if (WARN_ON(!bss)) |
653 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | 663 | return; |
654 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
655 | wdev->ssid, wdev->ssid_len, | ||
656 | WLAN_CAPABILITY_ESS, | ||
657 | WLAN_CAPABILITY_ESS); | ||
658 | if (WARN_ON(!bss)) | ||
659 | return; | ||
660 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
661 | } | ||
662 | 664 | ||
663 | wdev->current_bss = bss_from_pub(bss); | 665 | wdev->current_bss = bss_from_pub(bss); |
664 | 666 | ||
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 32b10f53d0b4..2dcb37736d84 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh | |||
@@ -82,7 +82,9 @@ kallsyms() | |||
82 | kallsymopt="${kallsymopt} --all-symbols" | 82 | kallsymopt="${kallsymopt} --all-symbols" |
83 | fi | 83 | fi |
84 | 84 | ||
85 | kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" | 85 | if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then |
86 | kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" | ||
87 | fi | ||
86 | 88 | ||
87 | local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ | 89 | local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ |
88 | ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" | 90 | ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" |
diff --git a/scripts/sortextable.c b/scripts/sortextable.c index 5f7a8b663cb9..7941fbdfb050 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c | |||
@@ -31,6 +31,10 @@ | |||
31 | #include <tools/be_byteshift.h> | 31 | #include <tools/be_byteshift.h> |
32 | #include <tools/le_byteshift.h> | 32 | #include <tools/le_byteshift.h> |
33 | 33 | ||
34 | #ifndef EM_ARCOMPACT | ||
35 | #define EM_ARCOMPACT 93 | ||
36 | #endif | ||
37 | |||
34 | #ifndef EM_AARCH64 | 38 | #ifndef EM_AARCH64 |
35 | #define EM_AARCH64 183 | 39 | #define EM_AARCH64 183 |
36 | #endif | 40 | #endif |
@@ -268,6 +272,7 @@ do_file(char const *const fname) | |||
268 | case EM_S390: | 272 | case EM_S390: |
269 | custom_sort = sort_relative_table; | 273 | custom_sort = sort_relative_table; |
270 | break; | 274 | break; |
275 | case EM_ARCOMPACT: | ||
271 | case EM_ARM: | 276 | case EM_ARM: |
272 | case EM_AARCH64: | 277 | case EM_AARCH64: |
273 | case EM_MIPS: | 278 | case EM_MIPS: |
diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 7f44c3207a9b..8137b27d641d 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c | |||
@@ -70,7 +70,7 @@ int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep) | |||
70 | * | 70 | * |
71 | * TODO: Encrypt the stored data with a temporary key. | 71 | * TODO: Encrypt the stored data with a temporary key. |
72 | */ | 72 | */ |
73 | file = shmem_file_setup("", datalen, 0); | 73 | file = shmem_kernel_file_setup("", datalen, 0); |
74 | if (IS_ERR(file)) { | 74 | if (IS_ERR(file)) { |
75 | ret = PTR_ERR(file); | 75 | ret = PTR_ERR(file); |
76 | goto err_quota; | 76 | goto err_quota; |
diff --git a/security/keys/key.c b/security/keys/key.c index 55d110f0aced..6e21c11e48bc 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -272,7 +272,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | /* allocate and initialise the key and its description */ | 274 | /* allocate and initialise the key and its description */ |
275 | key = kmem_cache_alloc(key_jar, GFP_KERNEL); | 275 | key = kmem_cache_zalloc(key_jar, GFP_KERNEL); |
276 | if (!key) | 276 | if (!key) |
277 | goto no_memory_2; | 277 | goto no_memory_2; |
278 | 278 | ||
@@ -293,18 +293,12 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
293 | key->uid = uid; | 293 | key->uid = uid; |
294 | key->gid = gid; | 294 | key->gid = gid; |
295 | key->perm = perm; | 295 | key->perm = perm; |
296 | key->flags = 0; | ||
297 | key->expiry = 0; | ||
298 | key->payload.data = NULL; | ||
299 | key->security = NULL; | ||
300 | 296 | ||
301 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) | 297 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) |
302 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; | 298 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
303 | if (flags & KEY_ALLOC_TRUSTED) | 299 | if (flags & KEY_ALLOC_TRUSTED) |
304 | key->flags |= 1 << KEY_FLAG_TRUSTED; | 300 | key->flags |= 1 << KEY_FLAG_TRUSTED; |
305 | 301 | ||
306 | memset(&key->type_data, 0, sizeof(key->type_data)); | ||
307 | |||
308 | #ifdef KEY_DEBUGGING | 302 | #ifdef KEY_DEBUGGING |
309 | key->magic = KEY_DEBUG_MAGIC; | 303 | key->magic = KEY_DEBUG_MAGIC; |
310 | #endif | 304 | #endif |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 69f0cb7bab7e..d46cbc5e335e 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -160,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y) | |||
160 | static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) | 160 | static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) |
161 | { | 161 | { |
162 | const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; | 162 | const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; |
163 | const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK; | 163 | const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; |
164 | const char *description = index_key->description; | 164 | const char *description = index_key->description; |
165 | unsigned long hash, type; | 165 | unsigned long hash, type; |
166 | u32 piece; | 166 | u32 piece; |
@@ -194,10 +194,10 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde | |||
194 | * ordinary keys by making sure the lowest level segment in the hash is | 194 | * ordinary keys by making sure the lowest level segment in the hash is |
195 | * zero for keyrings and non-zero otherwise. | 195 | * zero for keyrings and non-zero otherwise. |
196 | */ | 196 | */ |
197 | if (index_key->type != &key_type_keyring && (hash & level_mask) == 0) | 197 | if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) |
198 | return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; | 198 | return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; |
199 | if (index_key->type == &key_type_keyring && (hash & level_mask) != 0) | 199 | if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) |
200 | return (hash + (hash << level_shift)) & ~level_mask; | 200 | return (hash + (hash << level_shift)) & ~fan_mask; |
201 | return hash; | 201 | return hash; |
202 | } | 202 | } |
203 | 203 | ||
@@ -279,12 +279,11 @@ static bool keyring_compare_object(const void *object, const void *data) | |||
279 | * Compare the index keys of a pair of objects and determine the bit position | 279 | * Compare the index keys of a pair of objects and determine the bit position |
280 | * at which they differ - if they differ. | 280 | * at which they differ - if they differ. |
281 | */ | 281 | */ |
282 | static int keyring_diff_objects(const void *_a, const void *_b) | 282 | static int keyring_diff_objects(const void *object, const void *data) |
283 | { | 283 | { |
284 | const struct key *key_a = keyring_ptr_to_key(_a); | 284 | const struct key *key_a = keyring_ptr_to_key(object); |
285 | const struct key *key_b = keyring_ptr_to_key(_b); | ||
286 | const struct keyring_index_key *a = &key_a->index_key; | 285 | const struct keyring_index_key *a = &key_a->index_key; |
287 | const struct keyring_index_key *b = &key_b->index_key; | 286 | const struct keyring_index_key *b = data; |
288 | unsigned long seg_a, seg_b; | 287 | unsigned long seg_a, seg_b; |
289 | int level, i; | 288 | int level, i; |
290 | 289 | ||
@@ -691,8 +690,8 @@ descend_to_node: | |||
691 | smp_read_barrier_depends(); | 690 | smp_read_barrier_depends(); |
692 | ptr = ACCESS_ONCE(shortcut->next_node); | 691 | ptr = ACCESS_ONCE(shortcut->next_node); |
693 | BUG_ON(!assoc_array_ptr_is_node(ptr)); | 692 | BUG_ON(!assoc_array_ptr_is_node(ptr)); |
694 | node = assoc_array_ptr_to_node(ptr); | ||
695 | } | 693 | } |
694 | node = assoc_array_ptr_to_node(ptr); | ||
696 | 695 | ||
697 | begin_node: | 696 | begin_node: |
698 | kdebug("begin_node"); | 697 | kdebug("begin_node"); |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 794c3ca49eac..57b0b49f4e6e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <net/ip.h> /* for local_port_range[] */ | 53 | #include <net/ip.h> /* for local_port_range[] */ |
54 | #include <net/sock.h> | 54 | #include <net/sock.h> |
55 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ | 55 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ |
56 | #include <net/inet_connection_sock.h> | ||
56 | #include <net/net_namespace.h> | 57 | #include <net/net_namespace.h> |
57 | #include <net/netlabel.h> | 58 | #include <net/netlabel.h> |
58 | #include <linux/uaccess.h> | 59 | #include <linux/uaccess.h> |
@@ -95,10 +96,6 @@ | |||
95 | #include "audit.h" | 96 | #include "audit.h" |
96 | #include "avc_ss.h" | 97 | #include "avc_ss.h" |
97 | 98 | ||
98 | #define SB_TYPE_FMT "%s%s%s" | ||
99 | #define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0]) | ||
100 | #define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : "" | ||
101 | |||
102 | extern struct security_operations *security_ops; | 99 | extern struct security_operations *security_ops; |
103 | 100 | ||
104 | /* SECMARK reference count */ | 101 | /* SECMARK reference count */ |
@@ -237,6 +234,14 @@ static int inode_alloc_security(struct inode *inode) | |||
237 | return 0; | 234 | return 0; |
238 | } | 235 | } |
239 | 236 | ||
237 | static void inode_free_rcu(struct rcu_head *head) | ||
238 | { | ||
239 | struct inode_security_struct *isec; | ||
240 | |||
241 | isec = container_of(head, struct inode_security_struct, rcu); | ||
242 | kmem_cache_free(sel_inode_cache, isec); | ||
243 | } | ||
244 | |||
240 | static void inode_free_security(struct inode *inode) | 245 | static void inode_free_security(struct inode *inode) |
241 | { | 246 | { |
242 | struct inode_security_struct *isec = inode->i_security; | 247 | struct inode_security_struct *isec = inode->i_security; |
@@ -247,8 +252,16 @@ static void inode_free_security(struct inode *inode) | |||
247 | list_del_init(&isec->list); | 252 | list_del_init(&isec->list); |
248 | spin_unlock(&sbsec->isec_lock); | 253 | spin_unlock(&sbsec->isec_lock); |
249 | 254 | ||
250 | inode->i_security = NULL; | 255 | /* |
251 | kmem_cache_free(sel_inode_cache, isec); | 256 | * The inode may still be referenced in a path walk and |
257 | * a call to selinux_inode_permission() can be made | ||
258 | * after inode_free_security() is called. Ideally, the VFS | ||
259 | * wouldn't do this, but fixing that is a much harder | ||
260 | * job. For now, simply free the i_security via RCU, and | ||
261 | * leave the current inode->i_security pointer intact. | ||
262 | * The inode will be freed after the RCU grace period too. | ||
263 | */ | ||
264 | call_rcu(&isec->rcu, inode_free_rcu); | ||
252 | } | 265 | } |
253 | 266 | ||
254 | static int file_alloc_security(struct file *file) | 267 | static int file_alloc_security(struct file *file) |
@@ -413,8 +426,8 @@ static int sb_finish_set_opts(struct super_block *sb) | |||
413 | the first boot of the SELinux kernel before we have | 426 | the first boot of the SELinux kernel before we have |
414 | assigned xattr values to the filesystem. */ | 427 | assigned xattr values to the filesystem. */ |
415 | if (!root_inode->i_op->getxattr) { | 428 | if (!root_inode->i_op->getxattr) { |
416 | printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no " | 429 | printk(KERN_WARNING "SELinux: (dev %s, type %s) has no " |
417 | "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb)); | 430 | "xattr support\n", sb->s_id, sb->s_type->name); |
418 | rc = -EOPNOTSUPP; | 431 | rc = -EOPNOTSUPP; |
419 | goto out; | 432 | goto out; |
420 | } | 433 | } |
@@ -422,22 +435,22 @@ static int sb_finish_set_opts(struct super_block *sb) | |||
422 | if (rc < 0 && rc != -ENODATA) { | 435 | if (rc < 0 && rc != -ENODATA) { |
423 | if (rc == -EOPNOTSUPP) | 436 | if (rc == -EOPNOTSUPP) |
424 | printk(KERN_WARNING "SELinux: (dev %s, type " | 437 | printk(KERN_WARNING "SELinux: (dev %s, type " |
425 | SB_TYPE_FMT") has no security xattr handler\n", | 438 | "%s) has no security xattr handler\n", |
426 | sb->s_id, SB_TYPE_ARGS(sb)); | 439 | sb->s_id, sb->s_type->name); |
427 | else | 440 | else |
428 | printk(KERN_WARNING "SELinux: (dev %s, type " | 441 | printk(KERN_WARNING "SELinux: (dev %s, type " |
429 | SB_TYPE_FMT") getxattr errno %d\n", sb->s_id, | 442 | "%s) getxattr errno %d\n", sb->s_id, |
430 | SB_TYPE_ARGS(sb), -rc); | 443 | sb->s_type->name, -rc); |
431 | goto out; | 444 | goto out; |
432 | } | 445 | } |
433 | } | 446 | } |
434 | 447 | ||
435 | if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) | 448 | if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) |
436 | printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n", | 449 | printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", |
437 | sb->s_id, SB_TYPE_ARGS(sb)); | 450 | sb->s_id, sb->s_type->name); |
438 | else | 451 | else |
439 | printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n", | 452 | printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n", |
440 | sb->s_id, SB_TYPE_ARGS(sb), | 453 | sb->s_id, sb->s_type->name, |
441 | labeling_behaviors[sbsec->behavior-1]); | 454 | labeling_behaviors[sbsec->behavior-1]); |
442 | 455 | ||
443 | sbsec->flags |= SE_SBINITIALIZED; | 456 | sbsec->flags |= SE_SBINITIALIZED; |
@@ -600,6 +613,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, | |||
600 | const struct cred *cred = current_cred(); | 613 | const struct cred *cred = current_cred(); |
601 | int rc = 0, i; | 614 | int rc = 0, i; |
602 | struct superblock_security_struct *sbsec = sb->s_security; | 615 | struct superblock_security_struct *sbsec = sb->s_security; |
616 | const char *name = sb->s_type->name; | ||
603 | struct inode *inode = sbsec->sb->s_root->d_inode; | 617 | struct inode *inode = sbsec->sb->s_root->d_inode; |
604 | struct inode_security_struct *root_isec = inode->i_security; | 618 | struct inode_security_struct *root_isec = inode->i_security; |
605 | u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; | 619 | u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; |
@@ -658,8 +672,8 @@ static int selinux_set_mnt_opts(struct super_block *sb, | |||
658 | strlen(mount_options[i]), &sid); | 672 | strlen(mount_options[i]), &sid); |
659 | if (rc) { | 673 | if (rc) { |
660 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 674 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
661 | "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", | 675 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
662 | mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); | 676 | mount_options[i], sb->s_id, name, rc); |
663 | goto out; | 677 | goto out; |
664 | } | 678 | } |
665 | switch (flags[i]) { | 679 | switch (flags[i]) { |
@@ -806,8 +820,7 @@ out: | |||
806 | out_double_mount: | 820 | out_double_mount: |
807 | rc = -EINVAL; | 821 | rc = -EINVAL; |
808 | printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different " | 822 | printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different " |
809 | "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, | 823 | "security settings for (dev %s, type %s)\n", sb->s_id, name); |
810 | SB_TYPE_ARGS(sb)); | ||
811 | goto out; | 824 | goto out; |
812 | } | 825 | } |
813 | 826 | ||
@@ -2480,8 +2493,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data) | |||
2480 | rc = security_context_to_sid(mount_options[i], len, &sid); | 2493 | rc = security_context_to_sid(mount_options[i], len, &sid); |
2481 | if (rc) { | 2494 | if (rc) { |
2482 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 2495 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
2483 | "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", | 2496 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
2484 | mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); | 2497 | mount_options[i], sb->s_id, sb->s_type->name, rc); |
2485 | goto out_free_opts; | 2498 | goto out_free_opts; |
2486 | } | 2499 | } |
2487 | rc = -EINVAL; | 2500 | rc = -EINVAL; |
@@ -2519,8 +2532,8 @@ out_free_secdata: | |||
2519 | return rc; | 2532 | return rc; |
2520 | out_bad_option: | 2533 | out_bad_option: |
2521 | printk(KERN_WARNING "SELinux: unable to change security options " | 2534 | printk(KERN_WARNING "SELinux: unable to change security options " |
2522 | "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, | 2535 | "during remount (dev %s, type=%s)\n", sb->s_id, |
2523 | SB_TYPE_ARGS(sb)); | 2536 | sb->s_type->name); |
2524 | goto out_free_opts; | 2537 | goto out_free_opts; |
2525 | } | 2538 | } |
2526 | 2539 | ||
@@ -3828,7 +3841,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) | |||
3828 | u32 nlbl_sid; | 3841 | u32 nlbl_sid; |
3829 | u32 nlbl_type; | 3842 | u32 nlbl_type; |
3830 | 3843 | ||
3831 | err = selinux_skb_xfrm_sid(skb, &xfrm_sid); | 3844 | err = selinux_xfrm_skb_sid(skb, &xfrm_sid); |
3832 | if (unlikely(err)) | 3845 | if (unlikely(err)) |
3833 | return -EACCES; | 3846 | return -EACCES; |
3834 | err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); | 3847 | err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); |
@@ -3846,6 +3859,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) | |||
3846 | return 0; | 3859 | return 0; |
3847 | } | 3860 | } |
3848 | 3861 | ||
3862 | /** | ||
3863 | * selinux_conn_sid - Determine the child socket label for a connection | ||
3864 | * @sk_sid: the parent socket's SID | ||
3865 | * @skb_sid: the packet's SID | ||
3866 | * @conn_sid: the resulting connection SID | ||
3867 | * | ||
3868 | * If @skb_sid is valid then the user:role:type information from @sk_sid is | ||
3869 | * combined with the MLS information from @skb_sid in order to create | ||
3870 | * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy | ||
3871 | * of @sk_sid. Returns zero on success, negative values on failure. | ||
3872 | * | ||
3873 | */ | ||
3874 | static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid) | ||
3875 | { | ||
3876 | int err = 0; | ||
3877 | |||
3878 | if (skb_sid != SECSID_NULL) | ||
3879 | err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid); | ||
3880 | else | ||
3881 | *conn_sid = sk_sid; | ||
3882 | |||
3883 | return err; | ||
3884 | } | ||
3885 | |||
3849 | /* socket security operations */ | 3886 | /* socket security operations */ |
3850 | 3887 | ||
3851 | static int socket_sockcreate_sid(const struct task_security_struct *tsec, | 3888 | static int socket_sockcreate_sid(const struct task_security_struct *tsec, |
@@ -4313,8 +4350,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
4313 | } | 4350 | } |
4314 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, | 4351 | err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, |
4315 | PEER__RECV, &ad); | 4352 | PEER__RECV, &ad); |
4316 | if (err) | 4353 | if (err) { |
4317 | selinux_netlbl_err(skb, err, 0); | 4354 | selinux_netlbl_err(skb, err, 0); |
4355 | return err; | ||
4356 | } | ||
4318 | } | 4357 | } |
4319 | 4358 | ||
4320 | if (secmark_active) { | 4359 | if (secmark_active) { |
@@ -4452,7 +4491,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb, | |||
4452 | struct sk_security_struct *sksec = sk->sk_security; | 4491 | struct sk_security_struct *sksec = sk->sk_security; |
4453 | int err; | 4492 | int err; |
4454 | u16 family = sk->sk_family; | 4493 | u16 family = sk->sk_family; |
4455 | u32 newsid; | 4494 | u32 connsid; |
4456 | u32 peersid; | 4495 | u32 peersid; |
4457 | 4496 | ||
4458 | /* handle mapped IPv4 packets arriving via IPv6 sockets */ | 4497 | /* handle mapped IPv4 packets arriving via IPv6 sockets */ |
@@ -4462,16 +4501,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb, | |||
4462 | err = selinux_skb_peerlbl_sid(skb, family, &peersid); | 4501 | err = selinux_skb_peerlbl_sid(skb, family, &peersid); |
4463 | if (err) | 4502 | if (err) |
4464 | return err; | 4503 | return err; |
4465 | if (peersid == SECSID_NULL) { | 4504 | err = selinux_conn_sid(sksec->sid, peersid, &connsid); |
4466 | req->secid = sksec->sid; | 4505 | if (err) |
4467 | req->peer_secid = SECSID_NULL; | 4506 | return err; |
4468 | } else { | 4507 | req->secid = connsid; |
4469 | err = security_sid_mls_copy(sksec->sid, peersid, &newsid); | 4508 | req->peer_secid = peersid; |
4470 | if (err) | ||
4471 | return err; | ||
4472 | req->secid = newsid; | ||
4473 | req->peer_secid = peersid; | ||
4474 | } | ||
4475 | 4509 | ||
4476 | return selinux_netlbl_inet_conn_request(req, family); | 4510 | return selinux_netlbl_inet_conn_request(req, family); |
4477 | } | 4511 | } |
@@ -4731,6 +4765,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops, | |||
4731 | static unsigned int selinux_ip_output(struct sk_buff *skb, | 4765 | static unsigned int selinux_ip_output(struct sk_buff *skb, |
4732 | u16 family) | 4766 | u16 family) |
4733 | { | 4767 | { |
4768 | struct sock *sk; | ||
4734 | u32 sid; | 4769 | u32 sid; |
4735 | 4770 | ||
4736 | if (!netlbl_enabled()) | 4771 | if (!netlbl_enabled()) |
@@ -4739,8 +4774,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb, | |||
4739 | /* we do this in the LOCAL_OUT path and not the POST_ROUTING path | 4774 | /* we do this in the LOCAL_OUT path and not the POST_ROUTING path |
4740 | * because we want to make sure we apply the necessary labeling | 4775 | * because we want to make sure we apply the necessary labeling |
4741 | * before IPsec is applied so we can leverage AH protection */ | 4776 | * before IPsec is applied so we can leverage AH protection */ |
4742 | if (skb->sk) { | 4777 | sk = skb->sk; |
4743 | struct sk_security_struct *sksec = skb->sk->sk_security; | 4778 | if (sk) { |
4779 | struct sk_security_struct *sksec; | ||
4780 | |||
4781 | if (sk->sk_state == TCP_LISTEN) | ||
4782 | /* if the socket is the listening state then this | ||
4783 | * packet is a SYN-ACK packet which means it needs to | ||
4784 | * be labeled based on the connection/request_sock and | ||
4785 | * not the parent socket. unfortunately, we can't | ||
4786 | * lookup the request_sock yet as it isn't queued on | ||
4787 | * the parent socket until after the SYN-ACK is sent. | ||
4788 | * the "solution" is to simply pass the packet as-is | ||
4789 | * as any IP option based labeling should be copied | ||
4790 | * from the initial connection request (in the IP | ||
4791 | * layer). it is far from ideal, but until we get a | ||
4792 | * security label in the packet itself this is the | ||
4793 | * best we can do. */ | ||
4794 | return NF_ACCEPT; | ||
4795 | |||
4796 | /* standard practice, label using the parent socket */ | ||
4797 | sksec = sk->sk_security; | ||
4744 | sid = sksec->sid; | 4798 | sid = sksec->sid; |
4745 | } else | 4799 | } else |
4746 | sid = SECINITSID_KERNEL; | 4800 | sid = SECINITSID_KERNEL; |
@@ -4810,27 +4864,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, | |||
4810 | * as fast and as clean as possible. */ | 4864 | * as fast and as clean as possible. */ |
4811 | if (!selinux_policycap_netpeer) | 4865 | if (!selinux_policycap_netpeer) |
4812 | return selinux_ip_postroute_compat(skb, ifindex, family); | 4866 | return selinux_ip_postroute_compat(skb, ifindex, family); |
4867 | |||
4868 | secmark_active = selinux_secmark_enabled(); | ||
4869 | peerlbl_active = selinux_peerlbl_enabled(); | ||
4870 | if (!secmark_active && !peerlbl_active) | ||
4871 | return NF_ACCEPT; | ||
4872 | |||
4873 | sk = skb->sk; | ||
4874 | |||
4813 | #ifdef CONFIG_XFRM | 4875 | #ifdef CONFIG_XFRM |
4814 | /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec | 4876 | /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec |
4815 | * packet transformation so allow the packet to pass without any checks | 4877 | * packet transformation so allow the packet to pass without any checks |
4816 | * since we'll have another chance to perform access control checks | 4878 | * since we'll have another chance to perform access control checks |
4817 | * when the packet is on its final way out. | 4879 | * when the packet is on its final way out. |
4818 | * NOTE: there appear to be some IPv6 multicast cases where skb->dst | 4880 | * NOTE: there appear to be some IPv6 multicast cases where skb->dst |
4819 | * is NULL, in this case go ahead and apply access control. */ | 4881 | * is NULL, in this case go ahead and apply access control. |
4820 | if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL) | 4882 | * NOTE: if this is a local socket (skb->sk != NULL) that is in the |
4883 | * TCP listening state we cannot wait until the XFRM processing | ||
4884 | * is done as we will miss out on the SA label if we do; | ||
4885 | * unfortunately, this means more work, but it is only once per | ||
4886 | * connection. */ | ||
4887 | if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL && | ||
4888 | !(sk != NULL && sk->sk_state == TCP_LISTEN)) | ||
4821 | return NF_ACCEPT; | 4889 | return NF_ACCEPT; |
4822 | #endif | 4890 | #endif |
4823 | secmark_active = selinux_secmark_enabled(); | ||
4824 | peerlbl_active = selinux_peerlbl_enabled(); | ||
4825 | if (!secmark_active && !peerlbl_active) | ||
4826 | return NF_ACCEPT; | ||
4827 | 4891 | ||
4828 | /* if the packet is being forwarded then get the peer label from the | ||
4829 | * packet itself; otherwise check to see if it is from a local | ||
4830 | * application or the kernel, if from an application get the peer label | ||
4831 | * from the sending socket, otherwise use the kernel's sid */ | ||
4832 | sk = skb->sk; | ||
4833 | if (sk == NULL) { | 4892 | if (sk == NULL) { |
4893 | /* Without an associated socket the packet is either coming | ||
4894 | * from the kernel or it is being forwarded; check the packet | ||
4895 | * to determine which and if the packet is being forwarded | ||
4896 | * query the packet directly to determine the security label. */ | ||
4834 | if (skb->skb_iif) { | 4897 | if (skb->skb_iif) { |
4835 | secmark_perm = PACKET__FORWARD_OUT; | 4898 | secmark_perm = PACKET__FORWARD_OUT; |
4836 | if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) | 4899 | if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) |
@@ -4839,7 +4902,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, | |||
4839 | secmark_perm = PACKET__SEND; | 4902 | secmark_perm = PACKET__SEND; |
4840 | peer_sid = SECINITSID_KERNEL; | 4903 | peer_sid = SECINITSID_KERNEL; |
4841 | } | 4904 | } |
4905 | } else if (sk->sk_state == TCP_LISTEN) { | ||
4906 | /* Locally generated packet but the associated socket is in the | ||
4907 | * listening state which means this is a SYN-ACK packet. In | ||
4908 | * this particular case the correct security label is assigned | ||
4909 | * to the connection/request_sock but unfortunately we can't | ||
4910 | * query the request_sock as it isn't queued on the parent | ||
4911 | * socket until after the SYN-ACK packet is sent; the only | ||
4912 | * viable choice is to regenerate the label like we do in | ||
4913 | * selinux_inet_conn_request(). See also selinux_ip_output() | ||
4914 | * for similar problems. */ | ||
4915 | u32 skb_sid; | ||
4916 | struct sk_security_struct *sksec = sk->sk_security; | ||
4917 | if (selinux_skb_peerlbl_sid(skb, family, &skb_sid)) | ||
4918 | return NF_DROP; | ||
4919 | /* At this point, if the returned skb peerlbl is SECSID_NULL | ||
4920 | * and the packet has been through at least one XFRM | ||
4921 | * transformation then we must be dealing with the "final" | ||
4922 | * form of labeled IPsec packet; since we've already applied | ||
4923 | * all of our access controls on this packet we can safely | ||
4924 | * pass the packet. */ | ||
4925 | if (skb_sid == SECSID_NULL) { | ||
4926 | switch (family) { | ||
4927 | case PF_INET: | ||
4928 | if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) | ||
4929 | return NF_ACCEPT; | ||
4930 | break; | ||
4931 | case PF_INET6: | ||
4932 | if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) | ||
4933 | return NF_ACCEPT; | ||
4934 | default: | ||
4935 | return NF_DROP_ERR(-ECONNREFUSED); | ||
4936 | } | ||
4937 | } | ||
4938 | if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid)) | ||
4939 | return NF_DROP; | ||
4940 | secmark_perm = PACKET__SEND; | ||
4842 | } else { | 4941 | } else { |
4942 | /* Locally generated packet, fetch the security label from the | ||
4943 | * associated socket. */ | ||
4843 | struct sk_security_struct *sksec = sk->sk_security; | 4944 | struct sk_security_struct *sksec = sk->sk_security; |
4844 | peer_sid = sksec->sid; | 4945 | peer_sid = sksec->sid; |
4845 | secmark_perm = PACKET__SEND; | 4946 | secmark_perm = PACKET__SEND; |
@@ -5503,11 +5604,11 @@ static int selinux_setprocattr(struct task_struct *p, | |||
5503 | /* Check for ptracing, and update the task SID if ok. | 5604 | /* Check for ptracing, and update the task SID if ok. |
5504 | Otherwise, leave SID unchanged and fail. */ | 5605 | Otherwise, leave SID unchanged and fail. */ |
5505 | ptsid = 0; | 5606 | ptsid = 0; |
5506 | task_lock(p); | 5607 | rcu_read_lock(); |
5507 | tracer = ptrace_parent(p); | 5608 | tracer = ptrace_parent(p); |
5508 | if (tracer) | 5609 | if (tracer) |
5509 | ptsid = task_sid(tracer); | 5610 | ptsid = task_sid(tracer); |
5510 | task_unlock(p); | 5611 | rcu_read_unlock(); |
5511 | 5612 | ||
5512 | if (tracer) { | 5613 | if (tracer) { |
5513 | error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, | 5614 | error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, |
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index b1dfe1049450..078e553f52f2 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h | |||
@@ -38,7 +38,10 @@ struct task_security_struct { | |||
38 | 38 | ||
39 | struct inode_security_struct { | 39 | struct inode_security_struct { |
40 | struct inode *inode; /* back pointer to inode object */ | 40 | struct inode *inode; /* back pointer to inode object */ |
41 | struct list_head list; /* list of inode_security_struct */ | 41 | union { |
42 | struct list_head list; /* list of inode_security_struct */ | ||
43 | struct rcu_head rcu; /* for freeing the inode_security_struct */ | ||
44 | }; | ||
42 | u32 task_sid; /* SID of creating task */ | 45 | u32 task_sid; /* SID of creating task */ |
43 | u32 sid; /* SID of this object */ | 46 | u32 sid; /* SID of this object */ |
44 | u16 sclass; /* security class of this object */ | 47 | u16 sclass; /* security class of this object */ |
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 0dec76c64cf5..48c3cc94c168 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h | |||
@@ -39,6 +39,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, | |||
39 | int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, | 39 | int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, |
40 | struct common_audit_data *ad, u8 proto); | 40 | struct common_audit_data *ad, u8 proto); |
41 | int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); | 41 | int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); |
42 | int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid); | ||
42 | 43 | ||
43 | static inline void selinux_xfrm_notify_policyload(void) | 44 | static inline void selinux_xfrm_notify_policyload(void) |
44 | { | 45 | { |
@@ -79,11 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, | |||
79 | static inline void selinux_xfrm_notify_policyload(void) | 80 | static inline void selinux_xfrm_notify_policyload(void) |
80 | { | 81 | { |
81 | } | 82 | } |
82 | #endif | ||
83 | 83 | ||
84 | static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) | 84 | static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) |
85 | { | 85 | { |
86 | return selinux_xfrm_decode_session(skb, sid, 0); | 86 | *sid = SECSID_NULL; |
87 | return 0; | ||
87 | } | 88 | } |
89 | #endif | ||
88 | 90 | ||
89 | #endif /* _SELINUX_XFRM_H_ */ | 91 | #endif /* _SELINUX_XFRM_H_ */ |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index ee470a0b5c27..d106733ad987 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -2334,50 +2334,16 @@ int security_fs_use(struct super_block *sb) | |||
2334 | struct ocontext *c; | 2334 | struct ocontext *c; |
2335 | struct superblock_security_struct *sbsec = sb->s_security; | 2335 | struct superblock_security_struct *sbsec = sb->s_security; |
2336 | const char *fstype = sb->s_type->name; | 2336 | const char *fstype = sb->s_type->name; |
2337 | const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL; | ||
2338 | struct ocontext *base = NULL; | ||
2339 | 2337 | ||
2340 | read_lock(&policy_rwlock); | 2338 | read_lock(&policy_rwlock); |
2341 | 2339 | ||
2342 | for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) { | 2340 | c = policydb.ocontexts[OCON_FSUSE]; |
2343 | char *sub; | 2341 | while (c) { |
2344 | int baselen; | 2342 | if (strcmp(fstype, c->u.name) == 0) |
2345 | |||
2346 | baselen = strlen(fstype); | ||
2347 | |||
2348 | /* if base does not match, this is not the one */ | ||
2349 | if (strncmp(fstype, c->u.name, baselen)) | ||
2350 | continue; | ||
2351 | |||
2352 | /* if there is no subtype, this is the one! */ | ||
2353 | if (!subtype) | ||
2354 | break; | ||
2355 | |||
2356 | /* skip past the base in this entry */ | ||
2357 | sub = c->u.name + baselen; | ||
2358 | |||
2359 | /* entry is only a base. save it. keep looking for subtype */ | ||
2360 | if (sub[0] == '\0') { | ||
2361 | base = c; | ||
2362 | continue; | ||
2363 | } | ||
2364 | |||
2365 | /* entry is not followed by a subtype, so it is not a match */ | ||
2366 | if (sub[0] != '.') | ||
2367 | continue; | ||
2368 | |||
2369 | /* whew, we found a subtype of this fstype */ | ||
2370 | sub++; /* move past '.' */ | ||
2371 | |||
2372 | /* exact match of fstype AND subtype */ | ||
2373 | if (!strcmp(subtype, sub)) | ||
2374 | break; | 2343 | break; |
2344 | c = c->next; | ||
2375 | } | 2345 | } |
2376 | 2346 | ||
2377 | /* in case we had found an fstype match but no subtype match */ | ||
2378 | if (!c) | ||
2379 | c = base; | ||
2380 | |||
2381 | if (c) { | 2347 | if (c) { |
2382 | sbsec->behavior = c->v.behavior; | 2348 | sbsec->behavior = c->v.behavior; |
2383 | if (!c->sid[0]) { | 2349 | if (!c->sid[0]) { |
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index a91d205ec0c6..0462cb3ff0a7 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c | |||
@@ -209,19 +209,26 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, | |||
209 | NULL) ? 0 : 1); | 209 | NULL) ? 0 : 1); |
210 | } | 210 | } |
211 | 211 | ||
212 | /* | 212 | static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb) |
213 | * LSM hook implementation that checks and/or returns the xfrm sid for the | ||
214 | * incoming packet. | ||
215 | */ | ||
216 | int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) | ||
217 | { | 213 | { |
218 | u32 sid_session = SECSID_NULL; | 214 | struct dst_entry *dst = skb_dst(skb); |
219 | struct sec_path *sp; | 215 | struct xfrm_state *x; |
220 | 216 | ||
221 | if (skb == NULL) | 217 | if (dst == NULL) |
222 | goto out; | 218 | return SECSID_NULL; |
219 | x = dst->xfrm; | ||
220 | if (x == NULL || !selinux_authorizable_xfrm(x)) | ||
221 | return SECSID_NULL; | ||
222 | |||
223 | return x->security->ctx_sid; | ||
224 | } | ||
225 | |||
226 | static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb, | ||
227 | u32 *sid, int ckall) | ||
228 | { | ||
229 | u32 sid_session = SECSID_NULL; | ||
230 | struct sec_path *sp = skb->sp; | ||
223 | 231 | ||
224 | sp = skb->sp; | ||
225 | if (sp) { | 232 | if (sp) { |
226 | int i; | 233 | int i; |
227 | 234 | ||
@@ -248,6 +255,30 @@ out: | |||
248 | } | 255 | } |
249 | 256 | ||
250 | /* | 257 | /* |
258 | * LSM hook implementation that checks and/or returns the xfrm sid for the | ||
259 | * incoming packet. | ||
260 | */ | ||
261 | int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) | ||
262 | { | ||
263 | if (skb == NULL) { | ||
264 | *sid = SECSID_NULL; | ||
265 | return 0; | ||
266 | } | ||
267 | return selinux_xfrm_skb_sid_ingress(skb, sid, ckall); | ||
268 | } | ||
269 | |||
270 | int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) | ||
271 | { | ||
272 | int rc; | ||
273 | |||
274 | rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0); | ||
275 | if (rc == 0 && *sid == SECSID_NULL) | ||
276 | *sid = selinux_xfrm_skb_sid_egress(skb); | ||
277 | |||
278 | return rc; | ||
279 | } | ||
280 | |||
281 | /* | ||
251 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. | 282 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. |
252 | */ | 283 | */ |
253 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, | 284 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
@@ -327,19 +358,22 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, | |||
327 | return rc; | 358 | return rc; |
328 | 359 | ||
329 | ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); | 360 | ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); |
330 | if (!ctx) | 361 | if (!ctx) { |
331 | return -ENOMEM; | 362 | rc = -ENOMEM; |
363 | goto out; | ||
364 | } | ||
332 | 365 | ||
333 | ctx->ctx_doi = XFRM_SC_DOI_LSM; | 366 | ctx->ctx_doi = XFRM_SC_DOI_LSM; |
334 | ctx->ctx_alg = XFRM_SC_ALG_SELINUX; | 367 | ctx->ctx_alg = XFRM_SC_ALG_SELINUX; |
335 | ctx->ctx_sid = secid; | 368 | ctx->ctx_sid = secid; |
336 | ctx->ctx_len = str_len; | 369 | ctx->ctx_len = str_len; |
337 | memcpy(ctx->ctx_str, ctx_str, str_len); | 370 | memcpy(ctx->ctx_str, ctx_str, str_len); |
338 | kfree(ctx_str); | ||
339 | 371 | ||
340 | x->security = ctx; | 372 | x->security = ctx; |
341 | atomic_inc(&selinux_xfrm_refcount); | 373 | atomic_inc(&selinux_xfrm_refcount); |
342 | return 0; | 374 | out: |
375 | kfree(ctx_str); | ||
376 | return rc; | ||
343 | } | 377 | } |
344 | 378 | ||
345 | /* | 379 | /* |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6e03b465e44e..a2104671f51d 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, | |||
1937 | case SNDRV_PCM_STATE_DISCONNECTED: | 1937 | case SNDRV_PCM_STATE_DISCONNECTED: |
1938 | err = -EBADFD; | 1938 | err = -EBADFD; |
1939 | goto _endloop; | 1939 | goto _endloop; |
1940 | case SNDRV_PCM_STATE_PAUSED: | ||
1941 | continue; | ||
1940 | } | 1942 | } |
1941 | if (!tout) { | 1943 | if (!tout) { |
1942 | snd_printd("%s write error (DMA or IRQ trouble?)\n", | 1944 | snd_printd("%s write error (DMA or IRQ trouble?)\n", |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index c4671d00babd..c7f6d1cab606 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx) | |||
474 | memset(path, 0, sizeof(*path)); | 474 | memset(path, 0, sizeof(*path)); |
475 | } | 475 | } |
476 | 476 | ||
477 | /* return a DAC if paired to the given pin by codec driver */ | ||
478 | static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin) | ||
479 | { | ||
480 | struct hda_gen_spec *spec = codec->spec; | ||
481 | const hda_nid_t *list = spec->preferred_dacs; | ||
482 | |||
483 | if (!list) | ||
484 | return 0; | ||
485 | for (; *list; list += 2) | ||
486 | if (*list == pin) | ||
487 | return list[1]; | ||
488 | return 0; | ||
489 | } | ||
490 | |||
477 | /* look for an empty DAC slot */ | 491 | /* look for an empty DAC slot */ |
478 | static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, | 492 | static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin, |
479 | bool is_digital) | 493 | bool is_digital) |
@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs, | |||
1192 | continue; | 1206 | continue; |
1193 | } | 1207 | } |
1194 | 1208 | ||
1195 | dacs[i] = look_for_dac(codec, pin, false); | 1209 | dacs[i] = get_preferred_dac(codec, pin); |
1210 | if (dacs[i]) { | ||
1211 | if (is_dac_already_used(codec, dacs[i])) | ||
1212 | badness += bad->shared_primary; | ||
1213 | } | ||
1214 | |||
1215 | if (!dacs[i]) | ||
1216 | dacs[i] = look_for_dac(codec, pin, false); | ||
1196 | if (!dacs[i] && !i) { | 1217 | if (!dacs[i] && !i) { |
1197 | /* try to steal the DAC of surrounds for the front */ | 1218 | /* try to steal the DAC of surrounds for the front */ |
1198 | for (j = 1; j < num_outs; j++) { | 1219 | for (j = 1; j < num_outs; j++) { |
@@ -4297,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec, | |||
4297 | return AC_PWRST_D3; | 4318 | return AC_PWRST_D3; |
4298 | } | 4319 | } |
4299 | 4320 | ||
4321 | /* mute all aamix inputs initially; parse up to the first leaves */ | ||
4322 | static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) | ||
4323 | { | ||
4324 | int i, nums; | ||
4325 | const hda_nid_t *conn; | ||
4326 | bool has_amp; | ||
4327 | |||
4328 | nums = snd_hda_get_conn_list(codec, mix, &conn); | ||
4329 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); | ||
4330 | for (i = 0; i < nums; i++) { | ||
4331 | if (has_amp) | ||
4332 | snd_hda_codec_amp_stereo(codec, mix, | ||
4333 | HDA_INPUT, i, | ||
4334 | 0xff, HDA_AMP_MUTE); | ||
4335 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) | ||
4336 | snd_hda_codec_amp_stereo(codec, conn[i], | ||
4337 | HDA_OUTPUT, 0, | ||
4338 | 0xff, HDA_AMP_MUTE); | ||
4339 | } | ||
4340 | } | ||
4300 | 4341 | ||
4301 | /* | 4342 | /* |
4302 | * Parse the given BIOS configuration and set up the hda_gen_spec | 4343 | * Parse the given BIOS configuration and set up the hda_gen_spec |
@@ -4435,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, | |||
4435 | } | 4476 | } |
4436 | } | 4477 | } |
4437 | 4478 | ||
4479 | /* mute all aamix inputs initially */ | ||
4480 | if (spec->mixer_nid) | ||
4481 | mute_all_mixer_nid(codec, spec->mixer_nid); | ||
4482 | |||
4438 | dig_only: | 4483 | dig_only: |
4439 | parse_digital(codec); | 4484 | parse_digital(codec); |
4440 | 4485 | ||
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 7e45cb44d151..0929a06df812 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h | |||
@@ -249,6 +249,9 @@ struct hda_gen_spec { | |||
249 | const struct badness_table *main_out_badness; | 249 | const struct badness_table *main_out_badness; |
250 | const struct badness_table *extra_out_badness; | 250 | const struct badness_table *extra_out_badness; |
251 | 251 | ||
252 | /* preferred pin/DAC pairs; an array of paired NIDs */ | ||
253 | const hda_nid_t *preferred_dacs; | ||
254 | |||
252 | /* loopback mixing mode */ | 255 | /* loopback mixing mode */ |
253 | bool aamix_mode; | 256 | bool aamix_mode; |
254 | 257 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 27aa14007cbd..956871d8b3d2 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev) | |||
3433 | * white/black-list for enable_msi | 3433 | * white/black-list for enable_msi |
3434 | */ | 3434 | */ |
3435 | static struct snd_pci_quirk msi_black_list[] = { | 3435 | static struct snd_pci_quirk msi_black_list[] = { |
3436 | SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */ | ||
3437 | SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */ | ||
3438 | SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */ | ||
3439 | SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */ | ||
3436 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ | 3440 | SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ |
3437 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ | 3441 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ |
3438 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ | 3442 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index cac015be3325..699262a3e07a 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -340,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
340 | { | 340 | { |
341 | int err; | 341 | int err; |
342 | struct ad198x_spec *spec; | 342 | struct ad198x_spec *spec; |
343 | static hda_nid_t preferred_pairs[] = { | ||
344 | 0x1a, 0x03, | ||
345 | 0x1b, 0x03, | ||
346 | 0x1c, 0x04, | ||
347 | 0x1d, 0x05, | ||
348 | 0x1e, 0x03, | ||
349 | 0 | ||
350 | }; | ||
343 | 351 | ||
344 | err = alloc_ad_spec(codec); | 352 | err = alloc_ad_spec(codec); |
345 | if (err < 0) | 353 | if (err < 0) |
@@ -360,6 +368,8 @@ static int patch_ad1986a(struct hda_codec *codec) | |||
360 | * So, let's disable the shared stream. | 368 | * So, let's disable the shared stream. |
361 | */ | 369 | */ |
362 | spec->gen.multiout.no_share_stream = 1; | 370 | spec->gen.multiout.no_share_stream = 1; |
371 | /* give fixed DAC/pin pairs */ | ||
372 | spec->gen.preferred_dacs = preferred_pairs; | ||
363 | 373 | ||
364 | /* AD1986A can't manage the dynamic pin on/off smoothly */ | 374 | /* AD1986A can't manage the dynamic pin on/off smoothly */ |
365 | spec->gen.auto_mute_via_amp = 1; | 375 | spec->gen.auto_mute_via_amp = 1; |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 1f2717f817a0..3fbf2883e06e 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
2936 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), | 2936 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), |
2937 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 2937 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
2938 | SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), | 2938 | SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), |
2939 | SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), | ||
2940 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | 2939 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), |
2941 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), | 2940 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), |
2942 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), | 2941 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index c4a66ef6cf6f..f281c8068557 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -2337,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec) | |||
2337 | int err; | 2337 | int err; |
2338 | 2338 | ||
2339 | per_cvt = get_cvt(spec, 0); | 2339 | per_cvt = get_cvt(spec, 0); |
2340 | err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid, | 2340 | err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid, |
2341 | per_cvt->cvt_nid); | 2341 | per_cvt->cvt_nid, |
2342 | HDA_PCM_TYPE_HDMI); | ||
2342 | if (err < 0) | 2343 | if (err < 0) |
2343 | return err; | 2344 | return err; |
2344 | return simple_hdmi_build_jack(codec, 0); | 2345 | return simple_hdmi_build_jack(codec, 0); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c5ea483d7559..c5646941539a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3849,6 +3849,7 @@ enum { | |||
3849 | ALC269_FIXUP_ASUS_X101, | 3849 | ALC269_FIXUP_ASUS_X101, |
3850 | ALC271_FIXUP_AMIC_MIC2, | 3850 | ALC271_FIXUP_AMIC_MIC2, |
3851 | ALC271_FIXUP_HP_GATE_MIC_JACK, | 3851 | ALC271_FIXUP_HP_GATE_MIC_JACK, |
3852 | ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572, | ||
3852 | ALC269_FIXUP_ACER_AC700, | 3853 | ALC269_FIXUP_ACER_AC700, |
3853 | ALC269_FIXUP_LIMIT_INT_MIC_BOOST, | 3854 | ALC269_FIXUP_LIMIT_INT_MIC_BOOST, |
3854 | ALC269VB_FIXUP_ASUS_ZENBOOK, | 3855 | ALC269VB_FIXUP_ASUS_ZENBOOK, |
@@ -4111,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = { | |||
4111 | .chained = true, | 4112 | .chained = true, |
4112 | .chain_id = ALC271_FIXUP_AMIC_MIC2, | 4113 | .chain_id = ALC271_FIXUP_AMIC_MIC2, |
4113 | }, | 4114 | }, |
4115 | [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = { | ||
4116 | .type = HDA_FIXUP_FUNC, | ||
4117 | .v.func = alc269_fixup_limit_int_mic_boost, | ||
4118 | .chained = true, | ||
4119 | .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK, | ||
4120 | }, | ||
4114 | [ALC269_FIXUP_ACER_AC700] = { | 4121 | [ALC269_FIXUP_ACER_AC700] = { |
4115 | .type = HDA_FIXUP_PINS, | 4122 | .type = HDA_FIXUP_PINS, |
4116 | .v.pins = (const struct hda_pintbl[]) { | 4123 | .v.pins = (const struct hda_pintbl[]) { |
@@ -4208,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
4208 | SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), | 4215 | SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), |
4209 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), | 4216 | SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), |
4210 | SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), | 4217 | SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), |
4218 | SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), | ||
4211 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 4219 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
4212 | SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4220 | SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
4213 | SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4221 | SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
@@ -4239,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
4239 | SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4247 | SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
4240 | SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4248 | SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
4241 | SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4249 | SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
4250 | SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
4242 | SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4251 | SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
4243 | SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4252 | SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
4244 | SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), | 4253 | SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), |
4245 | SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | 4254 | SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
4255 | SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
4246 | SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), | 4256 | SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), |
4257 | SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
4247 | SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | 4258 | SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
4259 | SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
4248 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4260 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
4249 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4261 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
4250 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 4262 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
@@ -5034,8 +5046,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
5034 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 5046 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
5035 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5047 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5036 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5048 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5049 | SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | ||
5050 | SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | ||
5037 | SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5051 | SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5038 | SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5052 | SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5053 | SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | ||
5039 | SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), | 5054 | SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), |
5040 | SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), | 5055 | SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), |
5041 | SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), | 5056 | SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), |
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 8697cedccd21..1ead3c977a51 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c | |||
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, | |||
648 | 648 | ||
649 | dma_params = ssc_p->dma_params[dir]; | 649 | dma_params = ssc_p->dma_params[dir]; |
650 | 650 | ||
651 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); | 651 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); |
652 | ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); | 652 | ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); |
653 | 653 | ||
654 | pr_debug("%s enabled SSC_SR=0x%08x\n", | 654 | pr_debug("%s enabled SSC_SR=0x%08x\n", |
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, | |||
657 | return 0; | 657 | return 0; |
658 | } | 658 | } |
659 | 659 | ||
660 | static int atmel_ssc_trigger(struct snd_pcm_substream *substream, | ||
661 | int cmd, struct snd_soc_dai *dai) | ||
662 | { | ||
663 | struct atmel_ssc_info *ssc_p = &ssc_info[dai->id]; | ||
664 | struct atmel_pcm_dma_params *dma_params; | ||
665 | int dir; | ||
666 | |||
667 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
668 | dir = 0; | ||
669 | else | ||
670 | dir = 1; | ||
671 | |||
672 | dma_params = ssc_p->dma_params[dir]; | ||
673 | |||
674 | switch (cmd) { | ||
675 | case SNDRV_PCM_TRIGGER_START: | ||
676 | case SNDRV_PCM_TRIGGER_RESUME: | ||
677 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | ||
678 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); | ||
679 | break; | ||
680 | default: | ||
681 | ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); | ||
682 | break; | ||
683 | } | ||
684 | |||
685 | return 0; | ||
686 | } | ||
660 | 687 | ||
661 | #ifdef CONFIG_PM | 688 | #ifdef CONFIG_PM |
662 | static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) | 689 | static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) |
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = { | |||
731 | .startup = atmel_ssc_startup, | 758 | .startup = atmel_ssc_startup, |
732 | .shutdown = atmel_ssc_shutdown, | 759 | .shutdown = atmel_ssc_shutdown, |
733 | .prepare = atmel_ssc_prepare, | 760 | .prepare = atmel_ssc_prepare, |
761 | .trigger = atmel_ssc_trigger, | ||
734 | .hw_params = atmel_ssc_hw_params, | 762 | .hw_params = atmel_ssc_hw_params, |
735 | .set_fmt = atmel_ssc_set_dai_fmt, | 763 | .set_fmt = atmel_ssc_set_dai_fmt, |
736 | .set_clkdiv = atmel_ssc_set_dai_clkdiv, | 764 | .set_clkdiv = atmel_ssc_set_dai_clkdiv, |
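The atmel_ssc_dai.c hunks above stop enabling the SSC in prepare() (which now writes the disable mask) and add a trigger() callback, so the controller is only switched on for the start/resume/pause-release commands and switched back off for everything else. Below is a rough standalone sketch of that dispatch pattern, not the driver itself: the command names mirror the SNDRV_PCM_TRIGGER_* constants, and hw_enable()/hw_disable() are invented stand-ins for the real ssc_writel() calls.

#include <stdio.h>

/* Placeholder trigger commands, modeled on the SNDRV_PCM_TRIGGER_* values. */
enum trigger_cmd {
        TRIGGER_START,
        TRIGGER_STOP,
        TRIGGER_RESUME,
        TRIGGER_SUSPEND,
        TRIGGER_PAUSE_PUSH,
        TRIGGER_PAUSE_RELEASE,
};

/* Stand-ins for writing the enable/disable masks to the control register. */
static void hw_enable(void)  { printf("CR <- enable mask\n"); }
static void hw_disable(void) { printf("CR <- disable mask\n"); }

/* Only the "stream is running" commands enable the hardware; every other
 * command (stop, suspend, pause push) falls through to the disable path. */
static int sketch_trigger(enum trigger_cmd cmd)
{
        switch (cmd) {
        case TRIGGER_START:
        case TRIGGER_RESUME:
        case TRIGGER_PAUSE_RELEASE:
                hw_enable();
                break;
        default:
                hw_disable();
                break;
        }
        return 0;
}

int main(void)
{
        sketch_trigger(TRIGGER_START);
        sketch_trigger(TRIGGER_PAUSE_PUSH);
        return 0;
}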
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 1b372283bd01..7d6a9055874b 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c | |||
@@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) | |||
109 | dai->stream_name = "WM8731 PCM"; | 109 | dai->stream_name = "WM8731 PCM"; |
110 | dai->codec_dai_name = "wm8731-hifi"; | 110 | dai->codec_dai_name = "wm8731-hifi"; |
111 | dai->init = sam9x5_wm8731_init; | 111 | dai->init = sam9x5_wm8731_init; |
112 | dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | 112 | dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
113 | | SND_SOC_DAIFMT_CBM_CFM; | 113 | | SND_SOC_DAIFMT_CBM_CFM; |
114 | 114 | ||
115 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); | 115 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); |
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 99b359e19d35..0ab2dc296474 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c | |||
@@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { | |||
1012 | { "AEC Loopback", "HPOUT3L", "OUT3L" }, | 1012 | { "AEC Loopback", "HPOUT3L", "OUT3L" }, |
1013 | { "AEC Loopback", "HPOUT3R", "OUT3R" }, | 1013 | { "AEC Loopback", "HPOUT3R", "OUT3R" }, |
1014 | { "HPOUT3L", NULL, "OUT3L" }, | 1014 | { "HPOUT3L", NULL, "OUT3L" }, |
1015 | { "HPOUT3R", NULL, "OUT3L" }, | 1015 | { "HPOUT3R", NULL, "OUT3R" }, |
1016 | 1016 | ||
1017 | { "AEC Loopback", "SPKOUTL", "OUT4L" }, | 1017 | { "AEC Loopback", "SPKOUTL", "OUT4L" }, |
1018 | { "SPKOUTLN", NULL, "OUT4L" }, | 1018 | { "SPKOUTLN", NULL, "OUT4L" }, |
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 3938fb1c203e..53bbfac6a83a 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) | |||
1444 | 1444 | ||
1445 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { | 1445 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { |
1446 | case SND_SOC_DAIFMT_DSP_B: | 1446 | case SND_SOC_DAIFMT_DSP_B: |
1447 | aif1 |= WM8904_AIF_LRCLK_INV; | 1447 | aif1 |= 0x3 | WM8904_AIF_LRCLK_INV; |
1448 | case SND_SOC_DAIFMT_DSP_A: | 1448 | case SND_SOC_DAIFMT_DSP_A: |
1449 | aif1 |= 0x3; | 1449 | aif1 |= 0x3; |
1450 | break; | 1450 | break; |
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 543c5c2631b6..0f17ed3e29f4 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec) | |||
2439 | snd_soc_update_bits(codec, WM8962_CLOCKING_4, | 2439 | snd_soc_update_bits(codec, WM8962_CLOCKING_4, |
2440 | WM8962_SYSCLK_RATE_MASK, clocking4); | 2440 | WM8962_SYSCLK_RATE_MASK, clocking4); |
2441 | 2441 | ||
2442 | /* DSPCLK_DIV can only be generated correctly after enabling SYSCLK, | ||
2443 | * so we provisionally enable it here and disable it again afterwards | ||
2444 | * if the current bias_level hasn't reached SND_SOC_BIAS_ON. | ||
2445 | */ | ||
2446 | if (codec->dapm.bias_level != SND_SOC_BIAS_ON) | ||
2447 | snd_soc_update_bits(codec, WM8962_CLOCKING2, | ||
2448 | WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA); | ||
2449 | |||
2442 | dspclk = snd_soc_read(codec, WM8962_CLOCKING1); | 2450 | dspclk = snd_soc_read(codec, WM8962_CLOCKING1); |
2451 | |||
2452 | if (codec->dapm.bias_level != SND_SOC_BIAS_ON) | ||
2453 | snd_soc_update_bits(codec, WM8962_CLOCKING2, | ||
2454 | WM8962_SYSCLK_ENA_MASK, 0); | ||
2455 | |||
2443 | if (dspclk < 0) { | 2456 | if (dspclk < 0) { |
2444 | dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); | 2457 | dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); |
2445 | return; | 2458 | return; |
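The wm8962.c hunk wraps the DSPCLK read in a temporary SYSCLK enable: if the codec has not yet reached full bias, SYSCLK is switched on just long enough for the divider to be reported, then switched back off. A minimal sketch of that enable-read-restore shape is below; the helper names are invented for the example and are not the real regmap/snd_soc calls.

#include <stdbool.h>
#include <stdio.h>

/* Fake device state standing in for the codec registers. */
static bool sysclk_enabled;
static int  dspclk_reg = 0x02;   /* arbitrary divider value for the demo */

static void set_sysclk(bool on) { sysclk_enabled = on; }

/* The divider is only reported correctly while SYSCLK is running. */
static int read_dspclk(void)
{
        return sysclk_enabled ? dspclk_reg : -1;
}

/* Enable the clock only if the device is not already fully powered,
 * read the value, then put the clock back the way it was. */
static int read_dspclk_safely(bool fully_powered)
{
        int val;

        if (!fully_powered)
                set_sysclk(true);

        val = read_dspclk();

        if (!fully_powered)
                set_sysclk(false);

        return val;
}

int main(void)
{
        printf("dspclk = %d\n", read_dspclk_safely(false));
        return 0;
}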
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 46ec0e9744d4..4fbcab63e61f 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp) | |||
1474 | return ret; | 1474 | return ret; |
1475 | 1475 | ||
1476 | /* Wait for the RAM to start, should be near instantaneous */ | 1476 | /* Wait for the RAM to start, should be near instantaneous */ |
1477 | count = 0; | 1477 | for (count = 0; count < 10; ++count) { |
1478 | do { | ||
1479 | ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, | 1478 | ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, |
1480 | &val); | 1479 | &val); |
1481 | if (ret != 0) | 1480 | if (ret != 0) |
1482 | return ret; | 1481 | return ret; |
1483 | } while (!(val & ADSP2_RAM_RDY) && ++count < 10); | 1482 | |
1483 | if (val & ADSP2_RAM_RDY) | ||
1484 | break; | ||
1485 | |||
1486 | msleep(1); | ||
1487 | } | ||
1484 | 1488 | ||
1485 | if (!(val & ADSP2_RAM_RDY)) { | 1489 | if (!(val & ADSP2_RAM_RDY)) { |
1486 | adsp_err(dsp, "Failed to start DSP RAM\n"); | 1490 | adsp_err(dsp, "Failed to start DSP RAM\n"); |
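The wm_adsp.c hunk turns the tight do/while poll into a bounded for loop that sleeps for a millisecond between status reads, giving the RAM-ready bit a realistic chance to come up before the driver gives up. The sketch below shows the same bounded-poll-with-sleep shape as ordinary userspace C; the register read is simulated and usleep() stands in for the kernel's msleep().

#include <stdio.h>
#include <unistd.h>

#define RAM_RDY   0x1
#define MAX_POLLS 10

/* Simulated status register: reports ready after a few reads. */
static unsigned int read_status(void)
{
        static int reads;
        return ++reads >= 3 ? RAM_RDY : 0;
}

/* Poll up to MAX_POLLS times, sleeping ~1ms between attempts. */
static int wait_for_ram(void)
{
        unsigned int val;
        int i;

        for (i = 0; i < MAX_POLLS; i++) {
                val = read_status();
                if (val & RAM_RDY)
                        return 0;
                usleep(1000);
        }
        return -1;          /* timed out */
}

int main(void)
{
        puts(wait_for_ram() ? "RAM failed to start" : "RAM ready");
        return 0;
}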
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 61e48852b9e8..3fd76bc391de 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c | |||
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card, | |||
130 | break; | 130 | break; |
131 | } | 131 | } |
132 | 132 | ||
133 | dapm->bias_level = level; | ||
134 | |||
135 | return 0; | 133 | return 0; |
136 | } | 134 | } |
137 | 135 | ||
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 0b18f654b413..3920a5e8125f 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
@@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { | |||
473 | .playback = { | 473 | .playback = { |
474 | .channels_min = 1, | 474 | .channels_min = 1, |
475 | .channels_max = 2, | 475 | .channels_max = 2, |
476 | .rates = SNDRV_PCM_RATE_8000_192000 | | 476 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
477 | SNDRV_PCM_RATE_CONTINUOUS | | 477 | .rate_min = 5512, |
478 | SNDRV_PCM_RATE_KNOT, | 478 | .rate_max = 192000, |
479 | .formats = KIRKWOOD_I2S_FORMATS, | 479 | .formats = KIRKWOOD_I2S_FORMATS, |
480 | }, | 480 | }, |
481 | .capture = { | 481 | .capture = { |
482 | .channels_min = 1, | 482 | .channels_min = 1, |
483 | .channels_max = 2, | 483 | .channels_max = 2, |
484 | .rates = SNDRV_PCM_RATE_8000_192000 | | 484 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
485 | SNDRV_PCM_RATE_CONTINUOUS | | 485 | .rate_min = 5512, |
486 | SNDRV_PCM_RATE_KNOT, | 486 | .rate_max = 192000, |
487 | .formats = KIRKWOOD_I2S_FORMATS, | 487 | .formats = KIRKWOOD_I2S_FORMATS, |
488 | }, | 488 | }, |
489 | .ops = &kirkwood_i2s_dai_ops, | 489 | .ops = &kirkwood_i2s_dai_ops, |
@@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { | |||
494 | .playback = { | 494 | .playback = { |
495 | .channels_min = 1, | 495 | .channels_min = 1, |
496 | .channels_max = 2, | 496 | .channels_max = 2, |
497 | .rates = SNDRV_PCM_RATE_8000_192000 | | 497 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
498 | SNDRV_PCM_RATE_CONTINUOUS | | 498 | .rate_min = 5512, |
499 | SNDRV_PCM_RATE_KNOT, | 499 | .rate_max = 192000, |
500 | .formats = KIRKWOOD_SPDIF_FORMATS, | 500 | .formats = KIRKWOOD_SPDIF_FORMATS, |
501 | }, | 501 | }, |
502 | .capture = { | 502 | .capture = { |
503 | .channels_min = 1, | 503 | .channels_min = 1, |
504 | .channels_max = 2, | 504 | .channels_max = 2, |
505 | .rates = SNDRV_PCM_RATE_8000_192000 | | 505 | .rates = SNDRV_PCM_RATE_CONTINUOUS, |
506 | SNDRV_PCM_RATE_CONTINUOUS | | 506 | .rate_min = 5512, |
507 | SNDRV_PCM_RATE_KNOT, | 507 | .rate_max = 192000, |
508 | .formats = KIRKWOOD_SPDIF_FORMATS, | 508 | .formats = KIRKWOOD_SPDIF_FORMATS, |
509 | }, | 509 | }, |
510 | .ops = &kirkwood_i2s_dai_ops, | 510 | .ops = &kirkwood_i2s_dai_ops, |
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index cbc9c96ce1f4..41949af3baae 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c | |||
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, | |||
305 | } | 305 | } |
306 | } | 306 | } |
307 | 307 | ||
308 | static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) | ||
309 | { | ||
310 | unsigned int i; | ||
311 | |||
312 | for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; | ||
313 | i++) { | ||
314 | if (!pcm->chan[i]) | ||
315 | continue; | ||
316 | dma_release_channel(pcm->chan[i]); | ||
317 | if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) | ||
318 | break; | ||
319 | } | ||
320 | } | ||
321 | |||
308 | /** | 322 | /** |
309 | * snd_dmaengine_pcm_register - Register a dmaengine based PCM device | 323 | * snd_dmaengine_pcm_register - Register a dmaengine based PCM device |
310 | * @dev: The parent device for the PCM device | 324 | * @dev: The parent device for the PCM device |
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev, | |||
315 | const struct snd_dmaengine_pcm_config *config, unsigned int flags) | 329 | const struct snd_dmaengine_pcm_config *config, unsigned int flags) |
316 | { | 330 | { |
317 | struct dmaengine_pcm *pcm; | 331 | struct dmaengine_pcm *pcm; |
332 | int ret; | ||
318 | 333 | ||
319 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); | 334 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); |
320 | if (!pcm) | 335 | if (!pcm) |
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev, | |||
326 | dmaengine_pcm_request_chan_of(pcm, dev); | 341 | dmaengine_pcm_request_chan_of(pcm, dev); |
327 | 342 | ||
328 | if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) | 343 | if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) |
329 | return snd_soc_add_platform(dev, &pcm->platform, | 344 | ret = snd_soc_add_platform(dev, &pcm->platform, |
330 | &dmaengine_no_residue_pcm_platform); | 345 | &dmaengine_no_residue_pcm_platform); |
331 | else | 346 | else |
332 | return snd_soc_add_platform(dev, &pcm->platform, | 347 | ret = snd_soc_add_platform(dev, &pcm->platform, |
333 | &dmaengine_pcm_platform); | 348 | &dmaengine_pcm_platform); |
349 | if (ret) | ||
350 | goto err_free_dma; | ||
351 | |||
352 | return 0; | ||
353 | |||
354 | err_free_dma: | ||
355 | dmaengine_pcm_release_chan(pcm); | ||
356 | kfree(pcm); | ||
357 | return ret; | ||
334 | } | 358 | } |
335 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); | 359 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); |
336 | 360 | ||
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev) | |||
345 | { | 369 | { |
346 | struct snd_soc_platform *platform; | 370 | struct snd_soc_platform *platform; |
347 | struct dmaengine_pcm *pcm; | 371 | struct dmaengine_pcm *pcm; |
348 | unsigned int i; | ||
349 | 372 | ||
350 | platform = snd_soc_lookup_platform(dev); | 373 | platform = snd_soc_lookup_platform(dev); |
351 | if (!platform) | 374 | if (!platform) |
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev) | |||
353 | 376 | ||
354 | pcm = soc_platform_to_pcm(platform); | 377 | pcm = soc_platform_to_pcm(platform); |
355 | 378 | ||
356 | for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) { | ||
357 | if (pcm->chan[i]) { | ||
358 | dma_release_channel(pcm->chan[i]); | ||
359 | if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | snd_soc_remove_platform(platform); | 379 | snd_soc_remove_platform(platform); |
380 | dmaengine_pcm_release_chan(pcm); | ||
365 | kfree(pcm); | 381 | kfree(pcm); |
366 | } | 382 | } |
367 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); | 383 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); |
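The soc-generic-dmaengine-pcm.c hunks factor the channel teardown into one dmaengine_pcm_release_chan() helper and call it from both the new error path in snd_dmaengine_pcm_register() and the tail of snd_dmaengine_pcm_unregister(), so a failed platform registration no longer leaks the requested DMA channels. Here is a toy sketch of that shared-cleanup-on-error shape; the structure and function names are made up for illustration and are not the ALSA API.

#include <stdio.h>
#include <stdlib.h>

struct pcm {
        void *chan[2];          /* pretend DMA channels */
};

static void release_channels(struct pcm *p)
{
        int i;

        for (i = 0; i < 2; i++) {
                free(p->chan[i]);
                p->chan[i] = NULL;
        }
}

/* Pretend platform registration that can fail. */
static int add_platform(int fail) { return fail ? -1 : 0; }

static int pcm_register(int fail, struct pcm **out)
{
        struct pcm *p = calloc(1, sizeof(*p));
        int ret;

        if (!p)
                return -1;

        p->chan[0] = malloc(16);
        p->chan[1] = malloc(16);

        ret = add_platform(fail);
        if (ret)
                goto err_free_dma;

        *out = p;
        return 0;

err_free_dma:
        /* same helper the normal unregister path uses */
        release_channels(p);
        free(p);
        return ret;
}

int main(void)
{
        struct pcm *p;

        printf("register: %d\n", pcm_register(1, &p));
        return 0;
}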
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 11a90cd027fa..891b9a9bcbf8 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
@@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) | |||
600 | struct snd_soc_platform *platform = rtd->platform; | 600 | struct snd_soc_platform *platform = rtd->platform; |
601 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; | 601 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; |
602 | struct snd_soc_dai *codec_dai = rtd->codec_dai; | 602 | struct snd_soc_dai *codec_dai = rtd->codec_dai; |
603 | struct snd_soc_codec *codec = rtd->codec; | 603 | bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; |
604 | 604 | ||
605 | mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); | 605 | mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); |
606 | 606 | ||
607 | /* apply codec digital mute */ | 607 | /* apply codec digital mute */ |
608 | if (!codec->active) | 608 | if ((playback && codec_dai->playback_active == 1) || |
609 | (!playback && codec_dai->capture_active == 1)) | ||
609 | snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); | 610 | snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); |
610 | 611 | ||
611 | /* free any machine hw params */ | 612 | /* free any machine hw params */ |
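The soc-pcm.c hunk makes the hw_free mute decision per direction: the codec DAI is muted only when the stream being torn down is the last active one in its own direction, instead of checking whether the whole codec is idle. A small illustrative check follows; the structure and counter names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct dai {
        int playback_active;    /* streams currently open, per direction */
        int capture_active;
};

/* Mute only when this is the last active stream in its direction. */
static bool should_mute(const struct dai *d, bool playback)
{
        return (playback && d->playback_active == 1) ||
               (!playback && d->capture_active == 1);
}

int main(void)
{
        struct dai d = { .playback_active = 1, .capture_active = 2 };

        printf("mute playback: %d\n", should_mute(&d, true));
        printf("mute capture:  %d\n", should_mute(&d, false));
        return 0;
}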
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c index 364bf6a907e1..8c819f811470 100644 --- a/sound/soc/tegra/tegra20_i2s.c +++ b/sound/soc/tegra/tegra20_i2s.c | |||
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, | |||
74 | unsigned int fmt) | 74 | unsigned int fmt) |
75 | { | 75 | { |
76 | struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 76 | struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
77 | unsigned int mask, val; | 77 | unsigned int mask = 0, val = 0; |
78 | 78 | ||
79 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 79 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
80 | case SND_SOC_DAIFMT_NB_NF: | 80 | case SND_SOC_DAIFMT_NB_NF: |
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, | |||
83 | return -EINVAL; | 83 | return -EINVAL; |
84 | } | 84 | } |
85 | 85 | ||
86 | mask = TEGRA20_I2S_CTRL_MASTER_ENABLE; | 86 | mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE; |
87 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 87 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
88 | case SND_SOC_DAIFMT_CBS_CFS: | 88 | case SND_SOC_DAIFMT_CBS_CFS: |
89 | val = TEGRA20_I2S_CTRL_MASTER_ENABLE; | 89 | val |= TEGRA20_I2S_CTRL_MASTER_ENABLE; |
90 | break; | 90 | break; |
91 | case SND_SOC_DAIFMT_CBM_CFM: | 91 | case SND_SOC_DAIFMT_CBM_CFM: |
92 | break; | 92 | break; |
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c index 08bc6931c7c7..8c7c1028e579 100644 --- a/sound/soc/tegra/tegra20_spdif.c +++ b/sound/soc/tegra/tegra20_spdif.c | |||
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream, | |||
67 | { | 67 | { |
68 | struct device *dev = dai->dev; | 68 | struct device *dev = dai->dev; |
69 | struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); | 69 | struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); |
70 | unsigned int mask, val; | 70 | unsigned int mask = 0, val = 0; |
71 | int ret, spdifclock; | 71 | int ret, spdifclock; |
72 | 72 | ||
73 | mask = TEGRA20_SPDIF_CTRL_PACK | | 73 | mask |= TEGRA20_SPDIF_CTRL_PACK | |
74 | TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; | 74 | TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; |
75 | switch (params_format(params)) { | 75 | switch (params_format(params)) { |
76 | case SNDRV_PCM_FORMAT_S16_LE: | 76 | case SNDRV_PCM_FORMAT_S16_LE: |
77 | val = TEGRA20_SPDIF_CTRL_PACK | | 77 | val |= TEGRA20_SPDIF_CTRL_PACK | |
78 | TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; | 78 | TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; |
79 | break; | 79 | break; |
80 | default: | 80 | default: |
81 | return -EINVAL; | 81 | return -EINVAL; |
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 231a785b3921..02247fee1cf7 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c | |||
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, | |||
118 | unsigned int fmt) | 118 | unsigned int fmt) |
119 | { | 119 | { |
120 | struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 120 | struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
121 | unsigned int mask, val; | 121 | unsigned int mask = 0, val = 0; |
122 | 122 | ||
123 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 123 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
124 | case SND_SOC_DAIFMT_NB_NF: | 124 | case SND_SOC_DAIFMT_NB_NF: |
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, | |||
127 | return -EINVAL; | 127 | return -EINVAL; |
128 | } | 128 | } |
129 | 129 | ||
130 | mask = TEGRA30_I2S_CTRL_MASTER_ENABLE; | 130 | mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE; |
131 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 131 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
132 | case SND_SOC_DAIFMT_CBS_CFS: | 132 | case SND_SOC_DAIFMT_CBS_CFS: |
133 | val = TEGRA30_I2S_CTRL_MASTER_ENABLE; | 133 | val |= TEGRA30_I2S_CTRL_MASTER_ENABLE; |
134 | break; | 134 | break; |
135 | case SND_SOC_DAIFMT_CBM_CFM: | 135 | case SND_SOC_DAIFMT_CBM_CFM: |
136 | break; | 136 | break; |
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 3454262358b3..f4b12c216f1c 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c | |||
@@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer) | |||
1603 | return err; | 1603 | return err; |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | return err; | 1606 | return 0; |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) | 1609 | int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) |
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index dc4de3762111..bcf1d2f0b791 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include "helpers/bitmask.h" | 18 | #include "helpers/bitmask.h" |
19 | 19 | ||
20 | static struct option set_opts[] = { | 20 | static struct option set_opts[] = { |
21 | { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, | 21 | { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, |
22 | { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, | 22 | { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'}, |
23 | { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, | 23 | { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'}, |
24 | { }, | 24 | { }, |
25 | }; | 25 | }; |
26 | 26 | ||
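The cpupower-set.c hunk switches the long options from optional_argument to required_argument, so getopt_long() itself enforces that --perf-bias, --sched-mc and --sched-smt are given a value; with optional_argument, GNU getopt_long() only picks up a value written as --perf-bias=15, so a space-separated value is left in argv and optarg stays NULL. A minimal standalone illustration of the required_argument behaviour:

#include <getopt.h>
#include <stdio.h>

static struct option opts[] = {
        { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b' },
        { 0 },
};

int main(int argc, char **argv)
{
        int c;

        /* With required_argument, both "--perf-bias 15" and "--perf-bias=15"
         * hand the value to optarg; a missing value is reported as an error. */
        while ((c = getopt_long(argc, argv, "b:", opts, NULL)) != -1) {
                if (c == 'b')
                        printf("perf-bias = %s\n", optarg);
        }
        return 0;
}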
diff --git a/tools/usb/Makefile b/tools/usb/Makefile index 396d6c44e9d7..acf2165c04e6 100644 --- a/tools/usb/Makefile +++ b/tools/usb/Makefile | |||
@@ -3,11 +3,12 @@ | |||
3 | CC = $(CROSS_COMPILE)gcc | 3 | CC = $(CROSS_COMPILE)gcc |
4 | PTHREAD_LIBS = -lpthread | 4 | PTHREAD_LIBS = -lpthread |
5 | WARNINGS = -Wall -Wextra | 5 | WARNINGS = -Wall -Wextra |
6 | CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) -I../include | 6 | CFLAGS = $(WARNINGS) -g -I../include |
7 | LDFLAGS = $(PTHREAD_LIBS) | ||
7 | 8 | ||
8 | all: testusb ffs-test | 9 | all: testusb ffs-test |
9 | %: %.c | 10 | %: %.c |
10 | $(CC) $(CFLAGS) -o $@ $^ | 11 | $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) |
11 | 12 | ||
12 | clean: | 13 | clean: |
13 | $(RM) testusb ffs-test | 14 | $(RM) testusb ffs-test |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a0aa84b5941a..4f588bc94186 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1898,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) | |||
1898 | int r; | 1898 | int r; |
1899 | struct kvm_vcpu *vcpu, *v; | 1899 | struct kvm_vcpu *vcpu, *v; |
1900 | 1900 | ||
1901 | if (id >= KVM_MAX_VCPUS) | ||
1902 | return -EINVAL; | ||
1903 | |||
1901 | vcpu = kvm_arch_vcpu_create(kvm, id); | 1904 | vcpu = kvm_arch_vcpu_create(kvm, id); |
1902 | if (IS_ERR(vcpu)) | 1905 | if (IS_ERR(vcpu)) |
1903 | return PTR_ERR(vcpu); | 1906 | return PTR_ERR(vcpu); |
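The kvm_main.c hunk rejects an out-of-range vcpu id up front, before any allocation or arch setup happens. The pattern is simply an early bounds check on caller-supplied input; a trivial sketch, with MAX_VCPUS standing in for KVM_MAX_VCPUS:

#include <errno.h>
#include <stdio.h>

#define MAX_VCPUS 255   /* stand-in for KVM_MAX_VCPUS */

static int create_vcpu(unsigned int id)
{
        if (id >= MAX_VCPUS)
                return -EINVAL;          /* reject before allocating anything */

        /* ... allocation and setup would go here ... */
        return 0;
}

int main(void)
{
        printf("%d\n", create_vcpu(300));
        return 0;
}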