author     David S. Miller <davem@davemloft.net>    2011-12-02 13:49:21 -0500
committer  David S. Miller <davem@davemloft.net>    2011-12-02 13:49:21 -0500
commit     b3613118eb30a589d971e4eccbbb2a1314f5dfd4
tree       868c1ee59e1b5c19a4f2e43716400d0001a994e5
parent     7505afe28c16a8d386624930a018d0052c75d687
parent     5983fe2b29df5885880d7fa3b91aca306c7564ef
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
436 files changed, 5400 insertions, 3493 deletions
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 54883de5d5f9..ac3d0018140c 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -521,6 +521,11 @@ Here's a description of the fields of <varname>struct uio_mem</varname>: | |||
521 | 521 | ||
522 | <itemizedlist> | 522 | <itemizedlist> |
523 | <listitem><para> | 523 | <listitem><para> |
524 | <varname>const char *name</varname>: Optional. Set this to help identify | ||
525 | the memory region, it will show up in the corresponding sysfs node. | ||
526 | </para></listitem> | ||
527 | |||
528 | <listitem><para> | ||
524 | <varname>int memtype</varname>: Required if the mapping is used. Set this to | 529 | <varname>int memtype</varname>: Required if the mapping is used. Set this to |
525 | <varname>UIO_MEM_PHYS</varname> if you have physical memory on your | 530 | <varname>UIO_MEM_PHYS</varname> if you have physical memory on your |
526 | card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical | 531 | card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical |
@@ -553,7 +558,7 @@ instead to remember such an address. | |||
553 | </itemizedlist> | 558 | </itemizedlist> |
554 | 559 | ||
555 | <para> | 560 | <para> |
556 | Please do not touch the <varname>kobj</varname> element of | 561 | Please do not touch the <varname>map</varname> element of |
557 | <varname>struct uio_mem</varname>! It is used by the UIO framework | 562 | <varname>struct uio_mem</varname>! It is used by the UIO framework |
558 | to set up sysfs files for this mapping. Simply leave it alone. | 563 | to set up sysfs files for this mapping. Simply leave it alone. |
559 | </para> | 564 | </para> |
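
To put the new <varname>name</varname> field in context, here is a minimal sketch of a UIO driver filling in struct uio_mem, assuming a hypothetical platform device with one register bank; all driver-specific names are illustrative, not taken from this patch:

/*
 * Sketch only: a hypothetical UIO driver filling in struct uio_mem,
 * including the optional "name" field introduced by this patch.
 */
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static struct uio_info my_uio_info;		/* hypothetical driver state */

static int my_uio_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	my_uio_info.name = "my_uio";		/* illustrative names */
	my_uio_info.version = "0.1";

	/* First mapping; it appears under .../maps/map0 in sysfs. */
	my_uio_info.mem[0].name    = "registers";	/* optional, shows up in sysfs */
	my_uio_info.mem[0].addr    = res->start;	/* physical address */
	my_uio_info.mem[0].size    = resource_size(res);
	my_uio_info.mem[0].memtype = UIO_MEM_PHYS;	/* required if the mapping is used */

	return uio_register_device(&pdev->dev, &my_uio_info);
}
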
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b440..874921e97802 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ qcom Qualcomm, Inc. | |||
33 | ramtron Ramtron International | 33 | ramtron Ramtron International |
34 | samsung Samsung Semiconductor | 34 | samsung Samsung Semiconductor |
35 | schindler Schindler | 35 | schindler Schindler |
36 | sil Silicon Image | ||
36 | simtek | 37 | simtek |
37 | sirf SiRF Technology, Inc. | 38 | sirf SiRF Technology, Inc. |
38 | stericsson ST-Ericsson | 39 | stericsson ST-Ericsson |
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327f..7671352216f1 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network. | |||
63 | Userspace tools for creating and manipulating Btrfs file systems are | 63 | Userspace tools for creating and manipulating Btrfs file systems are |
64 | available from the git repository at the following location: | 64 | available from the git repository at the following location: |
65 | 65 | ||
66 | http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git | 66 | http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git |
67 | git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git | 67 | git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git |
68 | 68 | ||
69 | These include the following tools: | 69 | These include the following tools: |
70 | 70 | ||
diff --git a/Documentation/i2c/ten-bit-addresses b/Documentation/i2c/ten-bit-addresses
index e9890709c508..cdfe13901b99 100644
--- a/Documentation/i2c/ten-bit-addresses
+++ b/Documentation/i2c/ten-bit-addresses
@@ -1,22 +1,24 @@ | |||
1 | The I2C protocol knows about two kinds of device addresses: normal 7 bit | 1 | The I2C protocol knows about two kinds of device addresses: normal 7 bit |
2 | addresses, and an extended set of 10 bit addresses. The sets of addresses | 2 | addresses, and an extended set of 10 bit addresses. The sets of addresses |
3 | do not intersect: the 7 bit address 0x10 is not the same as the 10 bit | 3 | do not intersect: the 7 bit address 0x10 is not the same as the 10 bit |
4 | address 0x10 (though a single device could respond to both of them). You | 4 | address 0x10 (though a single device could respond to both of them). |
5 | select a 10 bit address by adding an extra byte after the address | ||
6 | byte: | ||
7 | S Addr7 Rd/Wr .... | ||
8 | becomes | ||
9 | S 11110 Addr10 Rd/Wr | ||
10 | S is the start bit, Rd/Wr the read/write bit, and if you count the number | ||
11 | of bits, you will see that there are 8 after the S bit for 7 bit addresses, | ||
12 | and 16 after the S bit for 10 bit addresses. | ||
13 | 5 | ||
14 | WARNING! The current 10 bit address support is EXPERIMENTAL. There are | 6 | I2C messages to and from 10-bit address devices have a different format. |
15 | several places in the code that will cause SEVERE PROBLEMS with 10 bit | 7 | See the I2C specification for the details. |
16 | addresses, even though there is some basic handling and hooks. Also, | ||
17 | almost no supported adapter handles the 10 bit addresses correctly. | ||
18 | 8 | ||
19 | As soon as a real 10 bit address device is spotted 'in the wild', we | 9 | The current 10 bit address support is minimal. It should work, however |
20 | can and will add proper support. Right now, 10 bit address devices | 10 | you can expect some problems along the way: |
21 | are defined by the I2C protocol, but we have never seen a single device | 11 | * Not all bus drivers support 10-bit addresses. Some don't because the |
22 | which supports them. | 12 | hardware doesn't support them (SMBus doesn't require 10-bit address |
13 | support for example), some don't because nobody bothered adding the | ||
14 | code (or it's there but not working properly.) Software implementation | ||
15 | (i2c-algo-bit) is known to work. | ||
16 | * Some optional features do not support 10-bit addresses. This is the | ||
17 | case of automatic detection and instantiation of devices by their | ||
18 | drivers, for example. | ||
19 | * Many user-space packages (for example i2c-tools) lack support for | ||
20 | 10-bit addresses. | ||
21 | |||
22 | Note that 10-bit address devices are still pretty rare, so the limitations | ||
23 | listed above could stay for a long time, maybe even forever if nobody | ||
24 | needs them to be fixed. | ||
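
For illustration, a hedged user-space sketch of addressing a 10-bit slave through the I2C_RDWR ioctl; the bus number, slave address and register are made up, and the adapter must actually support 10-bit addressing:

/*
 * Sketch: read one byte from a hypothetical 10-bit slave at address 0x2f0
 * on /dev/i2c-0 using the I2C_M_TEN message flag.  The adapter must report
 * I2C_FUNC_10BIT_ADDR for this to work.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x2f0, .flags = I2C_M_TEN,            .len = 1, .buf = &reg },
		{ .addr = 0x2f0, .flags = I2C_M_TEN | I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("10-bit transfer failed (bus driver may lack support)");
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}
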
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07d..3139fb505dce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it. | |||
123 | Subsystem-Level Methods | 123 | Subsystem-Level Methods |
124 | ----------------------- | 124 | ----------------------- |
125 | The core methods to suspend and resume devices reside in struct dev_pm_ops | 125 | The core methods to suspend and resume devices reside in struct dev_pm_ops |
126 | pointed to by the pm member of struct bus_type, struct device_type and | 126 | pointed to by the ops member of struct dev_pm_domain, or by the pm member of |
127 | struct class. They are mostly of interest to the people writing infrastructure | 127 | struct bus_type, struct device_type and struct class. They are mostly of |
128 | for buses, like PCI or USB, or device type and device class drivers. | 128 | interest to the people writing infrastructure for platforms and buses, like PCI |
129 | or USB, or device type and device class drivers. | ||
129 | 130 | ||
130 | Bus drivers implement these methods as appropriate for the hardware and the | 131 | Bus drivers implement these methods as appropriate for the hardware and the |
131 | drivers using it; PCI works differently from USB, and so on. Not many people | 132 | drivers using it; PCI works differently from USB, and so on. Not many people |
@@ -139,41 +140,57 @@ sequencing in the driver model tree. | |||
139 | 140 | ||
140 | /sys/devices/.../power/wakeup files | 141 | /sys/devices/.../power/wakeup files |
141 | ----------------------------------- | 142 | ----------------------------------- |
142 | All devices in the driver model have two flags to control handling of wakeup | 143 | All device objects in the driver model contain fields that control the handling |
143 | events (hardware signals that can force the device and/or system out of a low | 144 | of system wakeup events (hardware signals that can force the system out of a |
144 | power state). These flags are initialized by bus or device driver code using | 145 | sleep state). These fields are initialized by bus or device driver code using |
145 | device_set_wakeup_capable() and device_set_wakeup_enable(), defined in | 146 | device_set_wakeup_capable() and device_set_wakeup_enable(), defined in |
146 | include/linux/pm_wakeup.h. | 147 | include/linux/pm_wakeup.h. |
147 | 148 | ||
148 | The "can_wakeup" flag just records whether the device (and its driver) can | 149 | The "power.can_wakeup" flag just records whether the device (and its driver) can |
149 | physically support wakeup events. The device_set_wakeup_capable() routine | 150 | physically support wakeup events. The device_set_wakeup_capable() routine |
150 | affects this flag. The "should_wakeup" flag controls whether the device should | 151 | affects this flag. The "power.wakeup" field is a pointer to an object of type |
151 | try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag; | 152 | struct wakeup_source used for controlling whether or not the device should use |
152 | for the most part drivers should not change its value. The initial value of | 153 | its system wakeup mechanism and for notifying the PM core of system wakeup |
153 | should_wakeup is supposed to be false for the majority of devices; the major | 154 | events signaled by the device. This object is only present for wakeup-capable |
154 | exceptions are power buttons, keyboards, and Ethernet adapters whose WoL | 155 | devices (i.e. devices whose "can_wakeup" flags are set) and is created (or |
155 | (wake-on-LAN) feature has been set up with ethtool. It should also default | 156 | removed) by device_set_wakeup_capable(). |
156 | to true for devices that don't generate wakeup requests on their own but merely | ||
157 | forward wakeup requests from one bus to another (like PCI bridges). | ||
158 | 157 | ||
159 | Whether or not a device is capable of issuing wakeup events is a hardware | 158 | Whether or not a device is capable of issuing wakeup events is a hardware |
160 | matter, and the kernel is responsible for keeping track of it. By contrast, | 159 | matter, and the kernel is responsible for keeping track of it. By contrast, |
161 | whether or not a wakeup-capable device should issue wakeup events is a policy | 160 | whether or not a wakeup-capable device should issue wakeup events is a policy |
162 | decision, and it is managed by user space through a sysfs attribute: the | 161 | decision, and it is managed by user space through a sysfs attribute: the |
163 | power/wakeup file. User space can write the strings "enabled" or "disabled" to | 162 | "power/wakeup" file. User space can write the strings "enabled" or "disabled" |
164 | set or clear the "should_wakeup" flag, respectively. This file is only present | 163 | to it to indicate whether or not, respectively, the device is supposed to signal |
165 | for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set) | 164 | system wakeup. This file is only present if the "power.wakeup" object exists |
166 | and is created (or removed) by device_set_wakeup_capable(). Reads from the | 165 | for the given device and is created (or removed) along with that object, by |
167 | file will return the corresponding string. | 166 | device_set_wakeup_capable(). Reads from the file will return the corresponding |
168 | 167 | string. | |
169 | The device_may_wakeup() routine returns true only if both flags are set. | 168 | |
169 | The "power/wakeup" file is supposed to contain the "disabled" string initially | ||
170 | for the majority of devices; the major exceptions are power buttons, keyboards, | ||
171 | and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with | ||
172 | ethtool. It should also default to "enabled" for devices that don't generate | ||
173 | wakeup requests on their own but merely forward wakeup requests from one bus to | ||
174 | another (like PCI Express ports). | ||
175 | |||
176 | The device_may_wakeup() routine returns true only if the "power.wakeup" object | ||
177 | exists and the corresponding "power/wakeup" file contains the string "enabled". | ||
170 | This information is used by subsystems, like the PCI bus type code, to see | 178 | This information is used by subsystems, like the PCI bus type code, to see |
171 | whether or not to enable the devices' wakeup mechanisms. If device wakeup | 179 | whether or not to enable the devices' wakeup mechanisms. If device wakeup |
172 | mechanisms are enabled or disabled directly by drivers, they also should use | 180 | mechanisms are enabled or disabled directly by drivers, they also should use |
173 | device_may_wakeup() to decide what to do during a system sleep transition. | 181 | device_may_wakeup() to decide what to do during a system sleep transition. |
174 | However for runtime power management, wakeup events should be enabled whenever | 182 | Device drivers, however, are not supposed to call device_set_wakeup_enable() |
175 | the device and driver both support them, regardless of the should_wakeup flag. | 183 | directly in any case. |
176 | 184 | ||
185 | It ought to be noted that system wakeup is conceptually different from "remote | ||
186 | wakeup" used by runtime power management, although it may be supported by the | ||
187 | same physical mechanism. Remote wakeup is a feature allowing devices in | ||
188 | low-power states to trigger specific interrupts to signal conditions in which | ||
189 | they should be put into the full-power state. Those interrupts may or may not | ||
190 | be used to signal system wakeup events, depending on the hardware design. On | ||
191 | some systems it is impossible to trigger them from system sleep states. In any | ||
192 | case, remote wakeup should always be enabled for runtime power management for | ||
193 | all devices and drivers that support it. | ||
177 | 194 | ||
178 | /sys/devices/.../power/control files | 195 | /sys/devices/.../power/control files |
179 | ------------------------------------ | 196 | ------------------------------------ |
@@ -249,20 +266,31 @@ for every device before the next phase begins. Not all busses or classes | |||
249 | support all these callbacks and not all drivers use all the callbacks. The | 266 | support all these callbacks and not all drivers use all the callbacks. The |
250 | various phases always run after tasks have been frozen and before they are | 267 | various phases always run after tasks have been frozen and before they are |
251 | unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have | 268 | unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have |
252 | been disabled (except for those marked with the IRQ_WAKEUP flag). | 269 | been disabled (except for those marked with the IRQF_NO_SUSPEND flag). |
253 | 270 | ||
254 | All phases use bus, type, or class callbacks (that is, methods defined in | 271 | All phases use PM domain, bus, type, or class callbacks (that is, methods |
255 | dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually | 272 | defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm). |
256 | exclusive, so if the device type provides a struct dev_pm_ops object pointed to | 273 | These callbacks are regarded by the PM core as mutually exclusive. Moreover, |
257 | by its pm field (i.e. both dev->type and dev->type->pm are defined), the | 274 | PM domain callbacks always take precedence over bus, type and class callbacks, |
258 | callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise, | 275 | while type callbacks take precedence over bus and class callbacks, and class |
259 | if the class provides a struct dev_pm_ops object pointed to by its pm field | 276 | callbacks take precedence over bus callbacks. To be precise, the following |
260 | (i.e. both dev->class and dev->class->pm are defined), the PM core will use the | 277 | rules are used to determine which callback to execute in the given phase: |
261 | callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of | 278 | |
262 | both the device type and class objects are NULL (or those objects do not exist), | 279 | 1. If dev->pm_domain is present, the PM core will attempt to execute the |
263 | the callbacks provided by the bus (that is, the callbacks from dev->bus->pm) | 280 | callback included in dev->pm_domain->ops. If that callback is not |
264 | will be used (this allows device types to override callbacks provided by bus | 281 | present, no action will be carried out for the given device. |
265 | types or classes if necessary). | 282 | |
283 | 2. Otherwise, if both dev->type and dev->type->pm are present, the callback | ||
284 | included in dev->type->pm will be executed. | ||
285 | |||
286 | 3. Otherwise, if both dev->class and dev->class->pm are present, the | ||
287 | callback included in dev->class->pm will be executed. | ||
288 | |||
289 | 4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback | ||
290 | included in dev->bus->pm will be executed. | ||
291 | |||
292 | This allows PM domains and device types to override callbacks provided by bus | ||
293 | types or device classes if necessary. | ||
266 | 294 | ||
267 | These callbacks may in turn invoke device- or driver-specific methods stored in | 295 | These callbacks may in turn invoke device- or driver-specific methods stored in |
268 | dev->driver->pm, but they don't have to. | 296 | dev->driver->pm, but they don't have to. |
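
The four rules above boil down to a simple selection order. A rough sketch of the idea in C (not the PM core's actual code; the helper name is invented):

/*
 * Illustrative only: roughly how the PM core chooses which dev_pm_ops to
 * use for a device, following rules 1-4 above.  Not the real implementation.
 */
#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops *pick_subsystem_pm_ops(struct device *dev)
{
	if (dev->pm_domain)
		return &dev->pm_domain->ops;	/* 1. PM domain wins outright */
	if (dev->type && dev->type->pm)
		return dev->type->pm;		/* 2. then the device type */
	if (dev->class && dev->class->pm)
		return dev->class->pm;		/* 3. then the device class */
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;		/* 4. finally the bus type */
	return NULL;				/* no subsystem-level callbacks */
}
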
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are: | |||
283 | 311 | ||
284 | After the prepare callback method returns, no new children may be | 312 | After the prepare callback method returns, no new children may be |
285 | registered below the device. The method may also prepare the device or | 313 | registered below the device. The method may also prepare the device or |
286 | driver in some way for the upcoming system power transition (for | 314 | driver in some way for the upcoming system power transition, but it |
287 | example, by allocating additional memory required for this purpose), but | 315 | should not put the device into a low-power state. |
288 | it should not put the device into a low-power state. | ||
289 | 316 | ||
290 | 2. The suspend methods should quiesce the device to stop it from performing | 317 | 2. The suspend methods should quiesce the device to stop it from performing |
291 | I/O. They also may save the device registers and put it into the | 318 | I/O. They also may save the device registers and put it into the |
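
Tying the wakeup flags discussed earlier to the suspend phase, a hedged sketch of a driver suspend callback that quiesces its device and consults device_may_wakeup(); everything prefixed "my_" is a made-up placeholder:

/*
 * Sketch of a driver suspend callback: quiesce the hardware and only arm
 * its wakeup interrupt if user space left power/wakeup at "enabled".
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct my_chip {
	int irq;				/* hypothetical device data */
};

static void my_chip_stop_io(struct my_chip *chip)   { /* stop DMA, mask device IRQs */ }
static void my_chip_save_regs(struct my_chip *chip) { /* save context for resume */ }

static int my_driver_suspend(struct device *dev)
{
	struct my_chip *chip = dev_get_drvdata(dev);

	my_chip_stop_io(chip);			/* quiesce: no new I/O */
	my_chip_save_regs(chip);

	if (device_may_wakeup(dev))		/* policy from the power/wakeup file */
		enable_irq_wake(chip->irq);

	return 0;
}
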
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831b..c2ae8bf77d46 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,25 +44,33 @@ struct dev_pm_ops { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks | 46 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks |
47 | are executed by the PM core for either the power domain, or the device type | 47 | are executed by the PM core for the device's subsystem that may be either of |
48 | (if the device power domain's struct dev_pm_ops does not exist), or the class | 48 | the following: |
49 | (if the device power domain's and type's struct dev_pm_ops object does not | 49 | |
50 | exist), or the bus type (if the device power domain's, type's and class' | 50 | 1. PM domain of the device, if the device's PM domain object, dev->pm_domain, |
51 | struct dev_pm_ops objects do not exist) of the given device, so the priority | 51 | is present. |
52 | order of callbacks from high to low is that power domain callbacks, device | 52 | |
53 | type callbacks, class callbacks and bus type callbacks, and the high priority | 53 | 2. Device type of the device, if both dev->type and dev->type->pm are present. |
54 | one will take precedence over low priority one. The bus type, device type and | 54 | |
55 | class callbacks are referred to as subsystem-level callbacks in what follows, | 55 | 3. Device class of the device, if both dev->class and dev->class->pm are |
56 | and generally speaking, the power domain callbacks are used for representing | 56 | present. |
57 | power domains within a SoC. | 57 | |
58 | 4. Bus type of the device, if both dev->bus and dev->bus->pm are present. | ||
59 | |||
60 | The PM core always checks which callback to use in the order given above, so the | ||
61 | priority order of callbacks from high to low is: PM domain, device type, class | ||
62 | and bus type. Moreover, the high-priority one will always take precedence over | ||
63 | a low-priority one. The PM domain, bus type, device type and class callbacks | ||
64 | are referred to as subsystem-level callbacks in what follows. | ||
58 | 65 | ||
59 | By default, the callbacks are always invoked in process context with interrupts | 66 | By default, the callbacks are always invoked in process context with interrupts |
60 | enabled. However, subsystems can use the pm_runtime_irq_safe() helper function | 67 | enabled. However, subsystems can use the pm_runtime_irq_safe() helper function |
61 | to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume() | 68 | to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and |
62 | callbacks should be invoked in atomic context with interrupts disabled. | 69 | ->runtime_idle() callbacks may be invoked in atomic context with interrupts |
63 | This implies that these callback routines must not block or sleep, but it also | 70 | disabled for a given device. This implies that the callback routines in |
64 | means that the synchronous helper functions listed at the end of Section 4 can | 71 | question must not block or sleep, but it also means that the synchronous helper |
65 | be used within an interrupt handler or in an atomic context. | 72 | functions listed at the end of Section 4 may be used for that device within an |
73 | interrupt handler or generally in an atomic context. | ||
66 | 74 | ||
67 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling | 75 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling |
68 | the suspend of the device as appropriate, which may, but need not include | 76 | the suspend of the device as appropriate, which may, but need not include |
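
As a rough illustration of the pm_runtime_irq_safe() note above, a sketch of a driver declaring runtime PM callbacks and marking them IRQ-safe; all "my_" names are placeholders and the callback bodies are stubs:

/*
 * Sketch: a driver declaring runtime PM callbacks and marking them IRQ-safe.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* must not sleep once pm_runtime_irq_safe() has been called */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

static int my_probe(struct platform_device *pdev)
{
	pm_runtime_irq_safe(&pdev->dev);	/* callbacks may run with IRQs off */
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.driver	= {
		.name	= "my-device",		/* hypothetical */
		.pm	= &my_pm_ops,
	},
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");
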
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index 079cb3df62cf..41c8378c0b2f 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -97,15 +97,23 @@ | |||
97 | 97 | ||
98 | struct serial_rs485 rs485conf; | 98 | struct serial_rs485 rs485conf; |
99 | 99 | ||
100 | /* Set RS485 mode: */ | 100 | /* Enable RS485 mode: */ |
101 | rs485conf.flags |= SER_RS485_ENABLED; | 101 | rs485conf.flags |= SER_RS485_ENABLED; |
102 | 102 | ||
103 | /* Set logical level for RTS pin equal to 1 when sending: */ | ||
104 | rs485conf.flags |= SER_RS485_RTS_ON_SEND; | ||
105 | /* or, set logical level for RTS pin equal to 0 when sending: */ | ||
106 | rs485conf.flags &= ~(SER_RS485_RTS_ON_SEND); | ||
107 | |||
108 | /* Set logical level for RTS pin equal to 1 after sending: */ | ||
109 | rs485conf.flags |= SER_RS485_RTS_AFTER_SEND; | ||
110 | /* or, set logical level for RTS pin equal to 0 after sending: */ | ||
111 | rs485conf.flags &= ~(SER_RS485_RTS_AFTER_SEND); | ||
112 | |||
103 | /* Set rts delay before send, if needed: */ | 113 | /* Set rts delay before send, if needed: */ |
104 | rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND; | ||
105 | rs485conf.delay_rts_before_send = ...; | 114 | rs485conf.delay_rts_before_send = ...; |
106 | 115 | ||
107 | /* Set rts delay after send, if needed: */ | 116 | /* Set rts delay after send, if needed: */ |
108 | rs485conf.flags |= SER_RS485_RTS_AFTER_SEND; | ||
109 | rs485conf.delay_rts_after_send = ...; | 117 | rs485conf.delay_rts_after_send = ...; |
110 | 118 | ||
111 | /* Set this flag if you want to receive data even whilst sending data */ | 119 | /* Set this flag if you want to receive data even whilst sending data */ |
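
A hedged end-to-end sketch of applying such a configuration with the TIOCSRS485 ioctl; "/dev/ttyS1" is only an example device node:

/*
 * Sketch: applying a struct serial_rs485 configuration to a port.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485conf;
	int fd = open("/dev/ttyS1", O_RDWR);

	/* Start from the driver's current settings. */
	if (fd < 0 || ioctl(fd, TIOCGRS485, &rs485conf) < 0) {
		perror("TIOCGRS485");
		return 1;
	}

	rs485conf.flags |= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485conf.flags &= ~SER_RS485_RTS_AFTER_SEND;
	rs485conf.delay_rts_before_send = 0;
	rs485conf.delay_rts_after_send = 0;

	if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
		perror("TIOCSRS485 (driver may not support RS485)");

	close(fd);
	return 0;
}
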
diff --git a/MAINTAINERS b/MAINTAINERS
index 717d9e959b15..c88eb7bb3a69 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -789,6 +789,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | |||
789 | S: Maintained | 789 | S: Maintained |
790 | T: git git://git.pengutronix.de/git/imx/linux-2.6.git | 790 | T: git git://git.pengutronix.de/git/imx/linux-2.6.git |
791 | F: arch/arm/mach-mx*/ | 791 | F: arch/arm/mach-mx*/ |
792 | F: arch/arm/mach-imx/ | ||
792 | F: arch/arm/plat-mxc/ | 793 | F: arch/arm/plat-mxc/ |
793 | 794 | ||
794 | ARM/FREESCALE IMX51 | 795 | ARM/FREESCALE IMX51 |
@@ -804,6 +805,13 @@ S: Maintained | |||
804 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git | 805 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git |
805 | F: arch/arm/mach-imx/*imx6* | 806 | F: arch/arm/mach-imx/*imx6* |
806 | 807 | ||
808 | ARM/FREESCALE MXS ARM ARCHITECTURE | ||
809 | M: Shawn Guo <shawn.guo@linaro.org> | ||
810 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
811 | S: Maintained | ||
812 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git | ||
813 | F: arch/arm/mach-mxs/ | ||
814 | |||
807 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT | 815 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT |
808 | M: Lennert Buytenhek <kernel@wantstofly.org> | 816 | M: Lennert Buytenhek <kernel@wantstofly.org> |
809 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 817 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1789,6 +1797,14 @@ F: include/net/cfg80211.h | |||
1789 | F: net/wireless/* | 1797 | F: net/wireless/* |
1790 | X: net/wireless/wext* | 1798 | X: net/wireless/wext* |
1791 | 1799 | ||
1800 | CHAR and MISC DRIVERS | ||
1801 | M: Arnd Bergmann <arnd@arndb.de> | ||
1802 | M: Greg Kroah-Hartman <greg@kroah.com> | ||
1803 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git | ||
1804 | S: Maintained | ||
1805 | F: drivers/char/* | ||
1806 | F: drivers/misc/* | ||
1807 | |||
1792 | CHECKPATCH | 1808 | CHECKPATCH |
1793 | M: Andy Whitcroft <apw@canonical.com> | 1809 | M: Andy Whitcroft <apw@canonical.com> |
1794 | S: Supported | 1810 | S: Supported |
@@ -3720,7 +3736,7 @@ F: fs/jbd2/ | |||
3720 | F: include/linux/jbd2.h | 3736 | F: include/linux/jbd2.h |
3721 | 3737 | ||
3722 | JSM Neo PCI based serial card | 3738 | JSM Neo PCI based serial card |
3723 | M: Breno Leitao <leitao@linux.vnet.ibm.com> | 3739 | M: Lucas Tavares <lucaskt@linux.vnet.ibm.com> |
3724 | L: linux-serial@vger.kernel.org | 3740 | L: linux-serial@vger.kernel.org |
3725 | S: Maintained | 3741 | S: Maintained |
3726 | F: drivers/tty/serial/jsm/ | 3742 | F: drivers/tty/serial/jsm/ |
@@ -5659,7 +5675,6 @@ F: drivers/media/video/*7146* | |||
5659 | F: include/media/*7146* | 5675 | F: include/media/*7146* |
5660 | 5676 | ||
5661 | SAMSUNG AUDIO (ASoC) DRIVERS | 5677 | SAMSUNG AUDIO (ASoC) DRIVERS |
5662 | M: Jassi Brar <jassisinghbrar@gmail.com> | ||
5663 | M: Sangbeom Kim <sbkim73@samsung.com> | 5678 | M: Sangbeom Kim <sbkim73@samsung.com> |
5664 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 5679 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
5665 | S: Supported | 5680 | S: Supported |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 2 | 2 | PATCHLEVEL = 2 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc4 |
5 | NAME = Saber-toothed Squirrel | 5 | NAME = Saber-toothed Squirrel |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983f..e084b7e981e8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1231,7 +1231,7 @@ config ARM_ERRATA_742231 | |||
1231 | capabilities of the processor. | 1231 | capabilities of the processor. |
1232 | 1232 | ||
1233 | config PL310_ERRATA_588369 | 1233 | config PL310_ERRATA_588369 |
1234 | bool "Clean & Invalidate maintenance operations do not invalidate clean lines" | 1234 | bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" |
1235 | depends on CACHE_L2X0 | 1235 | depends on CACHE_L2X0 |
1236 | help | 1236 | help |
1237 | The PL310 L2 cache controller implements three types of Clean & | 1237 | The PL310 L2 cache controller implements three types of Clean & |
@@ -1256,7 +1256,7 @@ config ARM_ERRATA_720789 | |||
1256 | entries regardless of the ASID. | 1256 | entries regardless of the ASID. |
1257 | 1257 | ||
1258 | config PL310_ERRATA_727915 | 1258 | config PL310_ERRATA_727915 |
1259 | bool "Background Clean & Invalidate by Way operation can cause data corruption" | 1259 | bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" |
1260 | depends on CACHE_L2X0 | 1260 | depends on CACHE_L2X0 |
1261 | help | 1261 | help |
1262 | PL310 implements the Clean & Invalidate by Way L2 cache maintenance | 1262 | PL310 implements the Clean & Invalidate by Way L2 cache maintenance |
@@ -1289,8 +1289,8 @@ config ARM_ERRATA_751472 | |||
1289 | operation is received by a CPU before the ICIALLUIS has completed, | 1289 | operation is received by a CPU before the ICIALLUIS has completed, |
1290 | potentially leading to corrupted entries in the cache or TLB. | 1290 | potentially leading to corrupted entries in the cache or TLB. |
1291 | 1291 | ||
1292 | config ARM_ERRATA_753970 | 1292 | config PL310_ERRATA_753970 |
1293 | bool "ARM errata: cache sync operation may be faulty" | 1293 | bool "PL310 errata: cache sync operation may be faulty" |
1294 | depends on CACHE_PL310 | 1294 | depends on CACHE_PL310 |
1295 | help | 1295 | help |
1296 | This option enables the workaround for the 753970 PL310 (r3p0) erratum. | 1296 | This option enables the workaround for the 753970 PL310 (r3p0) erratum. |
@@ -1352,6 +1352,18 @@ config ARM_ERRATA_764369 | |||
1352 | relevant cache maintenance functions and sets a specific bit | 1352 | relevant cache maintenance functions and sets a specific bit |
1353 | in the diagnostic control register of the SCU. | 1353 | in the diagnostic control register of the SCU. |
1354 | 1354 | ||
1355 | config PL310_ERRATA_769419 | ||
1356 | bool "PL310 errata: no automatic Store Buffer drain" | ||
1357 | depends on CACHE_L2X0 | ||
1358 | help | ||
1359 | On revisions of the PL310 prior to r3p2, the Store Buffer does | ||
1360 | not automatically drain. This can cause normal, non-cacheable | ||
1361 | writes to be retained when the memory system is idle, leading | ||
1362 | to suboptimal I/O performance for drivers using coherent DMA. | ||
1363 | This option adds a write barrier to the cpu_idle loop so that, | ||
1364 | on systems with an outer cache, the store buffer is drained | ||
1365 | explicitly. | ||
1366 | |||
1355 | endmenu | 1367 | endmenu |
1356 | 1368 | ||
1357 | source "arch/arm/common/Kconfig" | 1369 | source "arch/arm/common/Kconfig" |
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94f..410a546060a2 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic) | |||
526 | sizeof(u32)); | 526 | sizeof(u32)); |
527 | BUG_ON(!gic->saved_ppi_conf); | 527 | BUG_ON(!gic->saved_ppi_conf); |
528 | 528 | ||
529 | cpu_pm_register_notifier(&gic_notifier_block); | 529 | if (gic == &gic_data[0]) |
530 | cpu_pm_register_notifier(&gic_notifier_block); | ||
530 | } | 531 | } |
531 | #else | 532 | #else |
532 | static void __init gic_pm_init(struct gic_chip_data *gic) | 533 | static void __init gic_pm_init(struct gic_chip_data *gic) |
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start, | |||
581 | * For primary GICs, skip over SGIs. | 582 | * For primary GICs, skip over SGIs. |
582 | * For secondary GICs, skip over PPIs, too. | 583 | * For secondary GICs, skip over PPIs, too. |
583 | */ | 584 | */ |
585 | domain->hwirq_base = 32; | ||
584 | if (gic_nr == 0) { | 586 | if (gic_nr == 0) { |
585 | gic_cpu_base_addr = cpu_base; | 587 | gic_cpu_base_addr = cpu_base; |
586 | domain->hwirq_base = 16; | 588 | |
587 | if (irq_start > 0) | 589 | if ((irq_start & 31) > 0) { |
588 | irq_start = (irq_start & ~31) + 16; | 590 | domain->hwirq_base = 16; |
589 | } else | 591 | if (irq_start != -1) |
590 | domain->hwirq_base = 32; | 592 | irq_start = (irq_start & ~31) + 16; |
593 | } | ||
594 | } | ||
591 | 595 | ||
592 | /* | 596 | /* |
593 | * Find out how many interrupts are supported. | 597 | * Find out how many interrupts are supported. |
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd6..f407a6b35d3d 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | |||
1211 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | 1211 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); |
1212 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | 1212 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); |
1213 | 1213 | ||
1214 | ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT); | 1214 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); |
1215 | ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT); | 1215 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); |
1216 | 1216 | ||
1217 | ccr |= (rqc->swap << CC_SWAP_SHFT); | 1217 | ccr |= (rqc->swap << CC_SWAP_SHFT); |
1218 | 1218 | ||
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd) | |||
1623 | return -1; | 1623 | return -1; |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | static bool _chan_ns(const struct pl330_info *pi, int i) | ||
1627 | { | ||
1628 | return pi->pcfg.irq_ns & (1 << i); | ||
1629 | } | ||
1630 | |||
1626 | /* Upon success, returns IdentityToken for the | 1631 | /* Upon success, returns IdentityToken for the |
1627 | * allocated channel, NULL otherwise. | 1632 | * allocated channel, NULL otherwise. |
1628 | */ | 1633 | */ |
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi) | |||
1647 | 1652 | ||
1648 | for (i = 0; i < chans; i++) { | 1653 | for (i = 0; i < chans; i++) { |
1649 | thrd = &pl330->channels[i]; | 1654 | thrd = &pl330->channels[i]; |
1650 | if (thrd->free) { | 1655 | if ((thrd->free) && (!_manager_ns(thrd) || |
1656 | _chan_ns(pi, i))) { | ||
1651 | thrd->ev = _alloc_event(thrd); | 1657 | thrd->ev = _alloc_event(thrd); |
1652 | if (thrd->ev >= 0) { | 1658 | if (thrd->ev >= 0) { |
1653 | thrd->free = false; | 1659 | thrd->free = false; |
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd93363..8826eb218e73 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y | |||
38 | # CONFIG_IPV6 is not set | 38 | # CONFIG_IPV6 is not set |
39 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 39 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
40 | CONFIG_MTD=y | 40 | CONFIG_MTD=y |
41 | CONFIG_MTD_PARTITIONS=y | ||
42 | CONFIG_MTD_CMDLINE_PARTS=y | 41 | CONFIG_MTD_CMDLINE_PARTS=y |
43 | CONFIG_MTD_CHAR=y | 42 | CONFIG_MTD_CHAR=y |
44 | CONFIG_MTD_BLOCK=y | 43 | CONFIG_MTD_BLOCK=y |
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y | |||
52 | CONFIG_BLK_DEV_LOOP=y | 51 | CONFIG_BLK_DEV_LOOP=y |
53 | CONFIG_BLK_DEV_RAM=y | 52 | CONFIG_BLK_DEV_RAM=y |
54 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 53 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
55 | CONFIG_ATMEL_SSC=y | ||
56 | CONFIG_SCSI=y | 54 | CONFIG_SCSI=y |
57 | CONFIG_BLK_DEV_SD=y | 55 | CONFIG_BLK_DEV_SD=y |
58 | CONFIG_SCSI_MULTI_LUN=y | 56 | CONFIG_SCSI_MULTI_LUN=y |
59 | CONFIG_NETDEVICES=y | 57 | CONFIG_NETDEVICES=y |
60 | CONFIG_NET_ETHERNET=y | ||
61 | CONFIG_MII=y | 58 | CONFIG_MII=y |
62 | CONFIG_MACB=y | 59 | CONFIG_MACB=y |
63 | # CONFIG_NETDEV_1000 is not set | ||
64 | # CONFIG_NETDEV_10000 is not set | ||
65 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 60 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
66 | CONFIG_INPUT_EVDEV=y | 61 | CONFIG_INPUT_EVDEV=y |
67 | # CONFIG_INPUT_KEYBOARD is not set | 62 | # CONFIG_INPUT_KEYBOARD is not set |
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y | |||
81 | CONFIG_WATCHDOG_NOWAYOUT=y | 76 | CONFIG_WATCHDOG_NOWAYOUT=y |
82 | CONFIG_FB=y | 77 | CONFIG_FB=y |
83 | CONFIG_FB_ATMEL=y | 78 | CONFIG_FB_ATMEL=y |
84 | # CONFIG_VGA_CONSOLE is not set | ||
85 | CONFIG_LOGO=y | 79 | CONFIG_LOGO=y |
86 | # CONFIG_LOGO_LINUX_MONO is not set | 80 | # CONFIG_LOGO_LINUX_MONO is not set |
87 | # CONFIG_LOGO_LINUX_CLUT224 is not set | 81 | # CONFIG_LOGO_LINUX_CLUT224 is not set |
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m | |||
99 | CONFIG_RTC_CLASS=y | 93 | CONFIG_RTC_CLASS=y |
100 | CONFIG_RTC_DRV_AT91SAM9=y | 94 | CONFIG_RTC_DRV_AT91SAM9=y |
101 | CONFIG_EXT2_FS=y | 95 | CONFIG_EXT2_FS=y |
102 | CONFIG_INOTIFY=y | ||
103 | CONFIG_VFAT_FS=y | 96 | CONFIG_VFAT_FS=y |
104 | CONFIG_TMPFS=y | 97 | CONFIG_TMPFS=y |
105 | CONFIG_JFFS2_FS=y | 98 | CONFIG_JFFS2_FS=y |
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c985426..bbe4e1a1f5d8 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y | |||
5 | CONFIG_IKCONFIG=y | 5 | CONFIG_IKCONFIG=y |
6 | CONFIG_IKCONFIG_PROC=y | 6 | CONFIG_IKCONFIG_PROC=y |
7 | CONFIG_LOG_BUF_SHIFT=14 | 7 | CONFIG_LOG_BUF_SHIFT=14 |
8 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
9 | CONFIG_BLK_DEV_INITRD=y | 8 | CONFIG_BLK_DEV_INITRD=y |
10 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
11 | CONFIG_MODULE_FORCE_LOAD=y | 10 | CONFIG_MODULE_FORCE_LOAD=y |
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y | |||
56 | CONFIG_IP_PNP_DHCP=y | 55 | CONFIG_IP_PNP_DHCP=y |
57 | CONFIG_IP_PNP_BOOTP=y | 56 | CONFIG_IP_PNP_BOOTP=y |
58 | CONFIG_NET_IPIP=m | 57 | CONFIG_NET_IPIP=m |
59 | CONFIG_NET_IPGRE=m | ||
60 | CONFIG_INET_AH=m | 58 | CONFIG_INET_AH=m |
61 | CONFIG_INET_ESP=m | 59 | CONFIG_INET_ESP=m |
62 | CONFIG_INET_IPCOMP=m | 60 | CONFIG_INET_IPCOMP=m |
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m | |||
75 | CONFIG_BRIDGE=m | 73 | CONFIG_BRIDGE=m |
76 | CONFIG_VLAN_8021Q=m | 74 | CONFIG_VLAN_8021Q=m |
77 | CONFIG_BT=m | 75 | CONFIG_BT=m |
78 | CONFIG_BT_L2CAP=m | ||
79 | CONFIG_BT_SCO=m | ||
80 | CONFIG_BT_RFCOMM=m | ||
81 | CONFIG_BT_RFCOMM_TTY=y | ||
82 | CONFIG_BT_BNEP=m | ||
83 | CONFIG_BT_BNEP_MC_FILTER=y | ||
84 | CONFIG_BT_BNEP_PROTO_FILTER=y | ||
85 | CONFIG_BT_HIDP=m | ||
86 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 76 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
87 | CONFIG_MTD=y | 77 | CONFIG_MTD=y |
88 | CONFIG_MTD_CONCAT=y | ||
89 | CONFIG_MTD_PARTITIONS=y | ||
90 | CONFIG_MTD_CMDLINE_PARTS=y | 78 | CONFIG_MTD_CMDLINE_PARTS=y |
91 | CONFIG_MTD_AFS_PARTS=y | 79 | CONFIG_MTD_AFS_PARTS=y |
92 | CONFIG_MTD_CHAR=y | 80 | CONFIG_MTD_CHAR=y |
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y | |||
108 | CONFIG_BLK_DEV_NBD=y | 96 | CONFIG_BLK_DEV_NBD=y |
109 | CONFIG_BLK_DEV_RAM=y | 97 | CONFIG_BLK_DEV_RAM=y |
110 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 98 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
111 | CONFIG_ATMEL_TCLIB=y | ||
112 | CONFIG_EEPROM_LEGACY=m | ||
113 | CONFIG_SCSI=y | 99 | CONFIG_SCSI=y |
114 | CONFIG_BLK_DEV_SD=y | 100 | CONFIG_BLK_DEV_SD=y |
115 | CONFIG_BLK_DEV_SR=m | 101 | CONFIG_BLK_DEV_SR=m |
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y | |||
119 | # CONFIG_SCSI_LOWLEVEL is not set | 105 | # CONFIG_SCSI_LOWLEVEL is not set |
120 | CONFIG_NETDEVICES=y | 106 | CONFIG_NETDEVICES=y |
121 | CONFIG_TUN=m | 107 | CONFIG_TUN=m |
108 | CONFIG_ARM_AT91_ETHER=y | ||
122 | CONFIG_PHYLIB=y | 109 | CONFIG_PHYLIB=y |
123 | CONFIG_DAVICOM_PHY=y | 110 | CONFIG_DAVICOM_PHY=y |
124 | CONFIG_SMSC_PHY=y | 111 | CONFIG_SMSC_PHY=y |
125 | CONFIG_MICREL_PHY=y | 112 | CONFIG_MICREL_PHY=y |
126 | CONFIG_NET_ETHERNET=y | 113 | CONFIG_PPP=y |
127 | CONFIG_ARM_AT91_ETHER=y | 114 | CONFIG_PPP_BSDCOMP=y |
128 | # CONFIG_NETDEV_1000 is not set | 115 | CONFIG_PPP_DEFLATE=y |
129 | # CONFIG_NETDEV_10000 is not set | 116 | CONFIG_PPP_FILTER=y |
117 | CONFIG_PPP_MPPE=m | ||
118 | CONFIG_PPP_MULTILINK=y | ||
119 | CONFIG_PPPOE=m | ||
120 | CONFIG_PPP_ASYNC=y | ||
121 | CONFIG_SLIP=m | ||
122 | CONFIG_SLIP_COMPRESSED=y | ||
123 | CONFIG_SLIP_SMART=y | ||
124 | CONFIG_SLIP_MODE_SLIP6=y | ||
130 | CONFIG_USB_CATC=m | 125 | CONFIG_USB_CATC=m |
131 | CONFIG_USB_KAWETH=m | 126 | CONFIG_USB_KAWETH=m |
132 | CONFIG_USB_PEGASUS=m | 127 | CONFIG_USB_PEGASUS=m |
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m | |||
139 | CONFIG_USB_ALI_M5632=y | 134 | CONFIG_USB_ALI_M5632=y |
140 | CONFIG_USB_AN2720=y | 135 | CONFIG_USB_AN2720=y |
141 | CONFIG_USB_EPSON2888=y | 136 | CONFIG_USB_EPSON2888=y |
142 | CONFIG_PPP=y | ||
143 | CONFIG_PPP_MULTILINK=y | ||
144 | CONFIG_PPP_FILTER=y | ||
145 | CONFIG_PPP_ASYNC=y | ||
146 | CONFIG_PPP_DEFLATE=y | ||
147 | CONFIG_PPP_BSDCOMP=y | ||
148 | CONFIG_PPP_MPPE=m | ||
149 | CONFIG_PPPOE=m | ||
150 | CONFIG_SLIP=m | ||
151 | CONFIG_SLIP_COMPRESSED=y | ||
152 | CONFIG_SLIP_SMART=y | ||
153 | CONFIG_SLIP_MODE_SLIP6=y | ||
154 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 137 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
155 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 | 138 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 |
156 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 | 139 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 |
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y | |||
158 | CONFIG_KEYBOARD_GPIO=y | 141 | CONFIG_KEYBOARD_GPIO=y |
159 | # CONFIG_INPUT_MOUSE is not set | 142 | # CONFIG_INPUT_MOUSE is not set |
160 | CONFIG_INPUT_TOUCHSCREEN=y | 143 | CONFIG_INPUT_TOUCHSCREEN=y |
144 | CONFIG_LEGACY_PTY_COUNT=32 | ||
161 | CONFIG_SERIAL_ATMEL=y | 145 | CONFIG_SERIAL_ATMEL=y |
162 | CONFIG_SERIAL_ATMEL_CONSOLE=y | 146 | CONFIG_SERIAL_ATMEL_CONSOLE=y |
163 | CONFIG_LEGACY_PTY_COUNT=32 | ||
164 | CONFIG_HW_RANDOM=y | 147 | CONFIG_HW_RANDOM=y |
165 | CONFIG_I2C=y | 148 | CONFIG_I2C=y |
166 | CONFIG_I2C_CHARDEV=y | 149 | CONFIG_I2C_CHARDEV=y |
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y | |||
290 | CONFIG_NFS_V4=y | 273 | CONFIG_NFS_V4=y |
291 | CONFIG_ROOT_NFS=y | 274 | CONFIG_ROOT_NFS=y |
292 | CONFIG_NFSD=y | 275 | CONFIG_NFSD=y |
293 | CONFIG_SMB_FS=m | ||
294 | CONFIG_CIFS=m | 276 | CONFIG_CIFS=m |
295 | CONFIG_PARTITION_ADVANCED=y | 277 | CONFIG_PARTITION_ADVANCED=y |
296 | CONFIG_MAC_PARTITION=y | 278 | CONFIG_MAC_PARTITION=y |
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y | |||
335 | CONFIG_MAGIC_SYSRQ=y | 317 | CONFIG_MAGIC_SYSRQ=y |
336 | CONFIG_DEBUG_FS=y | 318 | CONFIG_DEBUG_FS=y |
337 | CONFIG_DEBUG_KERNEL=y | 319 | CONFIG_DEBUG_KERNEL=y |
338 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
339 | # CONFIG_FTRACE is not set | 320 | # CONFIG_FTRACE is not set |
340 | CONFIG_CRYPTO_PCBC=y | 321 | CONFIG_CRYPTO_PCBC=y |
341 | CONFIG_CRYPTO_SHA1=y | 322 | CONFIG_CRYPTO_SHA1=y |
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413bf..505b3765f87e 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y | |||
12 | # CONFIG_IOSCHED_CFQ is not set | 12 | # CONFIG_IOSCHED_CFQ is not set |
13 | CONFIG_ARCH_AT91=y | 13 | CONFIG_ARCH_AT91=y |
14 | CONFIG_ARCH_AT91SAM9260=y | 14 | CONFIG_ARCH_AT91SAM9260=y |
15 | CONFIG_ARCH_AT91SAM9260_SAM9XE=y | ||
15 | CONFIG_MACH_AT91SAM9260EK=y | 16 | CONFIG_MACH_AT91SAM9260EK=y |
17 | CONFIG_MACH_CAM60=y | ||
18 | CONFIG_MACH_SAM9_L9260=y | ||
19 | CONFIG_MACH_AFEB9260=y | ||
20 | CONFIG_MACH_USB_A9260=y | ||
21 | CONFIG_MACH_QIL_A9260=y | ||
22 | CONFIG_MACH_CPU9260=y | ||
23 | CONFIG_MACH_FLEXIBITY=y | ||
24 | CONFIG_MACH_SNAPPER_9260=y | ||
25 | CONFIG_MACH_AT91SAM_DT=y | ||
16 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y | 26 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y |
17 | # CONFIG_ARM_THUMB is not set | 27 | # CONFIG_ARM_THUMB is not set |
18 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 28 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
19 | CONFIG_ZBOOT_ROM_BSS=0x0 | 29 | CONFIG_ZBOOT_ROM_BSS=0x0 |
30 | CONFIG_ARM_APPENDED_DTB=y | ||
31 | CONFIG_ARM_ATAG_DTB_COMPAT=y | ||
20 | CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" | 32 | CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" |
21 | CONFIG_FPE_NWFPE=y | 33 | CONFIG_FPE_NWFPE=y |
22 | CONFIG_NET=y | 34 | CONFIG_NET=y |
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y | |||
33 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 45 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
34 | CONFIG_BLK_DEV_RAM=y | 46 | CONFIG_BLK_DEV_RAM=y |
35 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 47 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
36 | CONFIG_ATMEL_SSC=y | ||
37 | CONFIG_SCSI=y | 48 | CONFIG_SCSI=y |
38 | CONFIG_BLK_DEV_SD=y | 49 | CONFIG_BLK_DEV_SD=y |
39 | CONFIG_SCSI_MULTI_LUN=y | 50 | CONFIG_SCSI_MULTI_LUN=y |
40 | CONFIG_NETDEVICES=y | 51 | CONFIG_NETDEVICES=y |
41 | CONFIG_NET_ETHERNET=y | ||
42 | CONFIG_MII=y | 52 | CONFIG_MII=y |
43 | CONFIG_MACB=y | 53 | CONFIG_MACB=y |
44 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 54 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y | |||
55 | CONFIG_WATCHDOG=y | 65 | CONFIG_WATCHDOG=y |
56 | CONFIG_WATCHDOG_NOWAYOUT=y | 66 | CONFIG_WATCHDOG_NOWAYOUT=y |
57 | CONFIG_AT91SAM9X_WATCHDOG=y | 67 | CONFIG_AT91SAM9X_WATCHDOG=y |
58 | # CONFIG_VGA_CONSOLE is not set | ||
59 | # CONFIG_USB_HID is not set | 68 | # CONFIG_USB_HID is not set |
60 | CONFIG_USB=y | 69 | CONFIG_USB=y |
61 | CONFIG_USB_DEVICEFS=y | 70 | CONFIG_USB_DEVICEFS=y |
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m | |||
71 | CONFIG_RTC_CLASS=y | 80 | CONFIG_RTC_CLASS=y |
72 | CONFIG_RTC_DRV_AT91SAM9=y | 81 | CONFIG_RTC_DRV_AT91SAM9=y |
73 | CONFIG_EXT2_FS=y | 82 | CONFIG_EXT2_FS=y |
74 | CONFIG_INOTIFY=y | ||
75 | CONFIG_VFAT_FS=y | 83 | CONFIG_VFAT_FS=y |
76 | CONFIG_TMPFS=y | 84 | CONFIG_TMPFS=y |
77 | CONFIG_CRAMFS=y | 85 | CONFIG_CRAMFS=y |
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d79297..9123568d9a8d 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y | |||
14 | CONFIG_ARCH_AT91SAM9G20=y | 14 | CONFIG_ARCH_AT91SAM9G20=y |
15 | CONFIG_MACH_AT91SAM9G20EK=y | 15 | CONFIG_MACH_AT91SAM9G20EK=y |
16 | CONFIG_MACH_AT91SAM9G20EK_2MMC=y | 16 | CONFIG_MACH_AT91SAM9G20EK_2MMC=y |
17 | CONFIG_MACH_CPU9G20=y | ||
18 | CONFIG_MACH_ACMENETUSFOXG20=y | ||
19 | CONFIG_MACH_PORTUXG20=y | ||
20 | CONFIG_MACH_STAMP9G20=y | ||
21 | CONFIG_MACH_PCONTROL_G20=y | ||
22 | CONFIG_MACH_GSIA18S=y | ||
23 | CONFIG_MACH_USB_A9G20=y | ||
24 | CONFIG_MACH_SNAPPER_9260=y | ||
25 | CONFIG_MACH_AT91SAM_DT=y | ||
17 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y | 26 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y |
18 | # CONFIG_ARM_THUMB is not set | 27 | # CONFIG_ARM_THUMB is not set |
19 | CONFIG_AEABI=y | 28 | CONFIG_AEABI=y |
@@ -21,9 +30,10 @@ CONFIG_LEDS=y | |||
21 | CONFIG_LEDS_CPU=y | 30 | CONFIG_LEDS_CPU=y |
22 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 31 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
23 | CONFIG_ZBOOT_ROM_BSS=0x0 | 32 | CONFIG_ZBOOT_ROM_BSS=0x0 |
33 | CONFIG_ARM_APPENDED_DTB=y | ||
34 | CONFIG_ARM_ATAG_DTB_COMPAT=y | ||
24 | CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" | 35 | CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" |
25 | CONFIG_FPE_NWFPE=y | 36 | CONFIG_FPE_NWFPE=y |
26 | CONFIG_PM=y | ||
27 | CONFIG_NET=y | 37 | CONFIG_NET=y |
28 | CONFIG_PACKET=y | 38 | CONFIG_PACKET=y |
29 | CONFIG_UNIX=y | 39 | CONFIG_UNIX=y |
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y | |||
37 | # CONFIG_IPV6 is not set | 47 | # CONFIG_IPV6 is not set |
38 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 48 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
39 | CONFIG_MTD=y | 49 | CONFIG_MTD=y |
40 | CONFIG_MTD_CONCAT=y | ||
41 | CONFIG_MTD_PARTITIONS=y | ||
42 | CONFIG_MTD_CMDLINE_PARTS=y | 50 | CONFIG_MTD_CMDLINE_PARTS=y |
43 | CONFIG_MTD_CHAR=y | 51 | CONFIG_MTD_CHAR=y |
44 | CONFIG_MTD_BLOCK=y | 52 | CONFIG_MTD_BLOCK=y |
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y | |||
48 | CONFIG_BLK_DEV_LOOP=y | 56 | CONFIG_BLK_DEV_LOOP=y |
49 | CONFIG_BLK_DEV_RAM=y | 57 | CONFIG_BLK_DEV_RAM=y |
50 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 58 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
51 | CONFIG_ATMEL_SSC=y | ||
52 | CONFIG_SCSI=y | 59 | CONFIG_SCSI=y |
53 | CONFIG_BLK_DEV_SD=y | 60 | CONFIG_BLK_DEV_SD=y |
54 | CONFIG_SCSI_MULTI_LUN=y | 61 | CONFIG_SCSI_MULTI_LUN=y |
55 | # CONFIG_SCSI_LOWLEVEL is not set | 62 | # CONFIG_SCSI_LOWLEVEL is not set |
56 | CONFIG_NETDEVICES=y | 63 | CONFIG_NETDEVICES=y |
57 | CONFIG_NET_ETHERNET=y | ||
58 | CONFIG_MII=y | 64 | CONFIG_MII=y |
59 | CONFIG_MACB=y | 65 | CONFIG_MACB=y |
60 | # CONFIG_NETDEV_1000 is not set | ||
61 | # CONFIG_NETDEV_10000 is not set | ||
62 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 66 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
63 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=320 | 67 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=320 |
64 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240 | 68 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240 |
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y | |||
66 | # CONFIG_KEYBOARD_ATKBD is not set | 70 | # CONFIG_KEYBOARD_ATKBD is not set |
67 | CONFIG_KEYBOARD_GPIO=y | 71 | CONFIG_KEYBOARD_GPIO=y |
68 | # CONFIG_INPUT_MOUSE is not set | 72 | # CONFIG_INPUT_MOUSE is not set |
73 | CONFIG_LEGACY_PTY_COUNT=16 | ||
69 | CONFIG_SERIAL_ATMEL=y | 74 | CONFIG_SERIAL_ATMEL=y |
70 | CONFIG_SERIAL_ATMEL_CONSOLE=y | 75 | CONFIG_SERIAL_ATMEL_CONSOLE=y |
71 | CONFIG_LEGACY_PTY_COUNT=16 | ||
72 | CONFIG_HW_RANDOM=y | 76 | CONFIG_HW_RANDOM=y |
73 | CONFIG_SPI=y | 77 | CONFIG_SPI=y |
74 | CONFIG_SPI_ATMEL=y | 78 | CONFIG_SPI_ATMEL=y |
75 | CONFIG_SPI_SPIDEV=y | 79 | CONFIG_SPI_SPIDEV=y |
76 | # CONFIG_HWMON is not set | 80 | # CONFIG_HWMON is not set |
77 | # CONFIG_VGA_CONSOLE is not set | ||
78 | CONFIG_SOUND=y | 81 | CONFIG_SOUND=y |
79 | CONFIG_SND=y | 82 | CONFIG_SND=y |
80 | CONFIG_SND_SEQUENCER=y | 83 | CONFIG_SND_SEQUENCER=y |
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y | |||
82 | CONFIG_SND_PCM_OSS=y | 85 | CONFIG_SND_PCM_OSS=y |
83 | CONFIG_SND_SEQUENCER_OSS=y | 86 | CONFIG_SND_SEQUENCER_OSS=y |
84 | # CONFIG_SND_VERBOSE_PROCFS is not set | 87 | # CONFIG_SND_VERBOSE_PROCFS is not set |
85 | CONFIG_SND_AT73C213=y | ||
86 | CONFIG_USB=y | 88 | CONFIG_USB=y |
87 | CONFIG_USB_DEVICEFS=y | 89 | CONFIG_USB_DEVICEFS=y |
88 | # CONFIG_USB_DEVICE_CLASS is not set | 90 | # CONFIG_USB_DEVICE_CLASS is not set |
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y | |||
105 | CONFIG_RTC_CLASS=y | 107 | CONFIG_RTC_CLASS=y |
106 | CONFIG_RTC_DRV_AT91SAM9=y | 108 | CONFIG_RTC_DRV_AT91SAM9=y |
107 | CONFIG_EXT2_FS=y | 109 | CONFIG_EXT2_FS=y |
108 | CONFIG_INOTIFY=y | ||
109 | CONFIG_MSDOS_FS=y | 110 | CONFIG_MSDOS_FS=y |
110 | CONFIG_VFAT_FS=y | 111 | CONFIG_VFAT_FS=y |
111 | CONFIG_TMPFS=y | 112 | CONFIG_TMPFS=y |
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4b..606d48f3b8f8 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y | |||
18 | CONFIG_ARCH_AT91=y | 18 | CONFIG_ARCH_AT91=y |
19 | CONFIG_ARCH_AT91SAM9G45=y | 19 | CONFIG_ARCH_AT91SAM9G45=y |
20 | CONFIG_MACH_AT91SAM9M10G45EK=y | 20 | CONFIG_MACH_AT91SAM9M10G45EK=y |
21 | CONFIG_MACH_AT91SAM_DT=y | ||
21 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y | 22 | CONFIG_AT91_PROGRAMMABLE_CLOCKS=y |
22 | CONFIG_AT91_SLOW_CLOCK=y | 23 | CONFIG_AT91_SLOW_CLOCK=y |
23 | CONFIG_AEABI=y | 24 | CONFIG_AEABI=y |
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y | |||
73 | # CONFIG_SCSI_LOWLEVEL is not set | 74 | # CONFIG_SCSI_LOWLEVEL is not set |
74 | CONFIG_NETDEVICES=y | 75 | CONFIG_NETDEVICES=y |
75 | CONFIG_MII=y | 76 | CONFIG_MII=y |
76 | CONFIG_DAVICOM_PHY=y | ||
77 | CONFIG_NET_ETHERNET=y | ||
78 | CONFIG_MACB=y | 77 | CONFIG_MACB=y |
79 | # CONFIG_NETDEV_1000 is not set | 78 | CONFIG_DAVICOM_PHY=y |
80 | # CONFIG_NETDEV_10000 is not set | ||
81 | CONFIG_LIBERTAS_THINFIRM=m | 79 | CONFIG_LIBERTAS_THINFIRM=m |
82 | CONFIG_LIBERTAS_THINFIRM_USB=m | 80 | CONFIG_LIBERTAS_THINFIRM_USB=m |
83 | CONFIG_AT76C50X_USB=m | 81 | CONFIG_AT76C50X_USB=m |
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y | |||
131 | CONFIG_SPI=y | 129 | CONFIG_SPI=y |
132 | CONFIG_SPI_ATMEL=y | 130 | CONFIG_SPI_ATMEL=y |
133 | # CONFIG_HWMON is not set | 131 | # CONFIG_HWMON is not set |
134 | # CONFIG_MFD_SUPPORT is not set | ||
135 | CONFIG_FB=y | 132 | CONFIG_FB=y |
136 | CONFIG_FB_ATMEL=y | 133 | CONFIG_FB_ATMEL=y |
137 | CONFIG_FB_UDL=m | 134 | CONFIG_FB_UDL=m |
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03fc..ad562ee64209 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y | |||
23 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
24 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 24 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
25 | CONFIG_MTD=y | 25 | CONFIG_MTD=y |
26 | CONFIG_MTD_CONCAT=y | ||
27 | CONFIG_MTD_PARTITIONS=y | ||
28 | CONFIG_MTD_CMDLINE_PARTS=y | 26 | CONFIG_MTD_CMDLINE_PARTS=y |
29 | CONFIG_MTD_CHAR=y | 27 | CONFIG_MTD_CHAR=y |
30 | CONFIG_MTD_BLOCK=y | 28 | CONFIG_MTD_BLOCK=y |
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y | |||
35 | CONFIG_BLK_DEV_RAM=y | 33 | CONFIG_BLK_DEV_RAM=y |
36 | CONFIG_BLK_DEV_RAM_COUNT=4 | 34 | CONFIG_BLK_DEV_RAM_COUNT=4 |
37 | CONFIG_BLK_DEV_RAM_SIZE=24576 | 35 | CONFIG_BLK_DEV_RAM_SIZE=24576 |
38 | CONFIG_ATMEL_SSC=y | ||
39 | CONFIG_SCSI=y | 36 | CONFIG_SCSI=y |
40 | CONFIG_BLK_DEV_SD=y | 37 | CONFIG_BLK_DEV_SD=y |
41 | CONFIG_SCSI_MULTI_LUN=y | 38 | CONFIG_SCSI_MULTI_LUN=y |
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y | |||
62 | CONFIG_AT91SAM9X_WATCHDOG=y | 59 | CONFIG_AT91SAM9X_WATCHDOG=y |
63 | CONFIG_FB=y | 60 | CONFIG_FB=y |
64 | CONFIG_FB_ATMEL=y | 61 | CONFIG_FB_ATMEL=y |
65 | # CONFIG_VGA_CONSOLE is not set | ||
66 | CONFIG_MMC=y | 62 | CONFIG_MMC=y |
67 | CONFIG_MMC_AT91=m | 63 | CONFIG_MMC_AT91=m |
68 | CONFIG_RTC_CLASS=y | 64 | CONFIG_RTC_CLASS=y |
69 | CONFIG_RTC_DRV_AT91SAM9=y | 65 | CONFIG_RTC_DRV_AT91SAM9=y |
70 | CONFIG_EXT2_FS=y | 66 | CONFIG_EXT2_FS=y |
71 | CONFIG_INOTIFY=y | ||
72 | CONFIG_MSDOS_FS=y | 67 | CONFIG_MSDOS_FS=y |
73 | CONFIG_VFAT_FS=y | 68 | CONFIG_VFAT_FS=y |
74 | CONFIG_TMPFS=y | 69 | CONFIG_TMPFS=y |
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346ed..d95763d5f0d8 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y | |||
287 | # CONFIG_USB_DEVICE_CLASS is not set | 287 | # CONFIG_USB_DEVICE_CLASS is not set |
288 | CONFIG_USB_OHCI_HCD=y | 288 | CONFIG_USB_OHCI_HCD=y |
289 | CONFIG_USB_GADGET=y | 289 | CONFIG_USB_GADGET=y |
290 | CONFIG_USB_GADGET_PXA27X=y | 290 | CONFIG_USB_PXA27X=y |
291 | CONFIG_USB_ETH=m | 291 | CONFIG_USB_ETH=m |
292 | # CONFIG_USB_ETH_RNDIS is not set | 292 | # CONFIG_USB_ETH_RNDIS is not set |
293 | CONFIG_MMC=y | 293 | CONFIG_MMC=y |
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af034..fd996bb13022 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y | |||
263 | # CONFIG_USB_DEVICE_CLASS is not set | 263 | # CONFIG_USB_DEVICE_CLASS is not set |
264 | CONFIG_USB_OHCI_HCD=y | 264 | CONFIG_USB_OHCI_HCD=y |
265 | CONFIG_USB_GADGET=y | 265 | CONFIG_USB_GADGET=y |
266 | CONFIG_USB_GADGET_PXA27X=y | 266 | CONFIG_USB_PXA27X=y |
267 | CONFIG_USB_ETH=m | 267 | CONFIG_USB_ETH=m |
268 | # CONFIG_USB_ETH_RNDIS is not set | 268 | # CONFIG_USB_ETH_RNDIS is not set |
269 | CONFIG_MMC=y | 269 | CONFIG_MMC=y |
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig index a88e64d4e9a5..443675d317e6 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig | |||
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m | |||
132 | CONFIG_USB_OHCI_HCD=y | 132 | CONFIG_USB_OHCI_HCD=y |
133 | CONFIG_USB_GADGET=y | 133 | CONFIG_USB_GADGET=y |
134 | CONFIG_USB_GADGET_VBUS_DRAW=500 | 134 | CONFIG_USB_GADGET_VBUS_DRAW=500 |
135 | CONFIG_USB_GADGET_PXA27X=y | 135 | CONFIG_USB_PXA27X=y |
136 | CONFIG_USB_ETH=m | 136 | CONFIG_USB_ETH=m |
137 | # CONFIG_USB_ETH_RNDIS is not set | 137 | # CONFIG_USB_ETH_RNDIS is not set |
138 | CONFIG_USB_GADGETFS=m | 138 | CONFIG_USB_GADGETFS=m |
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index 7b63462b349d..a7e777581378 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_MACH_SX1=y | |||
48 | CONFIG_MACH_NOKIA770=y | 48 | CONFIG_MACH_NOKIA770=y |
49 | CONFIG_MACH_AMS_DELTA=y | 49 | CONFIG_MACH_AMS_DELTA=y |
50 | CONFIG_MACH_OMAP_GENERIC=y | 50 | CONFIG_MACH_OMAP_GENERIC=y |
51 | CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y | ||
52 | CONFIG_OMAP_ARM_216MHZ=y | 51 | CONFIG_OMAP_ARM_216MHZ=y |
53 | CONFIG_OMAP_ARM_195MHZ=y | 52 | CONFIG_OMAP_ARM_195MHZ=y |
54 | CONFIG_OMAP_ARM_192MHZ=y | 53 | CONFIG_OMAP_ARM_192MHZ=y |
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig index 4a5a12681be2..374000ec4e4e 100644 --- a/arch/arm/configs/u300_defconfig +++ b/arch/arm/configs/u300_defconfig | |||
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y | |||
14 | CONFIG_ARCH_U300=y | 14 | CONFIG_ARCH_U300=y |
15 | CONFIG_MACH_U300=y | 15 | CONFIG_MACH_U300=y |
16 | CONFIG_MACH_U300_BS335=y | 16 | CONFIG_MACH_U300_BS335=y |
17 | CONFIG_MACH_U300_DUAL_RAM=y | ||
18 | CONFIG_U300_DEBUG=y | ||
19 | CONFIG_MACH_U300_SPIDUMMY=y | 17 | CONFIG_MACH_U300_SPIDUMMY=y |
20 | CONFIG_NO_HZ=y | 18 | CONFIG_NO_HZ=y |
21 | CONFIG_HIGH_RES_TIMERS=y | 19 | CONFIG_HIGH_RES_TIMERS=y |
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0 | |||
26 | CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072" | 24 | CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072" |
27 | CONFIG_CPU_IDLE=y | 25 | CONFIG_CPU_IDLE=y |
28 | CONFIG_FPE_NWFPE=y | 26 | CONFIG_FPE_NWFPE=y |
29 | CONFIG_PM=y | ||
30 | # CONFIG_SUSPEND is not set | 27 | # CONFIG_SUSPEND is not set |
31 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 28 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
32 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 29 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
33 | # CONFIG_MISC_DEVICES is not set | 30 | CONFIG_MTD=y |
31 | CONFIG_MTD_CMDLINE_PARTS=y | ||
32 | CONFIG_MTD_NAND=y | ||
33 | CONFIG_MTD_NAND_FSMC=y | ||
34 | # CONFIG_INPUT_MOUSEDEV is not set | 34 | # CONFIG_INPUT_MOUSEDEV is not set |
35 | CONFIG_INPUT_EVDEV=y | 35 | CONFIG_INPUT_EVDEV=y |
36 | # CONFIG_KEYBOARD_ATKBD is not set | 36 | # CONFIG_KEYBOARD_ATKBD is not set |
37 | # CONFIG_INPUT_MOUSE is not set | 37 | # CONFIG_INPUT_MOUSE is not set |
38 | # CONFIG_SERIO is not set | 38 | # CONFIG_SERIO is not set |
39 | CONFIG_LEGACY_PTY_COUNT=16 | ||
39 | CONFIG_SERIAL_AMBA_PL011=y | 40 | CONFIG_SERIAL_AMBA_PL011=y |
40 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y | 41 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y |
41 | CONFIG_LEGACY_PTY_COUNT=16 | ||
42 | # CONFIG_HW_RANDOM is not set | 42 | # CONFIG_HW_RANDOM is not set |
43 | CONFIG_I2C=y | 43 | CONFIG_I2C=y |
44 | # CONFIG_HWMON is not set | 44 | # CONFIG_HWMON is not set |
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y | |||
51 | # CONFIG_HID_SUPPORT is not set | 51 | # CONFIG_HID_SUPPORT is not set |
52 | # CONFIG_USB_SUPPORT is not set | 52 | # CONFIG_USB_SUPPORT is not set |
53 | CONFIG_MMC=y | 53 | CONFIG_MMC=y |
54 | CONFIG_MMC_CLKGATE=y | ||
54 | CONFIG_MMC_ARMMMCI=y | 55 | CONFIG_MMC_ARMMMCI=y |
55 | CONFIG_RTC_CLASS=y | 56 | CONFIG_RTC_CLASS=y |
56 | # CONFIG_RTC_HCTOSYS is not set | 57 | # CONFIG_RTC_HCTOSYS is not set |
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y | |||
65 | CONFIG_NLS_ISO8859_1=y | 66 | CONFIG_NLS_ISO8859_1=y |
66 | CONFIG_PRINTK_TIME=y | 67 | CONFIG_PRINTK_TIME=y |
67 | CONFIG_DEBUG_FS=y | 68 | CONFIG_DEBUG_FS=y |
68 | CONFIG_DEBUG_KERNEL=y | ||
69 | # CONFIG_SCHED_DEBUG is not set | 69 | # CONFIG_SCHED_DEBUG is not set |
70 | CONFIG_TIMER_STATS=y | 70 | CONFIG_TIMER_STATS=y |
71 | # CONFIG_DEBUG_PREEMPT is not set | 71 | # CONFIG_DEBUG_PREEMPT is not set |
72 | CONFIG_DEBUG_INFO=y | 72 | CONFIG_DEBUG_INFO=y |
73 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
74 | # CONFIG_CRC32 is not set | 73 | # CONFIG_CRC32 is not set |
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 97d31a4663da..2d7b6e7b7271 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig | |||
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y | |||
10 | CONFIG_ARCH_U8500=y | 10 | CONFIG_ARCH_U8500=y |
11 | CONFIG_UX500_SOC_DB5500=y | 11 | CONFIG_UX500_SOC_DB5500=y |
12 | CONFIG_UX500_SOC_DB8500=y | 12 | CONFIG_UX500_SOC_DB8500=y |
13 | CONFIG_MACH_U8500=y | 13 | CONFIG_MACH_HREFV60=y |
14 | CONFIG_MACH_SNOWBALL=y | 14 | CONFIG_MACH_SNOWBALL=y |
15 | CONFIG_MACH_U5500=y | 15 | CONFIG_MACH_U5500=y |
16 | CONFIG_NO_HZ=y | 16 | CONFIG_NO_HZ=y |
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y | |||
24 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 24 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
25 | CONFIG_VFP=y | 25 | CONFIG_VFP=y |
26 | CONFIG_NEON=y | 26 | CONFIG_NEON=y |
27 | CONFIG_PM_RUNTIME=y | ||
27 | CONFIG_NET=y | 28 | CONFIG_NET=y |
28 | CONFIG_PACKET=y | 29 | CONFIG_PACKET=y |
29 | CONFIG_UNIX=y | 30 | CONFIG_UNIX=y |
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y | |||
41 | CONFIG_AB8500_PWM=y | 42 | CONFIG_AB8500_PWM=y |
42 | CONFIG_SENSORS_BH1780=y | 43 | CONFIG_SENSORS_BH1780=y |
43 | CONFIG_NETDEVICES=y | 44 | CONFIG_NETDEVICES=y |
44 | CONFIG_SMSC_PHY=y | ||
45 | CONFIG_NET_ETHERNET=y | ||
46 | CONFIG_SMSC911X=y | 45 | CONFIG_SMSC911X=y |
47 | # CONFIG_NETDEV_1000 is not set | 46 | CONFIG_SMSC_PHY=y |
48 | # CONFIG_NETDEV_10000 is not set | ||
49 | # CONFIG_WLAN is not set | 47 | # CONFIG_WLAN is not set |
50 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 48 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
51 | CONFIG_INPUT_EVDEV=y | 49 | CONFIG_INPUT_EVDEV=y |
@@ -72,15 +70,12 @@ CONFIG_SPI=y | |||
72 | CONFIG_SPI_PL022=y | 70 | CONFIG_SPI_PL022=y |
73 | CONFIG_GPIO_STMPE=y | 71 | CONFIG_GPIO_STMPE=y |
74 | CONFIG_GPIO_TC3589X=y | 72 | CONFIG_GPIO_TC3589X=y |
75 | # CONFIG_HWMON is not set | ||
76 | CONFIG_MFD_STMPE=y | 73 | CONFIG_MFD_STMPE=y |
77 | CONFIG_MFD_TC3589X=y | 74 | CONFIG_MFD_TC3589X=y |
75 | CONFIG_AB5500_CORE=y | ||
78 | CONFIG_AB8500_CORE=y | 76 | CONFIG_AB8500_CORE=y |
79 | CONFIG_REGULATOR_AB8500=y | 77 | CONFIG_REGULATOR_AB8500=y |
80 | # CONFIG_HID_SUPPORT is not set | 78 | # CONFIG_HID_SUPPORT is not set |
81 | CONFIG_USB_MUSB_HDRC=y | ||
82 | CONFIG_USB_GADGET_MUSB_HDRC=y | ||
83 | CONFIG_MUSB_PIO_ONLY=y | ||
84 | CONFIG_USB_GADGET=y | 79 | CONFIG_USB_GADGET=y |
85 | CONFIG_AB8500_USB=y | 80 | CONFIG_AB8500_USB=y |
86 | CONFIG_MMC=y | 81 | CONFIG_MMC=y |
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y | |||
97 | CONFIG_STE_DMA40=y | 92 | CONFIG_STE_DMA40=y |
98 | CONFIG_STAGING=y | 93 | CONFIG_STAGING=y |
99 | CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y | 94 | CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y |
95 | CONFIG_HSEM_U8500=y | ||
100 | CONFIG_EXT2_FS=y | 96 | CONFIG_EXT2_FS=y |
101 | CONFIG_EXT2_FS_XATTR=y | 97 | CONFIG_EXT2_FS_XATTR=y |
102 | CONFIG_EXT2_FS_POSIX_ACL=y | 98 | CONFIG_EXT2_FS_POSIX_ACL=y |
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig index 59577ad3f4ef..547a3c1e59db 100644 --- a/arch/arm/configs/zeus_defconfig +++ b/arch/arm/configs/zeus_defconfig | |||
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m | |||
140 | CONFIG_USB_SERIAL_GENERIC=y | 140 | CONFIG_USB_SERIAL_GENERIC=y |
141 | CONFIG_USB_SERIAL_MCT_U232=m | 141 | CONFIG_USB_SERIAL_MCT_U232=m |
142 | CONFIG_USB_GADGET=m | 142 | CONFIG_USB_GADGET=m |
143 | CONFIG_USB_GADGET_PXA27X=y | 143 | CONFIG_USB_PXA27X=y |
144 | CONFIG_USB_ETH=m | 144 | CONFIG_USB_ETH=m |
145 | CONFIG_USB_GADGETFS=m | 145 | CONFIG_USB_GADGETFS=m |
146 | CONFIG_USB_FILE_STORAGE=m | 146 | CONFIG_USB_FILE_STORAGE=m |
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 71d99b83cdb9..0bda22c094a6 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h | |||
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type); | |||
55 | extern void | 55 | extern void |
56 | release_pmu(enum arm_pmu_type type); | 56 | release_pmu(enum arm_pmu_type type); |
57 | 57 | ||
58 | /** | ||
59 | * init_pmu() - Initialise the PMU. | ||
60 | * | ||
61 | * Initialise the system ready for PMU enabling. This should typically set the | ||
62 | * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do | ||
63 | * the actual hardware initialisation. | ||
64 | */ | ||
65 | extern int | ||
66 | init_pmu(enum arm_pmu_type type); | ||
67 | |||
68 | #else /* CONFIG_CPU_HAS_PMU */ | 58 | #else /* CONFIG_CPU_HAS_PMU */ |
69 | 59 | ||
70 | #include <linux/err.h> | 60 | #include <linux/err.h> |
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index a7e457ed27c3..58b8b84adcd2 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h | |||
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS]; | |||
25 | 25 | ||
26 | void init_cpu_topology(void); | 26 | void init_cpu_topology(void); |
27 | void store_cpu_topology(unsigned int cpuid); | 27 | void store_cpu_topology(unsigned int cpuid); |
28 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu); | 28 | const struct cpumask *cpu_coregroup_mask(int cpu); |
29 | 29 | ||
30 | #else | 30 | #else |
31 | 31 | ||
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9ad50c4208ae..b145f16c91bc 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -497,7 +497,7 @@ ENDPROC(__und_usr) | |||
497 | .popsection | 497 | .popsection |
498 | .pushsection __ex_table,"a" | 498 | .pushsection __ex_table,"a" |
499 | .long 1b, 4b | 499 | .long 1b, 4b |
500 | #if __LINUX_ARM_ARCH__ >= 7 | 500 | #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 |
501 | .long 2b, 4b | 501 | .long 2b, 4b |
502 | .long 3b, 4b | 502 | .long 3b, 4b |
503 | #endif | 503 | #endif |
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c index 9fe8910308af..8a30c89da70e 100644 --- a/arch/arm/kernel/kprobes-arm.c +++ b/arch/arm/kernel/kprobes-arm.c | |||
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = { | |||
519 | static const union decode_item arm_cccc_0001_____1001_table[] = { | 519 | static const union decode_item arm_cccc_0001_____1001_table[] = { |
520 | /* Synchronization primitives */ | 520 | /* Synchronization primitives */ |
521 | 521 | ||
522 | #if __LINUX_ARM_ARCH__ < 6 | ||
523 | /* Deprecated on ARMv6 and may be UNDEFINED on v7 */ | ||
522 | /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ | 524 | /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ |
523 | DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, | 525 | DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, |
524 | REGS(NOPC, NOPC, 0, 0, NOPC)), | 526 | REGS(NOPC, NOPC, 0, 0, NOPC)), |
525 | 527 | #endif | |
526 | /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ | 528 | /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ |
527 | /* And unallocated instructions... */ | 529 | /* And unallocated instructions... */ |
528 | DECODE_END | 530 | DECODE_END |
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index fc82de8bdcce..ba32b393b3f0 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c | |||
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void) | |||
427 | 427 | ||
428 | TEST_GROUP("Synchronization primitives") | 428 | TEST_GROUP("Synchronization primitives") |
429 | 429 | ||
430 | /* | 430 | #if __LINUX_ARM_ARCH__ < 6 |
431 | * Use hard coded constants for SWP instructions to avoid warnings | 431 | TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]") |
432 | * about deprecated instructions. | 432 | TEST_R( "swpvs r0, r",1,VAL1,", [sp]") |
433 | */ | 433 | TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]") |
434 | TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]") | 434 | #else |
435 | TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]") | 435 | TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]") |
436 | TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]") | 436 | TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]") |
437 | TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]") | ||
438 | #endif | ||
437 | TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") | 439 | TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") |
438 | TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") | 440 | TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") |
439 | TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") | 441 | TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") |
440 | TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]") | 442 | #if __LINUX_ARM_ARCH__ < 6 |
441 | TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]") | 443 | TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") |
444 | TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") | ||
445 | #else | ||
446 | TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]") | ||
447 | TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]") | ||
448 | #endif | ||
442 | TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") | 449 | TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") |
443 | 450 | ||
444 | TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ | 451 | TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ |
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void) | |||
550 | TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") | 557 | TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") |
551 | TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") | 558 | TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") |
552 | TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") | 559 | TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") |
553 | TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"") | 560 | TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") |
554 | TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") | 561 | TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") |
555 | TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") | 562 | TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") |
556 | 563 | ||
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c index 5e726c31c45a..5d8b85792222 100644 --- a/arch/arm/kernel/kprobes-test-thumb.c +++ b/arch/arm/kernel/kprobes-test-thumb.c | |||
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void) | |||
222 | DONT_TEST_IN_ITBLOCK( | 222 | DONT_TEST_IN_ITBLOCK( |
223 | TEST_BF_R( "cbnz r",0,0, ", 2f") | 223 | TEST_BF_R( "cbnz r",0,0, ", 2f") |
224 | TEST_BF_R( "cbz r",2,-1,", 2f") | 224 | TEST_BF_R( "cbz r",2,-1,", 2f") |
225 | TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20) | 225 | TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20) |
226 | TEST_BF_RX( "cbz r",7,0, ", 2f",0x40) | 226 | TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40) |
227 | ) | 227 | ) |
228 | TEST_R("sxth r0, r",7, HH1,"") | 228 | TEST_R("sxth r0, r",7, HH1,"") |
229 | TEST_R("sxth r7, r",0, HH2,"") | 229 | TEST_R("sxth r7, r",0, HH2,"") |
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK( | |||
246 | TESTCASE_START(code) \ | 246 | TESTCASE_START(code) \ |
247 | TEST_ARG_PTR(13, offset) \ | 247 | TEST_ARG_PTR(13, offset) \ |
248 | TEST_ARG_END("") \ | 248 | TEST_ARG_END("") \ |
249 | TEST_BRANCH_F(code,0) \ | 249 | TEST_BRANCH_F(code) \ |
250 | TESTCASE_END | 250 | TESTCASE_END |
251 | 251 | ||
252 | TEST("push {r0}") | 252 | TEST("push {r0}") |
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8, | |||
319 | 319 | ||
320 | TEST_BF( "b 2f") | 320 | TEST_BF( "b 2f") |
321 | TEST_BB( "b 2b") | 321 | TEST_BB( "b 2b") |
322 | TEST_BF_X("b 2f", 0x400) | 322 | TEST_BF_X("b 2f", SPACE_0x400) |
323 | TEST_BB_X("b 2b", 0x400) | 323 | TEST_BB_X("b 2b", SPACE_0x400) |
324 | 324 | ||
325 | TEST_GROUP("Testing instructions in IT blocks") | 325 | TEST_GROUP("Testing instructions in IT blocks") |
326 | 326 | ||
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22, | |||
746 | TEST_BB("bne.w 2b") | 746 | TEST_BB("bne.w 2b") |
747 | TEST_BF("bgt.w 2f") | 747 | TEST_BF("bgt.w 2f") |
748 | TEST_BB("blt.w 2b") | 748 | TEST_BB("blt.w 2b") |
749 | TEST_BF_X("bpl.w 2f",0x1000) | 749 | TEST_BF_X("bpl.w 2f", SPACE_0x1000) |
750 | ) | 750 | ) |
751 | 751 | ||
752 | TEST_UNSUPPORTED("msr cpsr, r0") | 752 | TEST_UNSUPPORTED("msr cpsr, r0") |
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22, | |||
786 | 786 | ||
787 | TEST_BF( "b.w 2f") | 787 | TEST_BF( "b.w 2f") |
788 | TEST_BB( "b.w 2b") | 788 | TEST_BB( "b.w 2b") |
789 | TEST_BF_X("b.w 2f", 0x1000) | 789 | TEST_BF_X("b.w 2f", SPACE_0x1000) |
790 | 790 | ||
791 | TEST_BF( "bl.w 2f") | 791 | TEST_BF( "bl.w 2f") |
792 | TEST_BB( "bl.w 2b") | 792 | TEST_BB( "bl.w 2b") |
793 | TEST_BB_X("bl.w 2b", 0x1000) | 793 | TEST_BB_X("bl.w 2b", SPACE_0x1000) |
794 | 794 | ||
795 | TEST_X( "blx __dummy_arm_subroutine", | 795 | TEST_X( "blx __dummy_arm_subroutine", |
796 | ".arm \n\t" | 796 | ".arm \n\t" |
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index 0dc5d77b9356..e28a869b1ae4 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h | |||
@@ -149,23 +149,31 @@ struct test_arg_end { | |||
149 | "1: "instruction" \n\t" \ | 149 | "1: "instruction" \n\t" \ |
150 | " nop \n\t" | 150 | " nop \n\t" |
151 | 151 | ||
152 | #define TEST_BRANCH_F(instruction, xtra_dist) \ | 152 | #define TEST_BRANCH_F(instruction) \ |
153 | TEST_INSTRUCTION(instruction) \ | 153 | TEST_INSTRUCTION(instruction) \ |
154 | ".if "#xtra_dist" \n\t" \ | ||
155 | " b 99f \n\t" \ | 154 | " b 99f \n\t" \ |
156 | ".space "#xtra_dist" \n\t" \ | 155 | "2: nop \n\t" |
157 | ".endif \n\t" \ | 156 | |
157 | #define TEST_BRANCH_B(instruction) \ | ||
158 | " b 50f \n\t" \ | ||
159 | " b 99f \n\t" \ | ||
160 | "2: nop \n\t" \ | ||
161 | " b 99f \n\t" \ | ||
162 | TEST_INSTRUCTION(instruction) | ||
163 | |||
164 | #define TEST_BRANCH_FX(instruction, codex) \ | ||
165 | TEST_INSTRUCTION(instruction) \ | ||
166 | " b 99f \n\t" \ | ||
167 | codex" \n\t" \ | ||
158 | " b 99f \n\t" \ | 168 | " b 99f \n\t" \ |
159 | "2: nop \n\t" | 169 | "2: nop \n\t" |
160 | 170 | ||
161 | #define TEST_BRANCH_B(instruction, xtra_dist) \ | 171 | #define TEST_BRANCH_BX(instruction, codex) \ |
162 | " b 50f \n\t" \ | 172 | " b 50f \n\t" \ |
163 | " b 99f \n\t" \ | 173 | " b 99f \n\t" \ |
164 | "2: nop \n\t" \ | 174 | "2: nop \n\t" \ |
165 | " b 99f \n\t" \ | 175 | " b 99f \n\t" \ |
166 | ".if "#xtra_dist" \n\t" \ | 176 | codex" \n\t" \ |
167 | ".space "#xtra_dist" \n\t" \ | ||
168 | ".endif \n\t" \ | ||
169 | TEST_INSTRUCTION(instruction) | 177 | TEST_INSTRUCTION(instruction) |
170 | 178 | ||
171 | #define TESTCASE_END \ | 179 | #define TESTCASE_END \ |
@@ -301,47 +309,60 @@ struct test_arg_end { | |||
301 | TESTCASE_START(code1 #reg1 code2) \ | 309 | TESTCASE_START(code1 #reg1 code2) \ |
302 | TEST_ARG_PTR(reg1, val1) \ | 310 | TEST_ARG_PTR(reg1, val1) \ |
303 | TEST_ARG_END("") \ | 311 | TEST_ARG_END("") \ |
304 | TEST_BRANCH_F(code1 #reg1 code2, 0) \ | 312 | TEST_BRANCH_F(code1 #reg1 code2) \ |
305 | TESTCASE_END | 313 | TESTCASE_END |
306 | 314 | ||
307 | #define TEST_BF_X(code, xtra_dist) \ | 315 | #define TEST_BF(code) \ |
308 | TESTCASE_START(code) \ | 316 | TESTCASE_START(code) \ |
309 | TEST_ARG_END("") \ | 317 | TEST_ARG_END("") \ |
310 | TEST_BRANCH_F(code, xtra_dist) \ | 318 | TEST_BRANCH_F(code) \ |
311 | TESTCASE_END | 319 | TESTCASE_END |
312 | 320 | ||
313 | #define TEST_BB_X(code, xtra_dist) \ | 321 | #define TEST_BB(code) \ |
314 | TESTCASE_START(code) \ | 322 | TESTCASE_START(code) \ |
315 | TEST_ARG_END("") \ | 323 | TEST_ARG_END("") \ |
316 | TEST_BRANCH_B(code, xtra_dist) \ | 324 | TEST_BRANCH_B(code) \ |
317 | TESTCASE_END | 325 | TESTCASE_END |
318 | 326 | ||
319 | #define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \ | 327 | #define TEST_BF_R(code1, reg, val, code2) \ |
320 | TESTCASE_START(code1 #reg code2) \ | 328 | TESTCASE_START(code1 #reg code2) \ |
321 | TEST_ARG_REG(reg, val) \ | 329 | TEST_ARG_REG(reg, val) \ |
322 | TEST_ARG_END("") \ | 330 | TEST_ARG_END("") \ |
323 | TEST_BRANCH_F(code1 #reg code2, xtra_dist) \ | 331 | TEST_BRANCH_F(code1 #reg code2) \ |
324 | TESTCASE_END | 332 | TESTCASE_END |
325 | 333 | ||
326 | #define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \ | 334 | #define TEST_BB_R(code1, reg, val, code2) \ |
327 | TESTCASE_START(code1 #reg code2) \ | 335 | TESTCASE_START(code1 #reg code2) \ |
328 | TEST_ARG_REG(reg, val) \ | 336 | TEST_ARG_REG(reg, val) \ |
329 | TEST_ARG_END("") \ | 337 | TEST_ARG_END("") \ |
330 | TEST_BRANCH_B(code1 #reg code2, xtra_dist) \ | 338 | TEST_BRANCH_B(code1 #reg code2) \ |
331 | TESTCASE_END | 339 | TESTCASE_END |
332 | 340 | ||
333 | #define TEST_BF(code) TEST_BF_X(code, 0) | ||
334 | #define TEST_BB(code) TEST_BB_X(code, 0) | ||
335 | |||
336 | #define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0) | ||
337 | #define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0) | ||
338 | |||
339 | #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ | 341 | #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ |
340 | TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ | 342 | TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ |
341 | TEST_ARG_REG(reg1, val1) \ | 343 | TEST_ARG_REG(reg1, val1) \ |
342 | TEST_ARG_REG(reg2, val2) \ | 344 | TEST_ARG_REG(reg2, val2) \ |
343 | TEST_ARG_END("") \ | 345 | TEST_ARG_END("") \ |
344 | TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \ | 346 | TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \ |
347 | TESTCASE_END | ||
348 | |||
349 | #define TEST_BF_X(code, codex) \ | ||
350 | TESTCASE_START(code) \ | ||
351 | TEST_ARG_END("") \ | ||
352 | TEST_BRANCH_FX(code, codex) \ | ||
353 | TESTCASE_END | ||
354 | |||
355 | #define TEST_BB_X(code, codex) \ | ||
356 | TESTCASE_START(code) \ | ||
357 | TEST_ARG_END("") \ | ||
358 | TEST_BRANCH_BX(code, codex) \ | ||
359 | TESTCASE_END | ||
360 | |||
361 | #define TEST_BF_RX(code1, reg, val, code2, codex) \ | ||
362 | TESTCASE_START(code1 #reg code2) \ | ||
363 | TEST_ARG_REG(reg, val) \ | ||
364 | TEST_ARG_END("") \ | ||
365 | TEST_BRANCH_FX(code1 #reg code2, codex) \ | ||
345 | TESTCASE_END | 366 | TESTCASE_END |
346 | 367 | ||
347 | #define TEST_X(code, codex) \ | 368 | #define TEST_X(code, codex) \ |
@@ -372,6 +393,25 @@ struct test_arg_end { | |||
372 | TESTCASE_END | 393 | TESTCASE_END |
373 | 394 | ||
374 | 395 | ||
396 | /* | ||
397 | * Macros for defining space directives spread over multiple lines. | ||
398 | * These are required so the compiler guesses better the length of inline asm | ||
399 | * code and will spill the literal pool early enough to avoid generating PC | ||
400 | * relative loads with out of range offsets. | ||
401 | */ | ||
402 | #define TWICE(x) x x | ||
403 | #define SPACE_0x8 TWICE(".space 4\n\t") | ||
404 | #define SPACE_0x10 TWICE(SPACE_0x8) | ||
405 | #define SPACE_0x20 TWICE(SPACE_0x10) | ||
406 | #define SPACE_0x40 TWICE(SPACE_0x20) | ||
407 | #define SPACE_0x80 TWICE(SPACE_0x40) | ||
408 | #define SPACE_0x100 TWICE(SPACE_0x80) | ||
409 | #define SPACE_0x200 TWICE(SPACE_0x100) | ||
410 | #define SPACE_0x400 TWICE(SPACE_0x200) | ||
411 | #define SPACE_0x800 TWICE(SPACE_0x400) | ||
412 | #define SPACE_0x1000 TWICE(SPACE_0x800) | ||
413 | |||
414 | |||
375 | /* Various values used in test cases... */ | 415 | /* Various values used in test cases... */ |
376 | #define N(val) (val ^ 0xffffffff) | 416 | #define N(val) (val ^ 0xffffffff) |
377 | #define VAL1 0x12345678 | 417 | #define VAL1 0x12345678 |
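
A minimal stand-alone sketch of the doubling pattern behind the SPACE_0xNNN helpers added above (names here are illustrative, not the kernel macros): each level simply pastes the previous one twice, so SPACE_0x40 ends up as sixteen ".space 4" directives whose total size the compiler can see when estimating the length of the inline asm and deciding when to spill the literal pool.

    #define TWICE(x)   x x
    #define PAD_0x8    TWICE(".space 4\n\t")   /* 2 directives ->  8 bytes */
    #define PAD_0x10   TWICE(PAD_0x8)          /* 4 directives -> 16 bytes */
    #define PAD_0x20   TWICE(PAD_0x10)         /* 8 directives -> 32 bytes */

    /* String-literal concatenation turns each level into one literal. */
    static const char pad_demo[] = PAD_0x20;

    int main(void) { return sizeof(pad_demo) > 1 ? 0 : 1; }

In the test harness these expansions are pasted straight into the inline asm strings, which is why they must stay flat sequences of directives rather than assembler loops.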
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 24e2347be6b1..c475379199b1 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -343,8 +343,14 @@ validate_group(struct perf_event *event) | |||
343 | { | 343 | { |
344 | struct perf_event *sibling, *leader = event->group_leader; | 344 | struct perf_event *sibling, *leader = event->group_leader; |
345 | struct pmu_hw_events fake_pmu; | 345 | struct pmu_hw_events fake_pmu; |
346 | DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); | ||
346 | 347 | ||
347 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | 348 | /* |
349 | * Initialise the fake PMU. We only need to populate the | ||
350 | * used_mask for the purposes of validation. | ||
351 | */ | ||
352 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); | ||
353 | fake_pmu.used_mask = fake_used_mask; | ||
348 | 354 | ||
349 | if (!validate_event(&fake_pmu, leader)) | 355 | if (!validate_event(&fake_pmu, leader)) |
350 | return -ENOSPC; | 356 | return -ENOSPC; |
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
396 | int i, err, irq, irqs; | 402 | int i, err, irq, irqs; |
397 | struct platform_device *pmu_device = armpmu->plat_device; | 403 | struct platform_device *pmu_device = armpmu->plat_device; |
398 | 404 | ||
405 | if (!pmu_device) | ||
406 | return -ENODEV; | ||
407 | |||
399 | err = reserve_pmu(armpmu->type); | 408 | err = reserve_pmu(armpmu->type); |
400 | if (err) { | 409 | if (err) { |
401 | pr_warning("unable to reserve pmu\n"); | 410 | pr_warning("unable to reserve pmu\n"); |
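
A userspace-flavoured sketch of the fake-PMU idea used in validate_group() above: only the used_mask bitmap matters for validation, so a stack-local bitmap is wired into an otherwise empty structure instead of copying a whole PMU. Every name below is a stand-in, not the kernel API.

    #include <string.h>

    #define MAX_HWEVENTS    32
    #define LONG_BITS       (8 * sizeof(unsigned long))
    #define BITMAP_LONGS(n) (((n) + LONG_BITS - 1) / LONG_BITS)

    struct hw_events {
            unsigned long *used_mask;       /* which counters are already taken */
    };

    static int counter_taken(const struct hw_events *ev, unsigned idx)
    {
            return (ev->used_mask[idx / LONG_BITS] >> (idx % LONG_BITS)) & 1;
    }

    int validate_against_fake_pmu(unsigned idx)
    {
            unsigned long fake_used[BITMAP_LONGS(MAX_HWEVENTS)];
            struct hw_events fake = { .used_mask = fake_used };

            memset(fake_used, 0, sizeof(fake_used));   /* nothing allocated yet */
            return !counter_taken(&fake, idx);         /* every counter looks free */
    }

    int main(void) { return validate_against_fake_pmu(3) ? 0 : 1; }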
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c3407ee8576..2334bf8a650a 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type) | |||
33 | { | 33 | { |
34 | clear_bit_unlock(type, pmu_lock); | 34 | clear_bit_unlock(type, pmu_lock); |
35 | } | 35 | } |
36 | EXPORT_SYMBOL_GPL(release_pmu); | ||
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 75316f0dd02a..3d0c6fb74ae4 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -192,6 +192,9 @@ void cpu_idle(void) | |||
192 | #endif | 192 | #endif |
193 | 193 | ||
194 | local_irq_disable(); | 194 | local_irq_disable(); |
195 | #ifdef CONFIG_PL310_ERRATA_769419 | ||
196 | wmb(); | ||
197 | #endif | ||
195 | if (hlt_counter) { | 198 | if (hlt_counter) { |
196 | local_irq_enable(); | 199 | local_irq_enable(); |
197 | cpu_relax(); | 200 | cpu_relax(); |
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 1040c00405d0..8200deaa14f6 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -43,7 +43,7 @@ | |||
43 | 43 | ||
44 | struct cputopo_arm cpu_topology[NR_CPUS]; | 44 | struct cputopo_arm cpu_topology[NR_CPUS]; |
45 | 45 | ||
46 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | 46 | const struct cpumask *cpu_coregroup_mask(int cpu) |
47 | { | 47 | { |
48 | return &cpu_topology[cpu].core_sibling; | 48 | return &cpu_topology[cpu].core_sibling; |
49 | } | 49 | } |
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index 10d868a5a481..d6408d1ee543 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h | |||
@@ -1,5 +1,9 @@ | |||
1 | #include <asm/unwind.h> | ||
2 | |||
1 | #if __LINUX_ARM_ARCH__ >= 6 | 3 | #if __LINUX_ARM_ARCH__ >= 6 |
2 | .macro bitop, instr | 4 | .macro bitop, name, instr |
5 | ENTRY( \name ) | ||
6 | UNWIND( .fnstart ) | ||
3 | ands ip, r1, #3 | 7 | ands ip, r1, #3 |
4 | strneb r1, [ip] @ assert word-aligned | 8 | strneb r1, [ip] @ assert word-aligned |
5 | mov r2, #1 | 9 | mov r2, #1 |
@@ -13,9 +17,13 @@ | |||
13 | cmp r0, #0 | 17 | cmp r0, #0 |
14 | bne 1b | 18 | bne 1b |
15 | bx lr | 19 | bx lr |
20 | UNWIND( .fnend ) | ||
21 | ENDPROC(\name ) | ||
16 | .endm | 22 | .endm |
17 | 23 | ||
18 | .macro testop, instr, store | 24 | .macro testop, name, instr, store |
25 | ENTRY( \name ) | ||
26 | UNWIND( .fnstart ) | ||
19 | ands ip, r1, #3 | 27 | ands ip, r1, #3 |
20 | strneb r1, [ip] @ assert word-aligned | 28 | strneb r1, [ip] @ assert word-aligned |
21 | mov r2, #1 | 29 | mov r2, #1 |
@@ -34,9 +42,13 @@ | |||
34 | cmp r0, #0 | 42 | cmp r0, #0 |
35 | movne r0, #1 | 43 | movne r0, #1 |
36 | 2: bx lr | 44 | 2: bx lr |
45 | UNWIND( .fnend ) | ||
46 | ENDPROC(\name ) | ||
37 | .endm | 47 | .endm |
38 | #else | 48 | #else |
39 | .macro bitop, instr | 49 | .macro bitop, name, instr |
50 | ENTRY( \name ) | ||
51 | UNWIND( .fnstart ) | ||
40 | ands ip, r1, #3 | 52 | ands ip, r1, #3 |
41 | strneb r1, [ip] @ assert word-aligned | 53 | strneb r1, [ip] @ assert word-aligned |
42 | and r2, r0, #31 | 54 | and r2, r0, #31 |
@@ -49,6 +61,8 @@ | |||
49 | str r2, [r1, r0, lsl #2] | 61 | str r2, [r1, r0, lsl #2] |
50 | restore_irqs ip | 62 | restore_irqs ip |
51 | mov pc, lr | 63 | mov pc, lr |
64 | UNWIND( .fnend ) | ||
65 | ENDPROC(\name ) | ||
52 | .endm | 66 | .endm |
53 | 67 | ||
54 | /** | 68 | /** |
@@ -59,7 +73,9 @@ | |||
59 | * Note: we can trivially conditionalise the store instruction | 73 | * Note: we can trivially conditionalise the store instruction |
60 | * to avoid dirtying the data cache. | 74 | * to avoid dirtying the data cache. |
61 | */ | 75 | */ |
62 | .macro testop, instr, store | 76 | .macro testop, name, instr, store |
77 | ENTRY( \name ) | ||
78 | UNWIND( .fnstart ) | ||
63 | ands ip, r1, #3 | 79 | ands ip, r1, #3 |
64 | strneb r1, [ip] @ assert word-aligned | 80 | strneb r1, [ip] @ assert word-aligned |
65 | and r3, r0, #31 | 81 | and r3, r0, #31 |
@@ -73,5 +89,7 @@ | |||
73 | moveq r0, #0 | 89 | moveq r0, #0 |
74 | restore_irqs ip | 90 | restore_irqs ip |
75 | mov pc, lr | 91 | mov pc, lr |
92 | UNWIND( .fnend ) | ||
93 | ENDPROC(\name ) | ||
76 | .endm | 94 | .endm |
77 | #endif | 95 | #endif |
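
The bitops.h rework above folds ENTRY()/ENDPROC() and the unwind annotations into the bitop/testop macros themselves, so each of the *.S files that follow collapses to a single "bitop name, instr" line. A rough C analogue of that style of refactor (plain C macros, not the assembler ones):

    #include <stdio.h>

    #define DEFINE_BITOP(name, op)                               \
            static void name(unsigned long *word, unsigned bit)  \
            {                                                    \
                    *word = *word op (1UL << bit);               \
            }

    DEFINE_BITOP(set_bit_demo, |)        /* like: bitop _set_bit, orr    */
    DEFINE_BITOP(change_bit_demo, ^)     /* like: bitop _change_bit, eor */

    int main(void)
    {
            unsigned long w = 0;
            set_bit_demo(&w, 3);
            change_bit_demo(&w, 3);
            printf("%lu\n", w);          /* prints 0: set, then toggled back */
            return 0;
    }

Passing the symbol name into the macro keeps the definition and its prologue/epilogue boilerplate in one place, which is exactly what lets the call sites shrink.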
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S index 68ed5b62e839..f4027862172f 100644 --- a/arch/arm/lib/changebit.S +++ b/arch/arm/lib/changebit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_change_bit) | 15 | bitop _change_bit, eor |
16 | bitop eor | ||
17 | ENDPROC(_change_bit) | ||
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S index 4c04c3b51eeb..f6b75fb64d30 100644 --- a/arch/arm/lib/clearbit.S +++ b/arch/arm/lib/clearbit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_clear_bit) | 15 | bitop _clear_bit, bic |
16 | bitop bic | ||
17 | ENDPROC(_clear_bit) | ||
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S index bbee5c66a23e..618fedae4b37 100644 --- a/arch/arm/lib/setbit.S +++ b/arch/arm/lib/setbit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_set_bit) | 15 | bitop _set_bit, orr |
16 | bitop orr | ||
17 | ENDPROC(_set_bit) | ||
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S index 15a4d431f229..4becdc3a59cb 100644 --- a/arch/arm/lib/testchangebit.S +++ b/arch/arm/lib/testchangebit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_test_and_change_bit) | 15 | testop _test_and_change_bit, eor, str |
16 | testop eor, str | ||
17 | ENDPROC(_test_and_change_bit) | ||
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S index 521b66b5b95d..918841dcce7a 100644 --- a/arch/arm/lib/testclearbit.S +++ b/arch/arm/lib/testclearbit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_test_and_clear_bit) | 15 | testop _test_and_clear_bit, bicne, strne |
16 | testop bicne, strne | ||
17 | ENDPROC(_test_and_clear_bit) | ||
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S index 1c98cc2185bb..8d1b2fe9e487 100644 --- a/arch/arm/lib/testsetbit.S +++ b/arch/arm/lib/testsetbit.S | |||
@@ -12,6 +12,4 @@ | |||
12 | #include "bitops.h" | 12 | #include "bitops.h" |
13 | .text | 13 | .text |
14 | 14 | ||
15 | ENTRY(_test_and_set_bit) | 15 | testop _test_and_set_bit, orreq, streq |
16 | testop orreq, streq | ||
17 | ENDPROC(_test_and_set_bit) | ||
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c index 35f6502144ae..4ebb382c5979 100644 --- a/arch/arm/mach-exynos/cpuidle.c +++ b/arch/arm/mach-exynos/cpuidle.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/cpuidle.h> | 13 | #include <linux/cpuidle.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/export.h> | ||
16 | #include <linux/time.h> | ||
15 | 17 | ||
16 | #include <asm/proc-fns.h> | 18 | #include <asm/proc-fns.h> |
17 | 19 | ||
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index b82dcf08e747..88660d500f5b 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
23 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/smp.h> | ||
25 | 26 | ||
26 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
27 | #include <asm/unified.h> | 28 | #include <asm/unified.h> |
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void) | |||
72 | 73 | ||
73 | void highbank_set_cpu_jump(int cpu, void *jump_addr) | 74 | void highbank_set_cpu_jump(int cpu, void *jump_addr) |
74 | { | 75 | { |
76 | #ifdef CONFIG_SMP | ||
77 | cpu = cpu_logical_map(cpu); | ||
78 | #endif | ||
75 | writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu)); | 79 | writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu)); |
76 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); | 80 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); |
77 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), | 81 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 5f7f9c2a34ae..c44aa974e79c 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC | |||
10 | config HAVE_IMX_SRC | 10 | config HAVE_IMX_SRC |
11 | bool | 11 | bool |
12 | 12 | ||
13 | # | ||
14 | # ARCH_MX31 and ARCH_MX35 are left for compatibility | ||
15 | # Some usages assume that having one of them implies not having (e.g.) ARCH_MX2. | ||
16 | # To easily distinguish good and reviewed from unreviewed usages new (and IMHO | ||
17 | # more sensible) names are used: SOC_IMX31 and SOC_IMX35 | ||
18 | config ARCH_MX1 | 13 | config ARCH_MX1 |
19 | bool | 14 | bool |
20 | 15 | ||
@@ -27,12 +22,6 @@ config ARCH_MX25 | |||
27 | config MACH_MX27 | 22 | config MACH_MX27 |
28 | bool | 23 | bool |
29 | 24 | ||
30 | config ARCH_MX31 | ||
31 | bool | ||
32 | |||
33 | config ARCH_MX35 | ||
34 | bool | ||
35 | |||
36 | config SOC_IMX1 | 25 | config SOC_IMX1 |
37 | bool | 26 | bool |
38 | select ARCH_MX1 | 27 | select ARCH_MX1 |
@@ -72,7 +61,6 @@ config SOC_IMX31 | |||
72 | select CPU_V6 | 61 | select CPU_V6 |
73 | select IMX_HAVE_PLATFORM_MXC_RNGA | 62 | select IMX_HAVE_PLATFORM_MXC_RNGA |
74 | select ARCH_MXC_AUDMUX_V2 | 63 | select ARCH_MXC_AUDMUX_V2 |
75 | select ARCH_MX31 | ||
76 | select MXC_AVIC | 64 | select MXC_AVIC |
77 | select SMP_ON_UP if SMP | 65 | select SMP_ON_UP if SMP |
78 | 66 | ||
@@ -82,7 +70,6 @@ config SOC_IMX35 | |||
82 | select ARCH_MXC_IOMUX_V3 | 70 | select ARCH_MXC_IOMUX_V3 |
83 | select ARCH_MXC_AUDMUX_V2 | 71 | select ARCH_MXC_AUDMUX_V2 |
84 | select HAVE_EPIT | 72 | select HAVE_EPIT |
85 | select ARCH_MX35 | ||
86 | select MXC_AVIC | 73 | select MXC_AVIC |
87 | select SMP_ON_UP if SMP | 74 | select SMP_ON_UP if SMP |
88 | 75 | ||
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c index 613a1b993bff..039a7abb165a 100644 --- a/arch/arm/mach-imx/clock-imx6q.c +++ b/arch/arm/mach-imx/clock-imx6q.c | |||
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = { | |||
1953 | imx_map_entry(MX6Q, ANATOP, MT_DEVICE), | 1953 | imx_map_entry(MX6Q, ANATOP, MT_DEVICE), |
1954 | }; | 1954 | }; |
1955 | 1955 | ||
1956 | void __init imx6q_clock_map_io(void) | ||
1957 | { | ||
1958 | iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc)); | ||
1959 | } | ||
1960 | |||
1956 | int __init mx6q_clocks_init(void) | 1961 | int __init mx6q_clocks_init(void) |
1957 | { | 1962 | { |
1958 | struct device_node *np; | 1963 | struct device_node *np; |
1959 | void __iomem *base; | 1964 | void __iomem *base; |
1960 | int i, irq; | 1965 | int i, irq; |
1961 | 1966 | ||
1962 | iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc)); | ||
1963 | |||
1964 | /* retrieve the freqency of fixed clocks from device tree */ | 1967 | /* retrieve the freqency of fixed clocks from device tree */ |
1965 | for_each_compatible_node(np, NULL, "fixed-clock") { | 1968 | for_each_compatible_node(np, NULL, "fixed-clock") { |
1966 | u32 rate; | 1969 | u32 rate; |
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 8bf5fa349484..9cd860a27af5 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -34,6 +34,7 @@ static void __init imx6q_map_io(void) | |||
34 | { | 34 | { |
35 | imx_lluart_map_io(); | 35 | imx_lluart_map_io(); |
36 | imx_scu_map_io(); | 36 | imx_scu_map_io(); |
37 | imx6q_clock_map_io(); | ||
37 | } | 38 | } |
38 | 39 | ||
39 | static void __init imx6q_gpio_add_irq_domain(struct device_node *np, | 40 | static void __init imx6q_gpio_add_irq_domain(struct device_node *np, |
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index 9f0e82ec3398..31807d2a8b7b 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c | |||
@@ -33,29 +33,32 @@ | |||
33 | static void imx3_idle(void) | 33 | static void imx3_idle(void) |
34 | { | 34 | { |
35 | unsigned long reg = 0; | 35 | unsigned long reg = 0; |
36 | __asm__ __volatile__( | 36 | |
37 | /* disable I and D cache */ | 37 | if (!need_resched()) |
38 | "mrc p15, 0, %0, c1, c0, 0\n" | 38 | __asm__ __volatile__( |
39 | "bic %0, %0, #0x00001000\n" | 39 | /* disable I and D cache */ |
40 | "bic %0, %0, #0x00000004\n" | 40 | "mrc p15, 0, %0, c1, c0, 0\n" |
41 | "mcr p15, 0, %0, c1, c0, 0\n" | 41 | "bic %0, %0, #0x00001000\n" |
42 | /* invalidate I cache */ | 42 | "bic %0, %0, #0x00000004\n" |
43 | "mov %0, #0\n" | 43 | "mcr p15, 0, %0, c1, c0, 0\n" |
44 | "mcr p15, 0, %0, c7, c5, 0\n" | 44 | /* invalidate I cache */ |
45 | /* clear and invalidate D cache */ | 45 | "mov %0, #0\n" |
46 | "mov %0, #0\n" | 46 | "mcr p15, 0, %0, c7, c5, 0\n" |
47 | "mcr p15, 0, %0, c7, c14, 0\n" | 47 | /* clear and invalidate D cache */ |
48 | /* WFI */ | 48 | "mov %0, #0\n" |
49 | "mov %0, #0\n" | 49 | "mcr p15, 0, %0, c7, c14, 0\n" |
50 | "mcr p15, 0, %0, c7, c0, 4\n" | 50 | /* WFI */ |
51 | "nop\n" "nop\n" "nop\n" "nop\n" | 51 | "mov %0, #0\n" |
52 | "nop\n" "nop\n" "nop\n" | 52 | "mcr p15, 0, %0, c7, c0, 4\n" |
53 | /* enable I and D cache */ | 53 | "nop\n" "nop\n" "nop\n" "nop\n" |
54 | "mrc p15, 0, %0, c1, c0, 0\n" | 54 | "nop\n" "nop\n" "nop\n" |
55 | "orr %0, %0, #0x00001000\n" | 55 | /* enable I and D cache */ |
56 | "orr %0, %0, #0x00000004\n" | 56 | "mrc p15, 0, %0, c1, c0, 0\n" |
57 | "mcr p15, 0, %0, c1, c0, 0\n" | 57 | "orr %0, %0, #0x00001000\n" |
58 | : "=r" (reg)); | 58 | "orr %0, %0, #0x00000004\n" |
59 | "mcr p15, 0, %0, c1, c0, 0\n" | ||
60 | : "=r" (reg)); | ||
61 | local_irq_enable(); | ||
59 | } | 62 | } |
60 | 63 | ||
61 | static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size, | 64 | static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size, |
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void) | |||
108 | l2x0_init(l2x0_base, 0x00030024, 0x00000000); | 111 | l2x0_init(l2x0_base, 0x00030024, 0x00000000); |
109 | } | 112 | } |
110 | 113 | ||
114 | #ifdef CONFIG_SOC_IMX31 | ||
111 | static struct map_desc mx31_io_desc[] __initdata = { | 115 | static struct map_desc mx31_io_desc[] __initdata = { |
112 | imx_map_entry(MX31, X_MEMC, MT_DEVICE), | 116 | imx_map_entry(MX31, X_MEMC, MT_DEVICE), |
113 | imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED), | 117 | imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED), |
@@ -126,33 +130,11 @@ void __init mx31_map_io(void) | |||
126 | iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc)); | 130 | iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc)); |
127 | } | 131 | } |
128 | 132 | ||
129 | static struct map_desc mx35_io_desc[] __initdata = { | ||
130 | imx_map_entry(MX35, X_MEMC, MT_DEVICE), | ||
131 | imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED), | ||
132 | imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED), | ||
133 | imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED), | ||
134 | imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED), | ||
135 | }; | ||
136 | |||
137 | void __init mx35_map_io(void) | ||
138 | { | ||
139 | iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc)); | ||
140 | } | ||
141 | |||
142 | void __init imx31_init_early(void) | 133 | void __init imx31_init_early(void) |
143 | { | 134 | { |
144 | mxc_set_cpu_type(MXC_CPU_MX31); | 135 | mxc_set_cpu_type(MXC_CPU_MX31); |
145 | mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); | 136 | mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); |
146 | imx_idle = imx3_idle; | 137 | pm_idle = imx3_idle; |
147 | imx_ioremap = imx3_ioremap; | ||
148 | } | ||
149 | |||
150 | void __init imx35_init_early(void) | ||
151 | { | ||
152 | mxc_set_cpu_type(MXC_CPU_MX35); | ||
153 | mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR)); | ||
154 | mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); | ||
155 | imx_idle = imx3_idle; | ||
156 | imx_ioremap = imx3_ioremap; | 138 | imx_ioremap = imx3_ioremap; |
157 | } | 139 | } |
158 | 140 | ||
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void) | |||
161 | mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); | 143 | mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); |
162 | } | 144 | } |
163 | 145 | ||
164 | void __init mx35_init_irq(void) | ||
165 | { | ||
166 | mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR)); | ||
167 | } | ||
168 | |||
169 | static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = { | 146 | static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = { |
170 | .per_2_per_addr = 1677, | 147 | .per_2_per_addr = 1677, |
171 | }; | 148 | }; |
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void) | |||
199 | 176 | ||
200 | imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata); | 177 | imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata); |
201 | } | 178 | } |
179 | #endif /* ifdef CONFIG_SOC_IMX31 */ | ||
180 | |||
181 | #ifdef CONFIG_SOC_IMX35 | ||
182 | static struct map_desc mx35_io_desc[] __initdata = { | ||
183 | imx_map_entry(MX35, X_MEMC, MT_DEVICE), | ||
184 | imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED), | ||
185 | imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED), | ||
186 | imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED), | ||
187 | imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED), | ||
188 | }; | ||
189 | |||
190 | void __init mx35_map_io(void) | ||
191 | { | ||
192 | iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc)); | ||
193 | } | ||
194 | |||
195 | void __init imx35_init_early(void) | ||
196 | { | ||
197 | mxc_set_cpu_type(MXC_CPU_MX35); | ||
198 | mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR)); | ||
199 | mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); | ||
200 | pm_idle = imx3_idle; | ||
201 | imx_ioremap = imx3_ioremap; | ||
202 | } | ||
203 | |||
204 | void __init mx35_init_irq(void) | ||
205 | { | ||
206 | mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR)); | ||
207 | } | ||
202 | 208 | ||
203 | static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = { | 209 | static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = { |
204 | .ap_2_ap_addr = 642, | 210 | .ap_2_ap_addr = 642, |
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void) | |||
254 | 260 | ||
255 | imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata); | 261 | imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata); |
256 | } | 262 | } |
263 | #endif /* ifdef CONFIG_SOC_IMX35 */ | ||
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 36cacbd0dcc2..a8e33681b732 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/of_address.h> | 16 | #include <linux/of_address.h> |
17 | #include <linux/smp.h> | ||
17 | #include <asm/unified.h> | 18 | #include <asm/unified.h> |
18 | 19 | ||
19 | #define SRC_SCR 0x000 | 20 | #define SRC_SCR 0x000 |
@@ -23,10 +24,15 @@ | |||
23 | 24 | ||
24 | static void __iomem *src_base; | 25 | static void __iomem *src_base; |
25 | 26 | ||
27 | #ifndef CONFIG_SMP | ||
28 | #define cpu_logical_map(cpu) 0 | ||
29 | #endif | ||
30 | |||
26 | void imx_enable_cpu(int cpu, bool enable) | 31 | void imx_enable_cpu(int cpu, bool enable) |
27 | { | 32 | { |
28 | u32 mask, val; | 33 | u32 mask, val; |
29 | 34 | ||
35 | cpu = cpu_logical_map(cpu); | ||
30 | mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); | 36 | mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); |
31 | val = readl_relaxed(src_base + SRC_SCR); | 37 | val = readl_relaxed(src_base + SRC_SCR); |
32 | val = enable ? val | mask : val & ~mask; | 38 | val = enable ? val | mask : val & ~mask; |
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable) | |||
35 | 41 | ||
36 | void imx_set_cpu_jump(int cpu, void *jump_addr) | 42 | void imx_set_cpu_jump(int cpu, void *jump_addr) |
37 | { | 43 | { |
44 | cpu = cpu_logical_map(cpu); | ||
38 | writel_relaxed(BSYM(virt_to_phys(jump_addr)), | 45 | writel_relaxed(BSYM(virt_to_phys(jump_addr)), |
39 | src_base + SRC_GPR1 + cpu * 8); | 46 | src_base + SRC_GPR1 + cpu * 8); |
40 | } | 47 | } |
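
Both the src.c change above and the earlier highbank.c change translate a logical CPU number to a physical one before touching per-CPU registers; on non-SMP builds the helper does not exist, so a trivial macro maps everything to core 0. A stand-alone sketch of that fallback pattern (macro and names illustrative only):

    #include <stdio.h>

    #ifdef EXAMPLE_SMP
    int cpu_logical_map(int cpu);             /* real mapping, provided elsewhere */
    #else
    #define cpu_logical_map(cpu) 0            /* UP build: only physical core 0 exists */
    #endif

    int main(void)
    {
            printf("logical 1 -> physical %d\n", cpu_logical_map(1));
            return 0;
    }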
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index 69156568bc41..4665767a4f79 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c | |||
@@ -182,7 +182,7 @@ static void __init gplugd_init(void) | |||
182 | 182 | ||
183 | /* on-chip devices */ | 183 | /* on-chip devices */ |
184 | pxa168_add_uart(3); | 184 | pxa168_add_uart(3); |
185 | pxa168_add_ssp(0); | 185 | pxa168_add_ssp(1); |
186 | pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info)); | 186 | pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info)); |
187 | 187 | ||
188 | pxa168_add_eth(&gplugd_eth_platform_data); | 188 | pxa168_add_eth(&gplugd_eth_platform_data); |
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h index d14eeaf16322..99b4ce1b6562 100644 --- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h +++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000) | 7 | #define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000) |
8 | 8 | ||
9 | #define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) | 9 | #define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) |
10 | #define GPIO_REG(x) (GPIO_REGS_VIRT + (x)) | 10 | #define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x))) |
11 | 11 | ||
12 | #define NR_BUILTIN_GPIO IRQ_GPIO_NUM | 12 | #define NR_BUILTIN_GPIO IRQ_GPIO_NUM |
13 | 13 | ||
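
The gpio-pxa.h change above turns GPIO_REG(x) from a bare address into a dereferenced volatile u32, i.e. an lvalue that can be read and written like a register. A hypothetical stand-alone illustration of the difference (fake memory window, uint32_t standing in for the kernel's u32):

    #include <stdint.h>

    static uint32_t fake_window[64];                              /* pretend MMIO region */
    #define REGS_VIRT   ((uintptr_t)fake_window)

    #define REG_ADDR(x) (REGS_VIRT + (x))                         /* old style: just an address */
    #define REG(x)      (*(volatile uint32_t *)(REGS_VIRT + (x))) /* new style: register lvalue */

    int main(void)
    {
            REG(0x10) = 0x12345678;                               /* direct assignment now works */
            return (*(volatile uint32_t *)REG_ADDR(0x10) == 0x12345678) ? 0 : 1;
    }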
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c index 5c5328257dca..5e2e7a843860 100644 --- a/arch/arm/mach-mx5/cpu.c +++ b/arch/arm/mach-mx5/cpu.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <mach/hardware.h> | 18 | #include <mach/hardware.h> |
19 | #include <asm/io.h> | 19 | #include <linux/io.h> |
20 | 20 | ||
21 | static int mx5_cpu_rev = -1; | 21 | static int mx5_cpu_rev = -1; |
22 | 22 | ||
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void) | |||
67 | if (!cpu_is_mx51()) | 67 | if (!cpu_is_mx51()) |
68 | return 0; | 68 | return 0; |
69 | 69 | ||
70 | if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) { | 70 | if (mx51_revision() < IMX_CHIP_REVISION_3_0 && |
71 | (elf_hwcap & HWCAP_NEON)) { | ||
71 | elf_hwcap &= ~HWCAP_NEON; | 72 | elf_hwcap &= ~HWCAP_NEON; |
72 | pr_info("Turning off NEON support, detected broken NEON implementation\n"); | 73 | pr_info("Turning off NEON support, detected broken NEON implementation\n"); |
73 | } | 74 | } |
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c index 26eacc9d0d90..df4a508f240a 100644 --- a/arch/arm/mach-mx5/mm.c +++ b/arch/arm/mach-mx5/mm.c | |||
@@ -23,7 +23,9 @@ | |||
23 | 23 | ||
24 | static void imx5_idle(void) | 24 | static void imx5_idle(void) |
25 | { | 25 | { |
26 | mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); | 26 | if (!need_resched()) |
27 | mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); | ||
28 | local_irq_enable(); | ||
27 | } | 29 | } |
28 | 30 | ||
29 | /* | 31 | /* |
@@ -89,7 +91,7 @@ void __init imx51_init_early(void) | |||
89 | mxc_set_cpu_type(MXC_CPU_MX51); | 91 | mxc_set_cpu_type(MXC_CPU_MX51); |
90 | mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); | 92 | mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); |
91 | mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR)); | 93 | mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR)); |
92 | imx_idle = imx5_idle; | 94 | pm_idle = imx5_idle; |
93 | } | 95 | } |
94 | 96 | ||
95 | void __init imx53_init_early(void) | 97 | void __init imx53_init_early(void) |
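
The imx3_idle() and imx5_idle() rewrites above both follow the same pm_idle contract: the handler is entered with interrupts disabled, should only drop into the low-power state when no reschedule is pending, and must re-enable interrupts before returning. A hedged, userspace-style mock of that shape (every function here is a stand-in, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool resched_pending;                           /* stands in for need_resched() */

    static void enter_low_power(void) { puts("wfi"); }     /* pretend WFI / clock-gated wait */
    static void irq_enable(void)      { puts("irqs on"); } /* pretend local_irq_enable() */

    static void example_idle(void)
    {
            if (!resched_pending)
                    enter_low_power();    /* skip the deep state if work is queued */
            irq_enable();                 /* callers rely on IRQs being on again */
    }

    int main(void)
    {
            example_idle();
            return 0;
    }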
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c index 229ae3494216..da6e4aad177c 100644 --- a/arch/arm/mach-mxs/clock-mx28.c +++ b/arch/arm/mach-mxs/clock-mx28.c | |||
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ | |||
404 | reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ | 404 | reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ |
405 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ | 405 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ |
406 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ | 406 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ |
407 | if (reg | (1 << clk->enable_shift)) { \ | 407 | if (reg & (1 << clk->enable_shift)) { \ |
408 | pr_err("%s: clock is gated\n", __func__); \ | 408 | pr_err("%s: clock is gated\n", __func__); \ |
409 | return -EINVAL; \ | 409 | return -EINVAL; \ |
410 | } \ | 410 | } \ |
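
The one-character clock-mx28.c fix above is easy to gloss over, so here is a minimal stand-alone check (plain C, hypothetical values) of why it matters: OR-ing the gate bit into reg yields a non-zero value no matter what, whereas AND actually tests whether the gate bit is set.

    #include <assert.h>

    int main(void)
    {
            unsigned int gate_bit    = 1u << 30;
            unsigned int reg_ungated = 0x5;          /* gate bit clear: clock running */

            assert((reg_ungated | gate_bit) != 0);   /* old test: reports "gated" regardless */
            assert((reg_ungated & gate_bit) == 0);   /* fixed test: correctly not gated */
            return 0;
    }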
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index e0a028161dde..73f287d6429b 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig | |||
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC | |||
171 | comment "OMAP CPU Speed" | 171 | comment "OMAP CPU Speed" |
172 | depends on ARCH_OMAP1 | 172 | depends on ARCH_OMAP1 |
173 | 173 | ||
174 | config OMAP_CLOCKS_SET_BY_BOOTLOADER | ||
175 | bool "OMAP clocks set by bootloader" | ||
176 | depends on ARCH_OMAP1 | ||
177 | help | ||
178 | Enable this option to prevent the kernel from overriding the clock | ||
179 | frequencies programmed by bootloader for MPU, DSP, MMUs, TC, | ||
180 | internal LCD controller and MPU peripherals. | ||
181 | |||
182 | config OMAP_ARM_216MHZ | 174 | config OMAP_ARM_216MHZ |
183 | bool "OMAP ARM 216 MHz CPU (1710 only)" | 175 | bool "OMAP ARM 216 MHz CPU (1710 only)" |
184 | depends on ARCH_OMAP1 && ARCH_OMAP16XX | 176 | depends on ARCH_OMAP1 && ARCH_OMAP16XX |
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 51bae31cf361..b0f15d234a12 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c | |||
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void) | |||
302 | omap_cfg_reg(J19_1610_CAM_D6); | 302 | omap_cfg_reg(J19_1610_CAM_D6); |
303 | omap_cfg_reg(J18_1610_CAM_D7); | 303 | omap_cfg_reg(J18_1610_CAM_D7); |
304 | 304 | ||
305 | iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc)); | ||
306 | |||
307 | omap_board_config = ams_delta_config; | 305 | omap_board_config = ams_delta_config; |
308 | omap_board_config_size = ARRAY_SIZE(ams_delta_config); | 306 | omap_board_config_size = ARRAY_SIZE(ams_delta_config); |
309 | omap_serial_init(); | 307 | omap_serial_init(); |
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void) | |||
373 | } | 371 | } |
374 | arch_initcall(ams_delta_modem_init); | 372 | arch_initcall(ams_delta_modem_init); |
375 | 373 | ||
374 | static void __init ams_delta_map_io(void) | ||
375 | { | ||
376 | omap15xx_map_io(); | ||
377 | iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc)); | ||
378 | } | ||
379 | |||
376 | MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)") | 380 | MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)") |
377 | /* Maintainer: Jonathan McDowell <noodles@earth.li> */ | 381 | /* Maintainer: Jonathan McDowell <noodles@earth.li> */ |
378 | .atag_offset = 0x100, | 382 | .atag_offset = 0x100, |
379 | .map_io = omap15xx_map_io, | 383 | .map_io = ams_delta_map_io, |
380 | .init_early = omap1_init_early, | 384 | .init_early = omap1_init_early, |
381 | .reserve = omap_reserve, | 385 | .reserve = omap_reserve, |
382 | .init_irq = omap1_init_irq, | 386 | .init_irq = omap1_init_irq, |
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h index eaf09efb91ca..16b1423b454a 100644 --- a/arch/arm/mach-omap1/clock.h +++ b/arch/arm/mach-omap1/clock.h | |||
@@ -17,7 +17,8 @@ | |||
17 | 17 | ||
18 | #include <plat/clock.h> | 18 | #include <plat/clock.h> |
19 | 19 | ||
20 | extern int __init omap1_clk_init(void); | 20 | int omap1_clk_init(void); |
21 | void omap1_clk_late_init(void); | ||
21 | extern int omap1_clk_enable(struct clk *clk); | 22 | extern int omap1_clk_enable(struct clk *clk); |
22 | extern void omap1_clk_disable(struct clk *clk); | 23 | extern void omap1_clk_disable(struct clk *clk); |
23 | extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate); | 24 | extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate); |
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index 92400b9eb69f..1297bb58869c 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c | |||
@@ -767,6 +767,15 @@ static struct clk_functions omap1_clk_functions = { | |||
767 | .clk_disable_unused = omap1_clk_disable_unused, | 767 | .clk_disable_unused = omap1_clk_disable_unused, |
768 | }; | 768 | }; |
769 | 769 | ||
770 | static void __init omap1_show_rates(void) | ||
771 | { | ||
772 | pr_notice("Clocking rate (xtal/DPLL1/MPU): " | ||
773 | "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n", | ||
774 | ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10, | ||
775 | ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10, | ||
776 | arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10); | ||
777 | } | ||
778 | |||
770 | int __init omap1_clk_init(void) | 779 | int __init omap1_clk_init(void) |
771 | { | 780 | { |
772 | struct omap_clk *c; | 781 | struct omap_clk *c; |
@@ -835,9 +844,12 @@ int __init omap1_clk_init(void) | |||
835 | /* We want to be in syncronous scalable mode */ | 844 | /* We want to be in syncronous scalable mode */ |
836 | omap_writew(0x1000, ARM_SYSST); | 845 | omap_writew(0x1000, ARM_SYSST); |
837 | 846 | ||
838 | #ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER | 847 | |
839 | /* Use values set by bootloader. Determine PLL rate and recalculate | 848 | /* |
840 | * dependent clocks as if kernel had changed PLL or divisors. | 849 | * Initially use the values set by bootloader. Determine PLL rate and |
850 | * recalculate dependent clocks as if kernel had changed PLL or | ||
851 | * divisors. See also omap1_clk_late_init() that can reprogram dpll1 | ||
852 | * after the SRAM is initialized. | ||
841 | */ | 853 | */ |
842 | { | 854 | { |
843 | unsigned pll_ctl_val = omap_readw(DPLL_CTL); | 855 | unsigned pll_ctl_val = omap_readw(DPLL_CTL); |
@@ -862,25 +874,10 @@ int __init omap1_clk_init(void) | |||
862 | } | 874 | } |
863 | } | 875 | } |
864 | } | 876 | } |
865 | #else | ||
866 | /* Find the highest supported frequency and enable it */ | ||
867 | if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) { | ||
868 | printk(KERN_ERR "System frequencies not set. Check your config.\n"); | ||
869 | /* Guess sane values (60MHz) */ | ||
870 | omap_writew(0x2290, DPLL_CTL); | ||
871 | omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL); | ||
872 | ck_dpll1.rate = 60000000; | ||
873 | } | ||
874 | #endif | ||
875 | propagate_rate(&ck_dpll1); | 877 | propagate_rate(&ck_dpll1); |
876 | /* Cache rates for clocks connected to ck_ref (not dpll1) */ | 878 | /* Cache rates for clocks connected to ck_ref (not dpll1) */ |
877 | propagate_rate(&ck_ref); | 879 | propagate_rate(&ck_ref); |
878 | printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): " | 880 | omap1_show_rates(); |
879 | "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n", | ||
880 | ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10, | ||
881 | ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10, | ||
882 | arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10); | ||
883 | |||
884 | if (machine_is_omap_perseus2() || machine_is_omap_fsample()) { | 881 | if (machine_is_omap_perseus2() || machine_is_omap_fsample()) { |
885 | /* Select slicer output as OMAP input clock */ | 882 | /* Select slicer output as OMAP input clock */ |
886 | omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, | 883 | omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, |
@@ -925,3 +922,21 @@ int __init omap1_clk_init(void) | |||
925 | 922 | ||
926 | return 0; | 923 | return 0; |
927 | } | 924 | } |
925 | |||
926 | #define OMAP1_DPLL1_SANE_VALUE 60000000 | ||
927 | |||
928 | void __init omap1_clk_late_init(void) | ||
929 | { | ||
930 | if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE) | ||
931 | return; | ||
932 | |||
933 | /* Find the highest supported frequency and enable it */ | ||
934 | if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) { | ||
935 | pr_err("System frequencies not set, using default. Check your config.\n"); | ||
936 | omap_writew(0x2290, DPLL_CTL); | ||
937 | omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL); | ||
938 | ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE; | ||
939 | } | ||
940 | propagate_rate(&ck_dpll1); | ||
941 | omap1_show_rates(); | ||
942 | } | ||
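The clock_data.c changes above replace the compile-time CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER choice with a two-phase runtime scheme: omap1_clk_init() trusts whatever rate the bootloader programmed, and omap1_clk_late_init() reprograms DPLL1 only if it sits below the 60 MHz sanity threshold, once the SRAM code needed for reprogramming is available (the devices.c hunk below calls it right after omap_sram_init()). A minimal sketch of that ordering, using hypothetical helper names rather than the kernel API:

/* Two-phase clock bring-up, illustrative only; the helpers are hypothetical. */
static unsigned long dpll1_rate;

void early_clk_init(void)
{
	dpll1_rate = rate_programmed_by_bootloader();	/* phase 1: trust the bootloader */
	propagate_rates(dpll1_rate);
}

void late_clk_init(void)				/* runs after SRAM is initialized */
{
	if (dpll1_rate >= 60000000)			/* already sane, nothing to do */
		return;
	dpll1_rate = highest_supported_rate();		/* phase 2: reprogram the PLL */
	propagate_rates(dpll1_rate);
}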
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c index 48ef9888e820..475cb2f50d87 100644 --- a/arch/arm/mach-omap1/devices.c +++ b/arch/arm/mach-omap1/devices.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <plat/omap7xx.h> | 30 | #include <plat/omap7xx.h> |
31 | #include <plat/mcbsp.h> | 31 | #include <plat/mcbsp.h> |
32 | 32 | ||
33 | #include "clock.h" | ||
34 | |||
33 | /*-------------------------------------------------------------------------*/ | 35 | /*-------------------------------------------------------------------------*/ |
34 | 36 | ||
35 | #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) | 37 | #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) |
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void) | |||
293 | return -ENODEV; | 295 | return -ENODEV; |
294 | 296 | ||
295 | omap_sram_init(); | 297 | omap_sram_init(); |
298 | omap1_clk_late_init(); | ||
296 | 299 | ||
297 | /* please keep these calls, and their implementations above, | 300 | /* please keep these calls, and their implementations above, |
298 | * in alphabetical order so they're easier to sort through. | 301 | * in alphabetical order so they're easier to sort through. |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 503414718905..e1293aa513d3 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA | |||
334 | config OMAP3_EMU | 334 | config OMAP3_EMU |
335 | bool "OMAP3 debugging peripherals" | 335 | bool "OMAP3 debugging peripherals" |
336 | depends on ARCH_OMAP3 | 336 | depends on ARCH_OMAP3 |
337 | select ARM_AMBA | ||
337 | select OC_ETM | 338 | select OC_ETM |
338 | help | 339 | help |
339 | Say Y here to enable debugging hardware of omap3 | 340 | Say Y here to enable debugging hardware of omap3 |
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 69ab1c069134..b009f17dee56 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Common support | 5 | # Common support |
6 | obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \ | 6 | obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \ |
7 | common.o gpio.o dma.o wd_timer.o | 7 | common.o gpio.o dma.o wd_timer.o display.o |
8 | 8 | ||
9 | omap-2-3-common = irq.o sdrc.o | 9 | omap-2-3-common = irq.o sdrc.o |
10 | hwmod-common = omap_hwmod.o \ | 10 | hwmod-common = omap_hwmod.o \ |
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o | |||
264 | obj-y += $(smsc911x-m) $(smsc911x-y) | 264 | obj-y += $(smsc911x-m) $(smsc911x-y) |
265 | obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o | 265 | obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o |
266 | 266 | ||
267 | disp-$(CONFIG_OMAP2_DSS) := display.o | ||
268 | obj-y += $(disp-m) $(disp-y) | ||
269 | |||
270 | obj-y += common-board-devices.o twl-common.o | 267 | obj-y += common-board-devices.o twl-common.o |
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 1fe35c24fba2..942bb4f19f9f 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
26 | #include <linux/cpuidle.h> | 26 | #include <linux/cpuidle.h> |
27 | #include <linux/export.h> | ||
27 | 28 | ||
28 | #include <plat/prcm.h> | 29 | #include <plat/prcm.h> |
29 | #include <plat/irqs.h> | 30 | #include <plat/irqs.h> |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index adb2756e242f..dce9905d64bb 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -27,8 +27,35 @@ | |||
27 | #include <plat/omap_hwmod.h> | 27 | #include <plat/omap_hwmod.h> |
28 | #include <plat/omap_device.h> | 28 | #include <plat/omap_device.h> |
29 | #include <plat/omap-pm.h> | 29 | #include <plat/omap-pm.h> |
30 | #include <plat/common.h> | ||
30 | 31 | ||
31 | #include "control.h" | 32 | #include "control.h" |
33 | #include "display.h" | ||
34 | |||
35 | #define DISPC_CONTROL 0x0040 | ||
36 | #define DISPC_CONTROL2 0x0238 | ||
37 | #define DISPC_IRQSTATUS 0x0018 | ||
38 | |||
39 | #define DSS_SYSCONFIG 0x10 | ||
40 | #define DSS_SYSSTATUS 0x14 | ||
41 | #define DSS_CONTROL 0x40 | ||
42 | #define DSS_SDI_CONTROL 0x44 | ||
43 | #define DSS_PLL_CONTROL 0x48 | ||
44 | |||
45 | #define LCD_EN_MASK (0x1 << 0) | ||
46 | #define DIGIT_EN_MASK (0x1 << 1) | ||
47 | |||
48 | #define FRAMEDONE_IRQ_SHIFT 0 | ||
49 | #define EVSYNC_EVEN_IRQ_SHIFT 2 | ||
50 | #define EVSYNC_ODD_IRQ_SHIFT 3 | ||
51 | #define FRAMEDONE2_IRQ_SHIFT 22 | ||
52 | #define FRAMEDONETV_IRQ_SHIFT 24 | ||
53 | |||
54 | /* | ||
55 | * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC | ||
56 | * reset before deciding that something has gone wrong | ||
57 | */ | ||
58 | #define FRAMEDONE_IRQ_TIMEOUT 100 | ||
32 | 59 | ||
33 | static struct platform_device omap_display_device = { | 60 | static struct platform_device omap_display_device = { |
34 | .name = "omapdss", | 61 | .name = "omapdss", |
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data) | |||
172 | 199 | ||
173 | return r; | 200 | return r; |
174 | } | 201 | } |
202 | |||
203 | static void dispc_disable_outputs(void) | ||
204 | { | ||
205 | u32 v, irq_mask = 0; | ||
206 | bool lcd_en, digit_en, lcd2_en = false; | ||
207 | int i; | ||
208 | struct omap_dss_dispc_dev_attr *da; | ||
209 | struct omap_hwmod *oh; | ||
210 | |||
211 | oh = omap_hwmod_lookup("dss_dispc"); | ||
212 | if (!oh) { | ||
213 | WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n"); | ||
214 | return; | ||
215 | } | ||
216 | |||
217 | if (!oh->dev_attr) { | ||
218 | pr_err("display: could not disable outputs during reset due to missing dev_attr\n"); | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr; | ||
223 | |||
224 | /* store value of LCDENABLE and DIGITENABLE bits */ | ||
225 | v = omap_hwmod_read(oh, DISPC_CONTROL); | ||
226 | lcd_en = v & LCD_EN_MASK; | ||
227 | digit_en = v & DIGIT_EN_MASK; | ||
228 | |||
229 | /* store value of LCDENABLE for LCD2 */ | ||
230 | if (da->manager_count > 2) { | ||
231 | v = omap_hwmod_read(oh, DISPC_CONTROL2); | ||
232 | lcd2_en = v & LCD_EN_MASK; | ||
233 | } | ||
234 | |||
235 | if (!(lcd_en | digit_en | lcd2_en)) | ||
236 | return; /* no managers currently enabled */ | ||
237 | |||
238 | /* | ||
239 | * If any manager was enabled, we need to disable it before | ||
240 | * DSS clocks are disabled or DISPC module is reset | ||
241 | */ | ||
242 | if (lcd_en) | ||
243 | irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT; | ||
244 | |||
245 | if (digit_en) { | ||
246 | if (da->has_framedonetv_irq) { | ||
247 | irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT; | ||
248 | } else { | ||
249 | irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT | | ||
250 | 1 << EVSYNC_ODD_IRQ_SHIFT; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | if (lcd2_en) | ||
255 | irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT; | ||
256 | |||
257 | /* | ||
258 | * clear any previous FRAMEDONE, FRAMEDONETV, | ||
259 | * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts | ||
260 | */ | ||
261 | omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS); | ||
262 | |||
263 | /* disable LCD and TV managers */ | ||
264 | v = omap_hwmod_read(oh, DISPC_CONTROL); | ||
265 | v &= ~(LCD_EN_MASK | DIGIT_EN_MASK); | ||
266 | omap_hwmod_write(v, oh, DISPC_CONTROL); | ||
267 | |||
268 | /* disable LCD2 manager */ | ||
269 | if (da->manager_count > 2) { | ||
270 | v = omap_hwmod_read(oh, DISPC_CONTROL2); | ||
271 | v &= ~LCD_EN_MASK; | ||
272 | omap_hwmod_write(v, oh, DISPC_CONTROL2); | ||
273 | } | ||
274 | |||
275 | i = 0; | ||
276 | while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) != | ||
277 | irq_mask) { | ||
278 | i++; | ||
279 | if (i > FRAMEDONE_IRQ_TIMEOUT) { | ||
280 | pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n"); | ||
281 | break; | ||
282 | } | ||
283 | mdelay(1); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | #define MAX_MODULE_SOFTRESET_WAIT 10000 | ||
288 | int omap_dss_reset(struct omap_hwmod *oh) | ||
289 | { | ||
290 | struct omap_hwmod_opt_clk *oc; | ||
291 | int c = 0; | ||
292 | int i, r; | ||
293 | |||
294 | if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) { | ||
295 | pr_err("dss_core: hwmod data doesn't contain reset data\n"); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | |||
299 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
300 | if (oc->_clk) | ||
301 | clk_enable(oc->_clk); | ||
302 | |||
303 | dispc_disable_outputs(); | ||
304 | |||
305 | /* clear SDI registers */ | ||
306 | if (cpu_is_omap3430()) { | ||
307 | omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL); | ||
308 | omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * clear DSS_CONTROL register to switch DSS clock sources to | ||
313 | * PRCM clock, if any | ||
314 | */ | ||
315 | omap_hwmod_write(0x0, oh, DSS_CONTROL); | ||
316 | |||
317 | omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) | ||
318 | & SYSS_RESETDONE_MASK), | ||
319 | MAX_MODULE_SOFTRESET_WAIT, c); | ||
320 | |||
321 | if (c == MAX_MODULE_SOFTRESET_WAIT) | ||
322 | pr_warning("dss_core: waiting for reset to finish failed\n"); | ||
323 | else | ||
324 | pr_debug("dss_core: softreset done\n"); | ||
325 | |||
326 | for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) | ||
327 | if (oc->_clk) | ||
328 | clk_disable(oc->_clk); | ||
329 | |||
330 | r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0; | ||
331 | |||
332 | return r; | ||
333 | } | ||
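The reset path added in display.c uses the same bounded-poll shape twice: once waiting for the FRAMEDONE/EVSYNC/FRAMEDONE2 bits after the display managers are switched off, and once waiting for SYSS_RESETDONE via omap_test_timeout(). A generic sketch of that pattern, with read_reg() standing in for the real omap_hwmod_read() accessor:

/* Bounded poll: wait until all bits in mask are set, give up after budget_ms.
 * read_reg() is a stand-in; mdelay() mirrors the busy-wait in the loop above. */
static int poll_bits_set(unsigned int reg, unsigned int mask, unsigned int budget_ms)
{
	unsigned int elapsed = 0;

	while ((read_reg(reg) & mask) != mask) {
		if (++elapsed > budget_ms)
			return -1;	/* timed out; the caller logs and carries on */
		mdelay(1);
	}
	return 0;
}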
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h new file mode 100644 index 000000000000..b871b017b352 --- /dev/null +++ b/arch/arm/mach-omap2/display.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * display.h - OMAP2+ integration-specific DSS header | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H | ||
20 | #define __ARCH_ARM_MACH_OMAP2_DISPLAY_H | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | |||
24 | struct omap_dss_dispc_dev_attr { | ||
25 | u8 manager_count; | ||
26 | bool has_framedonetv_irq; | ||
27 | }; | ||
28 | |||
29 | #endif | ||
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/arch/arm/mach-omap2/io.h +++ /dev/null | |||
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 6b3088db83b7..207a2ff9a8c4 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh) | |||
749 | ohii = &oh->mpu_irqs[i++]; | 749 | ohii = &oh->mpu_irqs[i++]; |
750 | } while (ohii->irq != -1); | 750 | } while (ohii->irq != -1); |
751 | 751 | ||
752 | return i; | 752 | return i-1; |
753 | } | 753 | } |
754 | 754 | ||
755 | /** | 755 | /** |
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh) | |||
772 | ohdi = &oh->sdma_reqs[i++]; | 772 | ohdi = &oh->sdma_reqs[i++]; |
773 | } while (ohdi->dma_req != -1); | 773 | } while (ohdi->dma_req != -1); |
774 | 774 | ||
775 | return i; | 775 | return i-1; |
776 | } | 776 | } |
777 | 777 | ||
778 | /** | 778 | /** |
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os) | |||
795 | mem = &os->addr[i++]; | 795 | mem = &os->addr[i++]; |
796 | } while (mem->pa_start != mem->pa_end); | 796 | } while (mem->pa_start != mem->pa_end); |
797 | 797 | ||
798 | return i; | 798 | return i-1; |
799 | } | 799 | } |
800 | 800 | ||
801 | /** | 801 | /** |
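The three omap_hwmod.c hunks fix the same off-by-one: each array is terminated by a sentinel entry (irq == -1, dma_req == -1, or pa_start == pa_end), and the do/while loop increments the index once more when it reads the sentinel, so the element count is i - 1 rather than i. A stripped-down illustration with a hypothetical sentinel-terminated list:

/* Hypothetical sentinel-terminated list, mirroring oh->mpu_irqs. */
struct irq_info { int irq; };

static int count_irqs(const struct irq_info *irqs)
{
	int i = 0;

	if (!irqs)
		return 0;

	do {
		/* the index advances even on the sentinel entry ... */
	} while (irqs[i++].irq != -1);

	return i - 1;	/* ... so the sentinel must not be counted */
}

For a list { {37}, {38}, {-1} } the loop leaves i == 3, and the function correctly reports 2 entries.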
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index 6d7206213525..a5409ce3f323 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c | |||
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = { | |||
875 | }; | 875 | }; |
876 | 876 | ||
877 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { | 877 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { |
878 | /* | ||
879 | * The DSS HW needs all DSS clocks enabled during reset. The dss_core | ||
880 | * driver does not use these clocks. | ||
881 | */ | ||
878 | { .role = "tv_clk", .clk = "dss_54m_fck" }, | 882 | { .role = "tv_clk", .clk = "dss_54m_fck" }, |
879 | { .role = "sys_clk", .clk = "dss2_fck" }, | 883 | { .role = "sys_clk", .clk = "dss2_fck" }, |
880 | }; | 884 | }; |
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = { | |||
899 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves), | 903 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves), |
900 | .masters = omap2420_dss_masters, | 904 | .masters = omap2420_dss_masters, |
901 | .masters_cnt = ARRAY_SIZE(omap2420_dss_masters), | 905 | .masters_cnt = ARRAY_SIZE(omap2420_dss_masters), |
902 | .flags = HWMOD_NO_IDLEST, | 906 | .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET, |
903 | }; | 907 | }; |
904 | 908 | ||
905 | /* l4_core -> dss_dispc */ | 909 | /* l4_core -> dss_dispc */ |
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = { | |||
939 | .slaves = omap2420_dss_dispc_slaves, | 943 | .slaves = omap2420_dss_dispc_slaves, |
940 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves), | 944 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves), |
941 | .flags = HWMOD_NO_IDLEST, | 945 | .flags = HWMOD_NO_IDLEST, |
946 | .dev_attr = &omap2_3_dss_dispc_dev_attr | ||
942 | }; | 947 | }; |
943 | 948 | ||
944 | /* l4_core -> dss_rfbi */ | 949 | /* l4_core -> dss_rfbi */ |
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = { | |||
961 | &omap2420_l4_core__dss_rfbi, | 966 | &omap2420_l4_core__dss_rfbi, |
962 | }; | 967 | }; |
963 | 968 | ||
969 | static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { | ||
970 | { .role = "ick", .clk = "dss_ick" }, | ||
971 | }; | ||
972 | |||
964 | static struct omap_hwmod omap2420_dss_rfbi_hwmod = { | 973 | static struct omap_hwmod omap2420_dss_rfbi_hwmod = { |
965 | .name = "dss_rfbi", | 974 | .name = "dss_rfbi", |
966 | .class = &omap2_rfbi_hwmod_class, | 975 | .class = &omap2_rfbi_hwmod_class, |
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = { | |||
972 | .module_offs = CORE_MOD, | 981 | .module_offs = CORE_MOD, |
973 | }, | 982 | }, |
974 | }, | 983 | }, |
984 | .opt_clks = dss_rfbi_opt_clks, | ||
985 | .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), | ||
975 | .slaves = omap2420_dss_rfbi_slaves, | 986 | .slaves = omap2420_dss_rfbi_slaves, |
976 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves), | 987 | .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves), |
977 | .flags = HWMOD_NO_IDLEST, | 988 | .flags = HWMOD_NO_IDLEST, |
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = { | |||
981 | static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = { | 992 | static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = { |
982 | .master = &omap2420_l4_core_hwmod, | 993 | .master = &omap2420_l4_core_hwmod, |
983 | .slave = &omap2420_dss_venc_hwmod, | 994 | .slave = &omap2420_dss_venc_hwmod, |
984 | .clk = "dss_54m_fck", | 995 | .clk = "dss_ick", |
985 | .addr = omap2_dss_venc_addrs, | 996 | .addr = omap2_dss_venc_addrs, |
986 | .fw = { | 997 | .fw = { |
987 | .omap2 = { | 998 | .omap2 = { |
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = { | |||
1001 | static struct omap_hwmod omap2420_dss_venc_hwmod = { | 1012 | static struct omap_hwmod omap2420_dss_venc_hwmod = { |
1002 | .name = "dss_venc", | 1013 | .name = "dss_venc", |
1003 | .class = &omap2_venc_hwmod_class, | 1014 | .class = &omap2_venc_hwmod_class, |
1004 | .main_clk = "dss1_fck", | 1015 | .main_clk = "dss_54m_fck", |
1005 | .prcm = { | 1016 | .prcm = { |
1006 | .omap2 = { | 1017 | .omap2 = { |
1007 | .prcm_reg_id = 1, | 1018 | .prcm_reg_id = 1, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index a2580d01c3ff..c4f56cb60d7d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c | |||
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = { | |||
942 | }; | 942 | }; |
943 | 943 | ||
944 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { | 944 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { |
945 | /* | ||
946 | * The DSS HW needs all DSS clocks enabled during reset. The dss_core | ||
947 | * driver does not use these clocks. | ||
948 | */ | ||
945 | { .role = "tv_clk", .clk = "dss_54m_fck" }, | 949 | { .role = "tv_clk", .clk = "dss_54m_fck" }, |
946 | { .role = "sys_clk", .clk = "dss2_fck" }, | 950 | { .role = "sys_clk", .clk = "dss2_fck" }, |
947 | }; | 951 | }; |
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = { | |||
966 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves), | 970 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves), |
967 | .masters = omap2430_dss_masters, | 971 | .masters = omap2430_dss_masters, |
968 | .masters_cnt = ARRAY_SIZE(omap2430_dss_masters), | 972 | .masters_cnt = ARRAY_SIZE(omap2430_dss_masters), |
969 | .flags = HWMOD_NO_IDLEST, | 973 | .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET, |
970 | }; | 974 | }; |
971 | 975 | ||
972 | /* l4_core -> dss_dispc */ | 976 | /* l4_core -> dss_dispc */ |
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = { | |||
1000 | .slaves = omap2430_dss_dispc_slaves, | 1004 | .slaves = omap2430_dss_dispc_slaves, |
1001 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves), | 1005 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves), |
1002 | .flags = HWMOD_NO_IDLEST, | 1006 | .flags = HWMOD_NO_IDLEST, |
1007 | .dev_attr = &omap2_3_dss_dispc_dev_attr | ||
1003 | }; | 1008 | }; |
1004 | 1009 | ||
1005 | /* l4_core -> dss_rfbi */ | 1010 | /* l4_core -> dss_rfbi */ |
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = { | |||
1016 | &omap2430_l4_core__dss_rfbi, | 1021 | &omap2430_l4_core__dss_rfbi, |
1017 | }; | 1022 | }; |
1018 | 1023 | ||
1024 | static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { | ||
1025 | { .role = "ick", .clk = "dss_ick" }, | ||
1026 | }; | ||
1027 | |||
1019 | static struct omap_hwmod omap2430_dss_rfbi_hwmod = { | 1028 | static struct omap_hwmod omap2430_dss_rfbi_hwmod = { |
1020 | .name = "dss_rfbi", | 1029 | .name = "dss_rfbi", |
1021 | .class = &omap2_rfbi_hwmod_class, | 1030 | .class = &omap2_rfbi_hwmod_class, |
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = { | |||
1027 | .module_offs = CORE_MOD, | 1036 | .module_offs = CORE_MOD, |
1028 | }, | 1037 | }, |
1029 | }, | 1038 | }, |
1039 | .opt_clks = dss_rfbi_opt_clks, | ||
1040 | .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), | ||
1030 | .slaves = omap2430_dss_rfbi_slaves, | 1041 | .slaves = omap2430_dss_rfbi_slaves, |
1031 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves), | 1042 | .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves), |
1032 | .flags = HWMOD_NO_IDLEST, | 1043 | .flags = HWMOD_NO_IDLEST, |
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = { | |||
1036 | static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = { | 1047 | static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = { |
1037 | .master = &omap2430_l4_core_hwmod, | 1048 | .master = &omap2430_l4_core_hwmod, |
1038 | .slave = &omap2430_dss_venc_hwmod, | 1049 | .slave = &omap2430_dss_venc_hwmod, |
1039 | .clk = "dss_54m_fck", | 1050 | .clk = "dss_ick", |
1040 | .addr = omap2_dss_venc_addrs, | 1051 | .addr = omap2_dss_venc_addrs, |
1041 | .flags = OCPIF_SWSUP_IDLE, | 1052 | .flags = OCPIF_SWSUP_IDLE, |
1042 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 1053 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = { | |||
1050 | static struct omap_hwmod omap2430_dss_venc_hwmod = { | 1061 | static struct omap_hwmod omap2430_dss_venc_hwmod = { |
1051 | .name = "dss_venc", | 1062 | .name = "dss_venc", |
1052 | .class = &omap2_venc_hwmod_class, | 1063 | .class = &omap2_venc_hwmod_class, |
1053 | .main_clk = "dss1_fck", | 1064 | .main_clk = "dss_54m_fck", |
1054 | .prcm = { | 1065 | .prcm = { |
1055 | .omap2 = { | 1066 | .omap2 = { |
1056 | .prcm_reg_id = 1, | 1067 | .prcm_reg_id = 1, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c index c451729d289a..c11273da5dcc 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <plat/omap_hwmod.h> | 11 | #include <plat/omap_hwmod.h> |
12 | #include <plat/serial.h> | 12 | #include <plat/serial.h> |
13 | #include <plat/dma.h> | 13 | #include <plat/dma.h> |
14 | #include <plat/common.h> | ||
14 | 15 | ||
15 | #include <mach/irqs.h> | 16 | #include <mach/irqs.h> |
16 | 17 | ||
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = { | |||
43 | .rev_offs = 0x0000, | 44 | .rev_offs = 0x0000, |
44 | .sysc_offs = 0x0010, | 45 | .sysc_offs = 0x0010, |
45 | .syss_offs = 0x0014, | 46 | .syss_offs = 0x0014, |
46 | .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), | 47 | .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | |
48 | SYSS_HAS_RESET_STATUS), | ||
47 | .sysc_fields = &omap_hwmod_sysc_type1, | 49 | .sysc_fields = &omap_hwmod_sysc_type1, |
48 | }; | 50 | }; |
49 | 51 | ||
50 | struct omap_hwmod_class omap2_dss_hwmod_class = { | 52 | struct omap_hwmod_class omap2_dss_hwmod_class = { |
51 | .name = "dss", | 53 | .name = "dss", |
52 | .sysc = &omap2_dss_sysc, | 54 | .sysc = &omap2_dss_sysc, |
55 | .reset = omap_dss_reset, | ||
53 | }; | 56 | }; |
54 | 57 | ||
55 | /* | 58 | /* |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index bc9035ec87fc..7f8915ad5099 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = { | |||
1369 | }; | 1369 | }; |
1370 | 1370 | ||
1371 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { | 1371 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { |
1372 | { .role = "tv_clk", .clk = "dss_tv_fck" }, | 1372 | /* |
1373 | { .role = "video_clk", .clk = "dss_96m_fck" }, | 1373 | * The DSS HW needs all DSS clocks enabled during reset. The dss_core |
1374 | * driver does not use these clocks. | ||
1375 | */ | ||
1374 | { .role = "sys_clk", .clk = "dss2_alwon_fck" }, | 1376 | { .role = "sys_clk", .clk = "dss2_alwon_fck" }, |
1377 | { .role = "tv_clk", .clk = "dss_tv_fck" }, | ||
1378 | /* required only on OMAP3430 */ | ||
1379 | { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, | ||
1375 | }; | 1380 | }; |
1376 | 1381 | ||
1377 | static struct omap_hwmod omap3430es1_dss_core_hwmod = { | 1382 | static struct omap_hwmod omap3430es1_dss_core_hwmod = { |
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = { | |||
1394 | .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves), | 1399 | .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves), |
1395 | .masters = omap3xxx_dss_masters, | 1400 | .masters = omap3xxx_dss_masters, |
1396 | .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), | 1401 | .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), |
1397 | .flags = HWMOD_NO_IDLEST, | 1402 | .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET, |
1398 | }; | 1403 | }; |
1399 | 1404 | ||
1400 | static struct omap_hwmod omap3xxx_dss_core_hwmod = { | 1405 | static struct omap_hwmod omap3xxx_dss_core_hwmod = { |
1401 | .name = "dss_core", | 1406 | .name = "dss_core", |
1407 | .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, | ||
1402 | .class = &omap2_dss_hwmod_class, | 1408 | .class = &omap2_dss_hwmod_class, |
1403 | .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ | 1409 | .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ |
1404 | .sdma_reqs = omap3xxx_dss_sdma_chs, | 1410 | .sdma_reqs = omap3xxx_dss_sdma_chs, |
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { | |||
1456 | .slaves = omap3xxx_dss_dispc_slaves, | 1462 | .slaves = omap3xxx_dss_dispc_slaves, |
1457 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves), | 1463 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves), |
1458 | .flags = HWMOD_NO_IDLEST, | 1464 | .flags = HWMOD_NO_IDLEST, |
1465 | .dev_attr = &omap2_3_dss_dispc_dev_attr | ||
1459 | }; | 1466 | }; |
1460 | 1467 | ||
1461 | /* | 1468 | /* |
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = { | |||
1486 | static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = { | 1493 | static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = { |
1487 | .master = &omap3xxx_l4_core_hwmod, | 1494 | .master = &omap3xxx_l4_core_hwmod, |
1488 | .slave = &omap3xxx_dss_dsi1_hwmod, | 1495 | .slave = &omap3xxx_dss_dsi1_hwmod, |
1496 | .clk = "dss_ick", | ||
1489 | .addr = omap3xxx_dss_dsi1_addrs, | 1497 | .addr = omap3xxx_dss_dsi1_addrs, |
1490 | .fw = { | 1498 | .fw = { |
1491 | .omap2 = { | 1499 | .omap2 = { |
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = { | |||
1502 | &omap3xxx_l4_core__dss_dsi1, | 1510 | &omap3xxx_l4_core__dss_dsi1, |
1503 | }; | 1511 | }; |
1504 | 1512 | ||
1513 | static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = { | ||
1514 | { .role = "sys_clk", .clk = "dss2_alwon_fck" }, | ||
1515 | }; | ||
1516 | |||
1505 | static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { | 1517 | static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { |
1506 | .name = "dss_dsi1", | 1518 | .name = "dss_dsi1", |
1507 | .class = &omap3xxx_dsi_hwmod_class, | 1519 | .class = &omap3xxx_dsi_hwmod_class, |
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { | |||
1514 | .module_offs = OMAP3430_DSS_MOD, | 1526 | .module_offs = OMAP3430_DSS_MOD, |
1515 | }, | 1527 | }, |
1516 | }, | 1528 | }, |
1529 | .opt_clks = dss_dsi1_opt_clks, | ||
1530 | .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks), | ||
1517 | .slaves = omap3xxx_dss_dsi1_slaves, | 1531 | .slaves = omap3xxx_dss_dsi1_slaves, |
1518 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves), | 1532 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves), |
1519 | .flags = HWMOD_NO_IDLEST, | 1533 | .flags = HWMOD_NO_IDLEST, |
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = { | |||
1540 | &omap3xxx_l4_core__dss_rfbi, | 1554 | &omap3xxx_l4_core__dss_rfbi, |
1541 | }; | 1555 | }; |
1542 | 1556 | ||
1557 | static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { | ||
1558 | { .role = "ick", .clk = "dss_ick" }, | ||
1559 | }; | ||
1560 | |||
1543 | static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { | 1561 | static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { |
1544 | .name = "dss_rfbi", | 1562 | .name = "dss_rfbi", |
1545 | .class = &omap2_rfbi_hwmod_class, | 1563 | .class = &omap2_rfbi_hwmod_class, |
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { | |||
1551 | .module_offs = OMAP3430_DSS_MOD, | 1569 | .module_offs = OMAP3430_DSS_MOD, |
1552 | }, | 1570 | }, |
1553 | }, | 1571 | }, |
1572 | .opt_clks = dss_rfbi_opt_clks, | ||
1573 | .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), | ||
1554 | .slaves = omap3xxx_dss_rfbi_slaves, | 1574 | .slaves = omap3xxx_dss_rfbi_slaves, |
1555 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves), | 1575 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves), |
1556 | .flags = HWMOD_NO_IDLEST, | 1576 | .flags = HWMOD_NO_IDLEST, |
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { | |||
1560 | static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { | 1580 | static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { |
1561 | .master = &omap3xxx_l4_core_hwmod, | 1581 | .master = &omap3xxx_l4_core_hwmod, |
1562 | .slave = &omap3xxx_dss_venc_hwmod, | 1582 | .slave = &omap3xxx_dss_venc_hwmod, |
1563 | .clk = "dss_tv_fck", | 1583 | .clk = "dss_ick", |
1564 | .addr = omap2_dss_venc_addrs, | 1584 | .addr = omap2_dss_venc_addrs, |
1565 | .fw = { | 1585 | .fw = { |
1566 | .omap2 = { | 1586 | .omap2 = { |
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = { | |||
1578 | &omap3xxx_l4_core__dss_venc, | 1598 | &omap3xxx_l4_core__dss_venc, |
1579 | }; | 1599 | }; |
1580 | 1600 | ||
1601 | static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = { | ||
1602 | /* required only on OMAP3430 */ | ||
1603 | { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, | ||
1604 | }; | ||
1605 | |||
1581 | static struct omap_hwmod omap3xxx_dss_venc_hwmod = { | 1606 | static struct omap_hwmod omap3xxx_dss_venc_hwmod = { |
1582 | .name = "dss_venc", | 1607 | .name = "dss_venc", |
1583 | .class = &omap2_venc_hwmod_class, | 1608 | .class = &omap2_venc_hwmod_class, |
1584 | .main_clk = "dss1_alwon_fck", | 1609 | .main_clk = "dss_tv_fck", |
1585 | .prcm = { | 1610 | .prcm = { |
1586 | .omap2 = { | 1611 | .omap2 = { |
1587 | .prcm_reg_id = 1, | 1612 | .prcm_reg_id = 1, |
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = { | |||
1589 | .module_offs = OMAP3430_DSS_MOD, | 1614 | .module_offs = OMAP3430_DSS_MOD, |
1590 | }, | 1615 | }, |
1591 | }, | 1616 | }, |
1617 | .opt_clks = dss_venc_opt_clks, | ||
1618 | .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks), | ||
1592 | .slaves = omap3xxx_dss_venc_slaves, | 1619 | .slaves = omap3xxx_dss_venc_slaves, |
1593 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves), | 1620 | .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves), |
1594 | .flags = HWMOD_NO_IDLEST, | 1621 | .flags = HWMOD_NO_IDLEST, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 7695e5d43316..daaf165af696 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <plat/mmc.h> | 30 | #include <plat/mmc.h> |
31 | #include <plat/i2c.h> | 31 | #include <plat/i2c.h> |
32 | #include <plat/dmtimer.h> | 32 | #include <plat/dmtimer.h> |
33 | #include <plat/common.h> | ||
33 | 34 | ||
34 | #include "omap_hwmod_common_data.h" | 35 | #include "omap_hwmod_common_data.h" |
35 | 36 | ||
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = { | |||
1187 | static struct omap_hwmod_class omap44xx_dss_hwmod_class = { | 1188 | static struct omap_hwmod_class omap44xx_dss_hwmod_class = { |
1188 | .name = "dss", | 1189 | .name = "dss", |
1189 | .sysc = &omap44xx_dss_sysc, | 1190 | .sysc = &omap44xx_dss_sysc, |
1191 | .reset = omap_dss_reset, | ||
1190 | }; | 1192 | }; |
1191 | 1193 | ||
1192 | /* dss */ | 1194 | /* dss */ |
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = { | |||
1240 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { | 1242 | static struct omap_hwmod_opt_clk dss_opt_clks[] = { |
1241 | { .role = "sys_clk", .clk = "dss_sys_clk" }, | 1243 | { .role = "sys_clk", .clk = "dss_sys_clk" }, |
1242 | { .role = "tv_clk", .clk = "dss_tv_clk" }, | 1244 | { .role = "tv_clk", .clk = "dss_tv_clk" }, |
1243 | { .role = "dss_clk", .clk = "dss_dss_clk" }, | 1245 | { .role = "hdmi_clk", .clk = "dss_48mhz_clk" }, |
1244 | { .role = "video_clk", .clk = "dss_48mhz_clk" }, | ||
1245 | }; | 1246 | }; |
1246 | 1247 | ||
1247 | static struct omap_hwmod omap44xx_dss_hwmod = { | 1248 | static struct omap_hwmod omap44xx_dss_hwmod = { |
1248 | .name = "dss_core", | 1249 | .name = "dss_core", |
1250 | .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, | ||
1249 | .class = &omap44xx_dss_hwmod_class, | 1251 | .class = &omap44xx_dss_hwmod_class, |
1250 | .clkdm_name = "l3_dss_clkdm", | 1252 | .clkdm_name = "l3_dss_clkdm", |
1251 | .main_clk = "dss_dss_clk", | 1253 | .main_clk = "dss_dss_clk", |
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = { | |||
1325 | { } | 1327 | { } |
1326 | }; | 1328 | }; |
1327 | 1329 | ||
1330 | static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = { | ||
1331 | .manager_count = 3, | ||
1332 | .has_framedonetv_irq = 1 | ||
1333 | }; | ||
1334 | |||
1328 | /* l4_per -> dss_dispc */ | 1335 | /* l4_per -> dss_dispc */ |
1329 | static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = { | 1336 | static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = { |
1330 | .master = &omap44xx_l4_per_hwmod, | 1337 | .master = &omap44xx_l4_per_hwmod, |
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = { | |||
1340 | &omap44xx_l4_per__dss_dispc, | 1347 | &omap44xx_l4_per__dss_dispc, |
1341 | }; | 1348 | }; |
1342 | 1349 | ||
1343 | static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = { | ||
1344 | { .role = "sys_clk", .clk = "dss_sys_clk" }, | ||
1345 | { .role = "tv_clk", .clk = "dss_tv_clk" }, | ||
1346 | { .role = "hdmi_clk", .clk = "dss_48mhz_clk" }, | ||
1347 | }; | ||
1348 | |||
1349 | static struct omap_hwmod omap44xx_dss_dispc_hwmod = { | 1350 | static struct omap_hwmod omap44xx_dss_dispc_hwmod = { |
1350 | .name = "dss_dispc", | 1351 | .name = "dss_dispc", |
1351 | .class = &omap44xx_dispc_hwmod_class, | 1352 | .class = &omap44xx_dispc_hwmod_class, |
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = { | |||
1359 | .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, | 1360 | .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, |
1360 | }, | 1361 | }, |
1361 | }, | 1362 | }, |
1362 | .opt_clks = dss_dispc_opt_clks, | ||
1363 | .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks), | ||
1364 | .slaves = omap44xx_dss_dispc_slaves, | 1363 | .slaves = omap44xx_dss_dispc_slaves, |
1365 | .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), | 1364 | .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), |
1365 | .dev_attr = &omap44xx_dss_dispc_dev_attr | ||
1366 | }; | 1366 | }; |
1367 | 1367 | ||
1368 | /* | 1368 | /* |
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = { | |||
1624 | .clkdm_name = "l3_dss_clkdm", | 1624 | .clkdm_name = "l3_dss_clkdm", |
1625 | .mpu_irqs = omap44xx_dss_hdmi_irqs, | 1625 | .mpu_irqs = omap44xx_dss_hdmi_irqs, |
1626 | .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, | 1626 | .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, |
1627 | .main_clk = "dss_dss_clk", | 1627 | .main_clk = "dss_48mhz_clk", |
1628 | .prcm = { | 1628 | .prcm = { |
1629 | .omap4 = { | 1629 | .omap4 = { |
1630 | .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, | 1630 | .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, |
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = { | |||
1785 | .name = "dss_venc", | 1785 | .name = "dss_venc", |
1786 | .class = &omap44xx_venc_hwmod_class, | 1786 | .class = &omap44xx_venc_hwmod_class, |
1787 | .clkdm_name = "l3_dss_clkdm", | 1787 | .clkdm_name = "l3_dss_clkdm", |
1788 | .main_clk = "dss_dss_clk", | 1788 | .main_clk = "dss_tv_clk", |
1789 | .prcm = { | 1789 | .prcm = { |
1790 | .omap4 = { | 1790 | .omap4 = { |
1791 | .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, | 1791 | .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c index de832ebc93a9..51e5418899fb 100644 --- a/arch/arm/mach-omap2/omap_hwmod_common_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c | |||
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = { | |||
49 | .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT, | 49 | .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = { | ||
53 | .manager_count = 2, | ||
54 | .has_framedonetv_irq = 0 | ||
55 | }; | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h index 39a7c37f4587..ad5d8f04c0b8 100644 --- a/arch/arm/mach-omap2/omap_hwmod_common_data.h +++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #include <plat/omap_hwmod.h> | 17 | #include <plat/omap_hwmod.h> |
18 | 18 | ||
19 | #include "display.h" | ||
20 | |||
19 | /* Common address space across OMAP2xxx */ | 21 | /* Common address space across OMAP2xxx */ |
20 | extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[]; | 22 | extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[]; |
21 | extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[]; | 23 | extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[]; |
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class; | |||
111 | extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class; | 113 | extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class; |
112 | extern struct omap_hwmod_class omap2xxx_mcspi_class; | 114 | extern struct omap_hwmod_class omap2xxx_mcspi_class; |
113 | 115 | ||
116 | extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr; | ||
117 | |||
114 | #endif | 118 | #endif |
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c index 6a66aa5e2a5b..d15225ff5c49 100644 --- a/arch/arm/mach-omap2/omap_l3_noc.c +++ b/arch/arm/mach-omap2/omap_l3_noc.c | |||
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev) | |||
237 | static const struct of_device_id l3_noc_match[] = { | 237 | static const struct of_device_id l3_noc_match[] = { |
238 | {.compatible = "ti,omap4-l3-noc", }, | 238 | {.compatible = "ti,omap4-l3-noc", }, |
239 | {}, | 239 | {}, |
240 | } | 240 | }; |
241 | MODULE_DEVICE_TABLE(of, l3_noc_match); | 241 | MODULE_DEVICE_TABLE(of, l3_noc_match); |
242 | #else | 242 | #else |
243 | #define l3_noc_match NULL | 243 | #define l3_noc_match NULL |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 1e79bdf313e3..00bff46ca48b 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include "powerdomain.h" | 24 | #include "powerdomain.h" |
25 | #include "clockdomain.h" | 25 | #include "clockdomain.h" |
26 | #include "pm.h" | 26 | #include "pm.h" |
27 | #include "twl-common.h" | ||
27 | 28 | ||
28 | static struct omap_device_pm_latency *pm_lats; | 29 | static struct omap_device_pm_latency *pm_lats; |
29 | 30 | ||
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init); | |||
226 | 227 | ||
227 | static int __init omap2_common_pm_late_init(void) | 228 | static int __init omap2_common_pm_late_init(void) |
228 | { | 229 | { |
229 | /* Init the OMAP TWL parameters */ | ||
230 | omap3_twl_init(); | ||
231 | omap4_twl_init(); | ||
232 | |||
233 | /* Init the voltage layer */ | 230 | /* Init the voltage layer */ |
231 | omap_pmic_late_init(); | ||
234 | omap_voltage_late_init(); | 232 | omap_voltage_late_init(); |
235 | 233 | ||
236 | /* Initialize the voltages */ | 234 | /* Initialize the voltages */ |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 6a4f6839a7d9..cf246b39bac7 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data) | |||
139 | sr_write_reg(sr_info, ERRCONFIG_V1, status); | 139 | sr_write_reg(sr_info, ERRCONFIG_V1, status); |
140 | } else if (sr_info->ip_type == SR_TYPE_V2) { | 140 | } else if (sr_info->ip_type == SR_TYPE_V2) { |
141 | /* Read the status bits */ | 141 | /* Read the status bits */ |
142 | sr_read_reg(sr_info, IRQSTATUS); | 142 | status = sr_read_reg(sr_info, IRQSTATUS); |
143 | 143 | ||
144 | /* Clear them by writing back */ | 144 | /* Clear them by writing back */ |
145 | sr_write_reg(sr_info, IRQSTATUS, status); | 145 | sr_write_reg(sr_info, IRQSTATUS, status); |
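The smartreflex fix matters because IRQSTATUS is a write-1-to-clear register: without the assignment, the freshly read pending bits are thrown away and the write-back clears nothing useful, so the V2 interrupt is never acknowledged. The intended read-then-ack pattern, sketched with stand-in accessors:

/* Write-1-to-clear acknowledge: read the pending bits, write the same value
 * back.  read_reg()/write_reg() stand in for sr_read_reg()/sr_write_reg(). */
static void ack_pending_irqs(void)
{
	unsigned int status = read_reg(IRQSTATUS);	/* which bits fired */

	write_reg(IRQSTATUS, status);			/* clear exactly those bits */
}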
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 522435772168..10b20c652e5d 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <plat/usb.h> | 30 | #include <plat/usb.h> |
31 | 31 | ||
32 | #include "twl-common.h" | 32 | #include "twl-common.h" |
33 | #include "pm.h" | ||
33 | 34 | ||
34 | static struct i2c_board_info __initdata pmic_i2c_board_info = { | 35 | static struct i2c_board_info __initdata pmic_i2c_board_info = { |
35 | .addr = 0x48, | 36 | .addr = 0x48, |
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate, | |||
48 | omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); | 49 | omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); |
49 | } | 50 | } |
50 | 51 | ||
52 | void __init omap_pmic_late_init(void) | ||
53 | { | ||
54 | /* Init the OMAP TWL parameters (if PMIC has been registered) */ | |
55 | if (!pmic_i2c_board_info.irq) | ||
56 | return; | ||
57 | |||
58 | omap3_twl_init(); | ||
59 | omap4_twl_init(); | ||
60 | } | ||
61 | |||
51 | #if defined(CONFIG_ARCH_OMAP3) | 62 | #if defined(CONFIG_ARCH_OMAP3) |
52 | static struct twl4030_usb_data omap3_usb_pdata = { | 63 | static struct twl4030_usb_data omap3_usb_pdata = { |
53 | .usb_mode = T2_USB_MODE_ULPI, | 64 | .usb_mode = T2_USB_MODE_ULPI, |
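omap_pmic_late_init() above treats the irq field of pmic_i2c_board_info as a "was a PMIC registered at all" marker, so boards that never call omap_pmic_init() skip the TWL-specific voltage setup instead of poking a chip that is not there. The guard in isolation, with hypothetical names:

/* Run late PMIC setup only if the early registration actually happened. */
static int pmic_irq;			/* set by the early init path */

void pmic_late_init(void)
{
	if (!pmic_irq)			/* no PMIC was registered: nothing to do */
		return;
	configure_pmic_voltages();	/* hypothetical TWL-style late setup */
}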
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h index 5e83a5bd37fb..275dde8cb27a 100644 --- a/arch/arm/mach-omap2/twl-common.h +++ b/arch/arm/mach-omap2/twl-common.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __OMAP_PMIC_COMMON__ | 1 | #ifndef __OMAP_PMIC_COMMON__ |
2 | #define __OMAP_PMIC_COMMON__ | 2 | #define __OMAP_PMIC_COMMON__ |
3 | 3 | ||
4 | #include <plat/irqs.h> | ||
5 | |||
4 | #define TWL_COMMON_PDATA_USB (1 << 0) | 6 | #define TWL_COMMON_PDATA_USB (1 << 0) |
5 | #define TWL_COMMON_PDATA_BCI (1 << 1) | 7 | #define TWL_COMMON_PDATA_BCI (1 << 1) |
6 | #define TWL_COMMON_PDATA_MADC (1 << 2) | 8 | #define TWL_COMMON_PDATA_MADC (1 << 2) |
@@ -30,6 +32,7 @@ struct twl4030_platform_data; | |||
30 | 32 | ||
31 | void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, | 33 | void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, |
32 | struct twl4030_platform_data *pmic_data); | 34 | struct twl4030_platform_data *pmic_data); |
35 | void omap_pmic_late_init(void); | ||
33 | 36 | ||
34 | static inline void omap2_pmic_init(const char *pmic_type, | 37 | static inline void omap2_pmic_init(const char *pmic_type, |
35 | struct twl4030_platform_data *pmic_data) | 38 | struct twl4030_platform_data *pmic_data) |
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index fc0b8544e174..4b81f59a4cba 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c | |||
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {} | |||
307 | /****************************************************************************** | 307 | /****************************************************************************** |
308 | * USB Gadget | 308 | * USB Gadget |
309 | ******************************************************************************/ | 309 | ******************************************************************************/ |
310 | #if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) | 310 | #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) |
311 | static void balloon3_udc_command(int cmd) | 311 | static void balloon3_udc_command(int cmd) |
312 | { | 312 | { |
313 | if (cmd == PXA2XX_UDC_CMD_CONNECT) | 313 | if (cmd == PXA2XX_UDC_CMD_CONNECT) |
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c index 692e1ffc5586..d23b92b80488 100644 --- a/arch/arm/mach-pxa/colibri-pxa320.c +++ b/arch/arm/mach-pxa/colibri-pxa320.c | |||
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void) | |||
146 | static inline void __init colibri_pxa320_init_eth(void) {} | 146 | static inline void __init colibri_pxa320_init_eth(void) {} |
147 | #endif /* CONFIG_AX88796 */ | 147 | #endif /* CONFIG_AX88796 */ |
148 | 148 | ||
149 | #if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) | 149 | #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) |
150 | static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { | 150 | static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { |
151 | .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), | 151 | .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), |
152 | .gpio_pullup = -1, | 152 | .gpio_pullup = -1, |
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c index 9c8208ca0415..ffdd70dad327 100644 --- a/arch/arm/mach-pxa/gumstix.c +++ b/arch/arm/mach-pxa/gumstix.c | |||
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void) | |||
106 | } | 106 | } |
107 | #endif | 107 | #endif |
108 | 108 | ||
109 | #ifdef CONFIG_USB_GADGET_PXA25X | 109 | #ifdef CONFIG_USB_PXA25X |
110 | static struct gpio_vbus_mach_info gumstix_udc_info = { | 110 | static struct gpio_vbus_mach_info gumstix_udc_info = { |
111 | .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn, | 111 | .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn, |
112 | .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx, | 112 | .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx, |
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h index f80bbe246afe..d4eac3d6ffb5 100644 --- a/arch/arm/mach-pxa/include/mach/palm27x.h +++ b/arch/arm/mach-pxa/include/mach/palm27x.h | |||
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power, | |||
37 | #define palm27x_lcd_init(power, mode) do {} while (0) | 37 | #define palm27x_lcd_init(power, mode) do {} while (0) |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #if defined(CONFIG_USB_GADGET_PXA27X) || \ | 40 | #if defined(CONFIG_USB_PXA27X) || \ |
41 | defined(CONFIG_USB_GADGET_PXA27X_MODULE) | 41 | defined(CONFIG_USB_PXA27X_MODULE) |
42 | extern void __init palm27x_udc_init(int vbus, int pullup, | 42 | extern void __init palm27x_udc_init(int vbus, int pullup, |
43 | int vbus_inverted); | 43 | int vbus_inverted); |
44 | #else | 44 | #else |
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 325c245c0a0d..fbc10d7b95d1 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode) | |||
164 | /****************************************************************************** | 164 | /****************************************************************************** |
165 | * USB Gadget | 165 | * USB Gadget |
166 | ******************************************************************************/ | 166 | ******************************************************************************/ |
167 | #if defined(CONFIG_USB_GADGET_PXA27X) || \ | 167 | #if defined(CONFIG_USB_PXA27X) || \ |
168 | defined(CONFIG_USB_GADGET_PXA27X_MODULE) | 168 | defined(CONFIG_USB_PXA27X_MODULE) |
169 | static struct gpio_vbus_mach_info palm27x_udc_info = { | 169 | static struct gpio_vbus_mach_info palm27x_udc_info = { |
170 | .gpio_vbus_inverted = 1, | 170 | .gpio_vbus_inverted = 1, |
171 | }; | 171 | }; |
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c index 6ec7caefb37c..2c24c67fd92b 100644 --- a/arch/arm/mach-pxa/palmtc.c +++ b/arch/arm/mach-pxa/palmtc.c | |||
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {} | |||
338 | /****************************************************************************** | 338 | /****************************************************************************** |
339 | * UDC | 339 | * UDC |
340 | ******************************************************************************/ | 340 | ******************************************************************************/ |
341 | #if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE) | 341 | #if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE) |
342 | static struct gpio_vbus_mach_info palmtc_udc_info = { | 342 | static struct gpio_vbus_mach_info palmtc_udc_info = { |
343 | .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N, | 343 | .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N, |
344 | .gpio_vbus_inverted = 1, | 344 | .gpio_vbus_inverted = 1, |
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index a7539a6ed1ff..ca0c6615028c 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c | |||
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {} | |||
343 | /****************************************************************************** | 343 | /****************************************************************************** |
344 | * USB Gadget | 344 | * USB Gadget |
345 | ******************************************************************************/ | 345 | ******************************************************************************/ |
346 | #if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) | 346 | #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) |
347 | static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = { | 347 | static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = { |
348 | .gpio_vbus = GPIO41_VPAC270_UDC_DETECT, | 348 | .gpio_vbus = GPIO41_VPAC270_UDC_DETECT, |
349 | .gpio_pullup = -1, | 349 | .gpio_pullup = -1, |
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c index 66668565ee75..f208154b1382 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/export.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/i2c.h> | 13 | #include <linux/i2c.h> |
14 | 14 | ||
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 8ac9e9f84790..b1e192ba8c24 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -61,7 +61,7 @@ static inline void cache_sync(void) | |||
61 | { | 61 | { |
62 | void __iomem *base = l2x0_base; | 62 | void __iomem *base = l2x0_base; |
63 | 63 | ||
64 | #ifdef CONFIG_ARM_ERRATA_753970 | 64 | #ifdef CONFIG_PL310_ERRATA_753970 |
65 | /* write to an unmapped register */ | 65 | /* write to an unmapped register */ |
66 | writel_relaxed(0, base + L2X0_DUMMY_REG); | 66 | writel_relaxed(0, base + L2X0_DUMMY_REG); |
67 | #else | 67 | #else |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e4e7f6cba1ab..1aa664a1999f 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -168,7 +168,7 @@ static int __init consistent_init(void) | |||
168 | pte_t *pte; | 168 | pte_t *pte; |
169 | int i = 0; | 169 | int i = 0; |
170 | unsigned long base = consistent_base; | 170 | unsigned long base = consistent_base; |
171 | unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT; | 171 | unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; |
172 | 172 | ||
173 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); | 173 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); |
174 | if (!consistent_pte) { | 174 | if (!consistent_pte) { |
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
332 | struct page *page; | 332 | struct page *page; |
333 | void *addr; | 333 | void *addr; |
334 | 334 | ||
335 | /* | ||
336 | * Following is a work-around (a.k.a. hack) to prevent pages | ||
337 | * with __GFP_COMP being passed to split_page() which cannot | ||
338 | * handle them. The real problem is that this flag probably | ||
339 | * should be 0 on ARM as it is not supported on this | ||
340 | * platform; see CONFIG_HUGETLBFS. | ||
341 | */ | ||
342 | gfp &= ~(__GFP_COMP); | ||
343 | |||
335 | *handle = ~0; | 344 | *handle = ~0; |
336 | size = PAGE_ALIGN(size); | 345 | size = PAGE_ALIGN(size); |
337 | 346 | ||
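The __dma_alloc() hunk masks __GFP_COMP because the buffer is later handed to split_page(), which only knows how to split plain higher-order allocations, not compound pages. A caller-side sketch of the same defensive masking, using a hypothetical wrapper around the real page allocator:

#include <linux/gfp.h>

/* Hypothetical wrapper: strip flags the downstream split_page() path
 * cannot honour before allocating. */
static struct page *alloc_backing_pages(gfp_t gfp, unsigned int order)
{
	gfp &= ~__GFP_COMP;		/* compound pages cannot be handed to split_page() */
	return alloc_pages(gfp, order);	/* order > 0 blocks get split later */
}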
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 74be05f3e03a..44b628e4d6ea 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
@@ -9,8 +9,7 @@ | |||
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/personality.h> | 10 | #include <linux/personality.h> |
11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
12 | #include <asm/cputype.h> | 12 | #include <asm/cachetype.h> |
13 | #include <asm/system.h> | ||
14 | 13 | ||
15 | #define COLOUR_ALIGN(addr,pgoff) \ | 14 | #define COLOUR_ALIGN(addr,pgoff) \ |
16 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ | 15 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ |
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
32 | struct mm_struct *mm = current->mm; | 31 | struct mm_struct *mm = current->mm; |
33 | struct vm_area_struct *vma; | 32 | struct vm_area_struct *vma; |
34 | unsigned long start_addr; | 33 | unsigned long start_addr; |
35 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) | 34 | int do_align = 0; |
36 | unsigned int cache_type; | 35 | int aliasing = cache_is_vipt_aliasing(); |
37 | int do_align = 0, aliasing = 0; | ||
38 | 36 | ||
39 | /* | 37 | /* |
40 | * We only need to do colour alignment if either the I or D | 38 | * We only need to do colour alignment if either the I or D |
41 | * caches alias. This is indicated by bits 9 and 21 of the | 39 | * caches alias. |
42 | * cache type register. | ||
43 | */ | 40 | */ |
44 | cache_type = read_cpuid_cachetype(); | 41 | if (aliasing) |
45 | if (cache_type != read_cpuid_id()) { | 42 | do_align = filp || (flags & MAP_SHARED); |
46 | aliasing = (cache_type | cache_type >> 12) & (1 << 11); | ||
47 | if (aliasing) | ||
48 | do_align = filp || flags & MAP_SHARED; | ||
49 | } | ||
50 | #else | ||
51 | #define do_align 0 | ||
52 | #define aliasing 0 | ||
53 | #endif | ||
54 | 43 | ||
55 | /* | 44 | /* |
56 | * We enforce the MAP_FIXED case. | 45 | * We enforce the MAP_FIXED case. |
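The rewritten arch_get_unmapped_area() above drops the open-coded cache-type register probe and asks cache_is_vipt_aliasing() instead, requesting colour alignment whenever the cache aliases and the mapping is file-backed or MAP_SHARED. A minimal sketch of that decision plus the COLOUR_ALIGN-style rounding, using invented stand-ins for SHMLBA, MAP_SHARED and the cache query (illustrative only, not the kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    #define MAP_SHARED_ISH  0x01          /* stand-in for MAP_SHARED */
    #define SHMLBA_ISH      (4 * 4096UL)  /* stand-in: 4 pages, typical ARM SHMLBA */

    /* Round addr up to a cache-colour boundary while preserving the colour
     * implied by pgoff -- same shape as the COLOUR_ALIGN() macro above. */
    static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
    {
            unsigned long base = (addr + SHMLBA_ISH - 1) & ~(SHMLBA_ISH - 1);

            return base + ((pgoff << 12) & (SHMLBA_ISH - 1));
    }

    int main(void)
    {
            bool aliasing = true;     /* stand-in for cache_is_vipt_aliasing() */
            bool file_backed = false;
            int flags = MAP_SHARED_ISH;
            bool do_align = aliasing && (file_backed || (flags & MAP_SHARED_ISH));

            printf("do_align=%d aligned=%#lx\n", do_align,
                   do_align ? colour_align(0x12345, 3) : 0x12345UL);
            return 0;
    }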
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h index 83b745a5e1b7..c75f254abd85 100644 --- a/arch/arm/plat-mxc/include/mach/common.h +++ b/arch/arm/plat-mxc/include/mach/common.h | |||
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode { | |||
85 | }; | 85 | }; |
86 | 86 | ||
87 | extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode); | 87 | extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode); |
88 | extern void (*imx_idle)(void); | ||
89 | extern void imx_print_silicon_rev(const char *cpu, int srev); | 88 | extern void imx_print_silicon_rev(const char *cpu, int srev); |
90 | 89 | ||
91 | void avic_handle_irq(struct pt_regs *); | 90 | void avic_handle_irq(struct pt_regs *); |
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void); | |||
133 | extern void imx53_smd_common_init(void); | 132 | extern void imx53_smd_common_init(void); |
134 | extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); | 133 | extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); |
135 | extern void imx6q_pm_init(void); | 134 | extern void imx6q_pm_init(void); |
135 | extern void imx6q_clock_map_io(void); | ||
136 | #endif | 136 | #endif |
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h index 00a78193c681..a4d36d601d55 100644 --- a/arch/arm/plat-mxc/include/mach/mxc.h +++ b/arch/arm/plat-mxc/include/mach/mxc.h | |||
@@ -50,20 +50,6 @@ | |||
50 | #define IMX_CHIP_REVISION_3_3 0x33 | 50 | #define IMX_CHIP_REVISION_3_3 0x33 |
51 | #define IMX_CHIP_REVISION_UNKNOWN 0xff | 51 | #define IMX_CHIP_REVISION_UNKNOWN 0xff |
52 | 52 | ||
53 | #define IMX_CHIP_REVISION_1_0_STRING "1.0" | ||
54 | #define IMX_CHIP_REVISION_1_1_STRING "1.1" | ||
55 | #define IMX_CHIP_REVISION_1_2_STRING "1.2" | ||
56 | #define IMX_CHIP_REVISION_1_3_STRING "1.3" | ||
57 | #define IMX_CHIP_REVISION_2_0_STRING "2.0" | ||
58 | #define IMX_CHIP_REVISION_2_1_STRING "2.1" | ||
59 | #define IMX_CHIP_REVISION_2_2_STRING "2.2" | ||
60 | #define IMX_CHIP_REVISION_2_3_STRING "2.3" | ||
61 | #define IMX_CHIP_REVISION_3_0_STRING "3.0" | ||
62 | #define IMX_CHIP_REVISION_3_1_STRING "3.1" | ||
63 | #define IMX_CHIP_REVISION_3_2_STRING "3.2" | ||
64 | #define IMX_CHIP_REVISION_3_3_STRING "3.3" | ||
65 | #define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown" | ||
66 | |||
67 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
68 | extern unsigned int __mxc_cpu_type; | 54 | extern unsigned int __mxc_cpu_type; |
69 | #endif | 55 | #endif |
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h index cf88b3593fba..b9895d250167 100644 --- a/arch/arm/plat-mxc/include/mach/system.h +++ b/arch/arm/plat-mxc/include/mach/system.h | |||
@@ -17,14 +17,9 @@ | |||
17 | #ifndef __ASM_ARCH_MXC_SYSTEM_H__ | 17 | #ifndef __ASM_ARCH_MXC_SYSTEM_H__ |
18 | #define __ASM_ARCH_MXC_SYSTEM_H__ | 18 | #define __ASM_ARCH_MXC_SYSTEM_H__ |
19 | 19 | ||
20 | extern void (*imx_idle)(void); | ||
21 | |||
22 | static inline void arch_idle(void) | 20 | static inline void arch_idle(void) |
23 | { | 21 | { |
24 | if (imx_idle != NULL) | 22 | cpu_do_idle(); |
25 | (imx_idle)(); | ||
26 | else | ||
27 | cpu_do_idle(); | ||
28 | } | 23 | } |
29 | 24 | ||
30 | void arch_reset(char mode, const char *cmd); | 25 | void arch_reset(char mode, const char *cmd); |
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c index 9dad8dcc2ea9..d65fb31a55ca 100644 --- a/arch/arm/plat-mxc/system.c +++ b/arch/arm/plat-mxc/system.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/module.h> | ||
24 | 25 | ||
25 | #include <mach/hardware.h> | 26 | #include <mach/hardware.h> |
26 | #include <mach/common.h> | 27 | #include <mach/common.h> |
@@ -28,8 +29,8 @@ | |||
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
29 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
30 | 31 | ||
31 | void (*imx_idle)(void) = NULL; | ||
32 | void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL; | 32 | void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL; |
33 | EXPORT_SYMBOL_GPL(imx_ioremap); | ||
33 | 34 | ||
34 | static void __iomem *wdog_base; | 35 | static void __iomem *wdog_base; |
35 | 36 | ||
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h index 197ca03c3f7d..eb73ab40e955 100644 --- a/arch/arm/plat-omap/include/plat/clock.h +++ b/arch/arm/plat-omap/include/plat/clock.h | |||
@@ -165,8 +165,8 @@ struct dpll_data { | |||
165 | u8 auto_recal_bit; | 165 | u8 auto_recal_bit; |
166 | u8 recal_en_bit; | 166 | u8 recal_en_bit; |
167 | u8 recal_st_bit; | 167 | u8 recal_st_bit; |
168 | u8 flags; | ||
169 | # endif | 168 | # endif |
169 | u8 flags; | ||
170 | }; | 170 | }; |
171 | 171 | ||
172 | #endif | 172 | #endif |
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h index c50df4814f6f..3ff3e36580f2 100644 --- a/arch/arm/plat-omap/include/plat/common.h +++ b/arch/arm/plat-omap/include/plat/common.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | 31 | ||
32 | #include <plat/i2c.h> | 32 | #include <plat/i2c.h> |
33 | #include <plat/omap_hwmod.h> | ||
33 | 34 | ||
34 | struct sys_timer; | 35 | struct sys_timer; |
35 | 36 | ||
@@ -55,6 +56,8 @@ void am35xx_init_early(void); | |||
55 | void ti816x_init_early(void); | 56 | void ti816x_init_early(void); |
56 | void omap4430_init_early(void); | 57 | void omap4430_init_early(void); |
57 | 58 | ||
59 | extern int omap_dss_reset(struct omap_hwmod *); | ||
60 | |||
58 | void omap_sram_init(void); | 61 | void omap_sram_init(void); |
59 | 62 | ||
60 | /* | 63 | /* |
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c index a9276667c2fb..c7adad0e8de0 100644 --- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c +++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c | |||
@@ -12,7 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/module.h> | 15 | #include <linux/export.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/ioport.h> | 17 | #include <linux/ioport.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c index e1cbc728c775..c8bec9c7655d 100644 --- a/arch/arm/plat-s5p/sysmmu.c +++ b/arch/arm/plat-s5p/sysmmu.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/export.h> | ||
14 | 15 | ||
15 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
16 | 17 | ||
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h index d48245bb02b3..df8155b9d4d1 100644 --- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h +++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __PLAT_GPIO_CFG_H | 24 | #ifndef __PLAT_GPIO_CFG_H |
25 | #define __PLAT_GPIO_CFG_H __FILE__ | 25 | #define __PLAT_GPIO_CFG_H __FILE__ |
26 | 26 | ||
27 | #include<linux/types.h> | ||
28 | |||
27 | typedef unsigned int __bitwise__ samsung_gpio_pull_t; | 29 | typedef unsigned int __bitwise__ samsung_gpio_pull_t; |
28 | typedef unsigned int __bitwise__ s5p_gpio_drvstr_t; | 30 | typedef unsigned int __bitwise__ s5p_gpio_drvstr_t; |
29 | 31 | ||
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c index efe1d564473e..312b510d86b7 100644 --- a/arch/arm/plat-samsung/pd.c +++ b/arch/arm/plat-samsung/pd.c | |||
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c index dc1185dcf80d..c559d8438c70 100644 --- a/arch/arm/plat-samsung/pwm.c +++ b/arch/arm/plat-samsung/pwm.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * the Free Software Foundation; either version 2 of the License. | 11 | * the Free Software Foundation; either version 2 of the License. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 5bdeef969847..ccbe16f47227 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -1123,5 +1123,6 @@ blissc MACH_BLISSC BLISSC 3491 | |||
1123 | thales_adc MACH_THALES_ADC THALES_ADC 3492 | 1123 | thales_adc MACH_THALES_ADC THALES_ADC 3492 |
1124 | ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493 | 1124 | ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493 |
1125 | atdgp318 MACH_ATDGP318 ATDGP318 3494 | 1125 | atdgp318 MACH_ATDGP318 ATDGP318 3494 |
1126 | m28evk MACH_M28EVK M28EVK 3613 | ||
1126 | smdk4212 MACH_SMDK4212 SMDK4212 3638 | 1127 | smdk4212 MACH_SMDK4212 SMDK4212 3638 |
1127 | smdk4412 MACH_SMDK4412 SMDK4412 3765 | 1128 | smdk4412 MACH_SMDK4412 SMDK4412 3765 |
diff --git a/arch/microblaze/include/asm/namei.h b/arch/microblaze/include/asm/namei.h deleted file mode 100644 index 61d60b8a07d5..000000000000 --- a/arch/microblaze/include/asm/namei.h +++ /dev/null | |||
@@ -1,22 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_MICROBLAZE_NAMEI_H | ||
10 | #define _ASM_MICROBLAZE_NAMEI_H | ||
11 | |||
12 | #ifdef __KERNEL__ | ||
13 | |||
14 | /* This dummy routine maybe changed to something useful | ||
15 | * for /usr/gnemul/ emulation stuff. | ||
16 | * Look at asm-sparc/namei.h for details. | ||
17 | */ | ||
18 | #define __emul_prefix() NULL | ||
19 | |||
20 | #endif /* __KERNEL__ */ | ||
21 | |||
22 | #endif /* _ASM_MICROBLAZE_NAMEI_H */ | ||
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts index d9b776740a67..d3b478242ea9 100644 --- a/arch/powerpc/boot/dts/p1023rds.dts +++ b/arch/powerpc/boot/dts/p1023rds.dts | |||
@@ -449,6 +449,7 @@ | |||
449 | interrupt-parent = <&mpic>; | 449 | interrupt-parent = <&mpic>; |
450 | interrupts = <16 2>; | 450 | interrupts = <16 2>; |
451 | interrupt-map-mask = <0xf800 0 0 7>; | 451 | interrupt-map-mask = <0xf800 0 0 7>; |
452 | /* IRQ[0:3] are pulled up on board, set to active-low */ | ||
452 | interrupt-map = < | 453 | interrupt-map = < |
453 | /* IDSEL 0x0 */ | 454 | /* IDSEL 0x0 */ |
454 | 0000 0 0 1 &mpic 0 1 | 455 | 0000 0 0 1 &mpic 0 1 |
@@ -488,11 +489,15 @@ | |||
488 | interrupt-parent = <&mpic>; | 489 | interrupt-parent = <&mpic>; |
489 | interrupts = <16 2>; | 490 | interrupts = <16 2>; |
490 | interrupt-map-mask = <0xf800 0 0 7>; | 491 | interrupt-map-mask = <0xf800 0 0 7>; |
492 | /* | ||
493 | * IRQ[4:6] only for PCIe, set to active-high, | ||
494 | * IRQ[7] is pulled up on board, set to active-low | ||
495 | */ | ||
491 | interrupt-map = < | 496 | interrupt-map = < |
492 | /* IDSEL 0x0 */ | 497 | /* IDSEL 0x0 */ |
493 | 0000 0 0 1 &mpic 4 1 | 498 | 0000 0 0 1 &mpic 4 2 |
494 | 0000 0 0 2 &mpic 5 1 | 499 | 0000 0 0 2 &mpic 5 2 |
495 | 0000 0 0 3 &mpic 6 1 | 500 | 0000 0 0 3 &mpic 6 2 |
496 | 0000 0 0 4 &mpic 7 1 | 501 | 0000 0 0 4 &mpic 7 1 |
497 | >; | 502 | >; |
498 | ranges = <0x2000000 0x0 0xa0000000 | 503 | ranges = <0x2000000 0x0 0xa0000000 |
@@ -527,12 +532,16 @@ | |||
527 | interrupt-parent = <&mpic>; | 532 | interrupt-parent = <&mpic>; |
528 | interrupts = <16 2>; | 533 | interrupts = <16 2>; |
529 | interrupt-map-mask = <0xf800 0 0 7>; | 534 | interrupt-map-mask = <0xf800 0 0 7>; |
535 | /* | ||
536 | * IRQ[8:10] are pulled up on board, set to active-low | ||
537 | * IRQ[11] only for PCIe, set to active-high, | ||
538 | */ | ||
530 | interrupt-map = < | 539 | interrupt-map = < |
531 | /* IDSEL 0x0 */ | 540 | /* IDSEL 0x0 */ |
532 | 0000 0 0 1 &mpic 8 1 | 541 | 0000 0 0 1 &mpic 8 1 |
533 | 0000 0 0 2 &mpic 9 1 | 542 | 0000 0 0 2 &mpic 9 1 |
534 | 0000 0 0 3 &mpic 10 1 | 543 | 0000 0 0 3 &mpic 10 1 |
535 | 0000 0 0 4 &mpic 11 1 | 544 | 0000 0 0 4 &mpic 11 2 |
536 | >; | 545 | >; |
537 | ranges = <0x2000000 0x0 0x80000000 | 546 | ranges = <0x2000000 0x0 0x80000000 |
538 | 0x2000000 0x0 0x80000000 | 547 | 0x2000000 0x0 0x80000000 |
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index 6cdf1c0d2c8a..3b98d7354341 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig | |||
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y | |||
52 | CONFIG_MTD_JEDECPROBE=y | 52 | CONFIG_MTD_JEDECPROBE=y |
53 | CONFIG_MTD_CFI_AMDSTD=y | 53 | CONFIG_MTD_CFI_AMDSTD=y |
54 | CONFIG_MTD_PHYSMAP_OF=y | 54 | CONFIG_MTD_PHYSMAP_OF=y |
55 | CONFIG_MTD_NAND=m | ||
56 | CONFIG_MTD_NAND_NDFC=m | ||
55 | CONFIG_MTD_UBI=m | 57 | CONFIG_MTD_UBI=m |
56 | CONFIG_MTD_UBI_GLUEBI=m | 58 | CONFIG_MTD_UBI_GLUEBI=m |
57 | CONFIG_PROC_DEVICETREE=y | 59 | CONFIG_PROC_DEVICETREE=y |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 5964371303ac..8558b572e55d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/of_fdt.h> | 15 | #include <linux/of_fdt.h> |
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/bootmem.h> | 17 | #include <linux/bootmem.h> |
18 | #include <linux/moduleparam.h> | ||
18 | #include <asm/pgtable.h> | 19 | #include <asm/pgtable.h> |
19 | #include <asm/pgalloc.h> | 20 | #include <asm/pgalloc.h> |
20 | #include <asm/tlb.h> | 21 | #include <asm/tlb.h> |
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 45023e26aea3..d7946be298b6 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -203,7 +203,7 @@ config P3060_QDS | |||
203 | select PPC_E500MC | 203 | select PPC_E500MC |
204 | select PHYS_64BIT | 204 | select PHYS_64BIT |
205 | select SWIOTLB | 205 | select SWIOTLB |
206 | select MPC8xxx_GPIO | 206 | select GPIO_MPC8XXX |
207 | select HAS_RAPIDIO | 207 | select HAS_RAPIDIO |
208 | select PPC_EPAPR_HV_PIC | 208 | select PPC_EPAPR_HV_PIC |
209 | help | 209 | help |
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c index 01dcf44871e9..081cf4ac1881 100644 --- a/arch/powerpc/platforms/85xx/p3060_qds.c +++ b/arch/powerpc/platforms/85xx/p3060_qds.c | |||
@@ -70,7 +70,7 @@ define_machine(p3060_qds) { | |||
70 | .power_save = e500_idle, | 70 | .power_save = e500_idle, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | machine_device_initcall(p3060_qds, declare_of_platform_devices); | 73 | machine_device_initcall(p3060_qds, corenet_ds_publish_devices); |
74 | 74 | ||
75 | #ifdef CONFIG_SWIOTLB | 75 | #ifdef CONFIG_SWIOTLB |
76 | machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier); | 76 | machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier); |
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index af1a5df46b3e..b6731e4a6646 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c | |||
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void) | |||
280 | 280 | ||
281 | if (!ehv_pic->irqhost) { | 281 | if (!ehv_pic->irqhost) { |
282 | of_node_put(np); | 282 | of_node_put(np); |
283 | kfree(ehv_pic); | ||
283 | return; | 284 | return; |
284 | } | 285 | } |
285 | 286 | ||
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index c4d96fa32ba5..d5c3c90ee698 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c | |||
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) | |||
328 | err: | 328 | err: |
329 | iounmap(fsl_lbc_ctrl_dev->regs); | 329 | iounmap(fsl_lbc_ctrl_dev->regs); |
330 | kfree(fsl_lbc_ctrl_dev); | 330 | kfree(fsl_lbc_ctrl_dev); |
331 | fsl_lbc_ctrl_dev = NULL; | ||
331 | return ret; | 332 | return ret; |
332 | } | 333 | } |
333 | 334 | ||
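The one-line additions to ehv_pic_init() and fsl_lbc_ctrl_probe() above both tighten error paths: the allocation is freed, and in the fsl_lbc case the global pointer is also cleared so later code cannot dereference a stale, freed object. A generic sketch of that "free and clear the global on probe failure" pattern (hypothetical names, plain C, not the drivers themselves):

    #include <stdlib.h>

    struct ctrl { void *regs; };

    static struct ctrl *global_ctrl;     /* consulted by other code paths */

    static int probe(void)
    {
            int setup_failed = 1;         /* pretend a later setup step fails */

            global_ctrl = calloc(1, sizeof(*global_ctrl));
            if (!global_ctrl)
                    return -1;

            if (setup_failed) {
                    free(global_ctrl);
                    global_ctrl = NULL;   /* don't leave a dangling global */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            return probe() == 0 ? 0 : 1;
    }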
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 3363fbc964f8..ceb09cbd2329 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier) | |||
216 | /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says | 216 | /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says |
217 | that the BRG divisor must be even if you're not using divide-by-16 | 217 | that the BRG divisor must be even if you're not using divide-by-16 |
218 | mode. */ | 218 | mode. */ |
219 | if (!div16 && (divisor & 1)) | 219 | if (!div16 && (divisor & 1) && (divisor > 3)) |
220 | divisor++; | 220 | divisor++; |
221 | 221 | ||
222 | tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | | 222 | tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | |
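The qe_setbrg() change above narrows the QE_General4 workaround: an odd divisor is only bumped to the next even value when divide-by-16 is off and the divisor is greater than 3. A small stand-alone model of that adjustment (illustrative only, not the driver):

    #include <stdio.h>
    #include <stdbool.h>

    /* Mirror the adjusted erratum rule: even divisors are required unless
     * divide-by-16 is in use, and tiny divisors (<= 3) are left alone. */
    static unsigned int adjust_divisor(unsigned int divisor, bool div16)
    {
            if (!div16 && (divisor & 1) && (divisor > 3))
                    divisor++;
            return divisor;
    }

    int main(void)
    {
            printf("%u %u %u\n",
                   adjust_divisor(3, false),   /* stays 3    */
                   adjust_divisor(5, false),   /* becomes 6  */
                   adjust_divisor(5, true));   /* stays 5    */
            return 0;
    }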
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h index 118c143a9cb4..2c32df6fe231 100644 --- a/arch/x86/um/asm/processor.h +++ b/arch/x86/um/asm/processor.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | #define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP) | 13 | #define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP) |
14 | #define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_IP) | 14 | #define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP) |
15 | #define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP) | 15 | #define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP) |
16 | 16 | ||
17 | #define ARCH_IS_STACKGROW(address) \ | 17 | #define ARCH_IS_STACKGROW(address) \ |
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 127408069ca7..631b9477b99c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab) | |||
932 | static int erst_open_pstore(struct pstore_info *psi); | 932 | static int erst_open_pstore(struct pstore_info *psi); |
933 | static int erst_close_pstore(struct pstore_info *psi); | 933 | static int erst_close_pstore(struct pstore_info *psi); |
934 | static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, | 934 | static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, |
935 | struct timespec *time, struct pstore_info *psi); | 935 | struct timespec *time, char **buf, |
936 | struct pstore_info *psi); | ||
936 | static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part, | 937 | static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part, |
937 | size_t size, struct pstore_info *psi); | 938 | size_t size, struct pstore_info *psi); |
938 | static int erst_clearer(enum pstore_type_id type, u64 id, | 939 | static int erst_clearer(enum pstore_type_id type, u64 id, |
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi) | |||
986 | } | 987 | } |
987 | 988 | ||
988 | static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, | 989 | static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, |
989 | struct timespec *time, struct pstore_info *psi) | 990 | struct timespec *time, char **buf, |
991 | struct pstore_info *psi) | ||
990 | { | 992 | { |
991 | int rc; | 993 | int rc; |
992 | ssize_t len = 0; | 994 | ssize_t len = 0; |
993 | u64 record_id; | 995 | u64 record_id; |
994 | struct cper_pstore_record *rcd = (struct cper_pstore_record *) | 996 | struct cper_pstore_record *rcd; |
995 | (erst_info.buf - sizeof(*rcd)); | 997 | size_t rcd_len = sizeof(*rcd) + erst_info.bufsize; |
996 | 998 | ||
997 | if (erst_disable) | 999 | if (erst_disable) |
998 | return -ENODEV; | 1000 | return -ENODEV; |
999 | 1001 | ||
1002 | rcd = kmalloc(rcd_len, GFP_KERNEL); | ||
1003 | if (!rcd) { | ||
1004 | rc = -ENOMEM; | ||
1005 | goto out; | ||
1006 | } | ||
1000 | skip: | 1007 | skip: |
1001 | rc = erst_get_record_id_next(&reader_pos, &record_id); | 1008 | rc = erst_get_record_id_next(&reader_pos, &record_id); |
1002 | if (rc) | 1009 | if (rc) |
@@ -1004,22 +1011,27 @@ skip: | |||
1004 | 1011 | ||
1005 | /* no more record */ | 1012 | /* no more record */ |
1006 | if (record_id == APEI_ERST_INVALID_RECORD_ID) { | 1013 | if (record_id == APEI_ERST_INVALID_RECORD_ID) { |
1007 | rc = -1; | 1014 | rc = -EINVAL; |
1008 | goto out; | 1015 | goto out; |
1009 | } | 1016 | } |
1010 | 1017 | ||
1011 | len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) + | 1018 | len = erst_read(record_id, &rcd->hdr, rcd_len); |
1012 | erst_info.bufsize); | ||
1013 | /* The record may be cleared by others, try read next record */ | 1019 | /* The record may be cleared by others, try read next record */ |
1014 | if (len == -ENOENT) | 1020 | if (len == -ENOENT) |
1015 | goto skip; | 1021 | goto skip; |
1016 | else if (len < 0) { | 1022 | else if (len < sizeof(*rcd)) { |
1017 | rc = -1; | 1023 | rc = -EIO; |
1018 | goto out; | 1024 | goto out; |
1019 | } | 1025 | } |
1020 | if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) | 1026 | if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) |
1021 | goto skip; | 1027 | goto skip; |
1022 | 1028 | ||
1029 | *buf = kmalloc(len, GFP_KERNEL); | ||
1030 | if (*buf == NULL) { | ||
1031 | rc = -ENOMEM; | ||
1032 | goto out; | ||
1033 | } | ||
1034 | memcpy(*buf, rcd->data, len - sizeof(*rcd)); | ||
1023 | *id = record_id; | 1035 | *id = record_id; |
1024 | if (uuid_le_cmp(rcd->sec_hdr.section_type, | 1036 | if (uuid_le_cmp(rcd->sec_hdr.section_type, |
1025 | CPER_SECTION_TYPE_DMESG) == 0) | 1037 | CPER_SECTION_TYPE_DMESG) == 0) |
@@ -1037,6 +1049,7 @@ skip: | |||
1037 | time->tv_nsec = 0; | 1049 | time->tv_nsec = 0; |
1038 | 1050 | ||
1039 | out: | 1051 | out: |
1052 | kfree(rcd); | ||
1040 | return (rc < 0) ? rc : (len - sizeof(*rcd)); | 1053 | return (rc < 0) ? rc : (len - sizeof(*rcd)); |
1041 | } | 1054 | } |
1042 | 1055 | ||
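With the erst_reader() rework above, the reader no longer writes into the shared pstore buffer: it kmalloc()s a scratch record, copies the payload into a freshly allocated *buf that the caller now owns, and frees the scratch buffer on every exit path. A simplified userspace model of that ownership-transfer interface (hypothetical types and sizes, not the APEI code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    struct record_hdr { size_t payload_len; };

    /* Pretend backing store: one record with a small payload. */
    static const char sample_payload[] = "oops text";

    /* Read one record; on success the caller owns *buf and must free() it. */
    static ssize_t read_record(char **buf)
    {
            struct record_hdr hdr = { sizeof(sample_payload) };

            *buf = malloc(hdr.payload_len);
            if (!*buf)
                    return -1;              /* -ENOMEM in the kernel version */
            memcpy(*buf, sample_payload, hdr.payload_len);
            return (ssize_t)hdr.payload_len;
    }

    int main(void)
    {
            char *buf = NULL;
            ssize_t len = read_record(&buf);

            if (len > 0)
                    printf("got %zd bytes: %s\n", len, buf);
            free(buf);
            return 0;
    }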
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index ec555951176e..43b875810d1b 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c | |||
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev) | |||
67 | struct device *dev = &pdev->dev; | 67 | struct device *dev = &pdev->dev; |
68 | struct ahci_platform_data *pdata = dev_get_platdata(dev); | 68 | struct ahci_platform_data *pdata = dev_get_platdata(dev); |
69 | const struct platform_device_id *id = platform_get_device_id(pdev); | 69 | const struct platform_device_id *id = platform_get_device_id(pdev); |
70 | struct ata_port_info pi = ahci_port_info[id->driver_data]; | 70 | struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0]; |
71 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 71 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
72 | struct ahci_host_priv *hpriv; | 72 | struct ahci_host_priv *hpriv; |
73 | struct ata_host *host; | 73 | struct ata_host *host; |
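The ahci_probe() tweak above guards against a NULL platform_device_id (as can happen when the device is matched by name or device tree rather than through the id table) by falling back to entry 0 of ahci_port_info. The general shape of that defensive lookup, sketched with an invented table rather than the driver's:

    #include <stdio.h>

    struct dev_id { unsigned long driver_data; };

    static const int port_info[] = { 100, 200, 300 };   /* index 0 = default */

    static int pick_port_info(const struct dev_id *id)
    {
            /* A NULL id means the device was not matched via the id table;
             * fall back to the first (default) entry instead of crashing. */
            return port_info[id ? id->driver_data : 0];
    }

    int main(void)
    {
            struct dev_id table_match = { 2 };

            printf("%d %d\n", pick_port_info(&table_match), pick_port_info(NULL));
            return 0;
    }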
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 63d53277d6a9..4cadfa28f940 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev, | |||
2533 | if (rc) | 2533 | if (rc) |
2534 | goto out; | 2534 | goto out; |
2535 | 2535 | ||
2536 | #ifdef CONFIG_ATA_BMDMA | ||
2536 | if (bmdma) | 2537 | if (bmdma) |
2537 | /* prepare and activate BMDMA host */ | 2538 | /* prepare and activate BMDMA host */ |
2538 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); | 2539 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); |
2539 | else | 2540 | else |
2541 | #endif | ||
2540 | /* prepare and activate SFF host */ | 2542 | /* prepare and activate SFF host */ |
2541 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); | 2543 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
2542 | if (rc) | 2544 | if (rc) |
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev, | |||
2544 | host->private_data = host_priv; | 2546 | host->private_data = host_priv; |
2545 | host->flags |= hflags; | 2547 | host->flags |= hflags; |
2546 | 2548 | ||
2549 | #ifdef CONFIG_ATA_BMDMA | ||
2547 | if (bmdma) { | 2550 | if (bmdma) { |
2548 | pci_set_master(pdev); | 2551 | pci_set_master(pdev); |
2549 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); | 2552 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); |
2550 | } else | 2553 | } else |
2554 | #endif | ||
2551 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); | 2555 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
2552 | out: | 2556 | out: |
2553 | if (rc == 0) | 2557 | if (rc == 0) |
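The libata-sff hunks above wrap the BMDMA branches in #ifdef CONFIG_ATA_BMDMA so SFF-only configurations still build and link; with the option off, the else arm quietly becomes the only path. A compact illustration of that "conditional branch that collapses to the fallback" idiom, using a made-up config macro:

    #include <stdio.h>
    #include <stdbool.h>

    /* #define HAVE_BMDMA 1 */           /* stand-in for CONFIG_ATA_BMDMA */

    static void prepare_host(bool bmdma)
    {
            (void)bmdma;                  /* unused when BMDMA is compiled out */
    #ifdef HAVE_BMDMA
            if (bmdma)
                    puts("prepare BMDMA host");
            else
    #endif
                    puts("prepare plain SFF host");
    }

    int main(void)
    {
            prepare_host(true);
            prepare_host(false);
            return 0;
    }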
diff --git a/drivers/base/node.c b/drivers/base/node.c index 793f796c4da3..5693ecee9a40 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
127 | nid, K(node_page_state(nid, NR_WRITEBACK)), | 127 | nid, K(node_page_state(nid, NR_WRITEBACK)), |
128 | nid, K(node_page_state(nid, NR_FILE_PAGES)), | 128 | nid, K(node_page_state(nid, NR_FILE_PAGES)), |
129 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), | 129 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), |
130 | nid, K(node_page_state(nid, NR_ANON_PAGES) | ||
131 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 130 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
131 | nid, K(node_page_state(nid, NR_ANON_PAGES) | ||
132 | + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * | 132 | + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * |
133 | HPAGE_PMD_NR | 133 | HPAGE_PMD_NR), |
134 | #else | ||
135 | nid, K(node_page_state(nid, NR_ANON_PAGES)), | ||
134 | #endif | 136 | #endif |
135 | ), | ||
136 | nid, K(node_page_state(nid, NR_SHMEM)), | 137 | nid, K(node_page_state(nid, NR_SHMEM)), |
137 | nid, node_page_state(nid, NR_KERNEL_STACK) * | 138 | nid, node_page_state(nid, NR_KERNEL_STACK) * |
138 | THREAD_SIZE / 1024, | 139 | THREAD_SIZE / 1024, |
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
143 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + | 144 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + |
144 | node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), | 145 | node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), |
145 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), | 146 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), |
146 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) | ||
147 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 147 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
148 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) | ||
148 | , nid, | 149 | , nid, |
149 | K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * | 150 | K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * |
150 | HPAGE_PMD_NR) | 151 | HPAGE_PMD_NR)); |
152 | #else | ||
153 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); | ||
151 | #endif | 154 | #endif |
152 | ); | ||
153 | n += hugetlb_report_node_meminfo(nid, buf + n); | 155 | n += hugetlb_report_node_meminfo(nid, buf + n); |
154 | return n; | 156 | return n; |
155 | } | 157 | } |
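The node_read_meminfo() fix above restructures the CONFIG_TRANSPARENT_HUGEPAGE conditionals so that each preprocessor arm carries a complete group of format arguments (and, for the tail, the closing parenthesis of the call), instead of splicing an #ifdef into the middle of a single argument expression, which is fragile when the callee may be a macro. A tiny demonstration of the corrected shape, with invented names and a plain sprintf:

    #include <stdio.h>

    /* #define HAVE_THP 1 */   /* stand-in for CONFIG_TRANSPARENT_HUGEPAGE */

    static int show(char *buf, long anon, long thp_extra)
    {
            /* Each arm is a complete call, so no conditional ends up spliced
             * into the middle of an argument list. */
    #ifdef HAVE_THP
            return sprintf(buf, "Anon: %ld kB\n", anon + thp_extra);
    #else
            (void)thp_extra;
            return sprintf(buf, "Anon: %ld kB\n", anon);
    #endif
    }

    int main(void)
    {
            char buf[64];

            show(buf, 10, 4);
            fputs(buf, stdout);
            return 0;
    }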
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 5c6f56f21443..dcd8babae9eb 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block) | |||
343 | else | 343 | else |
344 | op.config |= CFG_MID_FRAG; | 344 | op.config |= CFG_MID_FRAG; |
345 | 345 | ||
346 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | 346 | if (first_block) { |
347 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | 347 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); |
348 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | 348 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); |
349 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | 349 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); |
350 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | 350 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); |
351 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
352 | } | ||
351 | } | 353 | } |
352 | 354 | ||
353 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 355 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
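The mv_cesa hunk above programs the DIGEST_INITIAL_VAL registers only for the first block of a hash request; for middle and final fragments the engine carries its own running digest, so rewriting those registers would clobber the intermediate state. A rough model of the "seed the state once, then stream blocks" flow (generic pseudo-driver, not the CESA code):

    #include <stdio.h>
    #include <stdbool.h>

    struct engine { unsigned int iv_writes; unsigned int blocks; };

    static void process_block(struct engine *e, bool first_block)
    {
            if (first_block)
                    e->iv_writes++;   /* only seed the hash state once */
            e->blocks++;              /* hardware chains the rest itself */
    }

    int main(void)
    {
            struct engine e = { 0, 0 };

            for (int i = 0; i < 4; i++)
                    process_block(&e, i == 0);
            printf("iv writes: %u, blocks: %u\n", e.iv_writes, e.blocks);
            return 0;
    }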
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 8af8e864a9cf..73464a62adf7 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = { | |||
1128 | { .compatible = "fsl,p1020-memory-controller", }, | 1128 | { .compatible = "fsl,p1020-memory-controller", }, |
1129 | { .compatible = "fsl,p1021-memory-controller", }, | 1129 | { .compatible = "fsl,p1021-memory-controller", }, |
1130 | { .compatible = "fsl,p2020-memory-controller", }, | 1130 | { .compatible = "fsl,p2020-memory-controller", }, |
1131 | { .compatible = "fsl,p4080-memory-controller", }, | 1131 | { .compatible = "fsl,qoriq-memory-controller", }, |
1132 | {}, | 1132 | {}, |
1133 | }; | 1133 | }; |
1134 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); | 1134 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); |
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 8370f72d87ff..b0a81173a268 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c | |||
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi) | |||
457 | } | 457 | } |
458 | 458 | ||
459 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, | 459 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, |
460 | struct timespec *timespec, struct pstore_info *psi) | 460 | struct timespec *timespec, |
461 | char **buf, struct pstore_info *psi) | ||
461 | { | 462 | { |
462 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | 463 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; |
463 | struct efivars *efivars = psi->data; | 464 | struct efivars *efivars = psi->data; |
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, | |||
478 | timespec->tv_nsec = 0; | 479 | timespec->tv_nsec = 0; |
479 | get_var_data_locked(efivars, &efivars->walk_entry->var); | 480 | get_var_data_locked(efivars, &efivars->walk_entry->var); |
480 | size = efivars->walk_entry->var.DataSize; | 481 | size = efivars->walk_entry->var.DataSize; |
481 | memcpy(psi->buf, efivars->walk_entry->var.Data, size); | 482 | *buf = kmalloc(size, GFP_KERNEL); |
483 | if (*buf == NULL) | ||
484 | return -ENOMEM; | ||
485 | memcpy(*buf, efivars->walk_entry->var.Data, | ||
486 | size); | ||
482 | efivars->walk_entry = list_entry(efivars->walk_entry->list.next, | 487 | efivars->walk_entry = list_entry(efivars->walk_entry->list.next, |
483 | struct efivar_entry, list); | 488 | struct efivar_entry, list); |
484 | return size; | 489 | return size; |
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi) | |||
576 | } | 581 | } |
577 | 582 | ||
578 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, | 583 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, |
579 | struct timespec *time, struct pstore_info *psi) | 584 | struct timespec *timespec, |
585 | char **buf, struct pstore_info *psi) | ||
580 | { | 586 | { |
581 | return -1; | 587 | return -1; |
582 | } | 588 | } |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 147df8ae79db..d3f3e8f54561 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip) | |||
546 | * Translate OpenFirmware node properties into platform_data | 546 | * Translate OpenFirmware node properties into platform_data |
547 | * WARNING: This is DEPRECATED and will be removed eventually! | 547 | * WARNING: This is DEPRECATED and will be removed eventually! |
548 | */ | 548 | */ |
549 | void | 549 | static void |
550 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) | 550 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) |
551 | { | 551 | { |
552 | struct device_node *node; | 552 | struct device_node *node; |
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) | |||
574 | *invert = *val; | 574 | *invert = *val; |
575 | } | 575 | } |
576 | #else | 576 | #else |
577 | void | 577 | static void |
578 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) | 578 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) |
579 | { | 579 | { |
580 | *gpio_base = -1; | 580 | *gpio_base = -1; |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 405c63b9d539..8323fc389840 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | |||
1873 | } | 1873 | } |
1874 | 1874 | ||
1875 | if (num_clips && clips_ptr) { | 1875 | if (num_clips && clips_ptr) { |
1876 | if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { | ||
1877 | ret = -EINVAL; | ||
1878 | goto out_err1; | ||
1879 | } | ||
1876 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | 1880 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); |
1877 | if (!clips) { | 1881 | if (!clips) { |
1878 | ret = -ENOMEM; | 1882 | ret = -ENOMEM; |
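The drm_mode_dirtyfb_ioctl() hunk above validates the user-supplied num_clips against DRM_MODE_FB_DIRTY_MAX_CLIPS before it is multiplied by sizeof(*clips), rejecting negative or oversized counts rather than letting them inflate the allocation. A generic sketch of that bound-then-allocate pattern (the limit value is an invented stand-in):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CLIPS 256   /* stand-in for DRM_MODE_FB_DIRTY_MAX_CLIPS */

    struct clip { int x1, y1, x2, y2; };

    /* Refuse counts that are non-positive or beyond the advertised limit
     * before they are multiplied into an allocation size. */
    static struct clip *alloc_clips(long num_clips)
    {
            if (num_clips <= 0 || num_clips > MAX_CLIPS)
                    return NULL;    /* the ioctl returns -EINVAL here */
            return calloc((size_t)num_clips, sizeof(struct clip));
    }

    int main(void)
    {
            struct clip *ok = alloc_clips(4);

            printf("small: %s, huge: %s\n",
                   ok ? "accepted" : "rejected",
                   alloc_clips(1L << 20) ? "accepted" : "rejected");
            free(ok);
            return 0;
    }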
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 6f8afea94fc9..2bb07bca511a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c | |||
@@ -27,82 +27,84 @@ | |||
27 | #include "drm.h" | 27 | #include "drm.h" |
28 | 28 | ||
29 | #include "exynos_drm_drv.h" | 29 | #include "exynos_drm_drv.h" |
30 | #include "exynos_drm_gem.h" | ||
30 | #include "exynos_drm_buf.h" | 31 | #include "exynos_drm_buf.h" |
31 | 32 | ||
32 | static DEFINE_MUTEX(exynos_drm_buf_lock); | ||
33 | |||
34 | static int lowlevel_buffer_allocate(struct drm_device *dev, | 33 | static int lowlevel_buffer_allocate(struct drm_device *dev, |
35 | struct exynos_drm_buf_entry *entry) | 34 | struct exynos_drm_gem_buf *buffer) |
36 | { | 35 | { |
37 | DRM_DEBUG_KMS("%s\n", __FILE__); | 36 | DRM_DEBUG_KMS("%s\n", __FILE__); |
38 | 37 | ||
39 | entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, | 38 | buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size, |
40 | (dma_addr_t *)&entry->paddr, GFP_KERNEL); | 39 | &buffer->dma_addr, GFP_KERNEL); |
41 | if (!entry->paddr) { | 40 | if (!buffer->kvaddr) { |
42 | DRM_ERROR("failed to allocate buffer.\n"); | 41 | DRM_ERROR("failed to allocate buffer.\n"); |
43 | return -ENOMEM; | 42 | return -ENOMEM; |
44 | } | 43 | } |
45 | 44 | ||
46 | DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", | 45 | DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", |
47 | (unsigned int)entry->vaddr, entry->paddr, entry->size); | 46 | (unsigned long)buffer->kvaddr, |
47 | (unsigned long)buffer->dma_addr, | ||
48 | buffer->size); | ||
48 | 49 | ||
49 | return 0; | 50 | return 0; |
50 | } | 51 | } |
51 | 52 | ||
52 | static void lowlevel_buffer_deallocate(struct drm_device *dev, | 53 | static void lowlevel_buffer_deallocate(struct drm_device *dev, |
53 | struct exynos_drm_buf_entry *entry) | 54 | struct exynos_drm_gem_buf *buffer) |
54 | { | 55 | { |
55 | DRM_DEBUG_KMS("%s.\n", __FILE__); | 56 | DRM_DEBUG_KMS("%s.\n", __FILE__); |
56 | 57 | ||
57 | if (entry->paddr && entry->vaddr && entry->size) | 58 | if (buffer->dma_addr && buffer->size) |
58 | dma_free_writecombine(dev->dev, entry->size, entry->vaddr, | 59 | dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr, |
59 | entry->paddr); | 60 | (dma_addr_t)buffer->dma_addr); |
60 | else | 61 | else |
61 | DRM_DEBUG_KMS("entry data is null.\n"); | 62 | DRM_DEBUG_KMS("buffer data are invalid.\n"); |
62 | } | 63 | } |
63 | 64 | ||
64 | struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, | 65 | struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, |
65 | unsigned int size) | 66 | unsigned int size) |
66 | { | 67 | { |
67 | struct exynos_drm_buf_entry *entry; | 68 | struct exynos_drm_gem_buf *buffer; |
68 | 69 | ||
69 | DRM_DEBUG_KMS("%s.\n", __FILE__); | 70 | DRM_DEBUG_KMS("%s.\n", __FILE__); |
71 | DRM_DEBUG_KMS("desired size = 0x%x\n", size); | ||
70 | 72 | ||
71 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | 73 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
72 | if (!entry) { | 74 | if (!buffer) { |
73 | DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); | 75 | DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); |
74 | return ERR_PTR(-ENOMEM); | 76 | return ERR_PTR(-ENOMEM); |
75 | } | 77 | } |
76 | 78 | ||
77 | entry->size = size; | 79 | buffer->size = size; |
78 | 80 | ||
79 | /* | 81 | /* |
80 | * allocate memory region with size and set the memory information | 82 | * allocate memory region with size and set the memory information |
81 | * to vaddr and paddr of a entry object. | 83 | * to vaddr and dma_addr of a buffer object. |
82 | */ | 84 | */ |
83 | if (lowlevel_buffer_allocate(dev, entry) < 0) { | 85 | if (lowlevel_buffer_allocate(dev, buffer) < 0) { |
84 | kfree(entry); | 86 | kfree(buffer); |
85 | entry = NULL; | 87 | buffer = NULL; |
86 | return ERR_PTR(-ENOMEM); | 88 | return ERR_PTR(-ENOMEM); |
87 | } | 89 | } |
88 | 90 | ||
89 | return entry; | 91 | return buffer; |
90 | } | 92 | } |
91 | 93 | ||
92 | void exynos_drm_buf_destroy(struct drm_device *dev, | 94 | void exynos_drm_buf_destroy(struct drm_device *dev, |
93 | struct exynos_drm_buf_entry *entry) | 95 | struct exynos_drm_gem_buf *buffer) |
94 | { | 96 | { |
95 | DRM_DEBUG_KMS("%s.\n", __FILE__); | 97 | DRM_DEBUG_KMS("%s.\n", __FILE__); |
96 | 98 | ||
97 | if (!entry) { | 99 | if (!buffer) { |
98 | DRM_DEBUG_KMS("entry is null.\n"); | 100 | DRM_DEBUG_KMS("buffer is null.\n"); |
99 | return; | 101 | return; |
100 | } | 102 | } |
101 | 103 | ||
102 | lowlevel_buffer_deallocate(dev, entry); | 104 | lowlevel_buffer_deallocate(dev, buffer); |
103 | 105 | ||
104 | kfree(entry); | 106 | kfree(buffer); |
105 | entry = NULL; | 107 | buffer = NULL; |
106 | } | 108 | } |
107 | 109 | ||
108 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | 110 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h index 045d59eab01a..6e91f9caa5db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h | |||
@@ -26,28 +26,15 @@ | |||
26 | #ifndef _EXYNOS_DRM_BUF_H_ | 26 | #ifndef _EXYNOS_DRM_BUF_H_ |
27 | #define _EXYNOS_DRM_BUF_H_ | 27 | #define _EXYNOS_DRM_BUF_H_ |
28 | 28 | ||
29 | /* | ||
30 | * exynos drm buffer entry structure. | ||
31 | * | ||
32 | * @paddr: physical address of allocated memory. | ||
33 | * @vaddr: kernel virtual address of allocated memory. | ||
34 | * @size: size of allocated memory. | ||
35 | */ | ||
36 | struct exynos_drm_buf_entry { | ||
37 | dma_addr_t paddr; | ||
38 | void __iomem *vaddr; | ||
39 | unsigned int size; | ||
40 | }; | ||
41 | |||
42 | /* allocate physical memory. */ | 29 | /* allocate physical memory. */ |
43 | struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, | 30 | struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, |
44 | unsigned int size); | 31 | unsigned int size); |
45 | 32 | ||
46 | /* get physical memory information of a drm framebuffer. */ | 33 | /* get memory information of a drm framebuffer. */ |
47 | struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); | 34 | struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); |
48 | 35 | ||
49 | /* remove allocated physical memory. */ | 36 | /* remove allocated physical memory. */ |
50 | void exynos_drm_buf_destroy(struct drm_device *dev, | 37 | void exynos_drm_buf_destroy(struct drm_device *dev, |
51 | struct exynos_drm_buf_entry *entry); | 38 | struct exynos_drm_gem_buf *buffer); |
52 | 39 | ||
53 | #endif | 40 | #endif |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 985d9e768728..d620b0784257 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c | |||
@@ -37,6 +37,8 @@ | |||
37 | 37 | ||
38 | struct exynos_drm_connector { | 38 | struct exynos_drm_connector { |
39 | struct drm_connector drm_connector; | 39 | struct drm_connector drm_connector; |
40 | uint32_t encoder_id; | ||
41 | struct exynos_drm_manager *manager; | ||
40 | }; | 42 | }; |
41 | 43 | ||
42 | /* convert exynos_video_timings to drm_display_mode */ | 44 | /* convert exynos_video_timings to drm_display_mode */ |
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode, | |||
47 | DRM_DEBUG_KMS("%s\n", __FILE__); | 49 | DRM_DEBUG_KMS("%s\n", __FILE__); |
48 | 50 | ||
49 | mode->clock = timing->pixclock / 1000; | 51 | mode->clock = timing->pixclock / 1000; |
52 | mode->vrefresh = timing->refresh; | ||
50 | 53 | ||
51 | mode->hdisplay = timing->xres; | 54 | mode->hdisplay = timing->xres; |
52 | mode->hsync_start = mode->hdisplay + timing->left_margin; | 55 | mode->hsync_start = mode->hdisplay + timing->left_margin; |
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode, | |||
57 | mode->vsync_start = mode->vdisplay + timing->upper_margin; | 60 | mode->vsync_start = mode->vdisplay + timing->upper_margin; |
58 | mode->vsync_end = mode->vsync_start + timing->vsync_len; | 61 | mode->vsync_end = mode->vsync_start + timing->vsync_len; |
59 | mode->vtotal = mode->vsync_end + timing->lower_margin; | 62 | mode->vtotal = mode->vsync_end + timing->lower_margin; |
63 | |||
64 | if (timing->vmode & FB_VMODE_INTERLACED) | ||
65 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
66 | |||
67 | if (timing->vmode & FB_VMODE_DOUBLE) | ||
68 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; | ||
60 | } | 69 | } |
61 | 70 | ||
62 | /* convert drm_display_mode to exynos_video_timings */ | 71 | /* convert drm_display_mode to exynos_video_timings */ |
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing, | |||
69 | memset(timing, 0, sizeof(*timing)); | 78 | memset(timing, 0, sizeof(*timing)); |
70 | 79 | ||
71 | timing->pixclock = mode->clock * 1000; | 80 | timing->pixclock = mode->clock * 1000; |
72 | timing->refresh = mode->vrefresh; | 81 | timing->refresh = drm_mode_vrefresh(mode); |
73 | 82 | ||
74 | timing->xres = mode->hdisplay; | 83 | timing->xres = mode->hdisplay; |
75 | timing->left_margin = mode->hsync_start - mode->hdisplay; | 84 | timing->left_margin = mode->hsync_start - mode->hdisplay; |
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing, | |||
92 | 101 | ||
93 | static int exynos_drm_connector_get_modes(struct drm_connector *connector) | 102 | static int exynos_drm_connector_get_modes(struct drm_connector *connector) |
94 | { | 103 | { |
95 | struct exynos_drm_manager *manager = | 104 | struct exynos_drm_connector *exynos_connector = |
96 | exynos_drm_get_manager(connector->encoder); | 105 | to_exynos_connector(connector); |
97 | struct exynos_drm_display *display = manager->display; | 106 | struct exynos_drm_manager *manager = exynos_connector->manager; |
107 | struct exynos_drm_display_ops *display_ops = manager->display_ops; | ||
98 | unsigned int count; | 108 | unsigned int count; |
99 | 109 | ||
100 | DRM_DEBUG_KMS("%s\n", __FILE__); | 110 | DRM_DEBUG_KMS("%s\n", __FILE__); |
101 | 111 | ||
102 | if (!display) { | 112 | if (!display_ops) { |
103 | DRM_DEBUG_KMS("display is null.\n"); | 113 | DRM_DEBUG_KMS("display_ops is null.\n"); |
104 | return 0; | 114 | return 0; |
105 | } | 115 | } |
106 | 116 | ||
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) | |||
112 | * P.S. in case of lcd panel, count is always 1 if success | 122 | * P.S. in case of lcd panel, count is always 1 if success |
113 | * because lcd panel has only one mode. | 123 | * because lcd panel has only one mode. |
114 | */ | 124 | */ |
115 | if (display->get_edid) { | 125 | if (display_ops->get_edid) { |
116 | int ret; | 126 | int ret; |
117 | void *edid; | 127 | void *edid; |
118 | 128 | ||
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) | |||
122 | return 0; | 132 | return 0; |
123 | } | 133 | } |
124 | 134 | ||
125 | ret = display->get_edid(manager->dev, connector, | 135 | ret = display_ops->get_edid(manager->dev, connector, |
126 | edid, MAX_EDID); | 136 | edid, MAX_EDID); |
127 | if (ret < 0) { | 137 | if (ret < 0) { |
128 | DRM_ERROR("failed to get edid data.\n"); | 138 | DRM_ERROR("failed to get edid data.\n"); |
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) | |||
140 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | 150 | struct drm_display_mode *mode = drm_mode_create(connector->dev); |
141 | struct fb_videomode *timing; | 151 | struct fb_videomode *timing; |
142 | 152 | ||
143 | if (display->get_timing) | 153 | if (display_ops->get_timing) |
144 | timing = display->get_timing(manager->dev); | 154 | timing = display_ops->get_timing(manager->dev); |
145 | else { | 155 | else { |
146 | drm_mode_destroy(connector->dev, mode); | 156 | drm_mode_destroy(connector->dev, mode); |
147 | return 0; | 157 | return 0; |
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) | |||
162 | static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | 172 | static int exynos_drm_connector_mode_valid(struct drm_connector *connector, |
163 | struct drm_display_mode *mode) | 173 | struct drm_display_mode *mode) |
164 | { | 174 | { |
165 | struct exynos_drm_manager *manager = | 175 | struct exynos_drm_connector *exynos_connector = |
166 | exynos_drm_get_manager(connector->encoder); | 176 | to_exynos_connector(connector); |
167 | struct exynos_drm_display *display = manager->display; | 177 | struct exynos_drm_manager *manager = exynos_connector->manager; |
178 | struct exynos_drm_display_ops *display_ops = manager->display_ops; | ||
168 | struct fb_videomode timing; | 179 | struct fb_videomode timing; |
169 | int ret = MODE_BAD; | 180 | int ret = MODE_BAD; |
170 | 181 | ||
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | |||
172 | 183 | ||
173 | convert_to_video_timing(&timing, mode); | 184 | convert_to_video_timing(&timing, mode); |
174 | 185 | ||
175 | if (display && display->check_timing) | 186 | if (display_ops && display_ops->check_timing) |
176 | if (!display->check_timing(manager->dev, (void *)&timing)) | 187 | if (!display_ops->check_timing(manager->dev, (void *)&timing)) |
177 | ret = MODE_OK; | 188 | ret = MODE_OK; |
178 | 189 | ||
179 | return ret; | 190 | return ret; |
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | |||
181 | 192 | ||
182 | struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) | 193 | struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) |
183 | { | 194 | { |
195 | struct drm_device *dev = connector->dev; | ||
196 | struct exynos_drm_connector *exynos_connector = | ||
197 | to_exynos_connector(connector); | ||
198 | struct drm_mode_object *obj; | ||
199 | struct drm_encoder *encoder; | ||
200 | |||
184 | DRM_DEBUG_KMS("%s\n", __FILE__); | 201 | DRM_DEBUG_KMS("%s\n", __FILE__); |
185 | 202 | ||
186 | return connector->encoder; | 203 | obj = drm_mode_object_find(dev, exynos_connector->encoder_id, |
204 | DRM_MODE_OBJECT_ENCODER); | ||
205 | if (!obj) { | ||
206 | DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", | ||
207 | exynos_connector->encoder_id); | ||
208 | return NULL; | ||
209 | } | ||
210 | |||
211 | encoder = obj_to_encoder(obj); | ||
212 | |||
213 | return encoder; | ||
187 | } | 214 | } |
188 | 215 | ||
189 | static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | 216 | static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { |
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | |||
196 | static enum drm_connector_status | 223 | static enum drm_connector_status |
197 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) | 224 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) |
198 | { | 225 | { |
199 | struct exynos_drm_manager *manager = | 226 | struct exynos_drm_connector *exynos_connector = |
200 | exynos_drm_get_manager(connector->encoder); | 227 | to_exynos_connector(connector); |
201 | struct exynos_drm_display *display = manager->display; | 228 | struct exynos_drm_manager *manager = exynos_connector->manager; |
229 | struct exynos_drm_display_ops *display_ops = | ||
230 | manager->display_ops; | ||
202 | enum drm_connector_status status = connector_status_disconnected; | 231 | enum drm_connector_status status = connector_status_disconnected; |
203 | 232 | ||
204 | DRM_DEBUG_KMS("%s\n", __FILE__); | 233 | DRM_DEBUG_KMS("%s\n", __FILE__); |
205 | 234 | ||
206 | if (display && display->is_connected) { | 235 | if (display_ops && display_ops->is_connected) { |
207 | if (display->is_connected(manager->dev)) | 236 | if (display_ops->is_connected(manager->dev)) |
208 | status = connector_status_connected; | 237 | status = connector_status_connected; |
209 | else | 238 | else |
210 | status = connector_status_disconnected; | 239 | status = connector_status_disconnected; |
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | |||
251 | 280 | ||
252 | connector = &exynos_connector->drm_connector; | 281 | connector = &exynos_connector->drm_connector; |
253 | 282 | ||
254 | switch (manager->display->type) { | 283 | switch (manager->display_ops->type) { |
255 | case EXYNOS_DISPLAY_TYPE_HDMI: | 284 | case EXYNOS_DISPLAY_TYPE_HDMI: |
256 | type = DRM_MODE_CONNECTOR_HDMIA; | 285 | type = DRM_MODE_CONNECTOR_HDMIA; |
286 | connector->interlace_allowed = true; | ||
287 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
257 | break; | 288 | break; |
258 | default: | 289 | default: |
259 | type = DRM_MODE_CONNECTOR_Unknown; | 290 | type = DRM_MODE_CONNECTOR_Unknown; |
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | |||
267 | if (err) | 298 | if (err) |
268 | goto err_connector; | 299 | goto err_connector; |
269 | 300 | ||
301 | exynos_connector->encoder_id = encoder->base.id; | ||
302 | exynos_connector->manager = manager; | ||
270 | connector->encoder = encoder; | 303 | connector->encoder = encoder; |
304 | |||
271 | err = drm_mode_connector_attach_encoder(connector, encoder); | 305 | err = drm_mode_connector_attach_encoder(connector, encoder); |
272 | if (err) { | 306 | if (err) { |
273 | DRM_ERROR("failed to attach a connector to a encoder\n"); | 307 | DRM_ERROR("failed to attach a connector to a encoder\n"); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 9337e5e2dbb6..ee43cc220853 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -29,36 +29,17 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "drm_crtc_helper.h" | 30 | #include "drm_crtc_helper.h" |
31 | 31 | ||
32 | #include "exynos_drm_crtc.h" | ||
32 | #include "exynos_drm_drv.h" | 33 | #include "exynos_drm_drv.h" |
33 | #include "exynos_drm_fb.h" | 34 | #include "exynos_drm_fb.h" |
34 | #include "exynos_drm_encoder.h" | 35 | #include "exynos_drm_encoder.h" |
36 | #include "exynos_drm_gem.h" | ||
35 | #include "exynos_drm_buf.h" | 37 | #include "exynos_drm_buf.h" |
36 | 38 | ||
37 | #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ | 39 | #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ |
38 | drm_crtc) | 40 | drm_crtc) |
39 | 41 | ||
40 | /* | 42 | /* |
41 | * Exynos specific crtc postion structure. | ||
42 | * | ||
43 | * @fb_x: offset x on a framebuffer to be displyed | ||
44 | * - the unit is screen coordinates. | ||
45 | * @fb_y: offset y on a framebuffer to be displayed | ||
46 | * - the unit is screen coordinates. | ||
47 | * @crtc_x: offset x on hardware screen. | ||
48 | * @crtc_y: offset y on hardware screen. | ||
49 | * @crtc_w: width of hardware screen. | ||
50 | * @crtc_h: height of hardware screen. | ||
51 | */ | ||
52 | struct exynos_drm_crtc_pos { | ||
53 | unsigned int fb_x; | ||
54 | unsigned int fb_y; | ||
55 | unsigned int crtc_x; | ||
56 | unsigned int crtc_y; | ||
57 | unsigned int crtc_w; | ||
58 | unsigned int crtc_h; | ||
59 | }; | ||
60 | |||
61 | /* | ||
62 | * Exynos specific crtc structure. | 43 | * Exynos specific crtc structure. |
63 | * | 44 | * |
64 | * @drm_crtc: crtc object. | 45 | * @drm_crtc: crtc object. |
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc) | |||
85 | 66 | ||
86 | exynos_drm_fn_encoder(crtc, overlay, | 67 | exynos_drm_fn_encoder(crtc, overlay, |
87 | exynos_drm_encoder_crtc_mode_set); | 68 | exynos_drm_encoder_crtc_mode_set); |
88 | exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); | 69 | exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, |
70 | exynos_drm_encoder_crtc_commit); | ||
89 | } | 71 | } |
90 | 72 | ||
91 | static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, | 73 | int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, |
92 | struct drm_framebuffer *fb, | 74 | struct drm_framebuffer *fb, |
93 | struct drm_display_mode *mode, | 75 | struct drm_display_mode *mode, |
94 | struct exynos_drm_crtc_pos *pos) | 76 | struct exynos_drm_crtc_pos *pos) |
95 | { | 77 | { |
96 | struct exynos_drm_buf_entry *entry; | 78 | struct exynos_drm_gem_buf *buffer; |
97 | unsigned int actual_w; | 79 | unsigned int actual_w; |
98 | unsigned int actual_h; | 80 | unsigned int actual_h; |
99 | 81 | ||
100 | entry = exynos_drm_fb_get_buf(fb); | 82 | buffer = exynos_drm_fb_get_buf(fb); |
101 | if (!entry) { | 83 | if (!buffer) { |
102 | DRM_LOG_KMS("entry is null.\n"); | 84 | DRM_LOG_KMS("buffer is null.\n"); |
103 | return -EFAULT; | 85 | return -EFAULT; |
104 | } | 86 | } |
105 | 87 | ||
106 | overlay->paddr = entry->paddr; | 88 | overlay->dma_addr = buffer->dma_addr; |
107 | overlay->vaddr = entry->vaddr; | 89 | overlay->vaddr = buffer->kvaddr; |
108 | 90 | ||
109 | DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", | 91 | DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", |
110 | (unsigned long)overlay->vaddr, | 92 | (unsigned long)overlay->vaddr, |
111 | (unsigned long)overlay->paddr); | 93 | (unsigned long)overlay->dma_addr); |
112 | 94 | ||
113 | actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); | 95 | actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); |
114 | actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); | 96 | actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); |
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc) | |||
171 | 153 | ||
172 | static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) | 154 | static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) |
173 | { | 155 | { |
174 | DRM_DEBUG_KMS("%s\n", __FILE__); | 156 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
175 | 157 | ||
176 | /* TODO */ | 158 | DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); |
159 | |||
160 | switch (mode) { | ||
161 | case DRM_MODE_DPMS_ON: | ||
162 | exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, | ||
163 | exynos_drm_encoder_crtc_commit); | ||
164 | break; | ||
165 | case DRM_MODE_DPMS_STANDBY: | ||
166 | case DRM_MODE_DPMS_SUSPEND: | ||
167 | case DRM_MODE_DPMS_OFF: | ||
168 | /* TODO */ | ||
169 | exynos_drm_fn_encoder(crtc, NULL, | ||
170 | exynos_drm_encoder_crtc_disable); | ||
171 | break; | ||
172 | default: | ||
173 | DRM_DEBUG_KMS("unspecified mode %d\n", mode); | ||
174 | break; | ||
175 | } | ||
177 | } | 176 | } |
178 | 177 | ||
179 | static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) | 178 | static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) |
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) | |||
185 | 184 | ||
186 | static void exynos_drm_crtc_commit(struct drm_crtc *crtc) | 185 | static void exynos_drm_crtc_commit(struct drm_crtc *crtc) |
187 | { | 186 | { |
187 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
188 | |||
188 | DRM_DEBUG_KMS("%s\n", __FILE__); | 189 | DRM_DEBUG_KMS("%s\n", __FILE__); |
189 | 190 | ||
190 | /* drm framework doesn't check NULL. */ | 191 | exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, |
192 | exynos_drm_encoder_crtc_commit); | ||
191 | } | 193 | } |
192 | 194 | ||
193 | static bool | 195 | static bool |
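The crtc_dpms() rework above routes DRM_MODE_DPMS_ON to the encoder commit path, carrying the crtc's pipe index as the callback payload, and folds STANDBY/SUSPEND/OFF into a single disable path. A minimal, self-contained model of that dispatch shape follows; the mode names mirror the DRM constants, but crtc_commit()/crtc_disable() are hypothetical stand-ins, not the driver's real callbacks.

#include <stdio.h>

enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

static void crtc_commit(int pipe)  { printf("commit pipe %d\n", pipe); }
static void crtc_disable(int pipe) { printf("disable pipe %d\n", pipe); }

/* same shape as exynos_drm_crtc_dpms(): ON commits, everything else disables */
static void crtc_dpms(int pipe, enum dpms_mode mode)
{
	switch (mode) {
	case DPMS_ON:
		crtc_commit(pipe);
		break;
	case DPMS_STANDBY:
	case DPMS_SUSPEND:
	case DPMS_OFF:
		crtc_disable(pipe);
		break;
	default:
		printf("unspecified mode %d\n", mode);
		break;
	}
}

int main(void)
{
	crtc_dpms(0, DPMS_ON);
	crtc_dpms(0, DPMS_OFF);
	return 0;
}
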
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index c584042d6d2c..25f72a62cb88 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h | |||
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); | |||
35 | int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); | 35 | int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); |
36 | void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); | 36 | void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); |
37 | 37 | ||
38 | /* | ||
39 | * Exynos specific crtc position structure. | ||
40 | * | ||
41 | * @fb_x: offset x on a framebuffer to be displayed | ||
42 | * - the unit is screen coordinates. | ||
43 | * @fb_y: offset y on a framebuffer to be displayed | ||
44 | * - the unit is screen coordinates. | ||
45 | * @crtc_x: offset x on hardware screen. | ||
46 | * @crtc_y: offset y on hardware screen. | ||
47 | * @crtc_w: width of hardware screen. | ||
48 | * @crtc_h: height of hardware screen. | ||
49 | */ | ||
50 | struct exynos_drm_crtc_pos { | ||
51 | unsigned int fb_x; | ||
52 | unsigned int fb_y; | ||
53 | unsigned int crtc_x; | ||
54 | unsigned int crtc_y; | ||
55 | unsigned int crtc_w; | ||
56 | unsigned int crtc_h; | ||
57 | }; | ||
58 | |||
59 | int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, | ||
60 | struct drm_framebuffer *fb, | ||
61 | struct drm_display_mode *mode, | ||
62 | struct exynos_drm_crtc_pos *pos); | ||
38 | #endif | 63 | #endif |
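With exynos_drm_crtc_pos now exported from this header, the interesting arithmetic sits in exynos_drm_overlay_update(): the overlay is clipped against the display mode so actual_w/actual_h never extend past the visible screen. A standalone check of that min() clipping, with made-up mode and position values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct crtc_pos {
	unsigned int fb_x, fb_y;
	unsigned int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
};

int main(void)
{
	/* 1920x1080 mode, overlay requested at (1800, 1000) sized 400x200 */
	unsigned int hdisplay = 1920, vdisplay = 1080;
	struct crtc_pos pos = { 0, 0, 1800, 1000, 400, 200 };

	unsigned int actual_w = MIN(hdisplay - pos.crtc_x, pos.crtc_w);
	unsigned int actual_h = MIN(vdisplay - pos.crtc_y, pos.crtc_h);

	printf("visible size: %ux%u\n", actual_w, actual_h);	/* 120x80 */
	return 0;
}
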
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 83810cbe3c17..53e2216de61d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "drm.h" | 29 | #include "drm.h" |
30 | #include "drm_crtc_helper.h" | ||
30 | 31 | ||
31 | #include <drm/exynos_drm.h> | 32 | #include <drm/exynos_drm.h> |
32 | 33 | ||
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) | |||
61 | 62 | ||
62 | drm_mode_config_init(dev); | 63 | drm_mode_config_init(dev); |
63 | 64 | ||
65 | /* init kms poll for handling hpd */ | ||
66 | drm_kms_helper_poll_init(dev); | ||
67 | |||
64 | exynos_drm_mode_config_init(dev); | 68 | exynos_drm_mode_config_init(dev); |
65 | 69 | ||
66 | /* | 70 | /* |
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev) | |||
116 | exynos_drm_fbdev_fini(dev); | 120 | exynos_drm_fbdev_fini(dev); |
117 | exynos_drm_device_unregister(dev); | 121 | exynos_drm_device_unregister(dev); |
118 | drm_vblank_cleanup(dev); | 122 | drm_vblank_cleanup(dev); |
123 | drm_kms_helper_poll_fini(dev); | ||
119 | drm_mode_config_cleanup(dev); | 124 | drm_mode_config_cleanup(dev); |
120 | kfree(dev->dev_private); | 125 | kfree(dev->dev_private); |
121 | 126 | ||
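The load/unload hunks add hotplug polling; the detail worth keeping in mind is symmetry, so teardown undoes setup in roughly reverse order. A condensed, non-buildable sketch of that pairing, using only the calls visible in the hunks above (error handling omitted):

static int example_load(struct drm_device *dev, unsigned long flags)
{
	drm_mode_config_init(dev);
	drm_kms_helper_poll_init(dev);		/* start hpd polling */
	exynos_drm_mode_config_init(dev);
	/* ... */
	return 0;
}

static int example_unload(struct drm_device *dev)
{
	/* ... */
	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);		/* stop hpd polling first */
	drm_mode_config_cleanup(dev);
	return 0;
}
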
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c03683f2ae72..5e02e6ecc2e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #ifndef _EXYNOS_DRM_DRV_H_ | 29 | #ifndef _EXYNOS_DRM_DRV_H_ |
30 | #define _EXYNOS_DRM_DRV_H_ | 30 | #define _EXYNOS_DRM_DRV_H_ |
31 | 31 | ||
32 | #include <linux/module.h> | ||
32 | #include "drm.h" | 33 | #include "drm.h" |
33 | 34 | ||
34 | #define MAX_CRTC 2 | 35 | #define MAX_CRTC 2 |
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops { | |||
79 | * @scan_flag: interlace or progressive way. | 80 | * @scan_flag: interlace or progressive way. |
80 | * (it could be DRM_MODE_FLAG_*) | 81 | * (it could be DRM_MODE_FLAG_*) |
81 | * @bpp: pixel size.(in bit) | 82 | * @bpp: pixel size.(in bit) |
82 | * @paddr: bus(accessed by dma) physical memory address to this overlay | 83 | * @dma_addr: bus(accessed by dma) address to the memory region allocated |
83 | * and this is physically continuous. | 84 | * for an overlay. |
84 | * @vaddr: virtual memory address to this overlay. | 85 | * @vaddr: virtual memory address to this overlay. |
85 | * @default_win: a window to be enabled. | 86 | * @default_win: a window to be enabled. |
86 | * @color_key: color key on or off. | 87 | * @color_key: color key on or off. |
@@ -108,7 +109,7 @@ struct exynos_drm_overlay { | |||
108 | unsigned int scan_flag; | 109 | unsigned int scan_flag; |
109 | unsigned int bpp; | 110 | unsigned int bpp; |
110 | unsigned int pitch; | 111 | unsigned int pitch; |
111 | dma_addr_t paddr; | 112 | dma_addr_t dma_addr; |
112 | void __iomem *vaddr; | 113 | void __iomem *vaddr; |
113 | 114 | ||
114 | bool default_win; | 115 | bool default_win; |
@@ -130,7 +131,7 @@ struct exynos_drm_overlay { | |||
130 | * @check_timing: check if timing is valid or not. | 131 | * @check_timing: check if timing is valid or not. |
131 | * @power_on: display device on or off. | 132 | * @power_on: display device on or off. |
132 | */ | 133 | */ |
133 | struct exynos_drm_display { | 134 | struct exynos_drm_display_ops { |
134 | enum exynos_drm_output_type type; | 135 | enum exynos_drm_output_type type; |
135 | bool (*is_connected)(struct device *dev); | 136 | bool (*is_connected)(struct device *dev); |
136 | int (*get_edid)(struct device *dev, struct drm_connector *connector, | 137 | int (*get_edid)(struct device *dev, struct drm_connector *connector, |
@@ -146,12 +147,14 @@ struct exynos_drm_display { | |||
146 | * @mode_set: convert drm_display_mode to hw specific display mode and | 147 | * @mode_set: convert drm_display_mode to hw specific display mode and |
147 | * would be called by encoder->mode_set(). | 148 | * would be called by encoder->mode_set(). |
148 | * @commit: set current hw specific display mode to hw. | 149 | * @commit: set current hw specific display mode to hw. |
150 | * @disable: disable hardware specific display mode. | ||
149 | * @enable_vblank: specific driver callback for enabling vblank interrupt. | 151 | * @enable_vblank: specific driver callback for enabling vblank interrupt. |
150 | * @disable_vblank: specific driver callback for disabling vblank interrupt. | 152 | * @disable_vblank: specific driver callback for disabling vblank interrupt. |
151 | */ | 153 | */ |
152 | struct exynos_drm_manager_ops { | 154 | struct exynos_drm_manager_ops { |
153 | void (*mode_set)(struct device *subdrv_dev, void *mode); | 155 | void (*mode_set)(struct device *subdrv_dev, void *mode); |
154 | void (*commit)(struct device *subdrv_dev); | 156 | void (*commit)(struct device *subdrv_dev); |
157 | void (*disable)(struct device *subdrv_dev); | ||
155 | int (*enable_vblank)(struct device *subdrv_dev); | 158 | int (*enable_vblank)(struct device *subdrv_dev); |
156 | void (*disable_vblank)(struct device *subdrv_dev); | 159 | void (*disable_vblank)(struct device *subdrv_dev); |
157 | }; | 160 | }; |
@@ -178,7 +181,7 @@ struct exynos_drm_manager { | |||
178 | int pipe; | 181 | int pipe; |
179 | struct exynos_drm_manager_ops *ops; | 182 | struct exynos_drm_manager_ops *ops; |
180 | struct exynos_drm_overlay_ops *overlay_ops; | 183 | struct exynos_drm_overlay_ops *overlay_ops; |
181 | struct exynos_drm_display *display; | 184 | struct exynos_drm_display_ops *display_ops; |
182 | }; | 185 | }; |
183 | 186 | ||
184 | /* | 187 | /* |
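The struct rename (exynos_drm_display becomes exynos_drm_display_ops, reached via manager->display_ops) only shifts naming, but it helps to see how a sub-driver is expected to wire the manager up. A sketch of that wiring, mirroring the fimd hunks further down; every example_* identifier is a placeholder, not a symbol from the patch:

static struct exynos_drm_display_ops example_display_ops = {
	.type = EXYNOS_DISPLAY_TYPE_LCD,
	/* .is_connected, .get_timing, .power_on, ... */
};

static struct exynos_drm_manager_ops example_manager_ops = {
	.commit		= example_commit,
	.disable	= example_disable,	/* new callback added above */
	.enable_vblank	= example_enable_vblank,
	.disable_vblank	= example_disable_vblank,
};

/* in the sub-driver probe: */
subdrv->manager.pipe = -1;
subdrv->manager.ops = &example_manager_ops;
subdrv->manager.overlay_ops = &example_overlay_ops;
subdrv->manager.display_ops = &example_display_ops;	/* was .display */
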
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 7cf6fa86a67e..153061415baf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c | |||
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
53 | struct drm_device *dev = encoder->dev; | 53 | struct drm_device *dev = encoder->dev; |
54 | struct drm_connector *connector; | 54 | struct drm_connector *connector; |
55 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | 55 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); |
56 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
56 | 57 | ||
57 | DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); | 58 | DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); |
58 | 59 | ||
60 | switch (mode) { | ||
61 | case DRM_MODE_DPMS_ON: | ||
62 | if (manager_ops && manager_ops->commit) | ||
63 | manager_ops->commit(manager->dev); | ||
64 | break; | ||
65 | case DRM_MODE_DPMS_STANDBY: | ||
66 | case DRM_MODE_DPMS_SUSPEND: | ||
67 | case DRM_MODE_DPMS_OFF: | ||
68 | /* TODO */ | ||
69 | if (manager_ops && manager_ops->disable) | ||
70 | manager_ops->disable(manager->dev); | ||
71 | break; | ||
72 | default: | ||
73 | DRM_ERROR("unspecified mode %d\n", mode); | ||
74 | break; | ||
75 | } | ||
76 | |||
59 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 77 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
60 | if (connector->encoder == encoder) { | 78 | if (connector->encoder == encoder) { |
61 | struct exynos_drm_display *display = manager->display; | 79 | struct exynos_drm_display_ops *display_ops = |
80 | manager->display_ops; | ||
62 | 81 | ||
63 | if (display && display->power_on) | 82 | DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", |
64 | display->power_on(manager->dev, mode); | 83 | connector->base.id, mode); |
84 | if (display_ops && display_ops->power_on) | ||
85 | display_ops->power_on(manager->dev, mode); | ||
65 | } | 86 | } |
66 | } | 87 | } |
67 | } | 88 | } |
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) | |||
116 | { | 137 | { |
117 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | 138 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); |
118 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | 139 | struct exynos_drm_manager_ops *manager_ops = manager->ops; |
119 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
120 | 140 | ||
121 | DRM_DEBUG_KMS("%s\n", __FILE__); | 141 | DRM_DEBUG_KMS("%s\n", __FILE__); |
122 | 142 | ||
123 | if (manager_ops && manager_ops->commit) | 143 | if (manager_ops && manager_ops->commit) |
124 | manager_ops->commit(manager->dev); | 144 | manager_ops->commit(manager->dev); |
125 | |||
126 | if (overlay_ops && overlay_ops->commit) | ||
127 | overlay_ops->commit(manager->dev); | ||
128 | } | 145 | } |
129 | 146 | ||
130 | static struct drm_crtc * | 147 | static struct drm_crtc * |
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, | |||
208 | { | 225 | { |
209 | struct drm_device *dev = crtc->dev; | 226 | struct drm_device *dev = crtc->dev; |
210 | struct drm_encoder *encoder; | 227 | struct drm_encoder *encoder; |
228 | struct exynos_drm_private *private = dev->dev_private; | ||
229 | struct exynos_drm_manager *manager; | ||
211 | 230 | ||
212 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 231 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
213 | if (encoder->crtc != crtc) | 232 | /* |
214 | continue; | 233 | * if crtc is detached from encoder, check pipe, |
234 | * otherwise check crtc attached to encoder | ||
235 | */ | ||
236 | if (!encoder->crtc) { | ||
237 | manager = to_exynos_encoder(encoder)->manager; | ||
238 | if (manager->pipe < 0 || | ||
239 | private->crtc[manager->pipe] != crtc) | ||
240 | continue; | ||
241 | } else { | ||
242 | if (encoder->crtc != crtc) | ||
243 | continue; | ||
244 | } | ||
215 | 245 | ||
216 | fn(encoder, data); | 246 | fn(encoder, data); |
217 | } | 247 | } |
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) | |||
250 | struct exynos_drm_manager *manager = | 280 | struct exynos_drm_manager *manager = |
251 | to_exynos_encoder(encoder)->manager; | 281 | to_exynos_encoder(encoder)->manager; |
252 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | 282 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; |
283 | int crtc = *(int *)data; | ||
284 | |||
285 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
286 | |||
287 | /* | ||
288 | * when crtc is detached from encoder, this pipe is used | ||
289 | * to select manager operation | ||
290 | */ | ||
291 | manager->pipe = crtc; | ||
253 | 292 | ||
254 | overlay_ops->commit(manager->dev); | 293 | if (overlay_ops && overlay_ops->commit) |
294 | overlay_ops->commit(manager->dev); | ||
255 | } | 295 | } |
256 | 296 | ||
257 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) | 297 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) |
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) | |||
261 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | 301 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; |
262 | struct exynos_drm_overlay *overlay = data; | 302 | struct exynos_drm_overlay *overlay = data; |
263 | 303 | ||
264 | overlay_ops->mode_set(manager->dev, overlay); | 304 | if (overlay_ops && overlay_ops->mode_set) |
305 | overlay_ops->mode_set(manager->dev, overlay); | ||
306 | } | ||
307 | |||
308 | void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) | ||
309 | { | ||
310 | struct exynos_drm_manager *manager = | ||
311 | to_exynos_encoder(encoder)->manager; | ||
312 | struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; | ||
313 | |||
314 | DRM_DEBUG_KMS("\n"); | ||
315 | |||
316 | if (overlay_ops && overlay_ops->disable) | ||
317 | overlay_ops->disable(manager->dev); | ||
318 | |||
319 | /* | ||
320 | * crtc is already detached from the encoder and the last | ||
321 | * step of the detach path has completed, so | ||
322 | * clear the pipe in the manager to prevent repeated calls | ||
323 | */ | ||
324 | if (!encoder->crtc) | ||
325 | manager->pipe = -1; | ||
265 | } | 326 | } |
266 | 327 | ||
267 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | 328 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); |
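A recurring pattern in this file's hunks is the NULL-guarded dispatch through an ops table: both the table pointer and the individual hook are checked before the call, so a sub-driver that leaves a callback unimplemented no longer oopses. A small runnable illustration of that guard (the types and names here are invented for the demo):

#include <stddef.h>
#include <stdio.h>

struct overlay_ops {
	void (*commit)(void *dev);
	void (*disable)(void *dev);
};

static void do_commit(void *dev) { (void)dev; puts("overlay commit"); }

/* ops table with .disable intentionally left NULL */
static struct overlay_ops ops = { .commit = do_commit, .disable = NULL };

int main(void)
{
	struct overlay_ops *overlay_ops = &ops;

	/* guard both the table and the individual hook, as the patch does */
	if (overlay_ops && overlay_ops->commit)
		overlay_ops->commit(NULL);
	if (overlay_ops && overlay_ops->disable)
		overlay_ops->disable(NULL);	/* skipped, no crash */

	return 0;
}
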
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h index 5ecd645d06a9..a22acfbf0e4e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h | |||
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); | |||
41 | void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); | 41 | void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); |
42 | void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); | 42 | void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); |
43 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); | 43 | void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); |
44 | void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data); | ||
44 | 45 | ||
45 | #endif | 46 | #endif |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 48d29cfd5240..5bf4a1ac7f82 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -29,7 +29,9 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "drm_crtc.h" | 30 | #include "drm_crtc.h" |
31 | #include "drm_crtc_helper.h" | 31 | #include "drm_crtc_helper.h" |
32 | #include "drm_fb_helper.h" | ||
32 | 33 | ||
34 | #include "exynos_drm_drv.h" | ||
33 | #include "exynos_drm_fb.h" | 35 | #include "exynos_drm_fb.h" |
34 | #include "exynos_drm_buf.h" | 36 | #include "exynos_drm_buf.h" |
35 | #include "exynos_drm_gem.h" | 37 | #include "exynos_drm_gem.h" |
@@ -41,14 +43,14 @@ | |||
41 | * | 43 | * |
42 | * @fb: drm framebuffer object. | 44 | * @fb: drm framebuffer object. |
43 | * @exynos_gem_obj: exynos specific gem object containing a gem object. | 45 | * @exynos_gem_obj: exynos specific gem object containing a gem object. |
44 | * @entry: pointer to exynos drm buffer entry object. | 46 | * @buffer: pointer to exynos_drm_gem_buf object. |
45 | * - containing only the information to physically continuous memory | 47 | * - contains the information about the memory region allocated |
46 | * region allocated at default framebuffer creation. | 48 | * at default framebuffer creation. |
47 | */ | 49 | */ |
48 | struct exynos_drm_fb { | 50 | struct exynos_drm_fb { |
49 | struct drm_framebuffer fb; | 51 | struct drm_framebuffer fb; |
50 | struct exynos_drm_gem_obj *exynos_gem_obj; | 52 | struct exynos_drm_gem_obj *exynos_gem_obj; |
51 | struct exynos_drm_buf_entry *entry; | 53 | struct exynos_drm_gem_buf *buffer; |
52 | }; | 54 | }; |
53 | 55 | ||
54 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | 56 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) |
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | |||
63 | * default framebuffer has no gem object so | 65 | * default framebuffer has no gem object so |
64 | * a buffer of the default framebuffer should be released at here. | 66 | * a buffer of the default framebuffer should be released at here. |
65 | */ | 67 | */ |
66 | if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) | 68 | if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer) |
67 | exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); | 69 | exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer); |
68 | 70 | ||
69 | kfree(exynos_fb); | 71 | kfree(exynos_fb); |
70 | exynos_fb = NULL; | 72 | exynos_fb = NULL; |
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, | |||
143 | */ | 145 | */ |
144 | if (!mode_cmd->handle) { | 146 | if (!mode_cmd->handle) { |
145 | if (!file_priv) { | 147 | if (!file_priv) { |
146 | struct exynos_drm_buf_entry *entry; | 148 | struct exynos_drm_gem_buf *buffer; |
147 | 149 | ||
148 | /* | 150 | /* |
149 | * in case that file_priv is NULL, it allocates | 151 | * in case that file_priv is NULL, it allocates |
150 | * only buffer and this buffer would be used | 152 | * only buffer and this buffer would be used |
151 | * for default framebuffer. | 153 | * for default framebuffer. |
152 | */ | 154 | */ |
153 | entry = exynos_drm_buf_create(dev, size); | 155 | buffer = exynos_drm_buf_create(dev, size); |
154 | if (IS_ERR(entry)) { | 156 | if (IS_ERR(buffer)) { |
155 | ret = PTR_ERR(entry); | 157 | ret = PTR_ERR(buffer); |
156 | goto err_buffer; | 158 | goto err_buffer; |
157 | } | 159 | } |
158 | 160 | ||
159 | exynos_fb->entry = entry; | 161 | exynos_fb->buffer = buffer; |
160 | 162 | ||
161 | DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", | 163 | DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n", |
162 | (unsigned long)entry->paddr, size); | 164 | (unsigned long)buffer->dma_addr, size); |
163 | 165 | ||
164 | goto out; | 166 | goto out; |
165 | } else { | 167 | } else { |
166 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, | 168 | exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, |
167 | size, | 169 | &mode_cmd->handle, |
168 | &mode_cmd->handle); | 170 | size); |
169 | if (IS_ERR(exynos_gem_obj)) { | 171 | if (IS_ERR(exynos_gem_obj)) { |
170 | ret = PTR_ERR(exynos_gem_obj); | 172 | ret = PTR_ERR(exynos_gem_obj); |
171 | goto err_buffer; | 173 | goto err_buffer; |
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, | |||
189 | * so that default framebuffer has no its own gem object, | 191 | * so that default framebuffer has no its own gem object, |
190 | * only its own buffer object. | 192 | * only its own buffer object. |
191 | */ | 193 | */ |
192 | exynos_fb->entry = exynos_gem_obj->entry; | 194 | exynos_fb->buffer = exynos_gem_obj->buffer; |
193 | 195 | ||
194 | DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", | 196 | DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n", |
195 | (unsigned long)exynos_fb->entry->paddr, size, | 197 | (unsigned long)exynos_fb->buffer->dma_addr, size, |
196 | (unsigned int)&exynos_gem_obj->base); | 198 | (unsigned int)&exynos_gem_obj->base); |
197 | 199 | ||
198 | out: | 200 | out: |
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev, | |||
220 | return exynos_drm_fb_init(file_priv, dev, mode_cmd); | 222 | return exynos_drm_fb_init(file_priv, dev, mode_cmd); |
221 | } | 223 | } |
222 | 224 | ||
223 | struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) | 225 | struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) |
224 | { | 226 | { |
225 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 227 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); |
226 | struct exynos_drm_buf_entry *entry; | 228 | struct exynos_drm_gem_buf *buffer; |
227 | 229 | ||
228 | DRM_DEBUG_KMS("%s\n", __FILE__); | 230 | DRM_DEBUG_KMS("%s\n", __FILE__); |
229 | 231 | ||
230 | entry = exynos_fb->entry; | 232 | buffer = exynos_fb->buffer; |
231 | if (!entry) | 233 | if (!buffer) |
232 | return NULL; | 234 | return NULL; |
233 | 235 | ||
234 | DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", | 236 | DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", |
235 | (unsigned long)entry->vaddr, | 237 | (unsigned long)buffer->kvaddr, |
236 | (unsigned long)entry->paddr); | 238 | (unsigned long)buffer->dma_addr); |
237 | 239 | ||
238 | return entry; | 240 | return buffer; |
241 | } | ||
242 | |||
243 | static void exynos_drm_output_poll_changed(struct drm_device *dev) | ||
244 | { | ||
245 | struct exynos_drm_private *private = dev->dev_private; | ||
246 | struct drm_fb_helper *fb_helper = private->fb_helper; | ||
247 | |||
248 | if (fb_helper) | ||
249 | drm_fb_helper_hotplug_event(fb_helper); | ||
239 | } | 250 | } |
240 | 251 | ||
241 | static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { | 252 | static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { |
242 | .fb_create = exynos_drm_fb_create, | 253 | .fb_create = exynos_drm_fb_create, |
254 | .output_poll_changed = exynos_drm_output_poll_changed, | ||
243 | }; | 255 | }; |
244 | 256 | ||
245 | void exynos_drm_mode_config_init(struct drm_device *dev) | 257 | void exynos_drm_mode_config_init(struct drm_device *dev) |
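The framebuffer code above leans on the kernel's error-pointer convention: exynos_drm_buf_create() returns either a valid buffer or an encoded errno, and callers test it with IS_ERR()/PTR_ERR() before touching the result. The real macros live in <linux/err.h>; the fragment below is only a simplified userspace model of that convention, runnable on its own:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static int   is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for exynos_drm_buf_create(): the error rides in the pointer value */
static void *buf_create(size_t size)
{
	if (size == 0)
		return err_ptr(-EINVAL);
	return malloc(size);
}

int main(void)
{
	void *buf = buf_create(0);

	if (is_err(buf))
		printf("buf_create failed: %ld\n", ptr_err(buf));	/* -22 */

	buf = buf_create(4096);
	if (!is_err(buf)) {
		puts("buf_create ok");
		free(buf);
	}
	return 0;
}
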
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 1f4b3d1a7713..836f41008187 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -33,6 +33,7 @@ | |||
33 | 33 | ||
34 | #include "exynos_drm_drv.h" | 34 | #include "exynos_drm_drv.h" |
35 | #include "exynos_drm_fb.h" | 35 | #include "exynos_drm_fb.h" |
36 | #include "exynos_drm_gem.h" | ||
36 | #include "exynos_drm_buf.h" | 37 | #include "exynos_drm_buf.h" |
37 | 38 | ||
38 | #define MAX_CONNECTOR 4 | 39 | #define MAX_CONNECTOR 4 |
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = { | |||
85 | }; | 86 | }; |
86 | 87 | ||
87 | static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | 88 | static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, |
88 | struct drm_framebuffer *fb, | 89 | struct drm_framebuffer *fb) |
89 | unsigned int fb_width, | ||
90 | unsigned int fb_height) | ||
91 | { | 90 | { |
92 | struct fb_info *fbi = helper->fbdev; | 91 | struct fb_info *fbi = helper->fbdev; |
93 | struct drm_device *dev = helper->dev; | 92 | struct drm_device *dev = helper->dev; |
94 | struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); | 93 | struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); |
95 | struct exynos_drm_buf_entry *entry; | 94 | struct exynos_drm_gem_buf *buffer; |
96 | unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); | 95 | unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); |
97 | unsigned long offset; | 96 | unsigned long offset; |
98 | 97 | ||
99 | DRM_DEBUG_KMS("%s\n", __FILE__); | 98 | DRM_DEBUG_KMS("%s\n", __FILE__); |
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, | |||
101 | exynos_fb->fb = fb; | 100 | exynos_fb->fb = fb; |
102 | 101 | ||
103 | drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); | 102 | drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); |
104 | drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); | 103 | drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); |
105 | 104 | ||
106 | entry = exynos_drm_fb_get_buf(fb); | 105 | buffer = exynos_drm_fb_get_buf(fb); |
107 | if (!entry) { | 106 | if (!buffer) { |
108 | DRM_LOG_KMS("entry is null.\n"); | 107 | DRM_LOG_KMS("buffer is null.\n"); |
109 | return -EFAULT; | 108 | return -EFAULT; |
110 | } | 109 | } |
111 | 110 | ||
112 | offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); | 111 | offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); |
113 | offset += fbi->var.yoffset * fb->pitch; | 112 | offset += fbi->var.yoffset * fb->pitch; |
114 | 113 | ||
115 | dev->mode_config.fb_base = entry->paddr; | 114 | dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; |
116 | fbi->screen_base = entry->vaddr + offset; | 115 | fbi->screen_base = buffer->kvaddr + offset; |
117 | fbi->fix.smem_start = entry->paddr + offset; | 116 | fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); |
118 | fbi->screen_size = size; | 117 | fbi->screen_size = size; |
119 | fbi->fix.smem_len = size; | 118 | fbi->fix.smem_len = size; |
120 | 119 | ||
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, | |||
171 | goto out; | 170 | goto out; |
172 | } | 171 | } |
173 | 172 | ||
174 | ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, | 173 | ret = exynos_drm_fbdev_update(helper, helper->fb); |
175 | sizes->fb_height); | ||
176 | if (ret < 0) | 174 | if (ret < 0) |
177 | fb_dealloc_cmap(&fbi->cmap); | 175 | fb_dealloc_cmap(&fbi->cmap); |
178 | 176 | ||
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper, | |||
235 | } | 233 | } |
236 | 234 | ||
237 | helper->fb = exynos_fbdev->fb; | 235 | helper->fb = exynos_fbdev->fb; |
238 | return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, | 236 | return exynos_drm_fbdev_update(helper, helper->fb); |
239 | sizes->fb_height); | ||
240 | } | 237 | } |
241 | 238 | ||
242 | static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, | 239 | static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, |
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) | |||
405 | fb_helper = private->fb_helper; | 402 | fb_helper = private->fb_helper; |
406 | 403 | ||
407 | if (fb_helper) { | 404 | if (fb_helper) { |
405 | struct list_head temp_list; | ||
406 | |||
407 | INIT_LIST_HEAD(&temp_list); | ||
408 | |||
409 | /* | ||
410 | * fb_helper is reinitialized but the kernel fb is reused, | ||
411 | * so kernel_fb_list needs to be backed up and restored | ||
412 | */ | ||
413 | if (!list_empty(&fb_helper->kernel_fb_list)) | ||
414 | list_replace_init(&fb_helper->kernel_fb_list, | ||
415 | &temp_list); | ||
416 | |||
408 | drm_fb_helper_fini(fb_helper); | 417 | drm_fb_helper_fini(fb_helper); |
409 | 418 | ||
410 | ret = drm_fb_helper_init(dev, fb_helper, | 419 | ret = drm_fb_helper_init(dev, fb_helper, |
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) | |||
414 | return ret; | 423 | return ret; |
415 | } | 424 | } |
416 | 425 | ||
426 | if (!list_empty(&temp_list)) | ||
427 | list_replace(&temp_list, &fb_helper->kernel_fb_list); | ||
428 | |||
417 | ret = drm_fb_helper_single_add_all_connectors(fb_helper); | 429 | ret = drm_fb_helper_single_add_all_connectors(fb_helper); |
418 | if (ret < 0) { | 430 | if (ret < 0) { |
419 | DRM_ERROR("failed to add fb helper to connectors\n"); | 431 | DRM_ERROR("failed to add fb helper to connectors\n"); |
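exynos_drm_fbdev_update() now derives its geometry from the framebuffer itself (fb->width/fb->height) instead of caller-supplied sizes, and the rest is plain byte arithmetic: total size from width * height * bytes-per-pixel, and the panning offset from xoffset/yoffset against the pitch. A standalone rerun of those calculations with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1280, height = 720, bpp = 32;
	unsigned int pitch = width * (bpp >> 3);		/* bytes per scanline */
	unsigned int size  = width * height * (bpp >> 3);	/* total fb bytes */

	/* panning offsets, as in fbi->var.xoffset / yoffset */
	unsigned int xoffset = 16, yoffset = 8;
	unsigned long offset = xoffset * (bpp >> 3) + yoffset * pitch;

	printf("pitch=%u size=%u pan offset=%lu\n", pitch, size, offset);
	/* screen_base would be buffer->kvaddr + offset, smem_start dma_addr + offset */
	return 0;
}
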
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 4659c88cdd9b..db3b3d9e731d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -64,7 +64,7 @@ struct fimd_win_data { | |||
64 | unsigned int fb_width; | 64 | unsigned int fb_width; |
65 | unsigned int fb_height; | 65 | unsigned int fb_height; |
66 | unsigned int bpp; | 66 | unsigned int bpp; |
67 | dma_addr_t paddr; | 67 | dma_addr_t dma_addr; |
68 | void __iomem *vaddr; | 68 | void __iomem *vaddr; |
69 | unsigned int buf_offsize; | 69 | unsigned int buf_offsize; |
70 | unsigned int line_size; /* bytes */ | 70 | unsigned int line_size; /* bytes */ |
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode) | |||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
127 | static struct exynos_drm_display fimd_display = { | 127 | static struct exynos_drm_display_ops fimd_display_ops = { |
128 | .type = EXYNOS_DISPLAY_TYPE_LCD, | 128 | .type = EXYNOS_DISPLAY_TYPE_LCD, |
129 | .is_connected = fimd_display_is_connected, | 129 | .is_connected = fimd_display_is_connected, |
130 | .get_timing = fimd_get_timing, | 130 | .get_timing = fimd_get_timing, |
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev) | |||
177 | writel(val, ctx->regs + VIDCON0); | 177 | writel(val, ctx->regs + VIDCON0); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void fimd_disable(struct device *dev) | ||
181 | { | ||
182 | struct fimd_context *ctx = get_fimd_context(dev); | ||
183 | struct exynos_drm_subdrv *subdrv = &ctx->subdrv; | ||
184 | struct drm_device *drm_dev = subdrv->drm_dev; | ||
185 | struct exynos_drm_manager *manager = &subdrv->manager; | ||
186 | u32 val; | ||
187 | |||
188 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
189 | |||
190 | /* fimd dma off */ | ||
191 | val = readl(ctx->regs + VIDCON0); | ||
192 | val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F); | ||
193 | writel(val, ctx->regs + VIDCON0); | ||
194 | |||
195 | /* | ||
196 | * if vblank is still enabled when dma is turned off, | ||
197 | * disable the vsync interrupt. | ||
198 | */ | ||
199 | if (drm_dev->vblank_enabled[manager->pipe] && | ||
200 | atomic_read(&drm_dev->vblank_refcount[manager->pipe])) { | ||
201 | drm_vblank_put(drm_dev, manager->pipe); | ||
202 | |||
203 | /* | ||
204 | * if vblank_disable_allowed is 0 then disable the | ||
205 | * vsync interrupt right now; otherwise the vsync interrupt | ||
206 | * will be disabled by the drm timer once the current process | ||
207 | * gives up ownership of the vblank event. | ||
208 | */ | ||
209 | if (!drm_dev->vblank_disable_allowed) | ||
210 | drm_vblank_off(drm_dev, manager->pipe); | ||
211 | } | ||
212 | } | ||
213 | |||
180 | static int fimd_enable_vblank(struct device *dev) | 214 | static int fimd_enable_vblank(struct device *dev) |
181 | { | 215 | { |
182 | struct fimd_context *ctx = get_fimd_context(dev); | 216 | struct fimd_context *ctx = get_fimd_context(dev); |
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev) | |||
220 | 254 | ||
221 | static struct exynos_drm_manager_ops fimd_manager_ops = { | 255 | static struct exynos_drm_manager_ops fimd_manager_ops = { |
222 | .commit = fimd_commit, | 256 | .commit = fimd_commit, |
257 | .disable = fimd_disable, | ||
223 | .enable_vblank = fimd_enable_vblank, | 258 | .enable_vblank = fimd_enable_vblank, |
224 | .disable_vblank = fimd_disable_vblank, | 259 | .disable_vblank = fimd_disable_vblank, |
225 | }; | 260 | }; |
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev, | |||
251 | win_data->ovl_height = overlay->crtc_height; | 286 | win_data->ovl_height = overlay->crtc_height; |
252 | win_data->fb_width = overlay->fb_width; | 287 | win_data->fb_width = overlay->fb_width; |
253 | win_data->fb_height = overlay->fb_height; | 288 | win_data->fb_height = overlay->fb_height; |
254 | win_data->paddr = overlay->paddr + offset; | 289 | win_data->dma_addr = overlay->dma_addr + offset; |
255 | win_data->vaddr = overlay->vaddr + offset; | 290 | win_data->vaddr = overlay->vaddr + offset; |
256 | win_data->bpp = overlay->bpp; | 291 | win_data->bpp = overlay->bpp; |
257 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * | 292 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * |
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev, | |||
263 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | 298 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", |
264 | win_data->ovl_width, win_data->ovl_height); | 299 | win_data->ovl_width, win_data->ovl_height); |
265 | DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", | 300 | DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", |
266 | (unsigned long)win_data->paddr, | 301 | (unsigned long)win_data->dma_addr, |
267 | (unsigned long)win_data->vaddr); | 302 | (unsigned long)win_data->vaddr); |
268 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", | 303 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", |
269 | overlay->fb_width, overlay->crtc_width); | 304 | overlay->fb_width, overlay->crtc_width); |
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev) | |||
376 | writel(val, ctx->regs + SHADOWCON); | 411 | writel(val, ctx->regs + SHADOWCON); |
377 | 412 | ||
378 | /* buffer start address */ | 413 | /* buffer start address */ |
379 | val = win_data->paddr; | 414 | val = (unsigned long)win_data->dma_addr; |
380 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); | 415 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); |
381 | 416 | ||
382 | /* buffer end address */ | 417 | /* buffer end address */ |
383 | size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); | 418 | size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); |
384 | val = win_data->paddr + size; | 419 | val = (unsigned long)(win_data->dma_addr + size); |
385 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); | 420 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); |
386 | 421 | ||
387 | DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", | 422 | DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", |
388 | (unsigned long)win_data->paddr, val, size); | 423 | (unsigned long)win_data->dma_addr, val, size); |
389 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | 424 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", |
390 | win_data->ovl_width, win_data->ovl_height); | 425 | win_data->ovl_width, win_data->ovl_height); |
391 | 426 | ||
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev) | |||
447 | static void fimd_win_disable(struct device *dev) | 482 | static void fimd_win_disable(struct device *dev) |
448 | { | 483 | { |
449 | struct fimd_context *ctx = get_fimd_context(dev); | 484 | struct fimd_context *ctx = get_fimd_context(dev); |
450 | struct fimd_win_data *win_data; | ||
451 | int win = ctx->default_win; | 485 | int win = ctx->default_win; |
452 | u32 val; | 486 | u32 val; |
453 | 487 | ||
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev) | |||
456 | if (win < 0 || win > WINDOWS_NR) | 490 | if (win < 0 || win > WINDOWS_NR) |
457 | return; | 491 | return; |
458 | 492 | ||
459 | win_data = &ctx->win_data[win]; | ||
460 | |||
461 | /* protect windows */ | 493 | /* protect windows */ |
462 | val = readl(ctx->regs + SHADOWCON); | 494 | val = readl(ctx->regs + SHADOWCON); |
463 | val |= SHADOWCON_WINx_PROTECT(win); | 495 | val |= SHADOWCON_WINx_PROTECT(win); |
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | |||
528 | /* VSYNC interrupt */ | 560 | /* VSYNC interrupt */ |
529 | writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); | 561 | writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); |
530 | 562 | ||
563 | /* | ||
564 | * when vblank_disable_allowed is 1, manager->pipe can still be -1 | ||
565 | * here: the disable callback does not turn the vsync interrupt | ||
566 | * off immediately, so a vsync interrupt can still arrive at this | ||
567 | * point. the vsync interrupt will be disabled by the drm timer | ||
568 | * handler later. | ||
569 | */ | ||
570 | if (manager->pipe == -1) | ||
571 | return IRQ_HANDLED; | ||
572 | |||
531 | drm_handle_vblank(drm_dev, manager->pipe); | 573 | drm_handle_vblank(drm_dev, manager->pipe); |
532 | fimd_finish_pageflip(drm_dev, manager->pipe); | 574 | fimd_finish_pageflip(drm_dev, manager->pipe); |
533 | 575 | ||
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | |||
548 | */ | 590 | */ |
549 | drm_dev->irq_enabled = 1; | 591 | drm_dev->irq_enabled = 1; |
550 | 592 | ||
551 | /* | ||
552 | * with vblank_disable_allowed = 1, vblank interrupt will be disabled | ||
553 | * by drm timer once a current process gives up ownership of | ||
554 | * vblank event.(drm_vblank_put function was called) | ||
555 | */ | ||
556 | drm_dev->vblank_disable_allowed = 1; | ||
557 | |||
558 | return 0; | 593 | return 0; |
559 | } | 594 | } |
560 | 595 | ||
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) | |||
731 | subdrv->manager.pipe = -1; | 766 | subdrv->manager.pipe = -1; |
732 | subdrv->manager.ops = &fimd_manager_ops; | 767 | subdrv->manager.ops = &fimd_manager_ops; |
733 | subdrv->manager.overlay_ops = &fimd_overlay_ops; | 768 | subdrv->manager.overlay_ops = &fimd_overlay_ops; |
734 | subdrv->manager.display = &fimd_display; | 769 | subdrv->manager.display_ops = &fimd_display_ops; |
735 | subdrv->manager.dev = dev; | 770 | subdrv->manager.dev = dev; |
736 | 771 | ||
737 | platform_set_drvdata(pdev, ctx); | 772 | platform_set_drvdata(pdev, ctx); |
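In fimd_win_commit() the buffer addresses are now cast to unsigned long before being written to VIDWx_BUF_START/END, presumably because dma_addr_t is not guaranteed to be 32 bits wide while the register only latches a 32-bit value. A tiny standalone demonstration of that narrowing (the address is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend dma_addr_t is 64-bit, as with LPAE/64-bit DMA configs */
	uint64_t dma_addr = 0x0000000180001000ULL;

	/* what a 32-bit register write would actually latch */
	uint32_t reg_val = (uint32_t)dma_addr;

	printf("dma_addr = 0x%llx, register latches 0x%lx\n",
	       (unsigned long long)dma_addr, (unsigned long)reg_val);
	return 0;
}
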
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index a8e7a88906ed..aba0fe47f7ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) | |||
62 | return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; | 62 | return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; |
63 | } | 63 | } |
64 | 64 | ||
65 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, | 65 | static struct exynos_drm_gem_obj |
66 | struct drm_device *dev, unsigned int size, | 66 | *exynos_drm_gem_init(struct drm_device *drm_dev, |
67 | unsigned int *handle) | 67 | struct drm_file *file_priv, unsigned int *handle, |
68 | unsigned int size) | ||
68 | { | 69 | { |
69 | struct exynos_drm_gem_obj *exynos_gem_obj; | 70 | struct exynos_drm_gem_obj *exynos_gem_obj; |
70 | struct exynos_drm_buf_entry *entry; | ||
71 | struct drm_gem_object *obj; | 71 | struct drm_gem_object *obj; |
72 | int ret; | 72 | int ret; |
73 | 73 | ||
74 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
75 | |||
76 | size = roundup(size, PAGE_SIZE); | ||
77 | |||
78 | exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); | 74 | exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); |
79 | if (!exynos_gem_obj) { | 75 | if (!exynos_gem_obj) { |
80 | DRM_ERROR("failed to allocate exynos gem object.\n"); | 76 | DRM_ERROR("failed to allocate exynos gem object.\n"); |
81 | return ERR_PTR(-ENOMEM); | 77 | return ERR_PTR(-ENOMEM); |
82 | } | 78 | } |
83 | 79 | ||
84 | /* allocate the new buffer object and memory region. */ | ||
85 | entry = exynos_drm_buf_create(dev, size); | ||
86 | if (!entry) { | ||
87 | kfree(exynos_gem_obj); | ||
88 | return ERR_PTR(-ENOMEM); | ||
89 | } | ||
90 | |||
91 | exynos_gem_obj->entry = entry; | ||
92 | |||
93 | obj = &exynos_gem_obj->base; | 80 | obj = &exynos_gem_obj->base; |
94 | 81 | ||
95 | ret = drm_gem_object_init(dev, obj, size); | 82 | ret = drm_gem_object_init(drm_dev, obj, size); |
96 | if (ret < 0) { | 83 | if (ret < 0) { |
97 | DRM_ERROR("failed to initailize gem object.\n"); | 84 | DRM_ERROR("failed to initialize gem object.\n"); |
98 | goto err_obj_init; | 85 | ret = -EINVAL; |
86 | goto err_object_init; | ||
99 | } | 87 | } |
100 | 88 | ||
101 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); | 89 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); |
@@ -127,24 +115,50 @@ err_handle_create: | |||
127 | err_create_mmap_offset: | 115 | err_create_mmap_offset: |
128 | drm_gem_object_release(obj); | 116 | drm_gem_object_release(obj); |
129 | 117 | ||
130 | err_obj_init: | 118 | err_object_init: |
131 | exynos_drm_buf_destroy(dev, exynos_gem_obj->entry); | ||
132 | |||
133 | kfree(exynos_gem_obj); | 119 | kfree(exynos_gem_obj); |
134 | 120 | ||
135 | return ERR_PTR(ret); | 121 | return ERR_PTR(ret); |
136 | } | 122 | } |
137 | 123 | ||
124 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | ||
125 | struct drm_file *file_priv, | ||
126 | unsigned int *handle, unsigned long size) | ||
127 | { | ||
128 | |||
129 | struct exynos_drm_gem_obj *exynos_gem_obj = NULL; | ||
130 | struct exynos_drm_gem_buf *buffer; | ||
131 | |||
132 | size = roundup(size, PAGE_SIZE); | ||
133 | |||
134 | DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); | ||
135 | |||
136 | buffer = exynos_drm_buf_create(dev, size); | ||
137 | if (IS_ERR(buffer)) { | ||
138 | return ERR_CAST(buffer); | ||
139 | } | ||
140 | |||
141 | exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size); | ||
142 | if (IS_ERR(exynos_gem_obj)) { | ||
143 | exynos_drm_buf_destroy(dev, buffer); | ||
144 | return exynos_gem_obj; | ||
145 | } | ||
146 | |||
147 | exynos_gem_obj->buffer = buffer; | ||
148 | |||
149 | return exynos_gem_obj; | ||
150 | } | ||
151 | |||
138 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | 152 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, |
139 | struct drm_file *file_priv) | 153 | struct drm_file *file_priv) |
140 | { | 154 | { |
141 | struct drm_exynos_gem_create *args = data; | 155 | struct drm_exynos_gem_create *args = data; |
142 | struct exynos_drm_gem_obj *exynos_gem_obj; | 156 | struct exynos_drm_gem_obj *exynos_gem_obj = NULL; |
143 | 157 | ||
144 | DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); | 158 | DRM_DEBUG_KMS("%s\n", __FILE__); |
145 | 159 | ||
146 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, | 160 | exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, |
147 | &args->handle); | 161 | &args->handle, args->size); |
148 | if (IS_ERR(exynos_gem_obj)) | 162 | if (IS_ERR(exynos_gem_obj)) |
149 | return PTR_ERR(exynos_gem_obj); | 163 | return PTR_ERR(exynos_gem_obj); |
150 | 164 | ||
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
175 | { | 189 | { |
176 | struct drm_gem_object *obj = filp->private_data; | 190 | struct drm_gem_object *obj = filp->private_data; |
177 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 191 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
178 | struct exynos_drm_buf_entry *entry; | 192 | struct exynos_drm_gem_buf *buffer; |
179 | unsigned long pfn, vm_size; | 193 | unsigned long pfn, vm_size; |
180 | 194 | ||
181 | DRM_DEBUG_KMS("%s\n", __FILE__); | 195 | DRM_DEBUG_KMS("%s\n", __FILE__); |
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
187 | 201 | ||
188 | vm_size = vma->vm_end - vma->vm_start; | 202 | vm_size = vma->vm_end - vma->vm_start; |
189 | /* | 203 | /* |
190 | * a entry contains information to physically continuous memory | 204 | * a buffer contains information to physically continuous memory |
191 | * allocated by user request or at framebuffer creation. | 205 | * allocated by user request or at framebuffer creation. |
192 | */ | 206 | */ |
193 | entry = exynos_gem_obj->entry; | 207 | buffer = exynos_gem_obj->buffer; |
194 | 208 | ||
195 | /* check if user-requested size is valid. */ | 209 | /* check if user-requested size is valid. */ |
196 | if (vm_size > entry->size) | 210 | if (vm_size > buffer->size) |
197 | return -EINVAL; | 211 | return -EINVAL; |
198 | 212 | ||
199 | /* | 213 | /* |
200 | * get page frame number to physical memory to be mapped | 214 | * get page frame number to physical memory to be mapped |
201 | * to user space. | 215 | * to user space. |
202 | */ | 216 | */ |
203 | pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; | 217 | pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT; |
204 | 218 | ||
205 | DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); | 219 | DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); |
206 | 220 | ||
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj) | |||
281 | 295 | ||
282 | exynos_gem_obj = to_exynos_gem_obj(gem_obj); | 296 | exynos_gem_obj = to_exynos_gem_obj(gem_obj); |
283 | 297 | ||
284 | exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); | 298 | exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer); |
285 | 299 | ||
286 | kfree(exynos_gem_obj); | 300 | kfree(exynos_gem_obj); |
287 | } | 301 | } |
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
302 | args->pitch = args->width * args->bpp >> 3; | 316 | args->pitch = args->width * args->bpp >> 3; |
303 | args->size = args->pitch * args->height; | 317 | args->size = args->pitch * args->height; |
304 | 318 | ||
305 | exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, | 319 | exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle, |
306 | &args->handle); | 320 | args->size); |
307 | if (IS_ERR(exynos_gem_obj)) | 321 | if (IS_ERR(exynos_gem_obj)) |
308 | return PTR_ERR(exynos_gem_obj); | 322 | return PTR_ERR(exynos_gem_obj); |
309 | 323 | ||
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
360 | 374 | ||
361 | mutex_lock(&dev->struct_mutex); | 375 | mutex_lock(&dev->struct_mutex); |
362 | 376 | ||
363 | pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; | 377 | pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >> |
378 | PAGE_SHIFT) + page_offset; | ||
364 | 379 | ||
365 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); | 380 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); |
366 | 381 | ||
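Both the gem mmap and fault paths above convert the buffer's DMA address into a page frame number before handing it to the VM (dma_addr >> PAGE_SHIFT, plus the faulting page's offset). A quick runnable check of that shift arithmetic, assuming the common 4 KiB page size and a made-up address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t dma_addr = 0x20005000ULL;	/* made-up buffer address */
	unsigned long page_offset = 3;		/* faulting page within the buffer */

	unsigned long base_pfn  = (unsigned long)(dma_addr >> PAGE_SHIFT);
	unsigned long fault_pfn = base_pfn + page_offset;

	printf("base pfn = 0x%lx, fault pfn = 0x%lx\n", base_pfn, fault_pfn);
	return 0;
}
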
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index e5fc0148277b..ef8797334e6d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -30,13 +30,29 @@ | |||
30 | struct exynos_drm_gem_obj, base) | 30 | struct exynos_drm_gem_obj, base) |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * exynos drm gem buffer structure. | ||
34 | * | ||
35 | * @kvaddr: kernel virtual address of the allocated memory region. | ||
36 | * @dma_addr: bus address (accessed by dma) of the allocated memory region. | ||
37 | * - this address could be physical address without IOMMU and | ||
38 | * device address with IOMMU. | ||
39 | * @size: size of allocated memory region. | ||
40 | */ | ||
41 | struct exynos_drm_gem_buf { | ||
42 | void __iomem *kvaddr; | ||
43 | dma_addr_t dma_addr; | ||
44 | unsigned long size; | ||
45 | }; | ||
46 | |||
47 | /* | ||
33 | * exynos drm buffer structure. | 48 | * exynos drm buffer structure. |
34 | * | 49 | * |
35 | * @base: a gem object. | 50 | * @base: a gem object. |
36 | * - a new handle to this gem object would be created | 51 | * - a new handle to this gem object would be created |
37 | * by drm_gem_handle_create(). | 52 | * by drm_gem_handle_create(). |
38 | * @entry: pointer to exynos drm buffer entry object. | 53 | * @buffer: a pointer to exynos_drm_gem_buf object. |
39 | * - containing the information to physically | 54 | * - contains the information about the memory region allocated |
55 | * by user request or at framebuffer creation. | ||
40 | * continuous memory region allocated by user request | 56 | * continuous memory region allocated by user request |
41 | * or at framebuffer creation. | 57 | * or at framebuffer creation. |
42 | * | 58 | * |
@@ -45,13 +61,13 @@ | |||
45 | */ | 61 | */ |
46 | struct exynos_drm_gem_obj { | 62 | struct exynos_drm_gem_obj { |
47 | struct drm_gem_object base; | 63 | struct drm_gem_object base; |
48 | struct exynos_drm_buf_entry *entry; | 64 | struct exynos_drm_gem_buf *buffer; |
49 | }; | 65 | }; |
50 | 66 | ||
51 | /* create a new buffer and get a new gem handle. */ | 67 | /* create a new buffer and get a new gem handle. */ |
52 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, | 68 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, |
53 | struct drm_device *dev, unsigned int size, | 69 | struct drm_file *file_priv, |
54 | unsigned int *handle); | 70 | unsigned int *handle, unsigned long size); |
55 | 71 | ||
56 | /* | 72 | /* |
57 | * request gem object creation and buffer allocation as the size | 73 | * request gem object creation and buffer allocation as the size |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4f40f1ce1d8e..d09a6e02dc95 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) | |||
636 | struct drm_device *dev = node->minor->dev; | 636 | struct drm_device *dev = node->minor->dev; |
637 | drm_i915_private_t *dev_priv = dev->dev_private; | 637 | drm_i915_private_t *dev_priv = dev->dev_private; |
638 | struct intel_ring_buffer *ring; | 638 | struct intel_ring_buffer *ring; |
639 | int ret; | ||
639 | 640 | ||
640 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; | 641 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; |
641 | if (ring->size == 0) | 642 | if (ring->size == 0) |
642 | return 0; | 643 | return 0; |
643 | 644 | ||
645 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
646 | if (ret) | ||
647 | return ret; | ||
648 | |||
644 | seq_printf(m, "Ring %s:\n", ring->name); | 649 | seq_printf(m, "Ring %s:\n", ring->name); |
645 | seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); | 650 | seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); |
646 | seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); | 651 | seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); |
@@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) | |||
654 | seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); | 659 | seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); |
655 | seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); | 660 | seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); |
656 | 661 | ||
662 | mutex_unlock(&dev->struct_mutex); | ||
663 | |||
657 | return 0; | 664 | return 0; |
658 | } | 665 | } |
659 | 666 | ||
@@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused) | |||
842 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 849 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
843 | struct drm_device *dev = node->minor->dev; | 850 | struct drm_device *dev = node->minor->dev; |
844 | drm_i915_private_t *dev_priv = dev->dev_private; | 851 | drm_i915_private_t *dev_priv = dev->dev_private; |
845 | u16 crstanddelay = I915_READ16(CRSTANDVID); | 852 | u16 crstanddelay; |
853 | int ret; | ||
854 | |||
855 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
856 | if (ret) | ||
857 | return ret; | ||
858 | |||
859 | crstanddelay = I915_READ16(CRSTANDVID); | ||
860 | |||
861 | mutex_unlock(&dev->struct_mutex); | ||
846 | 862 | ||
847 | seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); | 863 | seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); |
848 | 864 | ||
@@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) | |||
940 | struct drm_device *dev = node->minor->dev; | 956 | struct drm_device *dev = node->minor->dev; |
941 | drm_i915_private_t *dev_priv = dev->dev_private; | 957 | drm_i915_private_t *dev_priv = dev->dev_private; |
942 | u32 delayfreq; | 958 | u32 delayfreq; |
943 | int i; | 959 | int ret, i; |
960 | |||
961 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
962 | if (ret) | ||
963 | return ret; | ||
944 | 964 | ||
945 | for (i = 0; i < 16; i++) { | 965 | for (i = 0; i < 16; i++) { |
946 | delayfreq = I915_READ(PXVFREQ_BASE + i * 4); | 966 | delayfreq = I915_READ(PXVFREQ_BASE + i * 4); |
@@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) | |||
948 | (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); | 968 | (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); |
949 | } | 969 | } |
950 | 970 | ||
971 | mutex_unlock(&dev->struct_mutex); | ||
972 | |||
951 | return 0; | 973 | return 0; |
952 | } | 974 | } |
953 | 975 | ||
@@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused) | |||
962 | struct drm_device *dev = node->minor->dev; | 984 | struct drm_device *dev = node->minor->dev; |
963 | drm_i915_private_t *dev_priv = dev->dev_private; | 985 | drm_i915_private_t *dev_priv = dev->dev_private; |
964 | u32 inttoext; | 986 | u32 inttoext; |
965 | int i; | 987 | int ret, i; |
988 | |||
989 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
990 | if (ret) | ||
991 | return ret; | ||
966 | 992 | ||
967 | for (i = 1; i <= 32; i++) { | 993 | for (i = 1; i <= 32; i++) { |
968 | inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); | 994 | inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); |
969 | seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); | 995 | seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); |
970 | } | 996 | } |
971 | 997 | ||
998 | mutex_unlock(&dev->struct_mutex); | ||
999 | |||
972 | return 0; | 1000 | return 0; |
973 | } | 1001 | } |
974 | 1002 | ||
@@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
977 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1005 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
978 | struct drm_device *dev = node->minor->dev; | 1006 | struct drm_device *dev = node->minor->dev; |
979 | drm_i915_private_t *dev_priv = dev->dev_private; | 1007 | drm_i915_private_t *dev_priv = dev->dev_private; |
980 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 1008 | u32 rgvmodectl, rstdbyctl; |
981 | u32 rstdbyctl = I915_READ(RSTDBYCTL); | 1009 | u16 crstandvid; |
982 | u16 crstandvid = I915_READ16(CRSTANDVID); | 1010 | int ret; |
1011 | |||
1012 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1013 | if (ret) | ||
1014 | return ret; | ||
1015 | |||
1016 | rgvmodectl = I915_READ(MEMMODECTL); | ||
1017 | rstdbyctl = I915_READ(RSTDBYCTL); | ||
1018 | crstandvid = I915_READ16(CRSTANDVID); | ||
1019 | |||
1020 | mutex_unlock(&dev->struct_mutex); | ||
983 | 1021 | ||
984 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? | 1022 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? |
985 | "yes" : "no"); | 1023 | "yes" : "no"); |
@@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused) | |||
1167 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1205 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1168 | struct drm_device *dev = node->minor->dev; | 1206 | struct drm_device *dev = node->minor->dev; |
1169 | drm_i915_private_t *dev_priv = dev->dev_private; | 1207 | drm_i915_private_t *dev_priv = dev->dev_private; |
1208 | int ret; | ||
1209 | |||
1210 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1211 | if (ret) | ||
1212 | return ret; | ||
1170 | 1213 | ||
1171 | seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); | 1214 | seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); |
1172 | 1215 | ||
1216 | mutex_unlock(&dev->struct_mutex); | ||
1217 | |||
1173 | return 0; | 1218 | return 0; |
1174 | } | 1219 | } |
1175 | 1220 | ||
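The debugfs hunks above bracket the register reads with mutex_lock_interruptible()/mutex_unlock() so a reader blocked on struct_mutex can be interrupted and the error is propagated instead of the hardware being read unlocked. Below is a minimal userspace sketch of that "acquire or bail out, do the work, release" shape; pthread_mutex_lock() merely stands in for mutex_lock_interruptible(), and the register file is a plain array, not driver state.

    /* Lock-or-bail pattern: propagate the lock failure, never read unlocked. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int fake_delayfreq[16];     /* placeholder "registers" */

    static int dump_delayfreq(void)
    {
            int ret, i;

            ret = pthread_mutex_lock(&hw_lock); /* kernel: mutex_lock_interruptible() */
            if (ret)
                    return ret;                 /* bail out before touching anything */

            for (i = 0; i < 16; i++)
                    printf("P%2d: 0x%08x\n", i, fake_delayfreq[i]);

            pthread_mutex_unlock(&hw_lock);
            return 0;
    }

    int main(void)
    {
            return dump_delayfreq();
    }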
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e9c2cfe45daa..15bfa9145d2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | |||
68 | MODULE_PARM_DESC(i915_enable_rc6, | 68 | MODULE_PARM_DESC(i915_enable_rc6, |
69 | "Enable power-saving render C-state 6 (default: true)"); | 69 | "Enable power-saving render C-state 6 (default: true)"); |
70 | 70 | ||
71 | unsigned int i915_enable_fbc __read_mostly = -1; | 71 | int i915_enable_fbc __read_mostly = -1; |
72 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 72 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
73 | MODULE_PARM_DESC(i915_enable_fbc, | 73 | MODULE_PARM_DESC(i915_enable_fbc, |
74 | "Enable frame buffer compression for power savings " | 74 | "Enable frame buffer compression for power savings " |
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock, | |||
80 | "Use panel (LVDS/eDP) downclocking for power savings " | 80 | "Use panel (LVDS/eDP) downclocking for power savings " |
81 | "(default: false)"); | 81 | "(default: false)"); |
82 | 82 | ||
83 | unsigned int i915_panel_use_ssc __read_mostly = -1; | 83 | int i915_panel_use_ssc __read_mostly = -1; |
84 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | 84 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
85 | MODULE_PARM_DESC(lvds_use_ssc, | 85 | MODULE_PARM_DESC(lvds_use_ssc, |
86 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " | 86 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " |
@@ -107,7 +107,7 @@ static struct drm_driver driver; | |||
107 | extern int intel_agp_enabled; | 107 | extern int intel_agp_enabled; |
108 | 108 | ||
109 | #define INTEL_VGA_DEVICE(id, info) { \ | 109 | #define INTEL_VGA_DEVICE(id, info) { \ |
110 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ | 110 | .class = PCI_BASE_CLASS_DISPLAY << 16, \ |
111 | .class_mask = 0xff0000, \ | 111 | .class_mask = 0xff0000, \ |
112 | .vendor = 0x8086, \ | 112 | .vendor = 0x8086, \ |
113 | .device = id, \ | 113 | .device = id, \ |
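i915_enable_fbc and i915_panel_use_ssc change from unsigned int to int because the driver uses -1 as an "auto / not set" default; stored in an unsigned variable, -1 wraps to UINT_MAX and any "param < 0" test can never fire. The standalone snippet below only illustrates that failure mode and is not driver code (most compilers also warn that the unsigned comparison is always false, which is a good hint the type was wrong).

    /* Why a "-1 means auto" parameter has to be signed. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int u_param = -1;  /* wraps to UINT_MAX */
            int s_param = -1;

            printf("unsigned: param < 0 is %s\n", (u_param < 0) ? "true" : "false");
            printf("signed:   param < 0 is %s\n", (s_param < 0) ? "true" : "false");
            return 0;
    }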
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 06a37f4fd74b..4a9c1b979804 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -126,6 +126,9 @@ struct drm_i915_master_private { | |||
126 | struct _drm_i915_sarea *sarea_priv; | 126 | struct _drm_i915_sarea *sarea_priv; |
127 | }; | 127 | }; |
128 | #define I915_FENCE_REG_NONE -1 | 128 | #define I915_FENCE_REG_NONE -1 |
129 | #define I915_MAX_NUM_FENCES 16 | ||
130 | /* 16 fences + sign bit for FENCE_REG_NONE */ | ||
131 | #define I915_MAX_NUM_FENCE_BITS 5 | ||
129 | 132 | ||
130 | struct drm_i915_fence_reg { | 133 | struct drm_i915_fence_reg { |
131 | struct list_head lru_list; | 134 | struct list_head lru_list; |
@@ -168,7 +171,7 @@ struct drm_i915_error_state { | |||
168 | u32 instdone1; | 171 | u32 instdone1; |
169 | u32 seqno; | 172 | u32 seqno; |
170 | u64 bbaddr; | 173 | u64 bbaddr; |
171 | u64 fence[16]; | 174 | u64 fence[I915_MAX_NUM_FENCES]; |
172 | struct timeval time; | 175 | struct timeval time; |
173 | struct drm_i915_error_object { | 176 | struct drm_i915_error_object { |
174 | int page_count; | 177 | int page_count; |
@@ -182,7 +185,7 @@ struct drm_i915_error_state { | |||
182 | u32 gtt_offset; | 185 | u32 gtt_offset; |
183 | u32 read_domains; | 186 | u32 read_domains; |
184 | u32 write_domain; | 187 | u32 write_domain; |
185 | s32 fence_reg:5; | 188 | s32 fence_reg:I915_MAX_NUM_FENCE_BITS; |
186 | s32 pinned:2; | 189 | s32 pinned:2; |
187 | u32 tiling:2; | 190 | u32 tiling:2; |
188 | u32 dirty:1; | 191 | u32 dirty:1; |
@@ -375,7 +378,7 @@ typedef struct drm_i915_private { | |||
375 | struct notifier_block lid_notifier; | 378 | struct notifier_block lid_notifier; |
376 | 379 | ||
377 | int crt_ddc_pin; | 380 | int crt_ddc_pin; |
378 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 381 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
379 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 382 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
380 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | 383 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
381 | 384 | ||
@@ -506,7 +509,7 @@ typedef struct drm_i915_private { | |||
506 | u8 saveAR[21]; | 509 | u8 saveAR[21]; |
507 | u8 saveDACMASK; | 510 | u8 saveDACMASK; |
508 | u8 saveCR[37]; | 511 | u8 saveCR[37]; |
509 | uint64_t saveFENCE[16]; | 512 | uint64_t saveFENCE[I915_MAX_NUM_FENCES]; |
510 | u32 saveCURACNTR; | 513 | u32 saveCURACNTR; |
511 | u32 saveCURAPOS; | 514 | u32 saveCURAPOS; |
512 | u32 saveCURABASE; | 515 | u32 saveCURABASE; |
@@ -777,10 +780,8 @@ struct drm_i915_gem_object { | |||
777 | * Fence register bits (if any) for this object. Will be set | 780 | * Fence register bits (if any) for this object. Will be set |
778 | * as needed when mapped into the GTT. | 781 | * as needed when mapped into the GTT. |
779 | * Protected by dev->struct_mutex. | 782 | * Protected by dev->struct_mutex. |
780 | * | ||
781 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) | ||
782 | */ | 783 | */ |
783 | signed int fence_reg:5; | 784 | signed int fence_reg:I915_MAX_NUM_FENCE_BITS; |
784 | 785 | ||
785 | /** | 786 | /** |
786 | * Advice: are the backing pages purgeable? | 787 | * Advice: are the backing pages purgeable? |
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly; | |||
999 | extern unsigned int i915_powersave __read_mostly; | 1000 | extern unsigned int i915_powersave __read_mostly; |
1000 | extern unsigned int i915_semaphores __read_mostly; | 1001 | extern unsigned int i915_semaphores __read_mostly; |
1001 | extern unsigned int i915_lvds_downclock __read_mostly; | 1002 | extern unsigned int i915_lvds_downclock __read_mostly; |
1002 | extern unsigned int i915_panel_use_ssc __read_mostly; | 1003 | extern int i915_panel_use_ssc __read_mostly; |
1003 | extern int i915_vbt_sdvo_panel_type __read_mostly; | 1004 | extern int i915_vbt_sdvo_panel_type __read_mostly; |
1004 | extern unsigned int i915_enable_rc6 __read_mostly; | 1005 | extern unsigned int i915_enable_rc6 __read_mostly; |
1005 | extern unsigned int i915_enable_fbc __read_mostly; | 1006 | extern int i915_enable_fbc __read_mostly; |
1006 | extern bool i915_enable_hangcheck __read_mostly; | 1007 | extern bool i915_enable_hangcheck __read_mostly; |
1007 | 1008 | ||
1008 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1009 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
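I915_MAX_NUM_FENCE_BITS is 5 rather than 4 because the fence_reg bitfield must represent the 16 fence indices (0..15) plus the I915_FENCE_REG_NONE sentinel of -1; a signed 4-bit field only reaches 7. A standalone check of that arithmetic:

    /* A signed 5-bit field spans -16..15: 16 fences plus the -1 sentinel. */
    #include <stdio.h>

    struct obj {
            signed int fence_reg:5;
    };

    int main(void)
    {
            struct obj o;

            o.fence_reg = 15;       /* highest fence index */
            printf("15 stored as %d\n", o.fence_reg);

            o.fence_reg = -1;       /* FENCE_REG_NONE */
            printf("-1 stored as %d\n", o.fence_reg);
            return 0;
    }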
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d18b07adcffa..8359dc777041 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev) | |||
1745 | struct drm_i915_private *dev_priv = dev->dev_private; | 1745 | struct drm_i915_private *dev_priv = dev->dev_private; |
1746 | int i; | 1746 | int i; |
1747 | 1747 | ||
1748 | for (i = 0; i < 16; i++) { | 1748 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
1749 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 1749 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
1750 | struct drm_i915_gem_object *obj = reg->obj; | 1750 | struct drm_i915_gem_object *obj = reg->obj; |
1751 | 1751 | ||
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3512 | * so emit a request to do so. | 3512 | * so emit a request to do so. |
3513 | */ | 3513 | */ |
3514 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 3514 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
3515 | if (request) | 3515 | if (request) { |
3516 | ret = i915_add_request(obj->ring, NULL, request); | 3516 | ret = i915_add_request(obj->ring, NULL, request); |
3517 | else | 3517 | if (ret) |
3518 | kfree(request); | ||
3519 | } else | ||
3518 | ret = -ENOMEM; | 3520 | ret = -ENOMEM; |
3519 | } | 3521 | } |
3520 | 3522 | ||
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3613 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3615 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3614 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 3616 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
3615 | 3617 | ||
3616 | if (IS_GEN6(dev)) { | 3618 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
3617 | /* On Gen6, we can have the GPU use the LLC (the CPU | 3619 | /* On Gen6, we can have the GPU use the LLC (the CPU |
3618 | * cache) for about a 10% performance improvement | 3620 | * cache) for about a 10% performance improvement |
3619 | * compared to uncached. Graphics requests other than | 3621 | * compared to uncached. Graphics requests other than |
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev) | |||
3877 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); | 3879 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
3878 | for (i = 0; i < I915_NUM_RINGS; i++) | 3880 | for (i = 0; i < I915_NUM_RINGS; i++) |
3879 | init_ring_lists(&dev_priv->ring[i]); | 3881 | init_ring_lists(&dev_priv->ring[i]); |
3880 | for (i = 0; i < 16; i++) | 3882 | for (i = 0; i < I915_MAX_NUM_FENCES; i++) |
3881 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 3883 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
3882 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 3884 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
3883 | i915_gem_retire_work_handler); | 3885 | i915_gem_retire_work_handler); |
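The i915_gem_busy_ioctl hunk above fixes a leak: i915_add_request() only consumes the freshly allocated request when it succeeds, so a failed call must be followed by kfree(). The userspace sketch below shows the same ownership rule; submit_request() is a made-up stand-in for the kernel helper, not a real API.

    /* Ownership rule: the callee takes the allocation only on success. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request { int seqno; };

    /* Hypothetical submit helper that consumes req on success only. */
    static int submit_request(struct request *req)
    {
            (void)req;
            return -EIO;            /* pretend the submission failed */
    }

    int main(void)
    {
            struct request *request = calloc(1, sizeof(*request));
            int ret;

            if (request) {
                    ret = submit_request(request);
                    if (ret)
                            free(request);  /* the fix: no leak on failure */
            } else {
                    ret = -ENOMEM;
            }
            printf("ret = %d\n", ret);
            return ret ? 1 : 0;
    }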
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9ee2729fe5c6..b40004b55977 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
824 | 824 | ||
825 | /* Fences */ | 825 | /* Fences */ |
826 | switch (INTEL_INFO(dev)->gen) { | 826 | switch (INTEL_INFO(dev)->gen) { |
827 | case 7: | ||
827 | case 6: | 828 | case 6: |
828 | for (i = 0; i < 16; i++) | 829 | for (i = 0; i < 16; i++) |
829 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 830 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a09416e611f..b080cc824001 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1553,12 +1553,21 @@ | |||
1553 | */ | 1553 | */ |
1554 | #define PP_READY (1 << 30) | 1554 | #define PP_READY (1 << 30) |
1555 | #define PP_SEQUENCE_NONE (0 << 28) | 1555 | #define PP_SEQUENCE_NONE (0 << 28) |
1556 | #define PP_SEQUENCE_ON (1 << 28) | 1556 | #define PP_SEQUENCE_POWER_UP (1 << 28) |
1557 | #define PP_SEQUENCE_OFF (2 << 28) | 1557 | #define PP_SEQUENCE_POWER_DOWN (2 << 28) |
1558 | #define PP_SEQUENCE_MASK 0x30000000 | 1558 | #define PP_SEQUENCE_MASK (3 << 28) |
1559 | #define PP_SEQUENCE_SHIFT 28 | ||
1559 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) | 1560 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) |
1560 | #define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) | ||
1561 | #define PP_SEQUENCE_STATE_MASK 0x0000000f | 1561 | #define PP_SEQUENCE_STATE_MASK 0x0000000f |
1562 | #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) | ||
1563 | #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) | ||
1564 | #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) | ||
1565 | #define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) | ||
1566 | #define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) | ||
1567 | #define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) | ||
1568 | #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) | ||
1569 | #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) | ||
1570 | #define PP_SEQUENCE_STATE_RESET (0xf << 0) | ||
1562 | #define PP_CONTROL 0x61204 | 1571 | #define PP_CONTROL 0x61204 |
1563 | #define POWER_TARGET_ON (1 << 0) | 1572 | #define POWER_TARGET_ON (1 << 0) |
1564 | #define PP_ON_DELAYS 0x61208 | 1573 | #define PP_ON_DELAYS 0x61208 |
@@ -3444,6 +3453,10 @@ | |||
3444 | #define GT_FIFO_FREE_ENTRIES 0x120008 | 3453 | #define GT_FIFO_FREE_ENTRIES 0x120008 |
3445 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 3454 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
3446 | 3455 | ||
3456 | #define GEN6_UCGCTL2 0x9404 | ||
3457 | # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) | ||
3458 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) | ||
3459 | |||
3447 | #define GEN6_RPNSWREQ 0xA008 | 3460 | #define GEN6_RPNSWREQ 0xA008 |
3448 | #define GEN6_TURBO_DISABLE (1<<31) | 3461 | #define GEN6_TURBO_DISABLE (1<<31) |
3449 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3462 | #define GEN6_FREQUENCY(x) ((x)<<25) |
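The PP_SEQUENCE rework above replaces the old one-bit "on/off" encodings with an explicit two-bit field (mask 3 << 28, shift 28) plus the 4-bit sequencer state codes in the low nibble. The snippet below just decodes a status word with the same mask/shift arithmetic; the sample value is invented rather than read from PCH_PP_STATUS.

    /* Decode the power-sequencer fields of a PP_STATUS-style word. */
    #include <stdio.h>

    #define PP_SEQUENCE_POWER_UP       (1u << 28)
    #define PP_SEQUENCE_MASK           (3u << 28)
    #define PP_SEQUENCE_SHIFT          28
    #define PP_SEQUENCE_STATE_MASK     0x0000000fu
    #define PP_SEQUENCE_STATE_ON_IDLE  0x8u

    int main(void)
    {
            /* invented sample: sequencer powering up, state ON_S1_2 (0xa) */
            unsigned int status = PP_SEQUENCE_POWER_UP | 0xau;

            printf("sequence: %u (0=none, 1=power up, 2=power down)\n",
                   (status & PP_SEQUENCE_MASK) >> PP_SEQUENCE_SHIFT);
            printf("state:    0x%x (0x%x would be ON_IDLE)\n",
                   status & PP_SEQUENCE_STATE_MASK, PP_SEQUENCE_STATE_ON_IDLE);
            return 0;
    }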
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f8f602d76650..7886e4fb60e3 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
370 | 370 | ||
371 | /* Fences */ | 371 | /* Fences */ |
372 | switch (INTEL_INFO(dev)->gen) { | 372 | switch (INTEL_INFO(dev)->gen) { |
373 | case 7: | ||
373 | case 6: | 374 | case 6: |
374 | for (i = 0; i < 16; i++) | 375 | for (i = 0; i < 16; i++) |
375 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 376 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
404 | 405 | ||
405 | /* Fences */ | 406 | /* Fences */ |
406 | switch (INTEL_INFO(dev)->gen) { | 407 | switch (INTEL_INFO(dev)->gen) { |
408 | case 7: | ||
407 | case 6: | 409 | case 6: |
408 | for (i = 0; i < 16; i++) | 410 | for (i = 0; i < 16; i++) |
409 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | 411 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); |
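Both this i915_suspend.c hunk and the i915_irq.c hunk above add a bare "case 7:" that falls through to the gen6 code, because Ivy Bridge (gen7) keeps the Sandy Bridge 64-bit fence register layout. A compact sketch of that dispatch follows; read_fence64() and the register contents are placeholders, not the real I915_READ64() path.

    /* Gen dispatch with a deliberate fallthrough: gen7 reuses the gen6 layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_FENCES 16

    /* Hypothetical stand-in for reading FENCE_REG_SANDYBRIDGE_0 + i * 8. */
    static uint64_t read_fence64(int i)
    {
            return 0x1000u + (unsigned int)i;
    }

    static void save_fences(int gen, uint64_t fences[MAX_FENCES])
    {
            int i;

            switch (gen) {
            case 7:         /* fallthrough: same registers as gen6 */
            case 6:
                    for (i = 0; i < MAX_FENCES; i++)
                            fences[i] = read_fence64(i);
                    break;
            default:
                    break;  /* older generations use other registers (not shown) */
            }
    }

    int main(void)
    {
            uint64_t fences[MAX_FENCES] = { 0 };

            save_fences(7, fences);
            printf("fence[15] = 0x%llx\n", (unsigned long long)fences[15]);
            return 0;
    }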
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 981b1f1c04d8..e77a863a3833 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2933 | 2933 | ||
2934 | /* For PCH DP, enable TRANS_DP_CTL */ | 2934 | /* For PCH DP, enable TRANS_DP_CTL */ |
2935 | if (HAS_PCH_CPT(dev) && | 2935 | if (HAS_PCH_CPT(dev) && |
2936 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2936 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
2937 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { | ||
2937 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; | 2938 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; |
2938 | reg = TRANS_DP_CTL(pipe); | 2939 | reg = TRANS_DP_CTL(pipe); |
2939 | temp = I915_READ(reg); | 2940 | temp = I915_READ(reg); |
@@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4711 | lvds_bpc = 6; | 4712 | lvds_bpc = 6; |
4712 | 4713 | ||
4713 | if (lvds_bpc < display_bpc) { | 4714 | if (lvds_bpc < display_bpc) { |
4714 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); | 4715 | DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); |
4715 | display_bpc = lvds_bpc; | 4716 | display_bpc = lvds_bpc; |
4716 | } | 4717 | } |
4717 | continue; | 4718 | continue; |
@@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4722 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; | 4723 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; |
4723 | 4724 | ||
4724 | if (edp_bpc < display_bpc) { | 4725 | if (edp_bpc < display_bpc) { |
4725 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); | 4726 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); |
4726 | display_bpc = edp_bpc; | 4727 | display_bpc = edp_bpc; |
4727 | } | 4728 | } |
4728 | continue; | 4729 | continue; |
@@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4737 | /* Don't use an invalid EDID bpc value */ | 4738 | /* Don't use an invalid EDID bpc value */ |
4738 | if (connector->display_info.bpc && | 4739 | if (connector->display_info.bpc && |
4739 | connector->display_info.bpc < display_bpc) { | 4740 | connector->display_info.bpc < display_bpc) { |
4740 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); | 4741 | DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); |
4741 | display_bpc = connector->display_info.bpc; | 4742 | display_bpc = connector->display_info.bpc; |
4742 | } | 4743 | } |
4743 | } | 4744 | } |
@@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4748 | */ | 4749 | */ |
4749 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | 4750 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
4750 | if (display_bpc > 8 && display_bpc < 12) { | 4751 | if (display_bpc > 8 && display_bpc < 12) { |
4751 | DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); | 4752 | DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); |
4752 | display_bpc = 12; | 4753 | display_bpc = 12; |
4753 | } else { | 4754 | } else { |
4754 | DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); | 4755 | DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); |
4755 | display_bpc = 8; | 4756 | display_bpc = 8; |
4756 | } | 4757 | } |
4757 | } | 4758 | } |
@@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4789 | 4790 | ||
4790 | display_bpc = min(display_bpc, bpc); | 4791 | display_bpc = min(display_bpc, bpc); |
4791 | 4792 | ||
4792 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", | 4793 | DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", |
4793 | bpc, display_bpc); | 4794 | bpc, display_bpc); |
4794 | 4795 | ||
4795 | *pipe_bpp = display_bpc * 3; | 4796 | *pipe_bpp = display_bpc * 3; |
4796 | 4797 | ||
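The context around these DRM_DEBUG_KMS conversions is the pipe-bpp selection: start from the requested depth, clamp it to what the LVDS/eDP panel or the EDID advertises, and force HDMI to either 8 or 12 bpc. The sketch below mirrors only the clamping decision visible in those context lines, with invented input values; it is not the driver function itself.

    /* Sketch of the bpc clamping done in intel_choose_pipe_bpp_dither(). */
    #include <stdio.h>

    static unsigned int clamp_bpc(unsigned int display_bpc,
                                  unsigned int sink_limit_bpc, int is_hdmi)
    {
            if (sink_limit_bpc && sink_limit_bpc < display_bpc)
                    display_bpc = sink_limit_bpc;   /* clamp to the sink's limit */

            if (is_hdmi) {
                    /* HDMI pipes only run at 8 or 12 bpc here */
                    if (display_bpc > 8 && display_bpc < 12)
                            display_bpc = 12;
                    else
                            display_bpc = 8;
            }
            return display_bpc;
    }

    int main(void)
    {
            /* e.g. a 10-bpc request on a 6-bpc LVDS panel, and on HDMI */
            printf("LVDS: %u bpc\n", clamp_bpc(10, 6, 0));
            printf("HDMI: %u bpc\n", clamp_bpc(10, 0, 1));
            return 0;
    }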
@@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5671 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; | 5672 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; |
5672 | if ((is_lvds && dev_priv->lvds_dither) || dither) { | 5673 | if ((is_lvds && dev_priv->lvds_dither) || dither) { |
5673 | pipeconf |= PIPECONF_DITHER_EN; | 5674 | pipeconf |= PIPECONF_DITHER_EN; |
5674 | pipeconf |= PIPECONF_DITHER_TYPE_ST1; | 5675 | pipeconf |= PIPECONF_DITHER_TYPE_SP; |
5675 | } | 5676 | } |
5676 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5677 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5677 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 5678 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
@@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
8148 | I915_WRITE(WM2_LP_ILK, 0); | 8149 | I915_WRITE(WM2_LP_ILK, 0); |
8149 | I915_WRITE(WM1_LP_ILK, 0); | 8150 | I915_WRITE(WM1_LP_ILK, 0); |
8150 | 8151 | ||
8152 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | ||
8153 | * gating disable must be set. Failure to set it results in | ||
8154 | * flickering pixels due to Z write ordering failures after | ||
8155 | * some amount of runtime in the Mesa "fire" demo, and Unigine | ||
8156 | * Sanctuary and Tropics, and apparently anything else with | ||
8157 | * alpha test or pixel discard. | ||
8158 | * | ||
8159 | * According to the spec, bit 11 (RCCUNIT) must also be set, | ||
8160 | * but we didn't debug actual testcases to find it out. | ||
8161 | */ | ||
8162 | I915_WRITE(GEN6_UCGCTL2, | ||
8163 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | ||
8164 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | ||
8165 | |||
8151 | /* | 8166 | /* |
8152 | * According to the spec the following bits should be | 8167 | * According to the spec the following bits should be |
8153 | * set in order to enable memory self-refresh and fbc: | 8168 | * set in order to enable memory self-refresh and fbc: |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09b318b0227f..4d0358fad937 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -59,7 +59,6 @@ struct intel_dp { | |||
59 | struct i2c_algo_dp_aux_data algo; | 59 | struct i2c_algo_dp_aux_data algo; |
60 | bool is_pch_edp; | 60 | bool is_pch_edp; |
61 | uint8_t train_set[4]; | 61 | uint8_t train_set[4]; |
62 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
63 | int panel_power_up_delay; | 62 | int panel_power_up_delay; |
64 | int panel_power_down_delay; | 63 | int panel_power_down_delay; |
65 | int panel_power_cycle_delay; | 64 | int panel_power_cycle_delay; |
@@ -68,7 +67,6 @@ struct intel_dp { | |||
68 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | 67 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ |
69 | struct delayed_work panel_vdd_work; | 68 | struct delayed_work panel_vdd_work; |
70 | bool want_panel_vdd; | 69 | bool want_panel_vdd; |
71 | unsigned long panel_off_jiffies; | ||
72 | }; | 70 | }; |
73 | 71 | ||
74 | /** | 72 | /** |
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder, | |||
157 | static int | 155 | static int |
158 | intel_dp_max_lane_count(struct intel_dp *intel_dp) | 156 | intel_dp_max_lane_count(struct intel_dp *intel_dp) |
159 | { | 157 | { |
160 | int max_lane_count = 4; | 158 | int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; |
161 | 159 | switch (max_lane_count) { | |
162 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { | 160 | case 1: case 2: case 4: |
163 | max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; | 161 | break; |
164 | switch (max_lane_count) { | 162 | default: |
165 | case 1: case 2: case 4: | 163 | max_lane_count = 4; |
166 | break; | ||
167 | default: | ||
168 | max_lane_count = 4; | ||
169 | } | ||
170 | } | 164 | } |
171 | return max_lane_count; | 165 | return max_lane_count; |
172 | } | 166 | } |
@@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
768 | continue; | 762 | continue; |
769 | 763 | ||
770 | intel_dp = enc_to_intel_dp(encoder); | 764 | intel_dp = enc_to_intel_dp(encoder); |
771 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { | 765 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || |
766 | intel_dp->base.type == INTEL_OUTPUT_EDP) | ||
767 | { | ||
772 | lane_count = intel_dp->lane_count; | 768 | lane_count = intel_dp->lane_count; |
773 | break; | 769 | break; |
774 | } else if (is_edp(intel_dp)) { | ||
775 | lane_count = dev_priv->edp.lanes; | ||
776 | break; | ||
777 | } | 770 | } |
778 | } | 771 | } |
779 | 772 | ||
@@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
810 | struct drm_display_mode *adjusted_mode) | 803 | struct drm_display_mode *adjusted_mode) |
811 | { | 804 | { |
812 | struct drm_device *dev = encoder->dev; | 805 | struct drm_device *dev = encoder->dev; |
806 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
813 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 807 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
814 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 808 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
815 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 809 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
822 | ironlake_edp_pll_off(encoder); | 816 | ironlake_edp_pll_off(encoder); |
823 | } | 817 | } |
824 | 818 | ||
825 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | 819 | /* |
826 | intel_dp->DP |= intel_dp->color_range; | 820 | * There are three kinds of DP registers: |
821 | * | ||
822 | * IBX PCH | ||
823 | * CPU | ||
824 | * CPT PCH | ||
825 | * | ||
826 | * IBX PCH and CPU are the same for almost everything, | ||
827 | * except that the CPU DP PLL is configured in this | ||
828 | * register | ||
829 | * | ||
830 | * CPT PCH is quite different, having many bits moved | ||
831 | * to the TRANS_DP_CTL register instead. That | ||
832 | * configuration happens (oddly) in ironlake_pch_enable | ||
833 | */ | ||
827 | 834 | ||
828 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 835 | /* Preserve the BIOS-computed detected bit. This is |
829 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 836 | * supposed to be read-only. |
830 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 837 | */ |
831 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 838 | intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; |
839 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | ||
832 | 840 | ||
833 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) | 841 | /* Handle DP bits in common between all three register formats */ |
834 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 842 | |
835 | else | 843 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
836 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | ||
837 | 844 | ||
838 | switch (intel_dp->lane_count) { | 845 | switch (intel_dp->lane_count) { |
839 | case 1: | 846 | case 1: |
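intel_dp_mode_set() now rebuilds intel_dp->DP from scratch but first reads the port register back and keeps only DP_DETECTED, since that BIOS-written bit is treated as read-only. Below is a generic "preserve the read-only bit, recompute the rest" sketch; the register is a plain variable and the bit positions are invented for the demo, not the i915 ones.

    /* Rebuild a control word while preserving a read-only "detected" bit. */
    #include <stdio.h>

    #define DEMO_DETECTED   (1u << 2)   /* invented bit positions */
    #define DEMO_PORT_EN    (1u << 31)
    #define DEMO_VSWING_0_4 (0u << 25)

    static unsigned int fake_port_reg = DEMO_DETECTED;  /* what the BIOS left */

    int main(void)
    {
            unsigned int dp;

            dp  = fake_port_reg & DEMO_DETECTED; /* keep only the read-only bit */
            dp |= DEMO_VSWING_0_4;               /* then recompute everything else */
            dp |= DEMO_PORT_EN;

            printf("DP = 0x%08x (detected preserved: %s)\n",
                   dp, (dp & DEMO_DETECTED) ? "yes" : "no");
            return 0;
    }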
@@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
852 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; | 859 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
853 | intel_write_eld(encoder, adjusted_mode); | 860 | intel_write_eld(encoder, adjusted_mode); |
854 | } | 861 | } |
855 | |||
856 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 862 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
857 | intel_dp->link_configuration[0] = intel_dp->link_bw; | 863 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
858 | intel_dp->link_configuration[1] = intel_dp->lane_count; | 864 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
859 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | 865 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; |
860 | |||
861 | /* | 866 | /* |
862 | * Check for DPCD version > 1.1 and enhanced framing support | 867 | * Check for DPCD version > 1.1 and enhanced framing support |
863 | */ | 868 | */ |
864 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 869 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
865 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { | 870 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { |
866 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 871 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
867 | intel_dp->DP |= DP_ENHANCED_FRAMING; | ||
868 | } | 872 | } |
869 | 873 | ||
870 | /* CPT DP's pipe select is decided in TRANS_DP_CTL */ | 874 | /* Split out the IBX/CPU vs CPT settings */ |
871 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | ||
872 | intel_dp->DP |= DP_PIPEB_SELECT; | ||
873 | 875 | ||
874 | if (is_cpu_edp(intel_dp)) { | 876 | if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
875 | /* don't miss out required setting for eDP */ | 877 | intel_dp->DP |= intel_dp->color_range; |
876 | intel_dp->DP |= DP_PLL_ENABLE; | 878 | |
877 | if (adjusted_mode->clock < 200000) | 879 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
878 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; | 880 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
879 | else | 881 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
880 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; | 882 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
883 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | ||
884 | |||
885 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) | ||
886 | intel_dp->DP |= DP_ENHANCED_FRAMING; | ||
887 | |||
888 | if (intel_crtc->pipe == 1) | ||
889 | intel_dp->DP |= DP_PIPEB_SELECT; | ||
890 | |||
891 | if (is_cpu_edp(intel_dp)) { | ||
892 | /* don't miss out required setting for eDP */ | ||
893 | intel_dp->DP |= DP_PLL_ENABLE; | ||
894 | if (adjusted_mode->clock < 200000) | ||
895 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; | ||
896 | else | ||
897 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; | ||
898 | } | ||
899 | } else { | ||
900 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | ||
881 | } | 901 | } |
882 | } | 902 | } |
883 | 903 | ||
884 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) | 904 | #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
905 | #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) | ||
906 | |||
907 | #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) | ||
908 | #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) | ||
909 | |||
910 | #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) | ||
911 | #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) | ||
912 | |||
913 | static void ironlake_wait_panel_status(struct intel_dp *intel_dp, | ||
914 | u32 mask, | ||
915 | u32 value) | ||
885 | { | 916 | { |
886 | unsigned long off_time; | 917 | struct drm_device *dev = intel_dp->base.base.dev; |
887 | unsigned long delay; | 918 | struct drm_i915_private *dev_priv = dev->dev_private; |
888 | 919 | ||
889 | DRM_DEBUG_KMS("Wait for panel power off time\n"); | 920 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
921 | mask, value, | ||
922 | I915_READ(PCH_PP_STATUS), | ||
923 | I915_READ(PCH_PP_CONTROL)); | ||
890 | 924 | ||
891 | if (ironlake_edp_have_panel_power(intel_dp) || | 925 | if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { |
892 | ironlake_edp_have_panel_vdd(intel_dp)) | 926 | DRM_ERROR("Panel status timeout: status %08x control %08x\n", |
893 | { | 927 | I915_READ(PCH_PP_STATUS), |
894 | DRM_DEBUG_KMS("Panel still on, no delay needed\n"); | 928 | I915_READ(PCH_PP_CONTROL)); |
895 | return; | ||
896 | } | 929 | } |
930 | } | ||
897 | 931 | ||
898 | off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); | 932 | static void ironlake_wait_panel_on(struct intel_dp *intel_dp) |
899 | if (time_after(jiffies, off_time)) { | 933 | { |
900 | DRM_DEBUG_KMS("Time already passed"); | 934 | DRM_DEBUG_KMS("Wait for panel power on\n"); |
901 | return; | 935 | ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); |
902 | } | 936 | } |
903 | delay = jiffies_to_msecs(off_time - jiffies); | 937 | |
904 | if (delay > intel_dp->panel_power_down_delay) | 938 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) |
905 | delay = intel_dp->panel_power_down_delay; | 939 | { |
906 | DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); | 940 | DRM_DEBUG_KMS("Wait for panel power off time\n"); |
907 | msleep(delay); | 941 | ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); |
942 | } | ||
943 | |||
944 | static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) | ||
945 | { | ||
946 | DRM_DEBUG_KMS("Wait for panel power cycle\n"); | ||
947 | ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); | ||
948 | } | ||
949 | |||
950 | |||
951 | /* Read the current pp_control value, unlocking the register if it | ||
952 | * is locked | ||
953 | */ | ||
954 | |||
955 | static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) | ||
956 | { | ||
957 | u32 control = I915_READ(PCH_PP_CONTROL); | ||
958 | |||
959 | control &= ~PANEL_UNLOCK_MASK; | ||
960 | control |= PANEL_UNLOCK_REGS; | ||
961 | return control; | ||
908 | } | 962 | } |
909 | 963 | ||
910 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | 964 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
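ironlake_wait_panel_status() collapses the separate panel on/off/power-cycle waits into one helper that polls PCH_PP_STATUS until (status & mask) == value or a 5-second timeout, logging on timeout instead of guessing from jiffies arithmetic. The userspace sketch below shows the same polling shape; read_status(), the mask/value bits and the timing constants are all placeholders.

    /* Poll a status source until (status & mask) == value or a timeout. */
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical status source; a driver would read a register here. */
    static unsigned int read_status(void)
    {
            static int calls;
            return (++calls > 3) ? 0x80000008u : 0x10000001u;
    }

    static int wait_for_status(unsigned int mask, unsigned int value,
                               int timeout_ms, int poll_ms)
    {
            struct timespec delay = { 0, poll_ms * 1000000L };
            int waited = 0;

            while ((read_status() & mask) != value) {
                    if (waited >= timeout_ms) {
                            fprintf(stderr, "timeout: status 0x%08x\n",
                                    read_status());
                            return -1;
                    }
                    nanosleep(&delay, NULL);
                    waited += poll_ms;
            }
            return 0;
    }

    int main(void)
    {
            /* wait for "on and idle" with made-up mask/value bits */
            return wait_for_status(0xf000000fu, 0x80000008u, 5000, 10) ? 1 : 0;
    }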
@@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
921 | "eDP VDD already requested on\n"); | 975 | "eDP VDD already requested on\n"); |
922 | 976 | ||
923 | intel_dp->want_panel_vdd = true; | 977 | intel_dp->want_panel_vdd = true; |
978 | |||
924 | if (ironlake_edp_have_panel_vdd(intel_dp)) { | 979 | if (ironlake_edp_have_panel_vdd(intel_dp)) { |
925 | DRM_DEBUG_KMS("eDP VDD already on\n"); | 980 | DRM_DEBUG_KMS("eDP VDD already on\n"); |
926 | return; | 981 | return; |
927 | } | 982 | } |
928 | 983 | ||
929 | ironlake_wait_panel_off(intel_dp); | 984 | if (!ironlake_edp_have_panel_power(intel_dp)) |
930 | pp = I915_READ(PCH_PP_CONTROL); | 985 | ironlake_wait_panel_power_cycle(intel_dp); |
931 | pp &= ~PANEL_UNLOCK_MASK; | 986 | |
932 | pp |= PANEL_UNLOCK_REGS; | 987 | pp = ironlake_get_pp_control(dev_priv); |
933 | pp |= EDP_FORCE_VDD; | 988 | pp |= EDP_FORCE_VDD; |
934 | I915_WRITE(PCH_PP_CONTROL, pp); | 989 | I915_WRITE(PCH_PP_CONTROL, pp); |
935 | POSTING_READ(PCH_PP_CONTROL); | 990 | POSTING_READ(PCH_PP_CONTROL); |
@@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) | |||
952 | u32 pp; | 1007 | u32 pp; |
953 | 1008 | ||
954 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { | 1009 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
955 | pp = I915_READ(PCH_PP_CONTROL); | 1010 | pp = ironlake_get_pp_control(dev_priv); |
956 | pp &= ~PANEL_UNLOCK_MASK; | ||
957 | pp |= PANEL_UNLOCK_REGS; | ||
958 | pp &= ~EDP_FORCE_VDD; | 1011 | pp &= ~EDP_FORCE_VDD; |
959 | I915_WRITE(PCH_PP_CONTROL, pp); | 1012 | I915_WRITE(PCH_PP_CONTROL, pp); |
960 | POSTING_READ(PCH_PP_CONTROL); | 1013 | POSTING_READ(PCH_PP_CONTROL); |
@@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) | |||
962 | /* Make sure sequencer is idle before allowing subsequent activity */ | 1015 | /* Make sure sequencer is idle before allowing subsequent activity */ |
963 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", | 1016 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", |
964 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); | 1017 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); |
965 | intel_dp->panel_off_jiffies = jiffies; | 1018 | |
1019 | msleep(intel_dp->panel_power_down_delay); | ||
966 | } | 1020 | } |
967 | } | 1021 | } |
968 | 1022 | ||
@@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) | |||
972 | struct intel_dp, panel_vdd_work); | 1026 | struct intel_dp, panel_vdd_work); |
973 | struct drm_device *dev = intel_dp->base.base.dev; | 1027 | struct drm_device *dev = intel_dp->base.base.dev; |
974 | 1028 | ||
975 | mutex_lock(&dev->struct_mutex); | 1029 | mutex_lock(&dev->mode_config.mutex); |
976 | ironlake_panel_vdd_off_sync(intel_dp); | 1030 | ironlake_panel_vdd_off_sync(intel_dp); |
977 | mutex_unlock(&dev->struct_mutex); | 1031 | mutex_unlock(&dev->mode_config.mutex); |
978 | } | 1032 | } |
979 | 1033 | ||
980 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | 1034 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
@@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | |||
984 | 1038 | ||
985 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); | 1039 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); |
986 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); | 1040 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); |
987 | 1041 | ||
988 | intel_dp->want_panel_vdd = false; | 1042 | intel_dp->want_panel_vdd = false; |
989 | 1043 | ||
990 | if (sync) { | 1044 | if (sync) { |
@@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | |||
1000 | } | 1054 | } |
1001 | } | 1055 | } |
1002 | 1056 | ||
1003 | /* Returns true if the panel was already on when called */ | ||
1004 | static void ironlake_edp_panel_on(struct intel_dp *intel_dp) | 1057 | static void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
1005 | { | 1058 | { |
1006 | struct drm_device *dev = intel_dp->base.base.dev; | 1059 | struct drm_device *dev = intel_dp->base.base.dev; |
1007 | struct drm_i915_private *dev_priv = dev->dev_private; | 1060 | struct drm_i915_private *dev_priv = dev->dev_private; |
1008 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; | 1061 | u32 pp; |
1009 | 1062 | ||
1010 | if (!is_edp(intel_dp)) | 1063 | if (!is_edp(intel_dp)) |
1011 | return; | 1064 | return; |
1012 | if (ironlake_edp_have_panel_power(intel_dp)) | 1065 | |
1066 | DRM_DEBUG_KMS("Turn eDP power on\n"); | ||
1067 | |||
1068 | if (ironlake_edp_have_panel_power(intel_dp)) { | ||
1069 | DRM_DEBUG_KMS("eDP power already on\n"); | ||
1013 | return; | 1070 | return; |
1071 | } | ||
1014 | 1072 | ||
1015 | ironlake_wait_panel_off(intel_dp); | 1073 | ironlake_wait_panel_power_cycle(intel_dp); |
1016 | pp = I915_READ(PCH_PP_CONTROL); | ||
1017 | pp &= ~PANEL_UNLOCK_MASK; | ||
1018 | pp |= PANEL_UNLOCK_REGS; | ||
1019 | 1074 | ||
1075 | pp = ironlake_get_pp_control(dev_priv); | ||
1020 | if (IS_GEN5(dev)) { | 1076 | if (IS_GEN5(dev)) { |
1021 | /* ILK workaround: disable reset around power sequence */ | 1077 | /* ILK workaround: disable reset around power sequence */ |
1022 | pp &= ~PANEL_POWER_RESET; | 1078 | pp &= ~PANEL_POWER_RESET; |
@@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) | |||
1025 | } | 1081 | } |
1026 | 1082 | ||
1027 | pp |= POWER_TARGET_ON; | 1083 | pp |= POWER_TARGET_ON; |
1084 | if (!IS_GEN5(dev)) | ||
1085 | pp |= PANEL_POWER_RESET; | ||
1086 | |||
1028 | I915_WRITE(PCH_PP_CONTROL, pp); | 1087 | I915_WRITE(PCH_PP_CONTROL, pp); |
1029 | POSTING_READ(PCH_PP_CONTROL); | 1088 | POSTING_READ(PCH_PP_CONTROL); |
1030 | 1089 | ||
1031 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, | 1090 | ironlake_wait_panel_on(intel_dp); |
1032 | 5000)) | ||
1033 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | ||
1034 | I915_READ(PCH_PP_STATUS)); | ||
1035 | 1091 | ||
1036 | if (IS_GEN5(dev)) { | 1092 | if (IS_GEN5(dev)) { |
1037 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1093 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
@@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) | |||
1040 | } | 1096 | } |
1041 | } | 1097 | } |
1042 | 1098 | ||
1043 | static void ironlake_edp_panel_off(struct drm_encoder *encoder) | 1099 | static void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
1044 | { | 1100 | { |
1045 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1101 | struct drm_device *dev = intel_dp->base.base.dev; |
1046 | struct drm_device *dev = encoder->dev; | ||
1047 | struct drm_i915_private *dev_priv = dev->dev_private; | 1102 | struct drm_i915_private *dev_priv = dev->dev_private; |
1048 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | | 1103 | u32 pp; |
1049 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | ||
1050 | 1104 | ||
1051 | if (!is_edp(intel_dp)) | 1105 | if (!is_edp(intel_dp)) |
1052 | return; | 1106 | return; |
1053 | pp = I915_READ(PCH_PP_CONTROL); | ||
1054 | pp &= ~PANEL_UNLOCK_MASK; | ||
1055 | pp |= PANEL_UNLOCK_REGS; | ||
1056 | 1107 | ||
1057 | if (IS_GEN5(dev)) { | 1108 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
1058 | /* ILK workaround: disable reset around power sequence */ | ||
1059 | pp &= ~PANEL_POWER_RESET; | ||
1060 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1061 | POSTING_READ(PCH_PP_CONTROL); | ||
1062 | } | ||
1063 | 1109 | ||
1064 | intel_dp->panel_off_jiffies = jiffies; | 1110 | WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n"); |
1065 | 1111 | ||
1066 | if (IS_GEN5(dev)) { | 1112 | pp = ironlake_get_pp_control(dev_priv); |
1067 | pp &= ~POWER_TARGET_ON; | 1113 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
1068 | I915_WRITE(PCH_PP_CONTROL, pp); | 1114 | I915_WRITE(PCH_PP_CONTROL, pp); |
1069 | POSTING_READ(PCH_PP_CONTROL); | 1115 | POSTING_READ(PCH_PP_CONTROL); |
1070 | pp &= ~POWER_TARGET_ON; | ||
1071 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1072 | POSTING_READ(PCH_PP_CONTROL); | ||
1073 | msleep(intel_dp->panel_power_cycle_delay); | ||
1074 | |||
1075 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) | ||
1076 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | ||
1077 | I915_READ(PCH_PP_STATUS)); | ||
1078 | 1116 | ||
1079 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1117 | ironlake_wait_panel_off(intel_dp); |
1080 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1081 | POSTING_READ(PCH_PP_CONTROL); | ||
1082 | } | ||
1083 | } | 1118 | } |
1084 | 1119 | ||
1085 | static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | 1120 | static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
@@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | |||
1099 | * allowing it to appear. | 1134 | * allowing it to appear. |
1100 | */ | 1135 | */ |
1101 | msleep(intel_dp->backlight_on_delay); | 1136 | msleep(intel_dp->backlight_on_delay); |
1102 | pp = I915_READ(PCH_PP_CONTROL); | 1137 | pp = ironlake_get_pp_control(dev_priv); |
1103 | pp &= ~PANEL_UNLOCK_MASK; | ||
1104 | pp |= PANEL_UNLOCK_REGS; | ||
1105 | pp |= EDP_BLC_ENABLE; | 1138 | pp |= EDP_BLC_ENABLE; |
1106 | I915_WRITE(PCH_PP_CONTROL, pp); | 1139 | I915_WRITE(PCH_PP_CONTROL, pp); |
1107 | POSTING_READ(PCH_PP_CONTROL); | 1140 | POSTING_READ(PCH_PP_CONTROL); |
@@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) | |||
1117 | return; | 1150 | return; |
1118 | 1151 | ||
1119 | DRM_DEBUG_KMS("\n"); | 1152 | DRM_DEBUG_KMS("\n"); |
1120 | pp = I915_READ(PCH_PP_CONTROL); | 1153 | pp = ironlake_get_pp_control(dev_priv); |
1121 | pp &= ~PANEL_UNLOCK_MASK; | ||
1122 | pp |= PANEL_UNLOCK_REGS; | ||
1123 | pp &= ~EDP_BLC_ENABLE; | 1154 | pp &= ~EDP_BLC_ENABLE; |
1124 | I915_WRITE(PCH_PP_CONTROL, pp); | 1155 | I915_WRITE(PCH_PP_CONTROL, pp); |
1125 | POSTING_READ(PCH_PP_CONTROL); | 1156 | POSTING_READ(PCH_PP_CONTROL); |
@@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder) | |||
1187 | { | 1218 | { |
1188 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1219 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1189 | 1220 | ||
1221 | ironlake_edp_backlight_off(intel_dp); | ||
1222 | ironlake_edp_panel_off(intel_dp); | ||
1223 | |||
1190 | /* Wake up the sink first */ | 1224 | /* Wake up the sink first */ |
1191 | ironlake_edp_panel_vdd_on(intel_dp); | 1225 | ironlake_edp_panel_vdd_on(intel_dp); |
1192 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 1226 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1227 | intel_dp_link_down(intel_dp); | ||
1193 | ironlake_edp_panel_vdd_off(intel_dp, false); | 1228 | ironlake_edp_panel_vdd_off(intel_dp, false); |
1194 | 1229 | ||
1195 | /* Make sure the panel is off before trying to | 1230 | /* Make sure the panel is off before trying to |
1196 | * change the mode | 1231 | * change the mode |
1197 | */ | 1232 | */ |
1198 | ironlake_edp_backlight_off(intel_dp); | ||
1199 | intel_dp_link_down(intel_dp); | ||
1200 | ironlake_edp_panel_off(encoder); | ||
1201 | } | 1233 | } |
1202 | 1234 | ||
1203 | static void intel_dp_commit(struct drm_encoder *encoder) | 1235 | static void intel_dp_commit(struct drm_encoder *encoder) |
@@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
1211 | intel_dp_start_link_train(intel_dp); | 1243 | intel_dp_start_link_train(intel_dp); |
1212 | ironlake_edp_panel_on(intel_dp); | 1244 | ironlake_edp_panel_on(intel_dp); |
1213 | ironlake_edp_panel_vdd_off(intel_dp, true); | 1245 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1214 | |||
1215 | intel_dp_complete_link_train(intel_dp); | 1246 | intel_dp_complete_link_train(intel_dp); |
1216 | ironlake_edp_backlight_on(intel_dp); | 1247 | ironlake_edp_backlight_on(intel_dp); |
1217 | 1248 | ||
@@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1230 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 1261 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
1231 | 1262 | ||
1232 | if (mode != DRM_MODE_DPMS_ON) { | 1263 | if (mode != DRM_MODE_DPMS_ON) { |
1264 | ironlake_edp_backlight_off(intel_dp); | ||
1265 | ironlake_edp_panel_off(intel_dp); | ||
1266 | |||
1233 | ironlake_edp_panel_vdd_on(intel_dp); | 1267 | ironlake_edp_panel_vdd_on(intel_dp); |
1234 | if (is_edp(intel_dp)) | ||
1235 | ironlake_edp_backlight_off(intel_dp); | ||
1236 | intel_dp_sink_dpms(intel_dp, mode); | 1268 | intel_dp_sink_dpms(intel_dp, mode); |
1237 | intel_dp_link_down(intel_dp); | 1269 | intel_dp_link_down(intel_dp); |
1238 | ironlake_edp_panel_off(encoder); | ||
1239 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) | ||
1240 | ironlake_edp_pll_off(encoder); | ||
1241 | ironlake_edp_panel_vdd_off(intel_dp, false); | 1270 | ironlake_edp_panel_vdd_off(intel_dp, false); |
1271 | |||
1272 | if (is_cpu_edp(intel_dp)) | ||
1273 | ironlake_edp_pll_off(encoder); | ||
1242 | } else { | 1274 | } else { |
1275 | if (is_cpu_edp(intel_dp)) | ||
1276 | ironlake_edp_pll_on(encoder); | ||
1277 | |||
1243 | ironlake_edp_panel_vdd_on(intel_dp); | 1278 | ironlake_edp_panel_vdd_on(intel_dp); |
1244 | intel_dp_sink_dpms(intel_dp, mode); | 1279 | intel_dp_sink_dpms(intel_dp, mode); |
1245 | if (!(dp_reg & DP_PORT_EN)) { | 1280 | if (!(dp_reg & DP_PORT_EN)) { |
@@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1247 | ironlake_edp_panel_on(intel_dp); | 1282 | ironlake_edp_panel_on(intel_dp); |
1248 | ironlake_edp_panel_vdd_off(intel_dp, true); | 1283 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1249 | intel_dp_complete_link_train(intel_dp); | 1284 | intel_dp_complete_link_train(intel_dp); |
1250 | ironlake_edp_backlight_on(intel_dp); | ||
1251 | } else | 1285 | } else |
1252 | ironlake_edp_panel_vdd_off(intel_dp, false); | 1286 | ironlake_edp_panel_vdd_off(intel_dp, false); |
1253 | ironlake_edp_backlight_on(intel_dp); | 1287 | ironlake_edp_backlight_on(intel_dp); |
@@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | |||
1285 | * link status information | 1319 | * link status information |
1286 | */ | 1320 | */ |
1287 | static bool | 1321 | static bool |
1288 | intel_dp_get_link_status(struct intel_dp *intel_dp) | 1322 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1289 | { | 1323 | { |
1290 | return intel_dp_aux_native_read_retry(intel_dp, | 1324 | return intel_dp_aux_native_read_retry(intel_dp, |
1291 | DP_LANE0_1_STATUS, | 1325 | DP_LANE0_1_STATUS, |
1292 | intel_dp->link_status, | 1326 | link_status, |
1293 | DP_LINK_STATUS_SIZE); | 1327 | DP_LINK_STATUS_SIZE); |
1294 | } | 1328 | } |
1295 | 1329 | ||
@@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | |||
1301 | } | 1335 | } |
1302 | 1336 | ||
1303 | static uint8_t | 1337 | static uint8_t |
1304 | intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | 1338 | intel_get_adjust_request_voltage(uint8_t adjust_request[2], |
1305 | int lane) | 1339 | int lane) |
1306 | { | 1340 | { |
1307 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
1308 | int s = ((lane & 1) ? | 1341 | int s = ((lane & 1) ? |
1309 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | 1342 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : |
1310 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | 1343 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); |
1311 | uint8_t l = intel_dp_link_status(link_status, i); | 1344 | uint8_t l = adjust_request[lane>>1]; |
1312 | 1345 | ||
1313 | return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | 1346 | return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; |
1314 | } | 1347 | } |
1315 | 1348 | ||
1316 | static uint8_t | 1349 | static uint8_t |
1317 | intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], | 1350 | intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], |
1318 | int lane) | 1351 | int lane) |
1319 | { | 1352 | { |
1320 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
1321 | int s = ((lane & 1) ? | 1353 | int s = ((lane & 1) ? |
1322 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | 1354 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : |
1323 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | 1355 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); |
1324 | uint8_t l = intel_dp_link_status(link_status, i); | 1356 | uint8_t l = adjust_request[lane>>1]; |
1325 | 1357 | ||
1326 | return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | 1358 | return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; |
1327 | } | 1359 | } |
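The adjust-request helpers now take the two raw DPCD bytes directly: each byte carries two lanes, with the voltage-swing and pre-emphasis requests packed into 2-bit fields selected by the lane's parity. The decode below follows the shifts used above (lane 0 in the low nibble, lane 1 in the high nibble); the sample byte is invented.

    /* Decode one DPCD ADJUST_REQUEST byte: two lanes, 2 bits per field. */
    #include <stdio.h>

    static unsigned int adjust_voltage(unsigned char byte, int lane)
    {
            int shift = (lane & 1) ? 4 : 0;     /* lane 1 sits in the high nibble */
            return (byte >> shift) & 3;
    }

    static unsigned int adjust_pre_emphasis(unsigned char byte, int lane)
    {
            int shift = (lane & 1) ? 6 : 2;
            return (byte >> shift) & 3;
    }

    int main(void)
    {
            unsigned char sample = 0x9c;  /* invented: lane0 v=0 p=3, lane1 v=1 p=2 */
            int lane;

            for (lane = 0; lane < 2; lane++)
                    printf("lane %d: voltage level %u, pre-emphasis level %u\n",
                           lane, adjust_voltage(sample, lane),
                           adjust_pre_emphasis(sample, lane));
            return 0;
    }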
@@ -1344,6 +1376,7 @@ static char *link_train_names[] = { | |||
1344 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | 1376 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB |
1345 | */ | 1377 | */ |
1346 | #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 | 1378 | #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 |
1379 | #define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200 | ||
1347 | 1380 | ||
1348 | static uint8_t | 1381 | static uint8_t |
1349 | intel_dp_pre_emphasis_max(uint8_t voltage_swing) | 1382 | intel_dp_pre_emphasis_max(uint8_t voltage_swing) |
@@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
1362 | } | 1395 | } |
1363 | 1396 | ||
1364 | static void | 1397 | static void |
1365 | intel_get_adjust_train(struct intel_dp *intel_dp) | 1398 | intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1366 | { | 1399 | { |
1400 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1367 | uint8_t v = 0; | 1401 | uint8_t v = 0; |
1368 | uint8_t p = 0; | 1402 | uint8_t p = 0; |
1369 | int lane; | 1403 | int lane; |
1404 | uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); | ||
1405 | int voltage_max; | ||
1370 | 1406 | ||
1371 | for (lane = 0; lane < intel_dp->lane_count; lane++) { | 1407 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1372 | uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); | 1408 | uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); |
1373 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); | 1409 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); |
1374 | 1410 | ||
1375 | if (this_v > v) | 1411 | if (this_v > v) |
1376 | v = this_v; | 1412 | v = this_v; |
@@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1378 | p = this_p; | 1414 | p = this_p; |
1379 | } | 1415 | } |
1380 | 1416 | ||
1381 | if (v >= I830_DP_VOLTAGE_MAX) | 1417 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1382 | v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; | 1418 | voltage_max = I830_DP_VOLTAGE_MAX_CPT; |
1419 | else | ||
1420 | voltage_max = I830_DP_VOLTAGE_MAX; | ||
1421 | if (v >= voltage_max) | ||
1422 | v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; | ||
1383 | 1423 | ||
1384 | if (p >= intel_dp_pre_emphasis_max(v)) | 1424 | if (p >= intel_dp_pre_emphasis_max(v)) |
1385 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | 1425 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
@@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1389 | } | 1429 | } |
1390 | 1430 | ||
1391 | static uint32_t | 1431 | static uint32_t |
1392 | intel_dp_signal_levels(uint8_t train_set, int lane_count) | 1432 | intel_dp_signal_levels(uint8_t train_set) |
1393 | { | 1433 | { |
1394 | uint32_t signal_levels = 0; | 1434 | uint32_t signal_levels = 0; |
1395 | 1435 | ||
@@ -1458,9 +1498,8 @@ static uint8_t | |||
1458 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | 1498 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], |
1459 | int lane) | 1499 | int lane) |
1460 | { | 1500 | { |
1461 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
1462 | int s = (lane & 1) * 4; | 1501 | int s = (lane & 1) * 4; |
1463 | uint8_t l = intel_dp_link_status(link_status, i); | 1502 | uint8_t l = link_status[lane>>1]; |
1464 | 1503 | ||
1465 | return (l >> s) & 0xf; | 1504 | return (l >> s) & 0xf; |
1466 | } | 1505 | } |
@@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count | |||
1485 | DP_LANE_CHANNEL_EQ_DONE|\ | 1524 | DP_LANE_CHANNEL_EQ_DONE|\ |
1486 | DP_LANE_SYMBOL_LOCKED) | 1525 | DP_LANE_SYMBOL_LOCKED) |
1487 | static bool | 1526 | static bool |
1488 | intel_channel_eq_ok(struct intel_dp *intel_dp) | 1527 | intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1489 | { | 1528 | { |
1490 | uint8_t lane_align; | 1529 | uint8_t lane_align; |
1491 | uint8_t lane_status; | 1530 | uint8_t lane_status; |
1492 | int lane; | 1531 | int lane; |
1493 | 1532 | ||
1494 | lane_align = intel_dp_link_status(intel_dp->link_status, | 1533 | lane_align = intel_dp_link_status(link_status, |
1495 | DP_LANE_ALIGN_STATUS_UPDATED); | 1534 | DP_LANE_ALIGN_STATUS_UPDATED); |
1496 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | 1535 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) |
1497 | return false; | 1536 | return false; |
1498 | for (lane = 0; lane < intel_dp->lane_count; lane++) { | 1537 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1499 | lane_status = intel_get_lane_status(intel_dp->link_status, lane); | 1538 | lane_status = intel_get_lane_status(link_status, lane); |
1500 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) | 1539 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) |
1501 | return false; | 1540 | return false; |
1502 | } | 1541 | } |
@@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1521 | 1560 | ||
1522 | ret = intel_dp_aux_native_write(intel_dp, | 1561 | ret = intel_dp_aux_native_write(intel_dp, |
1523 | DP_TRAINING_LANE0_SET, | 1562 | DP_TRAINING_LANE0_SET, |
1524 | intel_dp->train_set, 4); | 1563 | intel_dp->train_set, |
1525 | if (ret != 4) | 1564 | intel_dp->lane_count); |
1565 | if (ret != intel_dp->lane_count) | ||
1526 | return false; | 1566 | return false; |
1527 | 1567 | ||
1528 | return true; | 1568 | return true; |
@@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1538 | int i; | 1578 | int i; |
1539 | uint8_t voltage; | 1579 | uint8_t voltage; |
1540 | bool clock_recovery = false; | 1580 | bool clock_recovery = false; |
1541 | int tries; | 1581 | int voltage_tries, loop_tries; |
1542 | u32 reg; | 1582 | u32 reg; |
1543 | uint32_t DP = intel_dp->DP; | 1583 | uint32_t DP = intel_dp->DP; |
1544 | 1584 | ||
@@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1565 | DP &= ~DP_LINK_TRAIN_MASK; | 1605 | DP &= ~DP_LINK_TRAIN_MASK; |
1566 | memset(intel_dp->train_set, 0, 4); | 1606 | memset(intel_dp->train_set, 0, 4); |
1567 | voltage = 0xff; | 1607 | voltage = 0xff; |
1568 | tries = 0; | 1608 | voltage_tries = 0; |
1609 | loop_tries = 0; | ||
1569 | clock_recovery = false; | 1610 | clock_recovery = false; |
1570 | for (;;) { | 1611 | for (;;) { |
1571 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1612 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1613 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
1572 | uint32_t signal_levels; | 1614 | uint32_t signal_levels; |
1573 | if (IS_GEN6(dev) && is_edp(intel_dp)) { | 1615 | |
1616 | if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { | ||
1574 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1617 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1575 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1618 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1576 | } else { | 1619 | } else { |
1577 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1620 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
1621 | DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); | ||
1578 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1622 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1579 | } | 1623 | } |
1580 | 1624 | ||
@@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1590 | /* Set training pattern 1 */ | 1634 | /* Set training pattern 1 */ |
1591 | 1635 | ||
1592 | udelay(100); | 1636 | udelay(100); |
1593 | if (!intel_dp_get_link_status(intel_dp)) | 1637 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
1638 | DRM_ERROR("failed to get link status\n"); | ||
1594 | break; | 1639 | break; |
1640 | } | ||
1595 | 1641 | ||
1596 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | 1642 | if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
1643 | DRM_DEBUG_KMS("clock recovery OK\n"); | ||
1597 | clock_recovery = true; | 1644 | clock_recovery = true; |
1598 | break; | 1645 | break; |
1599 | } | 1646 | } |
@@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1602 | for (i = 0; i < intel_dp->lane_count; i++) | 1649 | for (i = 0; i < intel_dp->lane_count; i++) |
1603 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1650 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1604 | break; | 1651 | break; |
1605 | if (i == intel_dp->lane_count) | 1652 | if (i == intel_dp->lane_count) { |
1606 | break; | 1653 | ++loop_tries; |
1654 | if (loop_tries == 5) { | ||
1655 | DRM_DEBUG_KMS("too many full retries, give up\n"); | ||
1656 | break; | ||
1657 | } | ||
1658 | memset(intel_dp->train_set, 0, 4); | ||
1659 | voltage_tries = 0; | ||
1660 | continue; | ||
1661 | } | ||
1607 | 1662 | ||
1608 | /* Check to see if we've tried the same voltage 5 times */ | 1663 | /* Check to see if we've tried the same voltage 5 times */ |
1609 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1664 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1610 | ++tries; | 1665 | ++voltage_tries; |
1611 | if (tries == 5) | 1666 | if (voltage_tries == 5) { |
1667 | DRM_DEBUG_KMS("too many voltage retries, give up\n"); | ||
1612 | break; | 1668 | break; |
1669 | } | ||
1613 | } else | 1670 | } else |
1614 | tries = 0; | 1671 | voltage_tries = 0; |
1615 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1672 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1616 | 1673 | ||
1617 | /* Compute new intel_dp->train_set as requested by target */ | 1674 | /* Compute new intel_dp->train_set as requested by target */ |
1618 | intel_get_adjust_train(intel_dp); | 1675 | intel_get_adjust_train(intel_dp, link_status); |
1619 | } | 1676 | } |
1620 | 1677 | ||
1621 | intel_dp->DP = DP; | 1678 | intel_dp->DP = DP; |
@@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1638 | for (;;) { | 1695 | for (;;) { |
1639 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1696 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1640 | uint32_t signal_levels; | 1697 | uint32_t signal_levels; |
1698 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
1641 | 1699 | ||
1642 | if (cr_tries > 5) { | 1700 | if (cr_tries > 5) { |
1643 | DRM_ERROR("failed to train DP, aborting\n"); | 1701 | DRM_ERROR("failed to train DP, aborting\n"); |
@@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1645 | break; | 1703 | break; |
1646 | } | 1704 | } |
1647 | 1705 | ||
1648 | if (IS_GEN6(dev) && is_edp(intel_dp)) { | 1706 | if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
1649 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1707 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1650 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1708 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1651 | } else { | 1709 | } else { |
1652 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1710 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
1653 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1711 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1654 | } | 1712 | } |
1655 | 1713 | ||
@@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1665 | break; | 1723 | break; |
1666 | 1724 | ||
1667 | udelay(400); | 1725 | udelay(400); |
1668 | if (!intel_dp_get_link_status(intel_dp)) | 1726 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
1669 | break; | 1727 | break; |
1670 | 1728 | ||
1671 | /* Make sure clock is still ok */ | 1729 | /* Make sure clock is still ok */ |
1672 | if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | 1730 | if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
1673 | intel_dp_start_link_train(intel_dp); | 1731 | intel_dp_start_link_train(intel_dp); |
1674 | cr_tries++; | 1732 | cr_tries++; |
1675 | continue; | 1733 | continue; |
1676 | } | 1734 | } |
1677 | 1735 | ||
1678 | if (intel_channel_eq_ok(intel_dp)) { | 1736 | if (intel_channel_eq_ok(intel_dp, link_status)) { |
1679 | channel_eq = true; | 1737 | channel_eq = true; |
1680 | break; | 1738 | break; |
1681 | } | 1739 | } |
@@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1690 | } | 1748 | } |
1691 | 1749 | ||
1692 | /* Compute new intel_dp->train_set as requested by target */ | 1750 | /* Compute new intel_dp->train_set as requested by target */ |
1693 | intel_get_adjust_train(intel_dp); | 1751 | intel_get_adjust_train(intel_dp, link_status); |
1694 | ++tries; | 1752 | ++tries; |
1695 | } | 1753 | } |
1696 | 1754 | ||
@@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1735 | 1793 | ||
1736 | msleep(17); | 1794 | msleep(17); |
1737 | 1795 | ||
1738 | if (is_edp(intel_dp)) | 1796 | if (is_edp(intel_dp)) { |
1739 | DP |= DP_LINK_TRAIN_OFF; | 1797 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1798 | DP |= DP_LINK_TRAIN_OFF_CPT; | ||
1799 | else | ||
1800 | DP |= DP_LINK_TRAIN_OFF; | ||
1801 | } | ||
1740 | 1802 | ||
1741 | if (!HAS_PCH_CPT(dev) && | 1803 | if (!HAS_PCH_CPT(dev) && |
1742 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | 1804 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { |
@@ -1822,6 +1884,7 @@ static void | |||
1822 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1884 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1823 | { | 1885 | { |
1824 | u8 sink_irq_vector; | 1886 | u8 sink_irq_vector; |
1887 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
1825 | 1888 | ||
1826 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) | 1889 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) |
1827 | return; | 1890 | return; |
@@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
1830 | return; | 1893 | return; |
1831 | 1894 | ||
1832 | /* Try to read receiver status if the link appears to be up */ | 1895 | /* Try to read receiver status if the link appears to be up */ |
1833 | if (!intel_dp_get_link_status(intel_dp)) { | 1896 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
1834 | intel_dp_link_down(intel_dp); | 1897 | intel_dp_link_down(intel_dp); |
1835 | return; | 1898 | return; |
1836 | } | 1899 | } |
@@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
1855 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | 1918 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); |
1856 | } | 1919 | } |
1857 | 1920 | ||
1858 | if (!intel_channel_eq_ok(intel_dp)) { | 1921 | if (!intel_channel_eq_ok(intel_dp, link_status)) { |
1859 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | 1922 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
1860 | drm_get_encoder_name(&intel_dp->base.base)); | 1923 | drm_get_encoder_name(&intel_dp->base.base)); |
1861 | intel_dp_start_link_train(intel_dp); | 1924 | intel_dp_start_link_train(intel_dp); |
@@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) | |||
2179 | continue; | 2242 | continue; |
2180 | 2243 | ||
2181 | intel_dp = enc_to_intel_dp(encoder); | 2244 | intel_dp = enc_to_intel_dp(encoder); |
2182 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) | 2245 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || |
2246 | intel_dp->base.type == INTEL_OUTPUT_EDP) | ||
2183 | return intel_dp->output_reg; | 2247 | return intel_dp->output_reg; |
2184 | } | 2248 | } |
2185 | 2249 | ||
@@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2321 | 2385 | ||
2322 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> | 2386 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
2323 | PANEL_LIGHT_ON_DELAY_SHIFT; | 2387 | PANEL_LIGHT_ON_DELAY_SHIFT; |
2324 | 2388 | ||
2325 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | 2389 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> |
2326 | PANEL_LIGHT_OFF_DELAY_SHIFT; | 2390 | PANEL_LIGHT_OFF_DELAY_SHIFT; |
2327 | 2391 | ||
@@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2354 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | 2418 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", |
2355 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | 2419 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); |
2356 | 2420 | ||
2357 | intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay; | ||
2358 | |||
2359 | ironlake_edp_panel_vdd_on(intel_dp); | 2421 | ironlake_edp_panel_vdd_on(intel_dp); |
2360 | ret = intel_dp_get_dpcd(intel_dp); | 2422 | ret = intel_dp_get_dpcd(intel_dp); |
2361 | ironlake_edp_panel_vdd_off(intel_dp, false); | 2423 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2424 | |||
2362 | if (ret) { | 2425 | if (ret) { |
2363 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 2426 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
2364 | dev_priv->no_aux_handshake = | 2427 | dev_priv->no_aux_handshake = |
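The intel_dp.c hunks above all apply one pattern: the DPCD link status is no longer cached inside struct intel_dp but is read into a caller-owned stack buffer and passed explicitly to every helper that needs it. A minimal sketch of the resulting call shape follows; example_link_ok() is a hypothetical wrapper invented for illustration, while intel_dp_get_link_status(), intel_clock_recovery_ok() and intel_channel_eq_ok() are the real helpers whose new signatures appear in the hunks.

    /* Illustrative sketch only, not i915 code: shows the buffer-passing pattern.
     * DP_LINK_STATUS_SIZE and the helper declarations come from the driver/DRM
     * DisplayPort headers. */
    static bool example_link_ok(struct intel_dp *intel_dp)
    {
            uint8_t link_status[DP_LINK_STATUS_SIZE];   /* lives on the caller's stack now */

            if (!intel_dp_get_link_status(intel_dp, link_status))
                    return false;                       /* DPCD read failed */

            if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count))
                    return false;                       /* clock recovery lost */

            return intel_channel_eq_ok(intel_dp, link_status);
    }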
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 499d4c0dbeeb..21f60b7d69a3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd) | |||
326 | static int intel_panel_get_brightness(struct backlight_device *bd) | 326 | static int intel_panel_get_brightness(struct backlight_device *bd) |
327 | { | 327 | { |
328 | struct drm_device *dev = bl_get_data(bd); | 328 | struct drm_device *dev = bl_get_data(bd); |
329 | return intel_panel_get_backlight(dev); | 329 | struct drm_i915_private *dev_priv = dev->dev_private; |
330 | return dev_priv->backlight_level; | ||
330 | } | 331 | } |
331 | 332 | ||
332 | static const struct backlight_ops intel_panel_bl_ops = { | 333 | static const struct backlight_ops intel_panel_bl_ops = { |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7fdfa8ea7570..38e1bda73d33 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -480,21 +480,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
480 | } | 480 | } |
481 | break; | 481 | break; |
482 | case DB_Z_INFO: | 482 | case DB_Z_INFO: |
483 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
484 | if (r) { | ||
485 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
486 | "0x%04X\n", reg); | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | track->db_z_info = radeon_get_ib_value(p, idx); | 483 | track->db_z_info = radeon_get_ib_value(p, idx); |
490 | ib[idx] &= ~Z_ARRAY_MODE(0xf); | 484 | if (!p->keep_tiling_flags) { |
491 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); | 485 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
492 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 486 | if (r) { |
493 | ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 487 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
494 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 488 | "0x%04X\n", reg); |
495 | } else { | 489 | return -EINVAL; |
496 | ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 490 | } |
497 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 491 | ib[idx] &= ~Z_ARRAY_MODE(0xf); |
492 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); | ||
493 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
494 | ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
495 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
496 | } else { | ||
497 | ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
498 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
499 | } | ||
498 | } | 500 | } |
499 | break; | 501 | break; |
500 | case DB_STENCIL_INFO: | 502 | case DB_STENCIL_INFO: |
@@ -607,40 +609,44 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
607 | case CB_COLOR5_INFO: | 609 | case CB_COLOR5_INFO: |
608 | case CB_COLOR6_INFO: | 610 | case CB_COLOR6_INFO: |
609 | case CB_COLOR7_INFO: | 611 | case CB_COLOR7_INFO: |
610 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
611 | if (r) { | ||
612 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
613 | "0x%04X\n", reg); | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | tmp = (reg - CB_COLOR0_INFO) / 0x3c; | 612 | tmp = (reg - CB_COLOR0_INFO) / 0x3c; |
617 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | 613 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); |
618 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 614 | if (!p->keep_tiling_flags) { |
619 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 615 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
620 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 616 | if (r) { |
621 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | 617 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
622 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 618 | "0x%04X\n", reg); |
623 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 619 | return -EINVAL; |
620 | } | ||
621 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
622 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
623 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
624 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
625 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
626 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
627 | } | ||
624 | } | 628 | } |
625 | break; | 629 | break; |
626 | case CB_COLOR8_INFO: | 630 | case CB_COLOR8_INFO: |
627 | case CB_COLOR9_INFO: | 631 | case CB_COLOR9_INFO: |
628 | case CB_COLOR10_INFO: | 632 | case CB_COLOR10_INFO: |
629 | case CB_COLOR11_INFO: | 633 | case CB_COLOR11_INFO: |
630 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
631 | if (r) { | ||
632 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
633 | "0x%04X\n", reg); | ||
634 | return -EINVAL; | ||
635 | } | ||
636 | tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; | 634 | tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; |
637 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | 635 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); |
638 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 636 | if (!p->keep_tiling_flags) { |
639 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 637 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
640 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 638 | if (r) { |
641 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | 639 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
642 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 640 | "0x%04X\n", reg); |
643 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 641 | return -EINVAL; |
642 | } | ||
643 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
644 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
645 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
646 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
647 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
648 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
649 | } | ||
644 | } | 650 | } |
645 | break; | 651 | break; |
646 | case CB_COLOR0_PITCH: | 652 | case CB_COLOR0_PITCH: |
@@ -1311,10 +1317,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1311 | return -EINVAL; | 1317 | return -EINVAL; |
1312 | } | 1318 | } |
1313 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1319 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1314 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1320 | if (!p->keep_tiling_flags) { |
1315 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 1321 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
1316 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1322 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); |
1317 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | 1323 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
1324 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
1325 | } | ||
1318 | texture = reloc->robj; | 1326 | texture = reloc->robj; |
1319 | /* tex mip base */ | 1327 | /* tex mip base */ |
1320 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 1328 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
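The evergreen_cs.c hunks above (and the r600/r300 ones that follow) share one guard: the relocation lookup and the array-mode/tile-flag rewriting are skipped entirely when userspace set RADEON_CS_KEEP_TILING_FLAGS. A condensed, hedged paraphrase of the DB_Z_INFO case, not the literal checker code:

    /* Condensed paraphrase of the guard added above: only rewrite the tiling
     * bits from the relocation when userspace did not ask to keep its own. */
    track->db_z_info = radeon_get_ib_value(p, idx);
    if (!p->keep_tiling_flags) {
            r = evergreen_cs_packet_next_reloc(p, &reloc);
            if (r)
                    return -EINVAL;                     /* a reloc is required in this mode */
            ib[idx] &= ~Z_ARRAY_MODE(0xf);
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                    ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
            else
                    ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
    }
    /* else: whatever array-mode bits userspace placed in the IB are left untouched */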
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 400b26df652a..c93bc64707e1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
701 | return r; | 701 | return r; |
702 | } | 702 | } |
703 | 703 | ||
704 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 704 | if (p->keep_tiling_flags) { |
705 | tile_flags |= R300_TXO_MACRO_TILE; | 705 | ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ |
706 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 706 | ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); |
707 | tile_flags |= R300_TXO_MICRO_TILE; | 707 | } else { |
708 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | 708 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
709 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; | 709 | tile_flags |= R300_TXO_MACRO_TILE; |
710 | 710 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | |
711 | tmp = idx_value + ((u32)reloc->lobj.gpu_offset); | 711 | tile_flags |= R300_TXO_MICRO_TILE; |
712 | tmp |= tile_flags; | 712 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) |
713 | ib[idx] = tmp; | 713 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; |
714 | |||
715 | tmp = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
716 | tmp |= tile_flags; | ||
717 | ib[idx] = tmp; | ||
718 | } | ||
714 | track->textures[i].robj = reloc->robj; | 719 | track->textures[i].robj = reloc->robj; |
715 | track->tex_dirty = true; | 720 | track->tex_dirty = true; |
716 | break; | 721 | break; |
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
760 | /* RB3D_COLORPITCH1 */ | 765 | /* RB3D_COLORPITCH1 */ |
761 | /* RB3D_COLORPITCH2 */ | 766 | /* RB3D_COLORPITCH2 */ |
762 | /* RB3D_COLORPITCH3 */ | 767 | /* RB3D_COLORPITCH3 */ |
763 | r = r100_cs_packet_next_reloc(p, &reloc); | 768 | if (!p->keep_tiling_flags) { |
764 | if (r) { | 769 | r = r100_cs_packet_next_reloc(p, &reloc); |
765 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | 770 | if (r) { |
766 | idx, reg); | 771 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
767 | r100_cs_dump_packet(p, pkt); | 772 | idx, reg); |
768 | return r; | 773 | r100_cs_dump_packet(p, pkt); |
769 | } | 774 | return r; |
775 | } | ||
770 | 776 | ||
771 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 777 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
772 | tile_flags |= R300_COLOR_TILE_ENABLE; | 778 | tile_flags |= R300_COLOR_TILE_ENABLE; |
773 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 779 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
774 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | 780 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
775 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | 781 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) |
776 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; | 782 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; |
777 | 783 | ||
778 | tmp = idx_value & ~(0x7 << 16); | 784 | tmp = idx_value & ~(0x7 << 16); |
779 | tmp |= tile_flags; | 785 | tmp |= tile_flags; |
780 | ib[idx] = tmp; | 786 | ib[idx] = tmp; |
787 | } | ||
781 | i = (reg - 0x4E38) >> 2; | 788 | i = (reg - 0x4E38) >> 2; |
782 | track->cb[i].pitch = idx_value & 0x3FFE; | 789 | track->cb[i].pitch = idx_value & 0x3FFE; |
783 | switch (((idx_value >> 21) & 0xF)) { | 790 | switch (((idx_value >> 21) & 0xF)) { |
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
843 | break; | 850 | break; |
844 | case 0x4F24: | 851 | case 0x4F24: |
845 | /* ZB_DEPTHPITCH */ | 852 | /* ZB_DEPTHPITCH */ |
846 | r = r100_cs_packet_next_reloc(p, &reloc); | 853 | if (!p->keep_tiling_flags) { |
847 | if (r) { | 854 | r = r100_cs_packet_next_reloc(p, &reloc); |
848 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | 855 | if (r) { |
849 | idx, reg); | 856 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
850 | r100_cs_dump_packet(p, pkt); | 857 | idx, reg); |
851 | return r; | 858 | r100_cs_dump_packet(p, pkt); |
852 | } | 859 | return r; |
853 | 860 | } | |
854 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
855 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; | ||
856 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
857 | tile_flags |= R300_DEPTHMICROTILE_TILED; | ||
858 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | ||
859 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; | ||
860 | 861 | ||
861 | tmp = idx_value & ~(0x7 << 16); | 862 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
862 | tmp |= tile_flags; | 863 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; |
863 | ib[idx] = tmp; | 864 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
865 | tile_flags |= R300_DEPTHMICROTILE_TILED; | ||
866 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | ||
867 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; | ||
864 | 868 | ||
869 | tmp = idx_value & ~(0x7 << 16); | ||
870 | tmp |= tile_flags; | ||
871 | ib[idx] = tmp; | ||
872 | } | ||
865 | track->zb.pitch = idx_value & 0x3FFC; | 873 | track->zb.pitch = idx_value & 0x3FFC; |
866 | track->zb_dirty = true; | 874 | track->zb_dirty = true; |
867 | break; | 875 | break; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0a2e023c1557..cb1acffd2430 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
941 | track->db_depth_control = radeon_get_ib_value(p, idx); | 941 | track->db_depth_control = radeon_get_ib_value(p, idx); |
942 | break; | 942 | break; |
943 | case R_028010_DB_DEPTH_INFO: | 943 | case R_028010_DB_DEPTH_INFO: |
944 | if (r600_cs_packet_next_is_pkt3_nop(p)) { | 944 | if (!p->keep_tiling_flags && |
945 | r600_cs_packet_next_is_pkt3_nop(p)) { | ||
945 | r = r600_cs_packet_next_reloc(p, &reloc); | 946 | r = r600_cs_packet_next_reloc(p, &reloc); |
946 | if (r) { | 947 | if (r) { |
947 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | 948 | dev_warn(p->dev, "bad SET_CONTEXT_REG " |
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
992 | case R_0280B4_CB_COLOR5_INFO: | 993 | case R_0280B4_CB_COLOR5_INFO: |
993 | case R_0280B8_CB_COLOR6_INFO: | 994 | case R_0280B8_CB_COLOR6_INFO: |
994 | case R_0280BC_CB_COLOR7_INFO: | 995 | case R_0280BC_CB_COLOR7_INFO: |
995 | if (r600_cs_packet_next_is_pkt3_nop(p)) { | 996 | if (!p->keep_tiling_flags && |
997 | r600_cs_packet_next_is_pkt3_nop(p)) { | ||
996 | r = r600_cs_packet_next_reloc(p, &reloc); | 998 | r = r600_cs_packet_next_reloc(p, &reloc); |
997 | if (r) { | 999 | if (r) { |
998 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | 1000 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); |
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1291 | mip_offset <<= 8; | 1293 | mip_offset <<= 8; |
1292 | 1294 | ||
1293 | word0 = radeon_get_ib_value(p, idx + 0); | 1295 | word0 = radeon_get_ib_value(p, idx + 0); |
1294 | if (tiling_flags & RADEON_TILING_MACRO) | 1296 | if (!p->keep_tiling_flags) { |
1295 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1297 | if (tiling_flags & RADEON_TILING_MACRO) |
1296 | else if (tiling_flags & RADEON_TILING_MICRO) | 1298 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
1297 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); | 1299 | else if (tiling_flags & RADEON_TILING_MICRO) |
1300 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); | ||
1301 | } | ||
1298 | word1 = radeon_get_ib_value(p, idx + 1); | 1302 | word1 = radeon_get_ib_value(p, idx + 1); |
1299 | w0 = G_038000_TEX_WIDTH(word0) + 1; | 1303 | w0 = G_038000_TEX_WIDTH(word0) + 1; |
1300 | h0 = G_038004_TEX_HEIGHT(word1) + 1; | 1304 | h0 = G_038004_TEX_HEIGHT(word1) + 1; |
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1621 | return -EINVAL; | 1625 | return -EINVAL; |
1622 | } | 1626 | } |
1623 | base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1627 | base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1624 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1628 | if (!p->keep_tiling_flags) { |
1625 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1629 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
1626 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1630 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
1627 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); | 1631 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
1632 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); | ||
1633 | } | ||
1628 | texture = reloc->robj; | 1634 | texture = reloc->robj; |
1629 | /* tex mip base */ | 1635 | /* tex mip base */ |
1630 | r = r600_cs_packet_next_reloc(p, &reloc); | 1636 | r = r600_cs_packet_next_reloc(p, &reloc); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index fc5a1d642cb5..8227e76b5c70 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -611,7 +611,8 @@ struct radeon_cs_parser { | |||
611 | struct radeon_ib *ib; | 611 | struct radeon_ib *ib; |
612 | void *track; | 612 | void *track; |
613 | unsigned family; | 613 | unsigned family; |
614 | int parser_error; | 614 | int parser_error; |
615 | bool keep_tiling_flags; | ||
615 | }; | 616 | }; |
616 | 617 | ||
617 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); | 618 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fecd705a1a5f..d24baf30efcb 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -62,6 +62,87 @@ union atom_supported_devices { | |||
62 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; | 62 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, | ||
66 | ATOM_GPIO_I2C_ASSIGMENT *gpio, | ||
67 | u8 index) | ||
68 | { | ||
69 | /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ | ||
70 | if ((rdev->family == CHIP_R420) || | ||
71 | (rdev->family == CHIP_R423) || | ||
72 | (rdev->family == CHIP_RV410)) { | ||
73 | if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || | ||
74 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || | ||
75 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { | ||
76 | gpio->ucClkMaskShift = 0x19; | ||
77 | gpio->ucDataMaskShift = 0x18; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | /* some evergreen boards have bad data for this entry */ | ||
82 | if (ASIC_IS_DCE4(rdev)) { | ||
83 | if ((index == 7) && | ||
84 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && | ||
85 | (gpio->sucI2cId.ucAccess == 0)) { | ||
86 | gpio->sucI2cId.ucAccess = 0x97; | ||
87 | gpio->ucDataMaskShift = 8; | ||
88 | gpio->ucDataEnShift = 8; | ||
89 | gpio->ucDataY_Shift = 8; | ||
90 | gpio->ucDataA_Shift = 8; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | /* some DCE3 boards have bad data for this entry */ | ||
95 | if (ASIC_IS_DCE3(rdev)) { | ||
96 | if ((index == 4) && | ||
97 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && | ||
98 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
99 | gpio->sucI2cId.ucAccess = 0x14; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) | ||
104 | { | ||
105 | struct radeon_i2c_bus_rec i2c; | ||
106 | |||
107 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); | ||
108 | |||
109 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | ||
110 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | ||
111 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | ||
112 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; | ||
113 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; | ||
114 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; | ||
115 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; | ||
116 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; | ||
117 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | ||
118 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | ||
119 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | ||
120 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | ||
121 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | ||
122 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | ||
123 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | ||
124 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | ||
125 | |||
126 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | ||
127 | i2c.hw_capable = true; | ||
128 | else | ||
129 | i2c.hw_capable = false; | ||
130 | |||
131 | if (gpio->sucI2cId.ucAccess == 0xa0) | ||
132 | i2c.mm_i2c = true; | ||
133 | else | ||
134 | i2c.mm_i2c = false; | ||
135 | |||
136 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | ||
137 | |||
138 | if (i2c.mask_clk_reg) | ||
139 | i2c.valid = true; | ||
140 | else | ||
141 | i2c.valid = false; | ||
142 | |||
143 | return i2c; | ||
144 | } | ||
145 | |||
65 | static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, | 146 | static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, |
66 | uint8_t id) | 147 | uint8_t id) |
67 | { | 148 | { |
@@ -85,71 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd | |||
85 | for (i = 0; i < num_indices; i++) { | 166 | for (i = 0; i < num_indices; i++) { |
86 | gpio = &i2c_info->asGPIO_Info[i]; | 167 | gpio = &i2c_info->asGPIO_Info[i]; |
87 | 168 | ||
88 | /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ | 169 | radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); |
89 | if ((rdev->family == CHIP_R420) || | ||
90 | (rdev->family == CHIP_R423) || | ||
91 | (rdev->family == CHIP_RV410)) { | ||
92 | if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || | ||
93 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || | ||
94 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { | ||
95 | gpio->ucClkMaskShift = 0x19; | ||
96 | gpio->ucDataMaskShift = 0x18; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | /* some evergreen boards have bad data for this entry */ | ||
101 | if (ASIC_IS_DCE4(rdev)) { | ||
102 | if ((i == 7) && | ||
103 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && | ||
104 | (gpio->sucI2cId.ucAccess == 0)) { | ||
105 | gpio->sucI2cId.ucAccess = 0x97; | ||
106 | gpio->ucDataMaskShift = 8; | ||
107 | gpio->ucDataEnShift = 8; | ||
108 | gpio->ucDataY_Shift = 8; | ||
109 | gpio->ucDataA_Shift = 8; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* some DCE3 boards have bad data for this entry */ | ||
114 | if (ASIC_IS_DCE3(rdev)) { | ||
115 | if ((i == 4) && | ||
116 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && | ||
117 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
118 | gpio->sucI2cId.ucAccess = 0x14; | ||
119 | } | ||
120 | 170 | ||
121 | if (gpio->sucI2cId.ucAccess == id) { | 171 | if (gpio->sucI2cId.ucAccess == id) { |
122 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 172 | i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); |
123 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | ||
124 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | ||
125 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; | ||
126 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; | ||
127 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; | ||
128 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; | ||
129 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; | ||
130 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | ||
131 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | ||
132 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | ||
133 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | ||
134 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | ||
135 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | ||
136 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | ||
137 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | ||
138 | |||
139 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | ||
140 | i2c.hw_capable = true; | ||
141 | else | ||
142 | i2c.hw_capable = false; | ||
143 | |||
144 | if (gpio->sucI2cId.ucAccess == 0xa0) | ||
145 | i2c.mm_i2c = true; | ||
146 | else | ||
147 | i2c.mm_i2c = false; | ||
148 | |||
149 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | ||
150 | |||
151 | if (i2c.mask_clk_reg) | ||
152 | i2c.valid = true; | ||
153 | break; | 173 | break; |
154 | } | 174 | } |
155 | } | 175 | } |
@@ -169,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
169 | int i, num_indices; | 189 | int i, num_indices; |
170 | char stmp[32]; | 190 | char stmp[32]; |
171 | 191 | ||
172 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); | ||
173 | |||
174 | if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { | 192 | if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { |
175 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | 193 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); |
176 | 194 | ||
@@ -179,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
179 | 197 | ||
180 | for (i = 0; i < num_indices; i++) { | 198 | for (i = 0; i < num_indices; i++) { |
181 | gpio = &i2c_info->asGPIO_Info[i]; | 199 | gpio = &i2c_info->asGPIO_Info[i]; |
182 | i2c.valid = false; | ||
183 | |||
184 | /* some evergreen boards have bad data for this entry */ | ||
185 | if (ASIC_IS_DCE4(rdev)) { | ||
186 | if ((i == 7) && | ||
187 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && | ||
188 | (gpio->sucI2cId.ucAccess == 0)) { | ||
189 | gpio->sucI2cId.ucAccess = 0x97; | ||
190 | gpio->ucDataMaskShift = 8; | ||
191 | gpio->ucDataEnShift = 8; | ||
192 | gpio->ucDataY_Shift = 8; | ||
193 | gpio->ucDataA_Shift = 8; | ||
194 | } | ||
195 | } | ||
196 | 200 | ||
197 | /* some DCE3 boards have bad data for this entry */ | 201 | radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); |
198 | if (ASIC_IS_DCE3(rdev)) { | ||
199 | if ((i == 4) && | ||
200 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && | ||
201 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
202 | gpio->sucI2cId.ucAccess = 0x14; | ||
203 | } | ||
204 | |||
205 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | ||
206 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | ||
207 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | ||
208 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; | ||
209 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; | ||
210 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; | ||
211 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; | ||
212 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; | ||
213 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | ||
214 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | ||
215 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | ||
216 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | ||
217 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | ||
218 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | ||
219 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | ||
220 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | ||
221 | |||
222 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | ||
223 | i2c.hw_capable = true; | ||
224 | else | ||
225 | i2c.hw_capable = false; | ||
226 | |||
227 | if (gpio->sucI2cId.ucAccess == 0xa0) | ||
228 | i2c.mm_i2c = true; | ||
229 | else | ||
230 | i2c.mm_i2c = false; | ||
231 | 202 | ||
232 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | 203 | i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); |
233 | 204 | ||
234 | if (i2c.mask_clk_reg) { | 205 | if (i2c.valid) { |
235 | i2c.valid = true; | ||
236 | sprintf(stmp, "0x%x", i2c.i2c_id); | 206 | sprintf(stmp, "0x%x", i2c.i2c_id); |
237 | rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); | 207 | rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); |
238 | } | 208 | } |
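The radeon_atombios.c change is a pure refactor: the per-board quirk fixups and the ATOM_GPIO_I2C_ASSIGMENT-to-radeon_i2c_bus_rec conversion that were open-coded in both callers now live in radeon_lookup_i2c_gpio_quirks() and radeon_get_bus_rec_for_i2c_gpio(). A sketch of the loop shape radeon_atombios_i2c_init() ends up with; the lookup variant applies the same quirks but only converts the entry whose sucI2cId.ucAccess matches the requested id.

    /* Post-refactor loop shape (sketch of the init path shown above): */
    for (i = 0; i < num_indices; i++) {
            gpio = &i2c_info->asGPIO_Info[i];

            radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);   /* r4xx/DCE3/DCE4 fixups */
            i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);    /* fill in radeon_i2c_bus_rec */

            if (i2c.valid) {
                    sprintf(stmp, "0x%x", i2c.i2c_id);
                    rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
            }
    }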
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ccaa243c1442..29afd71e0840 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
93 | { | 93 | { |
94 | struct drm_radeon_cs *cs = data; | 94 | struct drm_radeon_cs *cs = data; |
95 | uint64_t *chunk_array_ptr; | 95 | uint64_t *chunk_array_ptr; |
96 | unsigned size, i; | 96 | unsigned size, i, flags = 0; |
97 | 97 | ||
98 | if (!cs->num_chunks) { | 98 | if (!cs->num_chunks) { |
99 | return 0; | 99 | return 0; |
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
140 | if (p->chunks[i].length_dw == 0) | 140 | if (p->chunks[i].length_dw == 0) |
141 | return -EINVAL; | 141 | return -EINVAL; |
142 | } | 142 | } |
143 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && | ||
144 | !p->chunks[i].length_dw) { | ||
145 | return -EINVAL; | ||
146 | } | ||
143 | 147 | ||
144 | p->chunks[i].length_dw = user_chunk.length_dw; | 148 | p->chunks[i].length_dw = user_chunk.length_dw; |
145 | p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; | 149 | p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; |
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
155 | p->chunks[i].user_ptr, size)) { | 159 | p->chunks[i].user_ptr, size)) { |
156 | return -EFAULT; | 160 | return -EFAULT; |
157 | } | 161 | } |
162 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { | ||
163 | flags = p->chunks[i].kdata[0]; | ||
164 | } | ||
158 | } else { | 165 | } else { |
159 | p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); | 166 | p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); |
160 | p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); | 167 | p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); |
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
174 | p->chunks[p->chunk_ib_idx].length_dw); | 181 | p->chunks[p->chunk_ib_idx].length_dw); |
175 | return -EINVAL; | 182 | return -EINVAL; |
176 | } | 183 | } |
184 | |||
185 | p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; | ||
177 | return 0; | 186 | return 0; |
178 | } | 187 | } |
179 | 188 | ||
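radeon_cs.c now accepts an optional RADEON_CHUNK_ID_FLAGS chunk and latches bit RADEON_CS_KEEP_TILING_FLAGS into p->keep_tiling_flags, which the CS checkers above consult; userspace can detect support via the KMS minor version bump to 2.12 advertised in the next hunk. A condensed sketch of the parse (the surrounding chunk-copy loop is elided, so this is not the full parser):

    /* Condensed from the hunk above: */
    unsigned flags = 0;

    if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
            if (!p->chunks[i].length_dw)
                    return -EINVAL;             /* an empty flags chunk is rejected */
            flags = p->chunks[i].kdata[0];      /* first dword carries the CS flags */
    }

    /* after all chunks have been copied in: */
    p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;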
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a0b35e909489..71499fc3daf5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -53,9 +53,10 @@ | |||
53 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query | 53 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query |
54 | * 2.10.0 - fusion 2D tiling | 54 | * 2.10.0 - fusion 2D tiling |
55 | * 2.11.0 - backend map, initial compute support for the CS checker | 55 | * 2.11.0 - backend map, initial compute support for the CS checker |
56 | * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS | ||
56 | */ | 57 | */ |
57 | #define KMS_DRIVER_MAJOR 2 | 58 | #define KMS_DRIVER_MAJOR 2 |
58 | #define KMS_DRIVER_MINOR 11 | 59 | #define KMS_DRIVER_MINOR 12 |
59 | #define KMS_DRIVER_PATCHLEVEL 0 | 60 | #define KMS_DRIVER_PATCHLEVEL 0 |
60 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 61 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
61 | int radeon_driver_unload_kms(struct drm_device *dev); | 62 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 617b64678fc6..0bb0f5f713e6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -574,10 +574,16 @@ retry: | |||
574 | return ret; | 574 | return ret; |
575 | 575 | ||
576 | spin_lock(&glob->lru_lock); | 576 | spin_lock(&glob->lru_lock); |
577 | |||
578 | if (unlikely(list_empty(&bo->ddestroy))) { | ||
579 | spin_unlock(&glob->lru_lock); | ||
580 | return 0; | ||
581 | } | ||
582 | |||
577 | ret = ttm_bo_reserve_locked(bo, interruptible, | 583 | ret = ttm_bo_reserve_locked(bo, interruptible, |
578 | no_wait_reserve, false, 0); | 584 | no_wait_reserve, false, 0); |
579 | 585 | ||
580 | if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { | 586 | if (unlikely(ret != 0)) { |
581 | spin_unlock(&glob->lru_lock); | 587 | spin_unlock(&glob->lru_lock); |
582 | return ret; | 588 | return ret; |
583 | } | 589 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index bdde899af72e..111d956d8e7d 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -991,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
991 | uc = &priv->cards[i]; | 991 | uc = &priv->cards[i]; |
992 | } | 992 | } |
993 | 993 | ||
994 | if (!uc) | 994 | if (!uc) { |
995 | return -EINVAL; | 995 | ret_val = -EINVAL; |
996 | goto done; | ||
997 | } | ||
996 | 998 | ||
997 | if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) | 999 | if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { |
998 | return -EINVAL; | 1000 | ret_val = -EINVAL; |
1001 | goto done; | ||
1002 | } | ||
999 | 1003 | ||
1000 | if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) | 1004 | if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { |
1001 | return -EINVAL; | 1005 | ret_val = -EINVAL; |
1006 | goto done; | ||
1007 | } | ||
1002 | 1008 | ||
1003 | vga_put(pdev, io_state); | 1009 | vga_put(pdev, io_state); |
1004 | 1010 | ||
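The vgaarb hunk converts three early returns in vga_arb_write() into jumps to a common done: label, so whatever cleanup follows that label in the full function still runs on the error paths. A generic kernel-style sketch of the single-exit shape; example_op() and its arguments are made up for illustration and are not the arbiter code.

    /* Generic single-exit shape (illustration only): */
    static int example_op(int ok_a, int ok_b)
    {
            int ret_val = 0;

            if (!ok_a) {
                    ret_val = -EINVAL;
                    goto done;
            }
            if (!ok_b) {
                    ret_val = -EINVAL;
                    goto done;
            }
            /* normal work here */
    done:
            /* shared cleanup lives here in the real function */
            return ret_val;
    }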
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index 318e38e85376..5d760f3d21c2 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c | |||
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id); | |||
160 | static struct spi_driver ad7314_driver = { | 160 | static struct spi_driver ad7314_driver = { |
161 | .driver = { | 161 | .driver = { |
162 | .name = "ad7314", | 162 | .name = "ad7314", |
163 | .bus = &spi_bus_type, | ||
164 | .owner = THIS_MODULE, | 163 | .owner = THIS_MODULE, |
165 | }, | 164 | }, |
166 | .probe = ad7314_probe, | 165 | .probe = ad7314_probe, |
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index 52319340e182..04450f8bf5da 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c | |||
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi) | |||
227 | static struct spi_driver ads7871_driver = { | 227 | static struct spi_driver ads7871_driver = { |
228 | .driver = { | 228 | .driver = { |
229 | .name = DEVICE_NAME, | 229 | .name = DEVICE_NAME, |
230 | .bus = &spi_bus_type, | ||
231 | .owner = THIS_MODULE, | 230 | .owner = THIS_MODULE, |
232 | }, | 231 | }, |
233 | 232 | ||
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c index faa0884f61f6..f2359a0093bd 100644 --- a/drivers/hwmon/exynos4_tmu.c +++ b/drivers/hwmon/exynos4_tmu.c | |||
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = { | |||
506 | .resume = exynos4_tmu_resume, | 506 | .resume = exynos4_tmu_resume, |
507 | }; | 507 | }; |
508 | 508 | ||
509 | static int __init exynos4_tmu_driver_init(void) | 509 | module_platform_driver(exynos4_tmu_driver); |
510 | { | ||
511 | return platform_driver_register(&exynos4_tmu_driver); | ||
512 | } | ||
513 | module_init(exynos4_tmu_driver_init); | ||
514 | |||
515 | static void __exit exynos4_tmu_driver_exit(void) | ||
516 | { | ||
517 | platform_driver_unregister(&exynos4_tmu_driver); | ||
518 | } | ||
519 | module_exit(exynos4_tmu_driver_exit); | ||
520 | 510 | ||
521 | MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); | 511 | MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); |
522 | MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); | 512 | MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); |
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 89aa9fb743af..9ba38f318ffb 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = { | |||
539 | }, | 539 | }, |
540 | }; | 540 | }; |
541 | 541 | ||
542 | static int __init gpio_fan_init(void) | 542 | module_platform_driver(gpio_fan_driver); |
543 | { | ||
544 | return platform_driver_register(&gpio_fan_driver); | ||
545 | } | ||
546 | |||
547 | static void __exit gpio_fan_exit(void) | ||
548 | { | ||
549 | platform_driver_unregister(&gpio_fan_driver); | ||
550 | } | ||
551 | |||
552 | module_init(gpio_fan_init); | ||
553 | module_exit(gpio_fan_exit); | ||
554 | 543 | ||
555 | MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); | 544 | MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); |
556 | MODULE_DESCRIPTION("GPIO FAN driver"); | 545 | MODULE_DESCRIPTION("GPIO FAN driver"); |
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c index fea292d43407..7a48b1eb4233 100644 --- a/drivers/hwmon/jz4740-hwmon.c +++ b/drivers/hwmon/jz4740-hwmon.c | |||
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = { | |||
212 | }, | 212 | }, |
213 | }; | 213 | }; |
214 | 214 | ||
215 | static int __init jz4740_hwmon_init(void) | 215 | module_platform_driver(jz4740_hwmon_driver); |
216 | { | ||
217 | return platform_driver_register(&jz4740_hwmon_driver); | ||
218 | } | ||
219 | module_init(jz4740_hwmon_init); | ||
220 | |||
221 | static void __exit jz4740_hwmon_exit(void) | ||
222 | { | ||
223 | platform_driver_unregister(&jz4740_hwmon_driver); | ||
224 | } | ||
225 | module_exit(jz4740_hwmon_exit); | ||
226 | 216 | ||
227 | MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); | 217 | MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); |
228 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); | 218 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index eab11615dced..9b382ec2c3bd 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = { | |||
432 | .id_table = ntc_thermistor_id, | 432 | .id_table = ntc_thermistor_id, |
433 | }; | 433 | }; |
434 | 434 | ||
435 | static int __init ntc_thermistor_init(void) | 435 | module_platform_driver(ntc_thermistor_driver); |
436 | { | ||
437 | return platform_driver_register(&ntc_thermistor_driver); | ||
438 | } | ||
439 | |||
440 | module_init(ntc_thermistor_init); | ||
441 | |||
442 | static void __exit ntc_thermistor_cleanup(void) | ||
443 | { | ||
444 | platform_driver_unregister(&ntc_thermistor_driver); | ||
445 | } | ||
446 | |||
447 | module_exit(ntc_thermistor_cleanup); | ||
448 | 436 | ||
449 | MODULE_DESCRIPTION("NTC Thermistor Driver"); | 437 | MODULE_DESCRIPTION("NTC Thermistor Driver"); |
450 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | 438 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); |
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c index b39f52e2752a..f6c26d19f521 100644 --- a/drivers/hwmon/s3c-hwmon.c +++ b/drivers/hwmon/s3c-hwmon.c | |||
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = { | |||
393 | .remove = __devexit_p(s3c_hwmon_remove), | 393 | .remove = __devexit_p(s3c_hwmon_remove), |
394 | }; | 394 | }; |
395 | 395 | ||
396 | static int __init s3c_hwmon_init(void) | 396 | module_platform_driver(s3c_hwmon_driver); |
397 | { | ||
398 | return platform_driver_register(&s3c_hwmon_driver); | ||
399 | } | ||
400 | |||
401 | static void __exit s3c_hwmon_exit(void) | ||
402 | { | ||
403 | platform_driver_unregister(&s3c_hwmon_driver); | ||
404 | } | ||
405 | |||
406 | module_init(s3c_hwmon_init); | ||
407 | module_exit(s3c_hwmon_exit); | ||
408 | 397 | ||
409 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); | 398 | MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); |
410 | MODULE_DESCRIPTION("S3C ADC HWMon driver"); | 399 | MODULE_DESCRIPTION("S3C ADC HWMon driver"); |
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c index e3b5c6039c25..79b6dabe3161 100644 --- a/drivers/hwmon/sch5627.c +++ b/drivers/hwmon/sch5627.c | |||
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = { | |||
590 | .remove = sch5627_remove, | 590 | .remove = sch5627_remove, |
591 | }; | 591 | }; |
592 | 592 | ||
593 | static int __init sch5627_init(void) | 593 | module_platform_driver(sch5627_driver); |
594 | { | ||
595 | return platform_driver_register(&sch5627_driver); | ||
596 | } | ||
597 | |||
598 | static void __exit sch5627_exit(void) | ||
599 | { | ||
600 | platform_driver_unregister(&sch5627_driver); | ||
601 | } | ||
602 | 594 | ||
603 | MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); | 595 | MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); |
604 | MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); | 596 | MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); |
605 | MODULE_LICENSE("GPL"); | 597 | MODULE_LICENSE("GPL"); |
606 | |||
607 | module_init(sch5627_init); | ||
608 | module_exit(sch5627_exit); | ||
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c index 244407aa79fc..9d5236fb09b4 100644 --- a/drivers/hwmon/sch5636.c +++ b/drivers/hwmon/sch5636.c | |||
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = { | |||
521 | .remove = sch5636_remove, | 521 | .remove = sch5636_remove, |
522 | }; | 522 | }; |
523 | 523 | ||
524 | static int __init sch5636_init(void) | 524 | module_platform_driver(sch5636_driver); |
525 | { | ||
526 | return platform_driver_register(&sch5636_driver); | ||
527 | } | ||
528 | |||
529 | static void __exit sch5636_exit(void) | ||
530 | { | ||
531 | platform_driver_unregister(&sch5636_driver); | ||
532 | } | ||
533 | 525 | ||
534 | MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); | 526 | MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); |
535 | MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); | 527 | MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); |
536 | MODULE_LICENSE("GPL"); | 528 | MODULE_LICENSE("GPL"); |
537 | |||
538 | module_init(sch5636_init); | ||
539 | module_exit(sch5636_exit); | ||
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c index 57240740b161..0018c7dd0097 100644 --- a/drivers/hwmon/twl4030-madc-hwmon.c +++ b/drivers/hwmon/twl4030-madc-hwmon.c | |||
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = { | |||
136 | }, | 136 | }, |
137 | }; | 137 | }; |
138 | 138 | ||
139 | static int __init twl4030_madc_hwmon_init(void) | 139 | module_platform_driver(twl4030_madc_hwmon_driver); |
140 | { | ||
141 | return platform_driver_register(&twl4030_madc_hwmon_driver); | ||
142 | } | ||
143 | |||
144 | module_init(twl4030_madc_hwmon_init); | ||
145 | |||
146 | static void __exit twl4030_madc_hwmon_exit(void) | ||
147 | { | ||
148 | platform_driver_unregister(&twl4030_madc_hwmon_driver); | ||
149 | } | ||
150 | |||
151 | module_exit(twl4030_madc_hwmon_exit); | ||
152 | 140 | ||
153 | MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); | 141 | MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); |
154 | MODULE_LICENSE("GPL"); | 142 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c index 3cd07bf42dca..b9a87e89bab4 100644 --- a/drivers/hwmon/ultra45_env.c +++ b/drivers/hwmon/ultra45_env.c | |||
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = { | |||
309 | .remove = __devexit_p(env_remove), | 309 | .remove = __devexit_p(env_remove), |
310 | }; | 310 | }; |
311 | 311 | ||
312 | static int __init env_init(void) | 312 | module_platform_driver(env_driver); |
313 | { | ||
314 | return platform_driver_register(&env_driver); | ||
315 | } | ||
316 | |||
317 | static void __exit env_exit(void) | ||
318 | { | ||
319 | platform_driver_unregister(&env_driver); | ||
320 | } | ||
321 | |||
322 | module_init(env_init); | ||
323 | module_exit(env_exit); | ||
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c index 97b1f834a471..9b598ed26020 100644 --- a/drivers/hwmon/wm831x-hwmon.c +++ b/drivers/hwmon/wm831x-hwmon.c | |||
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = { | |||
209 | }, | 209 | }, |
210 | }; | 210 | }; |
211 | 211 | ||
212 | static int __init wm831x_hwmon_init(void) | 212 | module_platform_driver(wm831x_hwmon_driver); |
213 | { | ||
214 | return platform_driver_register(&wm831x_hwmon_driver); | ||
215 | } | ||
216 | module_init(wm831x_hwmon_init); | ||
217 | |||
218 | static void __exit wm831x_hwmon_exit(void) | ||
219 | { | ||
220 | platform_driver_unregister(&wm831x_hwmon_driver); | ||
221 | } | ||
222 | module_exit(wm831x_hwmon_exit); | ||
223 | 213 | ||
224 | MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); | 214 | MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); |
225 | MODULE_DESCRIPTION("WM831x Hardware Monitoring"); | 215 | MODULE_DESCRIPTION("WM831x Hardware Monitoring"); |
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c index 13290595ca86..3ff67edbdc44 100644 --- a/drivers/hwmon/wm8350-hwmon.c +++ b/drivers/hwmon/wm8350-hwmon.c | |||
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = { | |||
133 | }, | 133 | }, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int __init wm8350_hwmon_init(void) | 136 | module_platform_driver(wm8350_hwmon_driver); |
137 | { | ||
138 | return platform_driver_register(&wm8350_hwmon_driver); | ||
139 | } | ||
140 | module_init(wm8350_hwmon_init); | ||
141 | |||
142 | static void __exit wm8350_hwmon_exit(void) | ||
143 | { | ||
144 | platform_driver_unregister(&wm8350_hwmon_driver); | ||
145 | } | ||
146 | module_exit(wm8350_hwmon_exit); | ||
147 | 137 | ||
148 | MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); | 138 | MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); |
149 | MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); | 139 | MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); |
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 85584a547c25..525c7345fa0b 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
488 | 488 | ||
489 | if (flags & I2C_M_TEN) { | 489 | if (flags & I2C_M_TEN) { |
490 | /* a ten bit address */ | 490 | /* a ten bit address */ |
491 | addr = 0xf0 | ((msg->addr >> 7) & 0x03); | 491 | addr = 0xf0 | ((msg->addr >> 7) & 0x06); |
492 | bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); | 492 | bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); |
493 | /* try extended address code...*/ | 493 | /* try extended address code...*/ |
494 | ret = try_address(i2c_adap, addr, retries); | 494 | ret = try_address(i2c_adap, addr, retries); |
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
498 | return -ENXIO; | 498 | return -ENXIO; |
499 | } | 499 | } |
500 | /* the remaining 8 bit address */ | 500 | /* the remaining 8 bit address */ |
501 | ret = i2c_outb(i2c_adap, msg->addr & 0x7f); | 501 | ret = i2c_outb(i2c_adap, msg->addr & 0xff); |
502 | if ((ret != 1) && !nak_ok) { | 502 | if ((ret != 1) && !nak_ok) { |
503 | /* the chip did not ack / xmission error occurred */ | 503 | /* the chip did not ack / xmission error occurred */ |
504 | dev_err(&i2c_adap->dev, "died at 2nd address code\n"); | 504 | dev_err(&i2c_adap->dev, "died at 2nd address code\n"); |
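Note: both mask changes in this hunk follow standard 10-bit I2C addressing. The first byte on the wire is 11110 a9 a8 0, so after shifting the address right by 7 the two high bits must land in bit positions 2:1 (mask 0x06, not 0x03), and the second byte carries address bits 7:0 in full (mask 0xff, not 0x7f). A small hypothetical helper, not part of the driver, showing the framing the fixed masks produce:

	static void split_tenbit_addr(unsigned short addr,
				      unsigned char *first, unsigned char *second)
	{
		*first  = 0xf0 | ((addr >> 7) & 0x06);	/* 1 1 1 1 0 a9 a8 0 */
		*second = addr & 0xff;			/* a7 .. a0 */
	}
	/* e.g. addr = 0x2a5 -> first = 0xf4, second = 0xa5 */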
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c index 835e47b39bc2..03b615778887 100644 --- a/drivers/i2c/busses/i2c-nuc900.c +++ b/drivers/i2c/busses/i2c-nuc900.c | |||
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev) | |||
593 | i2c->adap.algo_data = i2c; | 593 | i2c->adap.algo_data = i2c; |
594 | i2c->adap.dev.parent = &pdev->dev; | 594 | i2c->adap.dev.parent = &pdev->dev; |
595 | 595 | ||
596 | mfp_set_groupg(&pdev->dev); | 596 | mfp_set_groupg(&pdev->dev, NULL); |
597 | 597 | ||
598 | clk_get_rate(i2c->clk); | 598 | clk_get_rate(i2c->clk); |
599 | 599 | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 131079a3e292..1e5606185b4f 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) | |||
539 | client->dev.type = &i2c_client_type; | 539 | client->dev.type = &i2c_client_type; |
540 | client->dev.of_node = info->of_node; | 540 | client->dev.of_node = info->of_node; |
541 | 541 | ||
542 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ | ||
542 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), | 543 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), |
543 | client->addr); | 544 | client->addr | ((client->flags & I2C_CLIENT_TEN) |
545 | ? 0xa000 : 0)); | ||
544 | status = device_register(&client->dev); | 546 | status = device_register(&client->dev); |
545 | if (status) | 547 | if (status) |
546 | goto out_err; | 548 | goto out_err; |
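Note: with this change a 10-bit client is registered under a device name offset by 0xa000, so it can no longer collide with a 7-bit client at the same numeric address on the same adapter. A hedged sketch of the resulting names (variable names and addresses here are illustrative):

	/* same name format as the dev_set_name() call above */
	snprintf(name, sizeof(name), "%d-%04x", adapter_id,
		 addr | (is_ten_bit ? 0xa000 : 0));
	/* 7-bit client 0x50 on adapter 0  -> "0-0050" */
	/* 10-bit client 0x50 on adapter 0 -> "0-a050" */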
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c90ce50b619f..57a45ce84b2d 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c | |||
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) | |||
579 | return 0; | 579 | return 0; |
580 | } | 580 | } |
581 | 581 | ||
582 | int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, | 582 | static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, |
583 | void *data) | 583 | void *data) |
584 | { | 584 | { |
585 | struct device *dev = data; | 585 | struct device *dev = data; |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index adf0757280ed..a20c3c8224ea 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in, | |||
216 | 216 | ||
217 | neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); | 217 | neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); |
218 | if (!neigh || !(neigh->nud_state & NUD_VALID)) { | 218 | if (!neigh || !(neigh->nud_state & NUD_VALID)) { |
219 | rcu_read_lock(); | ||
219 | neigh_event_send(dst_get_neighbour(&rt->dst), NULL); | 220 | neigh_event_send(dst_get_neighbour(&rt->dst), NULL); |
221 | rcu_read_unlock(); | ||
220 | ret = -ENODATA; | 222 | ret = -ENODATA; |
221 | if (neigh) | 223 | if (neigh) |
222 | goto release; | 224 | goto release; |
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
274 | goto put; | 276 | goto put; |
275 | } | 277 | } |
276 | 278 | ||
279 | rcu_read_lock(); | ||
277 | neigh = dst_get_neighbour(dst); | 280 | neigh = dst_get_neighbour(dst); |
278 | if (!neigh || !(neigh->nud_state & NUD_VALID)) { | 281 | if (!neigh || !(neigh->nud_state & NUD_VALID)) { |
279 | if (neigh) | 282 | if (neigh) |
280 | neigh_event_send(neigh, NULL); | 283 | neigh_event_send(neigh, NULL); |
281 | ret = -ENODATA; | 284 | ret = -ENODATA; |
282 | goto put; | 285 | } else { |
286 | ret = rdma_copy_addr(addr, dst->dev, neigh->ha); | ||
283 | } | 287 | } |
284 | 288 | rcu_read_unlock(); | |
285 | ret = rdma_copy_addr(addr, dst->dev, neigh->ha); | ||
286 | put: | 289 | put: |
287 | dst_release(dst); | 290 | dst_release(dst); |
288 | return ret; | 291 | return ret; |
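Note: this hunk and the InfiniBand hunks that follow share one pattern: dst_get_neighbour() returns an RCU-protected pointer, so every dereference of the result is now bracketed by rcu_read_lock()/rcu_read_unlock(). A generic sketch of the pattern (use_neighbour() is a placeholder, not a real kernel call):

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);		/* RCU-protected lookup */
	if (neigh)
		use_neighbour(neigh);		/* must complete before unlocking */
	rcu_read_unlock();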
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index de6d0774e609..c88b12beef25 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1375 | goto reject; | 1375 | goto reject; |
1376 | } | 1376 | } |
1377 | dst = &rt->dst; | 1377 | dst = &rt->dst; |
1378 | rcu_read_lock(); | ||
1378 | neigh = dst_get_neighbour(dst); | 1379 | neigh = dst_get_neighbour(dst); |
1379 | l2t = t3_l2t_get(tdev, neigh, neigh->dev); | 1380 | l2t = t3_l2t_get(tdev, neigh, neigh->dev); |
1381 | rcu_read_unlock(); | ||
1380 | if (!l2t) { | 1382 | if (!l2t) { |
1381 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", | 1383 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", |
1382 | __func__); | 1384 | __func__); |
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1946 | } | 1948 | } |
1947 | ep->dst = &rt->dst; | 1949 | ep->dst = &rt->dst; |
1948 | 1950 | ||
1951 | rcu_read_lock(); | ||
1949 | neigh = dst_get_neighbour(ep->dst); | 1952 | neigh = dst_get_neighbour(ep->dst); |
1950 | 1953 | ||
1951 | /* get a l2t entry */ | 1954 | /* get a l2t entry */ |
1952 | ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); | 1955 | ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); |
1956 | rcu_read_unlock(); | ||
1953 | if (!ep->l2t) { | 1957 | if (!ep->l2t) { |
1954 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 1958 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
1955 | err = -ENOMEM; | 1959 | err = -ENOMEM; |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b36cdac9c558..0747004313ad 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, | |||
542 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); | 542 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); |
543 | mpa->private_data_size = htons(ep->plen); | 543 | mpa->private_data_size = htons(ep->plen); |
544 | mpa->revision = mpa_rev_to_use; | 544 | mpa->revision = mpa_rev_to_use; |
545 | if (mpa_rev_to_use == 1) | 545 | if (mpa_rev_to_use == 1) { |
546 | ep->tried_with_mpa_v1 = 1; | 546 | ep->tried_with_mpa_v1 = 1; |
547 | ep->retry_with_mpa_v1 = 0; | ||
548 | } | ||
547 | 549 | ||
548 | if (mpa_rev_to_use == 2) { | 550 | if (mpa_rev_to_use == 2) { |
549 | mpa->private_data_size += | 551 | mpa->private_data_size += |
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1594 | goto reject; | 1596 | goto reject; |
1595 | } | 1597 | } |
1596 | dst = &rt->dst; | 1598 | dst = &rt->dst; |
1599 | rcu_read_lock(); | ||
1597 | neigh = dst_get_neighbour(dst); | 1600 | neigh = dst_get_neighbour(dst); |
1598 | if (neigh->dev->flags & IFF_LOOPBACK) { | 1601 | if (neigh->dev->flags & IFF_LOOPBACK) { |
1599 | pdev = ip_dev_find(&init_net, peer_ip); | 1602 | pdev = ip_dev_find(&init_net, peer_ip); |
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1620 | rss_qid = dev->rdev.lldi.rxq_ids[ | 1623 | rss_qid = dev->rdev.lldi.rxq_ids[ |
1621 | cxgb4_port_idx(neigh->dev) * step]; | 1624 | cxgb4_port_idx(neigh->dev) * step]; |
1622 | } | 1625 | } |
1626 | rcu_read_unlock(); | ||
1623 | if (!l2t) { | 1627 | if (!l2t) { |
1624 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", | 1628 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", |
1625 | __func__); | 1629 | __func__); |
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) | |||
1820 | } | 1824 | } |
1821 | ep->dst = &rt->dst; | 1825 | ep->dst = &rt->dst; |
1822 | 1826 | ||
1827 | rcu_read_lock(); | ||
1823 | neigh = dst_get_neighbour(ep->dst); | 1828 | neigh = dst_get_neighbour(ep->dst); |
1824 | 1829 | ||
1825 | /* get a l2t entry */ | 1830 | /* get a l2t entry */ |
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) | |||
1856 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | 1861 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ |
1857 | cxgb4_port_idx(neigh->dev) * step]; | 1862 | cxgb4_port_idx(neigh->dev) * step]; |
1858 | } | 1863 | } |
1864 | rcu_read_unlock(); | ||
1859 | if (!ep->l2t) { | 1865 | if (!ep->l2t) { |
1860 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 1866 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
1861 | err = -ENOMEM; | 1867 | err = -ENOMEM; |
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2301 | } | 2307 | } |
2302 | ep->dst = &rt->dst; | 2308 | ep->dst = &rt->dst; |
2303 | 2309 | ||
2310 | rcu_read_lock(); | ||
2304 | neigh = dst_get_neighbour(ep->dst); | 2311 | neigh = dst_get_neighbour(ep->dst); |
2305 | 2312 | ||
2306 | /* get a l2t entry */ | 2313 | /* get a l2t entry */ |
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2339 | ep->retry_with_mpa_v1 = 0; | 2346 | ep->retry_with_mpa_v1 = 0; |
2340 | ep->tried_with_mpa_v1 = 0; | 2347 | ep->tried_with_mpa_v1 = 0; |
2341 | } | 2348 | } |
2349 | rcu_read_unlock(); | ||
2342 | if (!ep->l2t) { | 2350 | if (!ep->l2t) { |
2343 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | 2351 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
2344 | err = -ENOMEM; | 2352 | err = -ENOMEM; |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index f35a935267e7..0f1607c8325a 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) | |||
311 | while (ptr != cq->sw_pidx) { | 311 | while (ptr != cq->sw_pidx) { |
312 | cqe = &cq->sw_queue[ptr]; | 312 | cqe = &cq->sw_queue[ptr]; |
313 | if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && | 313 | if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && |
314 | (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq)) | 314 | (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) |
315 | (*count)++; | 315 | (*count)++; |
316 | if (++ptr == cq->size) | 316 | if (++ptr == cq->size) |
317 | ptr = 0; | 317 | ptr = 0; |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index dfce9ea98a39..0a52d72371ee 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1377 | neigh_release(neigh); | 1377 | neigh_release(neigh); |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) | 1380 | if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) { |
1381 | rcu_read_lock(); | ||
1381 | neigh_event_send(dst_get_neighbour(&rt->dst), NULL); | 1382 | neigh_event_send(dst_get_neighbour(&rt->dst), NULL); |
1382 | 1383 | rcu_read_unlock(); | |
1384 | } | ||
1383 | ip_rt_put(rt); | 1385 | ip_rt_put(rt); |
1384 | return rc; | 1386 | return rc; |
1385 | } | 1387 | } |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5bd2162b95dc..1d5895941e19 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2307 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | 2307 | SYM_LSB(IBCCtrlA_0, MaxPktLen); |
2308 | ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ | 2308 | ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ |
2309 | 2309 | ||
2310 | /* initially come up waiting for TS1, without sending anything. */ | ||
2311 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
2312 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
2313 | |||
2314 | ppd->cpspec->ibcctrl_a = val; | ||
2315 | /* | 2310 | /* |
2316 | * Reset the PCS interface to the serdes (and also ibc, which is still | 2311 | * Reset the PCS interface to the serdes (and also ibc, which is still |
2317 | * in reset from above). Writes new value of ibcctrl_a as last step. | 2312 | * in reset from above). Writes new value of ibcctrl_a as last step. |
2318 | */ | 2313 | */ |
2319 | qib_7322_mini_pcs_reset(ppd); | 2314 | qib_7322_mini_pcs_reset(ppd); |
2320 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2321 | /* clear the linkinit cmds */ | ||
2322 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | ||
2323 | 2315 | ||
2324 | if (!ppd->cpspec->ibcctrl_b) { | 2316 | if (!ppd->cpspec->ibcctrl_b) { |
2325 | unsigned lse = ppd->link_speed_enabled; | 2317 | unsigned lse = ppd->link_speed_enabled; |
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2385 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); | 2377 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); |
2386 | set_vls(ppd); | 2378 | set_vls(ppd); |
2387 | 2379 | ||
2380 | /* initially come up DISABLED, without sending anything. */ | ||
2381 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
2382 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
2383 | qib_write_kreg_port(ppd, krp_ibcctrl_a, val); | ||
2384 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2385 | /* clear the linkinit cmds */ | ||
2386 | ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | ||
2387 | |||
2388 | /* be paranoid against later code motion, etc. */ | 2388 | /* be paranoid against later code motion, etc. */ |
2389 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | 2389 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); |
2390 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); | 2390 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); |
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |||
5241 | off */ | 5241 | off */ |
5242 | if (ppd->dd->flags & QIB_HAS_QSFP) { | 5242 | if (ppd->dd->flags & QIB_HAS_QSFP) { |
5243 | qd->t_insert = get_jiffies_64(); | 5243 | qd->t_insert = get_jiffies_64(); |
5244 | schedule_work(&qd->work); | 5244 | queue_work(ib_wq, &qd->work); |
5245 | } | 5245 | } |
5246 | spin_lock_irqsave(&ppd->sdma_lock, flags); | 5246 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
5247 | if (__qib_sdma_running(ppd)) | 5247 | if (__qib_sdma_running(ppd)) |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index e06c4ed383f1..fa71b1e666c5 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c | |||
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
480 | udelay(20); /* Generous RST dwell */ | 480 | udelay(20); /* Generous RST dwell */ |
481 | 481 | ||
482 | dd->f_gpio_mod(dd, mask, mask, mask); | 482 | dd->f_gpio_mod(dd, mask, mask, mask); |
483 | /* Spec says module can take up to two seconds! */ | ||
484 | mask = QSFP_GPIO_MOD_PRS_N; | ||
485 | if (qd->ppd->hw_pidx) | ||
486 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
487 | |||
488 | /* Do not try to wait here. Better to let event handle it */ | ||
489 | if (!qib_qsfp_mod_present(qd->ppd)) | ||
490 | goto bail; | ||
491 | /* We see a module, but it may be unwise to look yet. Just schedule */ | ||
492 | qd->t_insert = get_jiffies_64(); | ||
493 | queue_work(ib_wq, &qd->work); | ||
494 | bail: | ||
495 | return; | 483 | return; |
496 | } | 484 | } |
497 | 485 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0ef9af94997d..4115be54ba3b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev, | |||
57 | struct ib_pd *pd, struct ib_ah_attr *attr) | 57 | struct ib_pd *pd, struct ib_ah_attr *attr) |
58 | { | 58 | { |
59 | struct ipoib_ah *ah; | 59 | struct ipoib_ah *ah; |
60 | struct ib_ah *vah; | ||
60 | 61 | ||
61 | ah = kmalloc(sizeof *ah, GFP_KERNEL); | 62 | ah = kmalloc(sizeof *ah, GFP_KERNEL); |
62 | if (!ah) | 63 | if (!ah) |
63 | return NULL; | 64 | return ERR_PTR(-ENOMEM); |
64 | 65 | ||
65 | ah->dev = dev; | 66 | ah->dev = dev; |
66 | ah->last_send = 0; | 67 | ah->last_send = 0; |
67 | kref_init(&ah->ref); | 68 | kref_init(&ah->ref); |
68 | 69 | ||
69 | ah->ah = ib_create_ah(pd, attr); | 70 | vah = ib_create_ah(pd, attr); |
70 | if (IS_ERR(ah->ah)) { | 71 | if (IS_ERR(vah)) { |
71 | kfree(ah); | 72 | kfree(ah); |
72 | ah = NULL; | 73 | ah = (struct ipoib_ah *)vah; |
73 | } else | 74 | } else { |
75 | ah->ah = vah; | ||
74 | ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); | 76 | ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); |
77 | } | ||
75 | 78 | ||
76 | return ah; | 79 | return ah; |
77 | } | 80 | } |
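Note: ipoib_create_ah() now reports failure through the kernel's ERR_PTR convention instead of returning NULL, which is why the callers changed below switch to IS_ERR()/IS_ERR_OR_NULL() and can propagate the original errno. Generic sketch of the convention (not driver code):

	struct ipoib_ah *ah = ipoib_create_ah(dev, pd, &attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);	/* e.g. -ENOMEM, preserved from the failure site */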
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 57ae9b9265e3..d3ed89ca4852 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -432,7 +432,7 @@ static void path_rec_completion(int status, | |||
432 | 432 | ||
433 | spin_lock_irqsave(&priv->lock, flags); | 433 | spin_lock_irqsave(&priv->lock, flags); |
434 | 434 | ||
435 | if (ah) { | 435 | if (!IS_ERR_OR_NULL(ah)) { |
436 | path->pathrec = *pathrec; | 436 | path->pathrec = *pathrec; |
437 | 437 | ||
438 | old_ah = path->ah; | 438 | old_ah = path->ah; |
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev, | |||
555 | return 0; | 555 | return 0; |
556 | } | 556 | } |
557 | 557 | ||
558 | /* called with rcu_read_lock */ | ||
558 | static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) | 559 | static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) |
559 | { | 560 | { |
560 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 561 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
@@ -636,6 +637,7 @@ err_drop: | |||
636 | spin_unlock_irqrestore(&priv->lock, flags); | 637 | spin_unlock_irqrestore(&priv->lock, flags); |
637 | } | 638 | } |
638 | 639 | ||
640 | /* called with rcu_read_lock */ | ||
639 | static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) | 641 | static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) |
640 | { | 642 | { |
641 | struct ipoib_dev_priv *priv = netdev_priv(skb->dev); | 643 | struct ipoib_dev_priv *priv = netdev_priv(skb->dev); |
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
720 | struct neighbour *n = NULL; | 722 | struct neighbour *n = NULL; |
721 | unsigned long flags; | 723 | unsigned long flags; |
722 | 724 | ||
725 | rcu_read_lock(); | ||
723 | if (likely(skb_dst(skb))) | 726 | if (likely(skb_dst(skb))) |
724 | n = dst_get_neighbour(skb_dst(skb)); | 727 | n = dst_get_neighbour(skb_dst(skb)); |
725 | 728 | ||
726 | if (likely(n)) { | 729 | if (likely(n)) { |
727 | if (unlikely(!*to_ipoib_neigh(n))) { | 730 | if (unlikely(!*to_ipoib_neigh(n))) { |
728 | ipoib_path_lookup(skb, dev); | 731 | ipoib_path_lookup(skb, dev); |
729 | return NETDEV_TX_OK; | 732 | goto unlock; |
730 | } | 733 | } |
731 | 734 | ||
732 | neigh = *to_ipoib_neigh(n); | 735 | neigh = *to_ipoib_neigh(n); |
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
749 | ipoib_neigh_free(dev, neigh); | 752 | ipoib_neigh_free(dev, neigh); |
750 | spin_unlock_irqrestore(&priv->lock, flags); | 753 | spin_unlock_irqrestore(&priv->lock, flags); |
751 | ipoib_path_lookup(skb, dev); | 754 | ipoib_path_lookup(skb, dev); |
752 | return NETDEV_TX_OK; | 755 | goto unlock; |
753 | } | 756 | } |
754 | 757 | ||
755 | if (ipoib_cm_get(neigh)) { | 758 | if (ipoib_cm_get(neigh)) { |
756 | if (ipoib_cm_up(neigh)) { | 759 | if (ipoib_cm_up(neigh)) { |
757 | ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); | 760 | ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); |
758 | return NETDEV_TX_OK; | 761 | goto unlock; |
759 | } | 762 | } |
760 | } else if (neigh->ah) { | 763 | } else if (neigh->ah) { |
761 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); | 764 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); |
762 | return NETDEV_TX_OK; | 765 | goto unlock; |
763 | } | 766 | } |
764 | 767 | ||
765 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 768 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
793 | phdr->hwaddr + 4); | 796 | phdr->hwaddr + 4); |
794 | dev_kfree_skb_any(skb); | 797 | dev_kfree_skb_any(skb); |
795 | ++dev->stats.tx_dropped; | 798 | ++dev->stats.tx_dropped; |
796 | return NETDEV_TX_OK; | 799 | goto unlock; |
797 | } | 800 | } |
798 | 801 | ||
799 | unicast_arp_send(skb, dev, phdr); | 802 | unicast_arp_send(skb, dev, phdr); |
800 | } | 803 | } |
801 | } | 804 | } |
802 | 805 | unlock: | |
806 | rcu_read_unlock(); | ||
803 | return NETDEV_TX_OK; | 807 | return NETDEV_TX_OK; |
804 | } | 808 | } |
805 | 809 | ||
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
837 | dst = skb_dst(skb); | 841 | dst = skb_dst(skb); |
838 | n = NULL; | 842 | n = NULL; |
839 | if (dst) | 843 | if (dst) |
840 | n = dst_get_neighbour(dst); | 844 | n = dst_get_neighbour_raw(dst); |
841 | if ((!dst || !n) && daddr) { | 845 | if ((!dst || !n) && daddr) { |
842 | struct ipoib_pseudoheader *phdr = | 846 | struct ipoib_pseudoheader *phdr = |
843 | (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); | 847 | (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 1b7a97686356..873bff97e69e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
240 | av.grh.dgid = mcast->mcmember.mgid; | 240 | av.grh.dgid = mcast->mcmember.mgid; |
241 | 241 | ||
242 | ah = ipoib_create_ah(dev, priv->pd, &av); | 242 | ah = ipoib_create_ah(dev, priv->pd, &av); |
243 | if (!ah) { | 243 | if (IS_ERR(ah)) { |
244 | ipoib_warn(priv, "ib_address_create failed\n"); | 244 | ipoib_warn(priv, "ib_address_create failed %ld\n", |
245 | -PTR_ERR(ah)); | ||
246 | /* use original error */ | ||
247 | return PTR_ERR(ah); | ||
245 | } else { | 248 | } else { |
246 | spin_lock_irq(&priv->lock); | 249 | spin_lock_irq(&priv->lock); |
247 | mcast->ah = ah; | 250 | mcast->ah = ah; |
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
266 | 269 | ||
267 | skb->dev = dev; | 270 | skb->dev = dev; |
268 | if (dst) | 271 | if (dst) |
269 | n = dst_get_neighbour(dst); | 272 | n = dst_get_neighbour_raw(dst); |
270 | if (!dst || !n) { | 273 | if (!dst || !n) { |
271 | /* put pseudoheader back on for next time */ | 274 | /* put pseudoheader back on for next time */ |
272 | skb_push(skb, sizeof (struct ipoib_pseudoheader)); | 275 | skb_push(skb, sizeof (struct ipoib_pseudoheader)); |
@@ -722,6 +725,8 @@ out: | |||
722 | if (mcast && mcast->ah) { | 725 | if (mcast && mcast->ah) { |
723 | struct dst_entry *dst = skb_dst(skb); | 726 | struct dst_entry *dst = skb_dst(skb); |
724 | struct neighbour *n = NULL; | 727 | struct neighbour *n = NULL; |
728 | |||
729 | rcu_read_lock(); | ||
725 | if (dst) | 730 | if (dst) |
726 | n = dst_get_neighbour(dst); | 731 | n = dst_get_neighbour(dst); |
727 | if (n && !*to_ipoib_neigh(n)) { | 732 | if (n && !*to_ipoib_neigh(n)) { |
@@ -734,7 +739,7 @@ out: | |||
734 | list_add_tail(&neigh->list, &mcast->neigh_list); | 739 | list_add_tail(&neigh->list, &mcast->neigh_list); |
735 | } | 740 | } |
736 | } | 741 | } |
737 | 742 | rcu_read_unlock(); | |
738 | spin_unlock_irqrestore(&priv->lock, flags); | 743 | spin_unlock_irqrestore(&priv->lock, flags); |
739 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); | 744 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); |
740 | return; | 745 | return; |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 09b93b11a274..e2a9867c19d5 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse) | |||
1210 | */ | 1210 | */ |
1211 | static int elantech_set_properties(struct elantech_data *etd) | 1211 | static int elantech_set_properties(struct elantech_data *etd) |
1212 | { | 1212 | { |
1213 | /* This represents the version of IC body. */ | ||
1213 | int ver = (etd->fw_version & 0x0f0000) >> 16; | 1214 | int ver = (etd->fw_version & 0x0f0000) >> 16; |
1214 | 1215 | ||
1216 | /* Early versions of Elan touchpads don't obey the rule. */ | ||
1215 | if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) | 1217 | if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) |
1216 | etd->hw_version = 1; | 1218 | etd->hw_version = 1; |
1217 | else if (etd->fw_version < 0x150600) | 1219 | else { |
1218 | etd->hw_version = 2; | 1220 | switch (ver) { |
1219 | else if (ver == 5) | 1221 | case 2: |
1220 | etd->hw_version = 3; | 1222 | case 4: |
1221 | else if (ver == 6) | 1223 | etd->hw_version = 2; |
1222 | etd->hw_version = 4; | 1224 | break; |
1223 | else | 1225 | case 5: |
1224 | return -1; | 1226 | etd->hw_version = 3; |
1227 | break; | ||
1228 | case 6: | ||
1229 | etd->hw_version = 4; | ||
1230 | break; | ||
1231 | default: | ||
1232 | return -1; | ||
1233 | } | ||
1234 | } | ||
1225 | 1235 | ||
1226 | /* | 1236 | /* |
1227 | * Turn on packet checking by default. | 1237 | * Turn on packet checking by default. |
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c index 4b2a42f9f0bb..d4d08bd9205b 100644 --- a/drivers/input/serio/ams_delta_serio.c +++ b/drivers/input/serio/ams_delta_serio.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/serio.h> | 25 | #include <linux/serio.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/module.h> | ||
27 | 28 | ||
28 | #include <asm/mach-types.h> | 29 | #include <asm/mach-types.h> |
29 | #include <plat/board-ams-delta.h> | 30 | #include <plat/board-ams-delta.h> |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index bb9f5d31f0d0..b4cfc6c8be89 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
431 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), | 431 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), |
432 | }, | 432 | }, |
433 | }, | 433 | }, |
434 | { | ||
435 | /* Newer HP Pavilion dv4 models */ | ||
436 | .matches = { | ||
437 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
438 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), | ||
439 | }, | ||
440 | }, | ||
434 | { } | 441 | { } |
435 | }; | 442 | }; |
436 | 443 | ||
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { | |||
560 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), | 567 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), |
561 | }, | 568 | }, |
562 | }, | 569 | }, |
570 | { | ||
571 | /* Newer HP Pavilion dv4 models */ | ||
572 | .matches = { | ||
573 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
574 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), | ||
575 | }, | ||
576 | }, | ||
563 | { } | 577 | { } |
564 | }; | 578 | }; |
565 | 579 | ||
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 33ec9e467772..9021182c4b76 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c | |||
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) | |||
242 | case IIOCDOCFINT: | 242 | case IIOCDOCFINT: |
243 | if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) | 243 | if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) |
244 | return (-EINVAL); /* invalid driver */ | 244 | return (-EINVAL); /* invalid driver */ |
245 | if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == | ||
246 | sizeof(dioctl.cf_ctrl.msn)) | ||
247 | return -EINVAL; | ||
248 | if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == | ||
249 | sizeof(dioctl.cf_ctrl.fwd_nr)) | ||
250 | return -EINVAL; | ||
245 | if ((i = cf_command(dioctl.cf_ctrl.drvid, | 251 | if ((i = cf_command(dioctl.cf_ctrl.drvid, |
246 | (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, | 252 | (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, |
247 | dioctl.cf_ctrl.cfproc, | 253 | dioctl.cf_ctrl.cfproc, |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 1f73d7f7e024..2339d7396b9e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) | |||
2756 | char *c, | 2756 | char *c, |
2757 | *e; | 2757 | *e; |
2758 | 2758 | ||
2759 | if (strnlen(cfg->drvid, sizeof(cfg->drvid)) == | ||
2760 | sizeof(cfg->drvid)) | ||
2761 | return -EINVAL; | ||
2759 | drvidx = -1; | 2762 | drvidx = -1; |
2760 | chidx = -1; | 2763 | chidx = -1; |
2761 | strcpy(drvid, cfg->drvid); | 2764 | strcpy(drvid, cfg->drvid); |
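Note: both ISDN hunks add the same guard. If strnlen() over a fixed-size field supplied from userspace equals the field size, the buffer contains no terminating NUL, and the strcpy() that follows would read past it. Generic form of the check (illustrative only):

	/* field[] is a fixed-size, userspace-supplied char array */
	if (strnlen(field, sizeof(field)) == sizeof(field))
		return -EINVAL;		/* no terminating NUL inside the field */
	strcpy(local_copy, field);	/* safe: termination is now guaranteed */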
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index d593878d66d0..5664696f2d3a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -472,7 +472,7 @@ config BMP085 | |||
472 | module will be called bmp085. | 472 | module will be called bmp085. |
473 | 473 | ||
474 | config PCH_PHUB | 474 | config PCH_PHUB |
475 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB" | 475 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" |
476 | depends on PCI | 476 | depends on PCI |
477 | help | 477 | help |
478 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of | 478 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of |
@@ -480,12 +480,13 @@ config PCH_PHUB | |||
480 | processor. The Topcliff has MAC address and Option ROM data in SROM. | 480 | processor. The Topcliff has MAC address and Option ROM data in SROM. |
481 | This driver can access MAC address and Option ROM data in SROM. | 481 | This driver can access MAC address and Option ROM data in SROM. |
482 | 482 | ||
483 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ | 483 | This driver also can be used for LAPIS Semiconductor's IOH, |
484 | Output Hub), ML7213 and ML7223. | 484 | ML7213/ML7223/ML7831. |
485 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is | 485 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use. |
486 | for MP(Media Phone) use. | 486 | ML7223 IOH is for MP(Media Phone) use. |
487 | ML7213/ML7223 is companion chip for Intel Atom E6xx series. | 487 | ML7831 IOH is for general purpose use. |
488 | ML7213/ML7223 is completely compatible for Intel EG20T PCH. | 488 | ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. |
489 | ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. | ||
489 | 490 | ||
490 | To compile this driver as a module, choose M here: the module will | 491 | To compile this driver as a module, choose M here: the module will |
491 | be called pch_phub. | 492 | be called pch_phub. |
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h index a662f5987b68..82b2cb77ae19 100644 --- a/drivers/misc/ad525x_dpot.h +++ b/drivers/misc/ad525x_dpot.h | |||
@@ -100,7 +100,7 @@ enum dpot_devid { | |||
100 | AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), | 100 | AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), |
101 | AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, | 101 | AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, |
102 | BRDAC0, 7, 28), | 102 | BRDAC0, 7, 28), |
103 | AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, | 103 | AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, |
104 | BRDAC0, 8, 29), | 104 | BRDAC0, 8, 29), |
105 | AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, | 105 | AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, |
106 | BRDAC0 | BRDAC1, 8, 30), | 106 | BRDAC0 | BRDAC1, 8, 30), |
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index dee33addcaeb..10fc4785dba7 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. | 2 | * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -41,10 +41,10 @@ | |||
41 | #define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset | 41 | #define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset |
42 | (Intel EG20T PCH)*/ | 42 | (Intel EG20T PCH)*/ |
43 | #define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address | 43 | #define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address |
44 | offset(OKI SEMICONDUCTOR ML7213) | 44 | offset(LAPIS Semicon ML7213) |
45 | */ | 45 | */ |
46 | #define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address | 46 | #define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address |
47 | offset(OKI SEMICONDUCTOR ML7223) | 47 | offset(LAPIS Semicon ML7223) |
48 | */ | 48 | */ |
49 | 49 | ||
50 | /* MAX number of INT_REDUCE_CONTROL registers */ | 50 | /* MAX number of INT_REDUCE_CONTROL registers */ |
@@ -73,6 +73,9 @@ | |||
73 | #define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ | 73 | #define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ |
74 | #define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ | 74 | #define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ |
75 | 75 | ||
76 | /* Macros for ML7831 */ | ||
77 | #define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801 | ||
78 | |||
76 | /* SROM ACCESS Macro */ | 79 | /* SROM ACCESS Macro */ |
77 | #define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) | 80 | #define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) |
78 | 81 | ||
@@ -115,6 +118,7 @@ | |||
115 | * @pch_mac_start_address: MAC address area start address | 118 | * @pch_mac_start_address: MAC address area start address |
116 | * @pch_opt_rom_start_address: Option ROM start address | 119 | * @pch_opt_rom_start_address: Option ROM start address |
117 | * @ioh_type: Save IOH type | 120 | * @ioh_type: Save IOH type |
121 | * @pdev: pointer to pci device struct | ||
118 | */ | 122 | */ |
119 | struct pch_phub_reg { | 123 | struct pch_phub_reg { |
120 | u32 phub_id_reg; | 124 | u32 phub_id_reg; |
@@ -136,6 +140,7 @@ struct pch_phub_reg { | |||
136 | u32 pch_mac_start_address; | 140 | u32 pch_mac_start_address; |
137 | u32 pch_opt_rom_start_address; | 141 | u32 pch_opt_rom_start_address; |
138 | int ioh_type; | 142 | int ioh_type; |
143 | struct pci_dev *pdev; | ||
139 | }; | 144 | }; |
140 | 145 | ||
141 | /* SROM SPEC for MAC address assignment offset */ | 146 | /* SROM SPEC for MAC address assignment offset */ |
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) | |||
471 | int retval; | 476 | int retval; |
472 | int i; | 477 | int i; |
473 | 478 | ||
474 | if (chip->ioh_type == 1) /* EG20T */ | 479 | if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/ |
475 | retval = pch_phub_gbe_serial_rom_conf(chip); | 480 | retval = pch_phub_gbe_serial_rom_conf(chip); |
476 | else /* ML7223 */ | 481 | else /* ML7223 */ |
477 | retval = pch_phub_gbe_serial_rom_conf_mp(chip); | 482 | retval = pch_phub_gbe_serial_rom_conf_mp(chip); |
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
498 | unsigned int orom_size; | 503 | unsigned int orom_size; |
499 | int ret; | 504 | int ret; |
500 | int err; | 505 | int err; |
506 | ssize_t rom_size; | ||
501 | 507 | ||
502 | struct pch_phub_reg *chip = | 508 | struct pch_phub_reg *chip = |
503 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | 509 | dev_get_drvdata(container_of(kobj, struct device, kobj)); |
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
509 | } | 515 | } |
510 | 516 | ||
511 | /* Get Rom signature */ | 517 | /* Get Rom signature */ |
518 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); | ||
519 | if (!chip->pch_phub_extrom_base_address) | ||
520 | goto exrom_map_err; | ||
521 | |||
512 | pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, | 522 | pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, |
513 | (unsigned char *)&rom_signature); | 523 | (unsigned char *)&rom_signature); |
514 | rom_signature &= 0xff; | 524 | rom_signature &= 0xff; |
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
539 | goto return_err; | 549 | goto return_err; |
540 | } | 550 | } |
541 | return_ok: | 551 | return_ok: |
552 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
542 | mutex_unlock(&pch_phub_mutex); | 553 | mutex_unlock(&pch_phub_mutex); |
543 | return addr_offset; | 554 | return addr_offset; |
544 | 555 | ||
545 | return_err: | 556 | return_err: |
557 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
558 | exrom_map_err: | ||
546 | mutex_unlock(&pch_phub_mutex); | 559 | mutex_unlock(&pch_phub_mutex); |
547 | return_err_nomutex: | 560 | return_err_nomutex: |
548 | return err; | 561 | return err; |
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, | |||
555 | int err; | 568 | int err; |
556 | unsigned int addr_offset; | 569 | unsigned int addr_offset; |
557 | int ret; | 570 | int ret; |
571 | ssize_t rom_size; | ||
558 | struct pch_phub_reg *chip = | 572 | struct pch_phub_reg *chip = |
559 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | 573 | dev_get_drvdata(container_of(kobj, struct device, kobj)); |
560 | 574 | ||
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, | |||
571 | goto return_ok; | 585 | goto return_ok; |
572 | } | 586 | } |
573 | 587 | ||
588 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); | ||
589 | if (!chip->pch_phub_extrom_base_address) { | ||
590 | err = -ENOMEM; | ||
591 | goto exrom_map_err; | ||
592 | } | ||
593 | |||
574 | for (addr_offset = 0; addr_offset < count; addr_offset++) { | 594 | for (addr_offset = 0; addr_offset < count; addr_offset++) { |
575 | if (PCH_PHUB_OROM_SIZE < off + addr_offset) | 595 | if (PCH_PHUB_OROM_SIZE < off + addr_offset) |
576 | goto return_ok; | 596 | goto return_ok; |
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, | |||
585 | } | 605 | } |
586 | 606 | ||
587 | return_ok: | 607 | return_ok: |
608 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
588 | mutex_unlock(&pch_phub_mutex); | 609 | mutex_unlock(&pch_phub_mutex); |
589 | return addr_offset; | 610 | return addr_offset; |
590 | 611 | ||
591 | return_err: | 612 | return_err: |
613 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
614 | |||
615 | exrom_map_err: | ||
592 | mutex_unlock(&pch_phub_mutex); | 616 | mutex_unlock(&pch_phub_mutex); |
593 | return err; | 617 | return err; |
594 | } | 618 | } |
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr, | |||
598 | { | 622 | { |
599 | u8 mac[8]; | 623 | u8 mac[8]; |
600 | struct pch_phub_reg *chip = dev_get_drvdata(dev); | 624 | struct pch_phub_reg *chip = dev_get_drvdata(dev); |
625 | ssize_t rom_size; | ||
626 | |||
627 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); | ||
628 | if (!chip->pch_phub_extrom_base_address) | ||
629 | return -ENOMEM; | ||
601 | 630 | ||
602 | pch_phub_read_gbe_mac_addr(chip, mac); | 631 | pch_phub_read_gbe_mac_addr(chip, mac); |
632 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
603 | 633 | ||
604 | return sprintf(buf, "%pM\n", mac); | 634 | return sprintf(buf, "%pM\n", mac); |
605 | } | 635 | } |
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, | |||
608 | const char *buf, size_t count) | 638 | const char *buf, size_t count) |
609 | { | 639 | { |
610 | u8 mac[6]; | 640 | u8 mac[6]; |
641 | ssize_t rom_size; | ||
611 | struct pch_phub_reg *chip = dev_get_drvdata(dev); | 642 | struct pch_phub_reg *chip = dev_get_drvdata(dev); |
612 | 643 | ||
613 | if (count != 18) | 644 | if (count != 18) |
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, | |||
617 | (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], | 648 | (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], |
618 | (u32 *)&mac[4], (u32 *)&mac[5]); | 649 | (u32 *)&mac[4], (u32 *)&mac[5]); |
619 | 650 | ||
651 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); | ||
652 | if (!chip->pch_phub_extrom_base_address) | ||
653 | return -ENOMEM; | ||
654 | |||
620 | pch_phub_write_gbe_mac_addr(chip, mac); | 655 | pch_phub_write_gbe_mac_addr(chip, mac); |
656 | pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); | ||
621 | 657 | ||
622 | return count; | 658 | return count; |
623 | } | 659 | } |
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, | |||
640 | int retval; | 676 | int retval; |
641 | 677 | ||
642 | int ret; | 678 | int ret; |
643 | ssize_t rom_size; | ||
644 | struct pch_phub_reg *chip; | 679 | struct pch_phub_reg *chip; |
645 | 680 | ||
646 | chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); | 681 | chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); |
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, | |||
677 | "in pch_phub_base_address variable is %p\n", __func__, | 712 | "in pch_phub_base_address variable is %p\n", __func__, |
678 | chip->pch_phub_base_address); | 713 | chip->pch_phub_base_address); |
679 | 714 | ||
680 | if (id->driver_data != 3) { | 715 | chip->pdev = pdev; /* Save pci device struct */ |
681 | chip->pch_phub_extrom_base_address =\ | ||
682 | pci_map_rom(pdev, &rom_size); | ||
683 | if (chip->pch_phub_extrom_base_address == 0) { | ||
684 | dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__); | ||
685 | ret = -ENOMEM; | ||
686 | goto err_pci_map; | ||
687 | } | ||
688 | dev_dbg(&pdev->dev, "%s : " | ||
689 | "pci_map_rom SUCCESS and value in " | ||
690 | "pch_phub_extrom_base_address variable is %p\n", | ||
691 | __func__, chip->pch_phub_extrom_base_address); | ||
692 | } | ||
693 | 716 | ||
694 | if (id->driver_data == 1) { /* EG20T PCH */ | 717 | if (id->driver_data == 1) { /* EG20T PCH */ |
695 | const char *board_name; | 718 | const char *board_name; |
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, | |||
763 | chip->pch_opt_rom_start_address =\ | 786 | chip->pch_opt_rom_start_address =\ |
764 | PCH_PHUB_ROM_START_ADDR_ML7223; | 787 | PCH_PHUB_ROM_START_ADDR_ML7223; |
765 | chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; | 788 | chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; |
789 | } else if (id->driver_data == 5) { /* ML7831 */ | ||
790 | retval = sysfs_create_file(&pdev->dev.kobj, | ||
791 | &dev_attr_pch_mac.attr); | ||
792 | if (retval) | ||
793 | goto err_sysfs_create; | ||
794 | |||
795 | retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); | ||
796 | if (retval) | ||
797 | goto exit_bin_attr; | ||
798 | |||
799 | /* set the prefetch value */ | ||
800 | iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14); | ||
801 | /* set the interrupt delay value */ | ||
802 | iowrite32(0x25, chip->pch_phub_base_address + 0x44); | ||
803 | chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T; | ||
804 | chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T; | ||
766 | } | 805 | } |
767 | 806 | ||
768 | chip->ioh_type = id->driver_data; | 807 | chip->ioh_type = id->driver_data; |
@@ -773,8 +812,6 @@ exit_bin_attr: | |||
773 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); | 812 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); |
774 | 813 | ||
775 | err_sysfs_create: | 814 | err_sysfs_create: |
776 | pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); | ||
777 | err_pci_map: | ||
778 | pci_iounmap(pdev, chip->pch_phub_base_address); | 815 | pci_iounmap(pdev, chip->pch_phub_base_address); |
779 | err_pci_iomap: | 816 | err_pci_iomap: |
780 | pci_release_regions(pdev); | 817 | pci_release_regions(pdev); |
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev) | |||
792 | 829 | ||
793 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); | 830 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); |
794 | sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); | 831 | sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); |
795 | pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); | ||
796 | pci_iounmap(pdev, chip->pch_phub_base_address); | 832 | pci_iounmap(pdev, chip->pch_phub_base_address); |
797 | pci_release_regions(pdev); | 833 | pci_release_regions(pdev); |
798 | pci_disable_device(pdev); | 834 | pci_disable_device(pdev); |
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = { | |||
847 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, | 883 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, |
848 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, | 884 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, |
849 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, | 885 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, |
886 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, }, | ||
850 | { } | 887 | { } |
851 | }; | 888 | }; |
852 | MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); | 889 | MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); |
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void) | |||
873 | module_init(pch_phub_pci_init); | 910 | module_init(pch_phub_pci_init); |
874 | module_exit(pch_phub_pci_exit); | 911 | module_exit(pch_phub_pci_exit); |
875 | 912 | ||
876 | MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"); | 913 | MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB"); |
877 | MODULE_LICENSE("GPL"); | 914 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c index cfbddbef11de..43d073bc1d9c 100644 --- a/drivers/misc/spear13xx_pcie_gadget.c +++ b/drivers/misc/spear13xx_pcie_gadget.c | |||
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void) | |||
903 | } | 903 | } |
904 | module_exit(spear_pcie_gadget_exit); | 904 | module_exit(spear_pcie_gadget_exit); |
905 | 905 | ||
906 | MODULE_ALIAS("pcie-gadget-spear"); | 906 | MODULE_ALIAS("platform:pcie-gadget-spear"); |
907 | MODULE_AUTHOR("Pratyush Anand"); | 907 | MODULE_AUTHOR("Pratyush Anand"); |
908 | MODULE_LICENSE("GPL"); | 908 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 25a44d94be17..3216c514fdc8 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2554,30 +2554,6 @@ re_arm: | |||
2554 | } | 2554 | } |
2555 | } | 2555 | } |
2556 | 2556 | ||
2557 | static __be32 bond_glean_dev_ip(struct net_device *dev) | ||
2558 | { | ||
2559 | struct in_device *idev; | ||
2560 | struct in_ifaddr *ifa; | ||
2561 | __be32 addr = 0; | ||
2562 | |||
2563 | if (!dev) | ||
2564 | return 0; | ||
2565 | |||
2566 | rcu_read_lock(); | ||
2567 | idev = __in_dev_get_rcu(dev); | ||
2568 | if (!idev) | ||
2569 | goto out; | ||
2570 | |||
2571 | ifa = idev->ifa_list; | ||
2572 | if (!ifa) | ||
2573 | goto out; | ||
2574 | |||
2575 | addr = ifa->ifa_local; | ||
2576 | out: | ||
2577 | rcu_read_unlock(); | ||
2578 | return addr; | ||
2579 | } | ||
2580 | |||
2581 | static int bond_has_this_ip(struct bonding *bond, __be32 ip) | 2557 | static int bond_has_this_ip(struct bonding *bond, __be32 ip) |
2582 | { | 2558 | { |
2583 | struct vlan_entry *vlan; | 2559 | struct vlan_entry *vlan; |
@@ -3323,6 +3299,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
3323 | struct bonding *bond; | 3299 | struct bonding *bond; |
3324 | struct vlan_entry *vlan; | 3300 | struct vlan_entry *vlan; |
3325 | 3301 | ||
3302 | /* we only care about primary address */ | ||
3303 | if(ifa->ifa_flags & IFA_F_SECONDARY) | ||
3304 | return NOTIFY_DONE; | ||
3305 | |||
3326 | list_for_each_entry(bond, &bn->dev_list, bond_list) { | 3306 | list_for_each_entry(bond, &bn->dev_list, bond_list) { |
3327 | if (bond->dev == event_dev) { | 3307 | if (bond->dev == event_dev) { |
3328 | switch (event) { | 3308 | switch (event) { |
@@ -3330,7 +3310,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
3330 | bond->master_ip = ifa->ifa_local; | 3310 | bond->master_ip = ifa->ifa_local; |
3331 | return NOTIFY_OK; | 3311 | return NOTIFY_OK; |
3332 | case NETDEV_DOWN: | 3312 | case NETDEV_DOWN: |
3333 | bond->master_ip = bond_glean_dev_ip(bond->dev); | 3313 | bond->master_ip = 0; |
3334 | return NOTIFY_OK; | 3314 | return NOTIFY_OK; |
3335 | default: | 3315 | default: |
3336 | return NOTIFY_DONE; | 3316 | return NOTIFY_DONE; |
@@ -3346,8 +3326,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
3346 | vlan->vlan_ip = ifa->ifa_local; | 3326 | vlan->vlan_ip = ifa->ifa_local; |
3347 | return NOTIFY_OK; | 3327 | return NOTIFY_OK; |
3348 | case NETDEV_DOWN: | 3328 | case NETDEV_DOWN: |
3349 | vlan->vlan_ip = | 3329 | vlan->vlan_ip = 0; |
3350 | bond_glean_dev_ip(vlan_dev); | ||
3351 | return NOTIFY_OK; | 3330 | return NOTIFY_OK; |
3352 | default: | 3331 | default: |
3353 | return NOTIFY_DONE; | 3332 | return NOTIFY_DONE; |
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 26be1dfc1577..f801754c71a7 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -614,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) | |||
614 | 614 | ||
615 | if (!dm->wake_state) | 615 | if (!dm->wake_state) |
616 | irq_set_irq_wake(dm->irq_wake, 1); | 616 | irq_set_irq_wake(dm->irq_wake, 1); |
617 | else if (dm->wake_state & !opts) | 617 | else if (dm->wake_state && !opts) |
618 | irq_set_irq_wake(dm->irq_wake, 0); | 618 | irq_set_irq_wake(dm->irq_wake, 0); |
619 | } | 619 | } |
620 | 620 | ||
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index c520cfd3b298..5272f9d4dda9 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig | |||
@@ -24,6 +24,7 @@ config FEC | |||
24 | bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" | 24 | bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" |
25 | depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ | 25 | depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ |
26 | ARCH_MXC || ARCH_MXS) | 26 | ARCH_MXC || ARCH_MXS) |
27 | default ARCH_MXC || ARCH_MXS if ARM | ||
27 | select PHYLIB | 28 | select PHYLIB |
28 | ---help--- | 29 | ---help--- |
29 | Say Y here if you want to use the built-in 10/100 Fast ethernet | 30 | Say Y here if you want to use the built-in 10/100 Fast ethernet |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 5c0b531949e2..27d651a80f3f 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme) | |||
1745 | } | 1745 | } |
1746 | 1746 | ||
1747 | static int | 1747 | static int |
1748 | jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) | ||
1749 | { | ||
1750 | u32 phy_addr; | ||
1751 | |||
1752 | phy_addr = JM_PHY_SPEC_REG_READ | specreg; | ||
1753 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, | ||
1754 | phy_addr); | ||
1755 | return jme_mdio_read(jme->dev, jme->mii_if.phy_id, | ||
1756 | JM_PHY_SPEC_DATA_REG); | ||
1757 | } | ||
1758 | |||
1759 | static void | ||
1760 | jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) | ||
1761 | { | ||
1762 | u32 phy_addr; | ||
1763 | |||
1764 | phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; | ||
1765 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, | ||
1766 | phy_data); | ||
1767 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, | ||
1768 | phy_addr); | ||
1769 | } | ||
1770 | |||
1771 | static int | ||
1772 | jme_phy_calibration(struct jme_adapter *jme) | ||
1773 | { | ||
1774 | u32 ctrl1000, phy_data; | ||
1775 | |||
1776 | jme_phy_off(jme); | ||
1777 | jme_phy_on(jme); | ||
1778 | /* Enable PHY test mode 1 */ | ||
1779 | ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); | ||
1780 | ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; | ||
1781 | ctrl1000 |= PHY_GAD_TEST_MODE_1; | ||
1782 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); | ||
1783 | |||
1784 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); | ||
1785 | phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; | ||
1786 | phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | | ||
1787 | JM_PHY_EXT_COMM_2_CALI_ENABLE; | ||
1788 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); | ||
1789 | msleep(20); | ||
1790 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); | ||
1791 | phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | | ||
1792 | JM_PHY_EXT_COMM_2_CALI_MODE_0 | | ||
1793 | JM_PHY_EXT_COMM_2_CALI_LATCH); | ||
1794 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); | ||
1795 | |||
1796 | /* Disable PHY test mode */ | ||
1797 | ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); | ||
1798 | ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; | ||
1799 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); | ||
1800 | return 0; | ||
1801 | } | ||
1802 | |||
1803 | static int | ||
1804 | jme_phy_setEA(struct jme_adapter *jme) | ||
1805 | { | ||
1806 | u32 phy_comm0 = 0, phy_comm1 = 0; | ||
1807 | u8 nic_ctrl; | ||
1808 | |||
1809 | pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); | ||
1810 | if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) | ||
1811 | return 0; | ||
1812 | |||
1813 | switch (jme->pdev->device) { | ||
1814 | case PCI_DEVICE_ID_JMICRON_JMC250: | ||
1815 | if (((jme->chip_main_rev == 5) && | ||
1816 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || | ||
1817 | (jme->chip_sub_rev == 3))) || | ||
1818 | (jme->chip_main_rev >= 6)) { | ||
1819 | phy_comm0 = 0x008A; | ||
1820 | phy_comm1 = 0x4109; | ||
1821 | } | ||
1822 | if ((jme->chip_main_rev == 3) && | ||
1823 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) | ||
1824 | phy_comm0 = 0xE088; | ||
1825 | break; | ||
1826 | case PCI_DEVICE_ID_JMICRON_JMC260: | ||
1827 | if (((jme->chip_main_rev == 5) && | ||
1828 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || | ||
1829 | (jme->chip_sub_rev == 3))) || | ||
1830 | (jme->chip_main_rev >= 6)) { | ||
1831 | phy_comm0 = 0x008A; | ||
1832 | phy_comm1 = 0x4109; | ||
1833 | } | ||
1834 | if ((jme->chip_main_rev == 3) && | ||
1835 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) | ||
1836 | phy_comm0 = 0xE088; | ||
1837 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) | ||
1838 | phy_comm0 = 0x608A; | ||
1839 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) | ||
1840 | phy_comm0 = 0x408A; | ||
1841 | break; | ||
1842 | default: | ||
1843 | return -ENODEV; | ||
1844 | } | ||
1845 | if (phy_comm0) | ||
1846 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); | ||
1847 | if (phy_comm1) | ||
1848 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1); | ||
1849 | |||
1850 | return 0; | ||
1851 | } | ||
1852 | |||
1853 | static int | ||
1748 | jme_open(struct net_device *netdev) | 1854 | jme_open(struct net_device *netdev) |
1749 | { | 1855 | { |
1750 | struct jme_adapter *jme = netdev_priv(netdev); | 1856 | struct jme_adapter *jme = netdev_priv(netdev); |
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev) | |||
1769 | jme_set_settings(netdev, &jme->old_ecmd); | 1875 | jme_set_settings(netdev, &jme->old_ecmd); |
1770 | else | 1876 | else |
1771 | jme_reset_phy_processor(jme); | 1877 | jme_reset_phy_processor(jme); |
1772 | 1878 | jme_phy_calibration(jme); | |
1879 | jme_phy_setEA(jme); | ||
1773 | jme_reset_link(jme); | 1880 | jme_reset_link(jme); |
1774 | 1881 | ||
1775 | return 0; | 1882 | return 0; |
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev) | |||
3184 | jme_set_settings(netdev, &jme->old_ecmd); | 3291 | jme_set_settings(netdev, &jme->old_ecmd); |
3185 | else | 3292 | else |
3186 | jme_reset_phy_processor(jme); | 3293 | jme_reset_phy_processor(jme); |
3187 | 3294 | jme_phy_calibration(jme); | |
3295 | jme_phy_setEA(jme); | ||
3188 | jme_start_irq(jme); | 3296 | jme_start_irq(jme); |
3189 | netif_device_attach(netdev); | 3297 | netif_device_attach(netdev); |
3190 | 3298 | ||
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); | |||
3239 | MODULE_LICENSE("GPL"); | 3347 | MODULE_LICENSE("GPL"); |
3240 | MODULE_VERSION(DRV_VERSION); | 3348 | MODULE_VERSION(DRV_VERSION); |
3241 | MODULE_DEVICE_TABLE(pci, jme_pci_tbl); | 3349 | MODULE_DEVICE_TABLE(pci, jme_pci_tbl); |
3242 | |||
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 02ea27c1dcb5..4304072bd3c5 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h | |||
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits { | |||
760 | RXMCS_CHECKSUM, | 760 | RXMCS_CHECKSUM, |
761 | }; | 761 | }; |
762 | 762 | ||
763 | /* Extern PHY common register 2 */ | ||
764 | |||
765 | #define PHY_GAD_TEST_MODE_1 0x00002000 | ||
766 | #define PHY_GAD_TEST_MODE_MSK 0x0000E000 | ||
767 | #define JM_PHY_SPEC_REG_READ 0x00004000 | ||
768 | #define JM_PHY_SPEC_REG_WRITE 0x00008000 | ||
769 | #define PHY_CALIBRATION_DELAY 20 | ||
770 | #define JM_PHY_SPEC_ADDR_REG 0x1E | ||
771 | #define JM_PHY_SPEC_DATA_REG 0x1F | ||
772 | |||
773 | #define JM_PHY_EXT_COMM_0_REG 0x30 | ||
774 | #define JM_PHY_EXT_COMM_1_REG 0x31 | ||
775 | #define JM_PHY_EXT_COMM_2_REG 0x32 | ||
776 | #define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01 | ||
777 | #define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02 | ||
778 | #define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10 | ||
779 | #define PCI_PRIV_SHARE_NICCTRL 0xF5 | ||
780 | #define JME_FLAG_PHYEA_ENABLE 0x2 | ||
781 | |||
763 | /* | 782 | /* |
764 | * Wakeup Frame setup interface registers | 783 | * Wakeup Frame setup interface registers |
765 | */ | 784 | */ |
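The new jme helpers reach the PHY's extended "specification" registers indirectly: an opcode bit (read or write) is OR'd with the extended register number and written to JM_PHY_SPEC_ADDR_REG (0x1E), while the value itself moves through JM_PHY_SPEC_DATA_REG (0x1F). The standalone sketch below only illustrates that bit composition and ordering; the mdio_write()/mdio_read() stubs are hypothetical stand-ins for the driver's MDIO accessors, not real kernel calls.

/*
 * Minimal standalone illustration of the indirect spec-register access
 * used by jme_phy_specreg_read()/write(): compose opcode | register
 * number into the address register, move data through the data register.
 */
#include <stdio.h>

#define JM_PHY_SPEC_REG_READ   0x00004000
#define JM_PHY_SPEC_REG_WRITE  0x00008000
#define JM_PHY_SPEC_ADDR_REG   0x1E
#define JM_PHY_SPEC_DATA_REG   0x1F

/* hypothetical stand-ins for the MDIO bus accessors */
static void mdio_write(unsigned reg, unsigned val)
{
	printf("write reg 0x%02X = 0x%08X\n", reg, val);
}

static unsigned mdio_read(unsigned reg)
{
	printf("read  reg 0x%02X\n", reg);
	return 0;
}

static unsigned specreg_read(unsigned specreg)
{
	/* select the extended register with the READ opcode, then fetch data */
	mdio_write(JM_PHY_SPEC_ADDR_REG, JM_PHY_SPEC_REG_READ | specreg);
	return mdio_read(JM_PHY_SPEC_DATA_REG);
}

static void specreg_write(unsigned specreg, unsigned data)
{
	/* stage the data first, then issue the WRITE opcode for that register */
	mdio_write(JM_PHY_SPEC_DATA_REG, data);
	mdio_write(JM_PHY_SPEC_ADDR_REG, JM_PHY_SPEC_REG_WRITE | specreg);
}

int main(void)
{
	specreg_write(0x30, 0x008A);	/* JM_PHY_EXT_COMM_0_REG */
	(void)specreg_read(0x32);	/* JM_PHY_EXT_COMM_2_REG */
	return 0;
}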
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 662ab7e9a0f0..8876134b35bd 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) | |||
1827 | } | 1827 | } |
1828 | 1828 | ||
1829 | /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ | 1829 | /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ |
1830 | REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); | 1830 | if (AR_SREV_9300_20_OR_LATER(ah)) |
1831 | REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); | ||
1831 | } | 1832 | } |
1832 | 1833 | ||
1833 | /* | 1834 | /* |
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index db5262844543..55c8e50f45fd 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c | |||
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) | |||
395 | if (mac->link_state != MAC80211_LINKED) | 395 | if (mac->link_state != MAC80211_LINKED) |
396 | return; | 396 | return; |
397 | 397 | ||
398 | spin_lock(&rtlpriv->locks.lps_lock); | 398 | spin_lock_irq(&rtlpriv->locks.lps_lock); |
399 | 399 | ||
400 | /* Idle for a while if we connect to AP a while ago. */ | 400 | /* Idle for a while if we connect to AP a while ago. */ |
401 | if (mac->cnt_after_linked >= 2) { | 401 | if (mac->cnt_after_linked >= 2) { |
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) | |||
407 | } | 407 | } |
408 | } | 408 | } |
409 | 409 | ||
410 | spin_unlock(&rtlpriv->locks.lps_lock); | 410 | spin_unlock_irq(&rtlpriv->locks.lps_lock); |
411 | } | 411 | } |
412 | 412 | ||
413 | /*Leave the leisure power save mode.*/ | 413 | /*Leave the leisure power save mode.*/ |
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw) | |||
416 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 416 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
417 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); | 417 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); |
418 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); | 418 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
419 | unsigned long flags; | ||
419 | 420 | ||
420 | spin_lock(&rtlpriv->locks.lps_lock); | 421 | spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags); |
421 | 422 | ||
422 | if (ppsc->fwctrl_lps) { | 423 | if (ppsc->fwctrl_lps) { |
423 | if (ppsc->dot11_psmode != EACTIVE) { | 424 | if (ppsc->dot11_psmode != EACTIVE) { |
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) | |||
438 | rtl_lps_set_psmode(hw, EACTIVE); | 439 | rtl_lps_set_psmode(hw, EACTIVE); |
439 | } | 440 | } |
440 | } | 441 | } |
441 | spin_unlock(&rtlpriv->locks.lps_lock); | 442 | spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags); |
442 | } | 443 | } |
443 | 444 | ||
444 | /* For sw LPS*/ | 445 | /* For sw LPS*/ |
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) | |||
539 | RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); | 540 | RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); |
540 | } | 541 | } |
541 | 542 | ||
542 | spin_lock(&rtlpriv->locks.lps_lock); | 543 | spin_lock_irq(&rtlpriv->locks.lps_lock); |
543 | rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); | 544 | rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); |
544 | spin_unlock(&rtlpriv->locks.lps_lock); | 545 | spin_unlock_irq(&rtlpriv->locks.lps_lock); |
545 | } | 546 | } |
546 | 547 | ||
547 | void rtl_swlps_rfon_wq_callback(void *data) | 548 | void rtl_swlps_rfon_wq_callback(void *data) |
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) | |||
574 | if (rtlpriv->link_info.busytraffic) | 575 | if (rtlpriv->link_info.busytraffic) |
575 | return; | 576 | return; |
576 | 577 | ||
577 | spin_lock(&rtlpriv->locks.lps_lock); | 578 | spin_lock_irq(&rtlpriv->locks.lps_lock); |
578 | rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); | 579 | rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); |
579 | spin_unlock(&rtlpriv->locks.lps_lock); | 580 | spin_unlock_irq(&rtlpriv->locks.lps_lock); |
580 | 581 | ||
581 | if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && | 582 | if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && |
582 | !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { | 583 | !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { |
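The rtlwifi change converts the lps_lock from plain spin_lock() to the interrupt-disabling variants: paths known to run with IRQs enabled use spin_lock_irq(), while rtl_lps_leave(), which may be entered with the IRQ state unknown, uses spin_lock_irqsave() so the previous state is restored on unlock. The fragment below is an illustrative kernel-style sketch of that idiom, not a buildable module, and the function names are made up.

/* Illustrative only: when the caller's IRQ state is known vs. unknown. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void known_irqs_on_context(void)
{
	spin_lock_irq(&example_lock);	/* caller guaranteed to have IRQs enabled */
	/* ... touch state shared with an interrupt handler ... */
	spin_unlock_irq(&example_lock);
}

static void unknown_irq_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* preserves whatever IRQ state the caller had */
	/* ... same shared state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}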
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 6d3dd3988d0f..19c0115092dd 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -60,27 +60,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); | |||
60 | */ | 60 | */ |
61 | struct device_node *of_irq_find_parent(struct device_node *child) | 61 | struct device_node *of_irq_find_parent(struct device_node *child) |
62 | { | 62 | { |
63 | struct device_node *p, *c = child; | 63 | struct device_node *p; |
64 | const __be32 *parp; | 64 | const __be32 *parp; |
65 | 65 | ||
66 | if (!of_node_get(c)) | 66 | if (!of_node_get(child)) |
67 | return NULL; | 67 | return NULL; |
68 | 68 | ||
69 | do { | 69 | do { |
70 | parp = of_get_property(c, "interrupt-parent", NULL); | 70 | parp = of_get_property(child, "interrupt-parent", NULL); |
71 | if (parp == NULL) | 71 | if (parp == NULL) |
72 | p = of_get_parent(c); | 72 | p = of_get_parent(child); |
73 | else { | 73 | else { |
74 | if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) | 74 | if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) |
75 | p = of_node_get(of_irq_dflt_pic); | 75 | p = of_node_get(of_irq_dflt_pic); |
76 | else | 76 | else |
77 | p = of_find_node_by_phandle(be32_to_cpup(parp)); | 77 | p = of_find_node_by_phandle(be32_to_cpup(parp)); |
78 | } | 78 | } |
79 | of_node_put(c); | 79 | of_node_put(child); |
80 | c = p; | 80 | child = p; |
81 | } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); | 81 | } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); |
82 | 82 | ||
83 | return (p == child) ? NULL : p; | 83 | return p; |
84 | } | 84 | } |
85 | 85 | ||
86 | /** | 86 | /** |
@@ -424,6 +424,8 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
424 | 424 | ||
425 | desc->dev = np; | 425 | desc->dev = np; |
426 | desc->interrupt_parent = of_irq_find_parent(np); | 426 | desc->interrupt_parent = of_irq_find_parent(np); |
427 | if (desc->interrupt_parent == np) | ||
428 | desc->interrupt_parent = NULL; | ||
427 | list_add_tail(&desc->list, &intc_desc_list); | 429 | list_add_tail(&desc->list, &intc_desc_list); |
428 | } | 430 | } |
429 | 431 | ||
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b6f9749b4fa7..f02b5235056d 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -76,6 +76,7 @@ config PCI_IOV | |||
76 | 76 | ||
77 | config PCI_PRI | 77 | config PCI_PRI |
78 | bool "PCI PRI support" | 78 | bool "PCI PRI support" |
79 | depends on PCI | ||
79 | select PCI_ATS | 80 | select PCI_ATS |
80 | help | 81 | help |
81 | PRI is the PCI Page Request Interface. It allows PCI devices that are | 82 | PRI is the PCI Page Request Interface. It allows PCI devices that are |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 596172b4ae95..fce1c54a0c8d 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle) | |||
459 | { | 459 | { |
460 | acpi_status status; | 460 | acpi_status status; |
461 | unsigned long long tmp; | 461 | unsigned long long tmp; |
462 | struct acpi_pci_root *root; | ||
462 | acpi_handle dummy_handle; | 463 | acpi_handle dummy_handle; |
463 | 464 | ||
465 | /* | ||
466 | * We shouldn't use this bridge if PCIe native hotplug control has been | ||
467 | * granted by the BIOS for it. | ||
468 | */ | ||
469 | root = acpi_pci_find_root(handle); | ||
470 | if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)) | ||
471 | return -ENODEV; | ||
472 | |||
464 | /* if the bridge doesn't have _STA, we assume it is always there */ | 473 | /* if the bridge doesn't have _STA, we assume it is always there */ |
465 | status = acpi_get_handle(handle, "_STA", &dummy_handle); | 474 | status = acpi_get_handle(handle, "_STA", &dummy_handle); |
466 | if (ACPI_SUCCESS(status)) { | 475 | if (ACPI_SUCCESS(status)) { |
@@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, | |||
1376 | static acpi_status | 1385 | static acpi_status |
1377 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | 1386 | find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) |
1378 | { | 1387 | { |
1388 | struct acpi_pci_root *root; | ||
1379 | int *count = (int *)context; | 1389 | int *count = (int *)context; |
1380 | 1390 | ||
1381 | if (acpi_is_root_bridge(handle)) { | 1391 | if (!acpi_is_root_bridge(handle)) |
1382 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, | 1392 | return AE_OK; |
1383 | handle_hotplug_event_bridge, NULL); | 1393 | |
1384 | (*count)++; | 1394 | root = acpi_pci_find_root(handle); |
1385 | } | 1395 | if (!root) |
1396 | return AE_OK; | ||
1397 | |||
1398 | if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) | ||
1399 | return AE_OK; | ||
1400 | |||
1401 | (*count)++; | ||
1402 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, | ||
1403 | handle_hotplug_event_bridge, NULL); | ||
1404 | |||
1386 | return AE_OK ; | 1405 | return AE_OK ; |
1387 | } | 1406 | } |
1388 | 1407 | ||
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 1e9c9aacc3a6..085dbb5fc168 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot) | |||
213 | goto err_exit; | 213 | goto err_exit; |
214 | } | 214 | } |
215 | 215 | ||
216 | /* Wait for 1 second after checking link training status */ | ||
217 | msleep(1000); | ||
218 | |||
219 | /* Check for a power fault */ | 216 | /* Check for a power fault */ |
220 | if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { | 217 | if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { |
221 | ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); | 218 | ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 96dc4734e4af..7b1414810ae3 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl) | |||
280 | else | 280 | else |
281 | msleep(1000); | 281 | msleep(1000); |
282 | 282 | ||
283 | /* | ||
284 | * Need to wait for 1000 ms after Data Link Layer Link Active | ||
285 | * (DLLLA) bit reads 1b before sending configuration request. | ||
286 | * We need it before checking the Link Training (LT) bit because | ||
287 | * LT is still set even after the DLLLA bit is set on some platforms. | ||
288 | */ | ||
289 | msleep(1000); | ||
290 | |||
283 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | 291 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); |
284 | if (retval) { | 292 | if (retval) { |
285 | ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); | 293 | ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); |
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl) | |||
294 | return retval; | 302 | return retval; |
295 | } | 303 | } |
296 | 304 | ||
305 | /* | ||
306 | * If the port supports Link speeds greater than 5.0 GT/s, we | ||
307 | * must wait for 100 ms after Link training completes before | ||
308 | * sending configuration request. | ||
309 | */ | ||
310 | if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT) | ||
311 | msleep(100); | ||
312 | |||
313 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | ||
314 | |||
297 | return retval; | 315 | return retval; |
298 | } | 316 | } |
299 | 317 | ||
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot) | |||
484 | u16 slot_cmd; | 502 | u16 slot_cmd; |
485 | u16 cmd_mask; | 503 | u16 cmd_mask; |
486 | u16 slot_status; | 504 | u16 slot_status; |
487 | u16 lnk_status; | ||
488 | int retval = 0; | 505 | int retval = 0; |
489 | 506 | ||
490 | /* Clear sticky power-fault bit from previous power failures */ | 507 | /* Clear sticky power-fault bit from previous power failures */ |
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot) | |||
516 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 533 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
517 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 534 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
518 | 535 | ||
519 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
520 | if (retval) { | ||
521 | ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", | ||
522 | __func__); | ||
523 | return retval; | ||
524 | } | ||
525 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | ||
526 | |||
527 | return retval; | 536 | return retval; |
528 | } | 537 | } |
529 | 538 | ||
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index aca972bbfb4c..dd7e0c51a33e 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) | |||
278 | 278 | ||
279 | static int is_shpc_capable(struct pci_dev *dev) | 279 | static int is_shpc_capable(struct pci_dev *dev) |
280 | { | 280 | { |
281 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == | 281 | if (dev->vendor == PCI_VENDOR_ID_AMD && |
282 | PCI_DEVICE_ID_AMD_GOLAM_7450)) | 282 | dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) |
283 | return 1; | 283 | return 1; |
284 | if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) | 284 | if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) |
285 | return 0; | 285 | return 0; |
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 36547f0ce305..75ba2311b54f 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c | |||
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
944 | ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ | 944 | ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ |
945 | ctrl_dbg(ctrl, "Hotplug Controller:\n"); | 945 | ctrl_dbg(ctrl, "Hotplug Controller:\n"); |
946 | 946 | ||
947 | if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == | 947 | if (pdev->vendor == PCI_VENDOR_ID_AMD && |
948 | PCI_DEVICE_ID_AMD_GOLAM_7450)) { | 948 | pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) { |
949 | /* amd shpc driver doesn't use Base Offset; assume 0 */ | 949 | /* amd shpc driver doesn't use Base Offset; assume 0 */ |
950 | ctrl->mmio_base = pci_resource_start(pdev, 0); | 950 | ctrl->mmio_base = pci_resource_start(pdev, 0); |
951 | ctrl->mmio_size = pci_resource_len(pdev, 0); | 951 | ctrl->mmio_size = pci_resource_len(pdev, 0); |
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c index 5abeb3ac3e8d..298c6c6a2795 100644 --- a/drivers/regulator/aat2870-regulator.c +++ b/drivers/regulator/aat2870-regulator.c | |||
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id) | |||
160 | break; | 160 | break; |
161 | } | 161 | } |
162 | 162 | ||
163 | if (!ri) | 163 | if (i == ARRAY_SIZE(aat2870_regulators)) |
164 | return NULL; | 164 | return NULL; |
165 | 165 | ||
166 | ri->enable_addr = AAT2870_LDO_EN; | 166 | ri->enable_addr = AAT2870_LDO_EN; |
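The aat2870 fix replaces a check on the loop cursor (which can never be NULL after walking a static array) with the conventional "did the index run off the end" test. A small standalone sketch of that idiom, with a made-up lookup table:

/* Standalone illustration of the i == ARRAY_SIZE() "not found" idiom. */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reg_desc { int id; const char *name; };

static struct reg_desc table[] = {
	{ 0, "LDOA" },
	{ 1, "LDOB" },
};

static struct reg_desc *find_reg(int id)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].id == id)
			break;

	if (i == ARRAY_SIZE(table))	/* fell off the end: no match */
		return NULL;

	return &table[i];
}

int main(void)
{
	printf("%p\n", (void *)find_reg(42));	/* NULL: not found */
	return 0;
}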
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 669d02160221..938398f3e869 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev) | |||
2799 | list_del(&rdev->list); | 2799 | list_del(&rdev->list); |
2800 | if (rdev->supply) | 2800 | if (rdev->supply) |
2801 | regulator_put(rdev->supply); | 2801 | regulator_put(rdev->supply); |
2802 | device_unregister(&rdev->dev); | ||
2803 | kfree(rdev->constraints); | 2802 | kfree(rdev->constraints); |
2803 | device_unregister(&rdev->dev); | ||
2804 | mutex_unlock(®ulator_list_mutex); | 2804 | mutex_unlock(®ulator_list_mutex); |
2805 | } | 2805 | } |
2806 | EXPORT_SYMBOL_GPL(regulator_unregister); | 2806 | EXPORT_SYMBOL_GPL(regulator_unregister); |
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 66d2d60b436a..b552aae55b41 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev, | |||
664 | 664 | ||
665 | switch (id) { | 665 | switch (id) { |
666 | case TPS65910_REG_VDD1: | 666 | case TPS65910_REG_VDD1: |
667 | dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; | 667 | dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; |
668 | if (dcdc_mult == 1) | 668 | if (dcdc_mult == 1) |
669 | dcdc_mult--; | 669 | dcdc_mult--; |
670 | vsel = (selector % VDD1_2_NUM_VOLTS) + 3; | 670 | vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; |
671 | 671 | ||
672 | tps65910_modify_bits(pmic, TPS65910_VDD1, | 672 | tps65910_modify_bits(pmic, TPS65910_VDD1, |
673 | (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), | 673 | (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), |
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev, | |||
675 | tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); | 675 | tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); |
676 | break; | 676 | break; |
677 | case TPS65910_REG_VDD2: | 677 | case TPS65910_REG_VDD2: |
678 | dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; | 678 | dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; |
679 | if (dcdc_mult == 1) | 679 | if (dcdc_mult == 1) |
680 | dcdc_mult--; | 680 | dcdc_mult--; |
681 | vsel = (selector % VDD1_2_NUM_VOLTS) + 3; | 681 | vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; |
682 | 682 | ||
683 | tps65910_modify_bits(pmic, TPS65910_VDD2, | 683 | tps65910_modify_bits(pmic, TPS65910_VDD2, |
684 | (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), | 684 | (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), |
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, | |||
756 | switch (id) { | 756 | switch (id) { |
757 | case TPS65910_REG_VDD1: | 757 | case TPS65910_REG_VDD1: |
758 | case TPS65910_REG_VDD2: | 758 | case TPS65910_REG_VDD2: |
759 | mult = (selector / VDD1_2_NUM_VOLTS) + 1; | 759 | mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; |
760 | volt = VDD1_2_MIN_VOLT + | 760 | volt = VDD1_2_MIN_VOLT + |
761 | (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; | 761 | (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET; |
762 | break; | 762 | break; |
763 | case TPS65911_REG_VDDCTRL: | 763 | case TPS65911_REG_VDDCTRL: |
764 | volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); | 764 | volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); |
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev) | |||
947 | 947 | ||
948 | if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { | 948 | if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { |
949 | pmic->desc[i].ops = &tps65910_ops_dcdc; | 949 | pmic->desc[i].ops = &tps65910_ops_dcdc; |
950 | pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE * | ||
951 | VDD1_2_NUM_VOLT_COARSE; | ||
950 | } else if (i == TPS65910_REG_VDD3) { | 952 | } else if (i == TPS65910_REG_VDD3) { |
951 | if (tps65910_chip_id(tps65910) == TPS65910) | 953 | if (tps65910_chip_id(tps65910) == TPS65910) |
952 | pmic->desc[i].ops = &tps65910_ops_vdd3; | 954 | pmic->desc[i].ops = &tps65910_ops_vdd3; |
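The renamed constants make explicit that the VDD1/VDD2 selector is a composite: the low part picks a fine 12.5 mV step and the high part picks a coarse gain multiplier, which is why n_voltages becomes VDD1_2_NUM_VOLT_FINE * VDD1_2_NUM_VOLT_COARSE. The standalone sketch below works that mapping through with illustrative placeholder values for FINE, COARSE, MIN and OFFSET (the real numbers live in the tps65910 header), and the final scaling of the whole base by the gain is an assumption about how the two parts combine rather than a line shown in this hunk.

/* Worked example of the fine-step / coarse-gain selector split. */
#include <stdio.h>

#define NUM_VOLT_FINE   73      /* illustrative placeholder */
#define NUM_VOLT_COARSE 3       /* illustrative placeholder */
#define MIN_VOLT_UV     600000  /* illustrative placeholder */
#define OFFSET_UV       12500   /* 12.5 mV per fine step */

static int list_voltage(unsigned selector)
{
	unsigned mult = selector / NUM_VOLT_FINE + 1;	/* coarse gain, 1..COARSE */
	unsigned fine = selector % NUM_VOLT_FINE;	/* step within the gain range */

	return (MIN_VOLT_UV + fine * OFFSET_UV) * mult;
}

int main(void)
{
	printf("sel 0  -> %d uV\n", list_voltage(0));			/* 600000 */
	printf("sel 72 -> %d uV\n", list_voltage(NUM_VOLT_FINE - 1));	/* 1500000 */
	printf("sel 73 -> %d uV\n", list_voltage(NUM_VOLT_FINE));	/* 1200000: ranges overlap, not monotonic */
	return 0;
}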
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index ee8747f4fa08..11cc308d66e9 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c | |||
@@ -71,6 +71,7 @@ struct twlreg_info { | |||
71 | #define VREG_TYPE 1 | 71 | #define VREG_TYPE 1 |
72 | #define VREG_REMAP 2 | 72 | #define VREG_REMAP 2 |
73 | #define VREG_DEDICATED 3 /* LDO control */ | 73 | #define VREG_DEDICATED 3 /* LDO control */ |
74 | #define VREG_VOLTAGE_SMPS_4030 9 | ||
74 | /* TWL6030 register offsets */ | 75 | /* TWL6030 register offsets */ |
75 | #define VREG_TRANS 1 | 76 | #define VREG_TRANS 1 |
76 | #define VREG_STATE 2 | 77 | #define VREG_STATE 2 |
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = { | |||
514 | .get_status = twl4030reg_get_status, | 515 | .get_status = twl4030reg_get_status, |
515 | }; | 516 | }; |
516 | 517 | ||
518 | static int | ||
519 | twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, | ||
520 | unsigned *selector) | ||
521 | { | ||
522 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
523 | int vsel = DIV_ROUND_UP(min_uV - 600000, 12500); | ||
524 | |||
525 | twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030, | ||
526 | vsel); | ||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static int twl4030smps_get_voltage(struct regulator_dev *rdev) | ||
531 | { | ||
532 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
533 | int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, | ||
534 | VREG_VOLTAGE_SMPS_4030); | ||
535 | |||
536 | return vsel * 12500 + 600000; | ||
537 | } | ||
538 | |||
539 | static struct regulator_ops twl4030smps_ops = { | ||
540 | .set_voltage = twl4030smps_set_voltage, | ||
541 | .get_voltage = twl4030smps_get_voltage, | ||
542 | }; | ||
543 | |||
517 | static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) | 544 | static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) |
518 | { | 545 | { |
519 | struct twlreg_info *info = rdev_get_drvdata(rdev); | 546 | struct twlreg_info *info = rdev_get_drvdata(rdev); |
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = { | |||
856 | }, \ | 883 | }, \ |
857 | } | 884 | } |
858 | 885 | ||
886 | #define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ | ||
887 | { \ | ||
888 | .base = offset, \ | ||
889 | .id = num, \ | ||
890 | .delay = turnon_delay, \ | ||
891 | .remap = remap_conf, \ | ||
892 | .desc = { \ | ||
893 | .name = #label, \ | ||
894 | .id = TWL4030_REG_##label, \ | ||
895 | .ops = &twl4030smps_ops, \ | ||
896 | .type = REGULATOR_VOLTAGE, \ | ||
897 | .owner = THIS_MODULE, \ | ||
898 | }, \ | ||
899 | } | ||
900 | |||
859 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ | 901 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ |
860 | .base = offset, \ | 902 | .base = offset, \ |
861 | .min_mV = min_mVolts, \ | 903 | .min_mV = min_mVolts, \ |
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = { | |||
947 | TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), | 989 | TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), |
948 | TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), | 990 | TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), |
949 | TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), | 991 | TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), |
950 | TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08), | 992 | TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08), |
951 | TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08), | 993 | TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08), |
952 | TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), | 994 | TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), |
953 | TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), | 995 | TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), |
954 | TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), | 996 | TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), |
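The new twl4030smps ops encode the VDD1/VDD2 voltage as 600 mV plus vsel times 12.5 mV, rounding a requested minimum up to the next step with DIV_ROUND_UP. The constants come straight from the hunk; the sketch below only checks the arithmetic round-trip and models no register access.

/* Round-trip check of the 600 mV + vsel * 12.5 mV SMPS formula. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int uV_to_vsel(int min_uV) { return DIV_ROUND_UP(min_uV - 600000, 12500); }
static int vsel_to_uV(int vsel)   { return vsel * 12500 + 600000; }

int main(void)
{
	int vsel = uV_to_vsel(1200000);		/* request 1.2 V, an exact step */
	printf("vsel=%d -> %d uV\n", vsel, vsel_to_uV(vsel));	/* 48 -> 1200000 */

	vsel = uV_to_vsel(1210000);		/* 1.21 V is not a step: round up */
	printf("vsel=%d -> %d uV\n", vsel, vsel_to_uV(vsel));	/* 49 -> 1212500 */
	return 0;
}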
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index e763254741c2..21c70b2b8311 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c | |||
@@ -426,7 +426,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev) | |||
426 | goto err_clk; | 426 | goto err_clk; |
427 | } | 427 | } |
428 | 428 | ||
429 | mfp_set_groupg(&pdev->dev); | 429 | mfp_set_groupg(&pdev->dev, NULL); |
430 | nuc900_init_spi(hw); | 430 | nuc900_init_spi(hw); |
431 | 431 | ||
432 | err = spi_bitbang_start(&hw->bitbang); | 432 | err = spi_bitbang_start(&hw->bitbang); |
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig index 9e1864c6dfd0..8190f2aaf53b 100644 --- a/drivers/staging/et131x/Kconfig +++ b/drivers/staging/et131x/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config ET131X | 1 | config ET131X |
2 | tristate "Agere ET-1310 Gigabit Ethernet support" | 2 | tristate "Agere ET-1310 Gigabit Ethernet support" |
3 | depends on PCI | 3 | depends on PCI && NET && NETDEVICES |
4 | select PHYLIB | ||
4 | default n | 5 | default n |
5 | ---help--- | 6 | ---help--- |
6 | This driver supports Agere ET-1310 ethernet adapters. | 7 | This driver supports Agere ET-1310 ethernet adapters. |
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index f5f44a02456f..0c1c6ca8c379 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c | |||
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev) | |||
4469 | return 0; | 4469 | return 0; |
4470 | } | 4470 | } |
4471 | 4471 | ||
4472 | static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); | ||
4473 | #define ET131X_PM_OPS (&et131x_pm_ops) | ||
4474 | #else | ||
4475 | #define ET131X_PM_OPS NULL | ||
4476 | #endif | ||
4477 | |||
4472 | /* ISR functions */ | 4478 | /* ISR functions */ |
4473 | 4479 | ||
4474 | /** | 4480 | /** |
@@ -5470,12 +5476,6 @@ err_out: | |||
5470 | return result; | 5476 | return result; |
5471 | } | 5477 | } |
5472 | 5478 | ||
5473 | static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); | ||
5474 | #define ET131X_PM_OPS (&et131x_pm_ops) | ||
5475 | #else | ||
5476 | #define ET131X_PM_OPS NULL | ||
5477 | #endif | ||
5478 | |||
5479 | static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { | 5479 | static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { |
5480 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, | 5480 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, |
5481 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, | 5481 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, |
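The et131x hunks simply move the SIMPLE_DEV_PM_OPS definition and the ET131X_PM_OPS fallback up next to the suspend/resume handlers, so both live inside the same preprocessor guard and are defined before anything references the alias. The fragment below is an illustrative kernel-style sketch of that shape, not a buildable driver; the guard symbol shown (CONFIG_PM_SLEEP) is a placeholder, since the actual #ifdef line is not part of this hunk.

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP	/* placeholder guard; see the driver for the real one */
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL	/* no PM support compiled in */
#endif

/* later, a driver structure can reference the alias unconditionally:
 *	.driver.pm = EXAMPLE_PM_OPS,
 */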
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c index 326e967d54ef..aec9311b108c 100644 --- a/drivers/staging/iio/industrialio-core.c +++ b/drivers/staging/iio/industrialio-core.c | |||
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = { | |||
242 | 242 | ||
243 | static int iio_event_getfd(struct iio_dev *indio_dev) | 243 | static int iio_event_getfd(struct iio_dev *indio_dev) |
244 | { | 244 | { |
245 | if (indio_dev->event_interface == NULL) | 245 | struct iio_event_interface *ev_int = indio_dev->event_interface; |
246 | int fd; | ||
247 | |||
248 | if (ev_int == NULL) | ||
246 | return -ENODEV; | 249 | return -ENODEV; |
247 | 250 | ||
248 | mutex_lock(&indio_dev->event_interface->event_list_lock); | 251 | mutex_lock(&ev_int->event_list_lock); |
249 | if (test_and_set_bit(IIO_BUSY_BIT_POS, | 252 | if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
250 | &indio_dev->event_interface->flags)) { | 253 | mutex_unlock(&ev_int->event_list_lock); |
251 | mutex_unlock(&indio_dev->event_interface->event_list_lock); | ||
252 | return -EBUSY; | 254 | return -EBUSY; |
253 | } | 255 | } |
254 | mutex_unlock(&indio_dev->event_interface->event_list_lock); | 256 | mutex_unlock(&ev_int->event_list_lock); |
255 | return anon_inode_getfd("iio:event", | 257 | fd = anon_inode_getfd("iio:event", |
256 | &iio_event_chrdev_fileops, | 258 | &iio_event_chrdev_fileops, ev_int, O_RDONLY); |
257 | indio_dev->event_interface, O_RDONLY); | 259 | if (fd < 0) { |
260 | mutex_lock(&ev_int->event_list_lock); | ||
261 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); | ||
262 | mutex_unlock(&ev_int->event_list_lock); | ||
263 | } | ||
264 | return fd; | ||
258 | } | 265 | } |
259 | 266 | ||
260 | static int __init iio_init(void) | 267 | static int __init iio_init(void) |
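The IIO change adds the missing error path: the busy bit claimed under the event_list_lock must be released again if anon_inode_getfd() fails, otherwise the event interface could never be opened afterwards. The standalone sketch below shows that claim-then-roll-back shape with a pthread mutex and a hypothetical make_fd() standing in for the fd factory.

/* Claim a busy flag, roll it back if descriptor creation fails. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int busy;

static int make_fd(int fail) { return fail ? -1 : 42; }	/* pretend fd factory */

static int get_event_fd(int fail)
{
	int fd;

	pthread_mutex_lock(&lock);
	if (busy) {			/* already handed out */
		pthread_mutex_unlock(&lock);
		return -1;
	}
	busy = 1;
	pthread_mutex_unlock(&lock);

	fd = make_fd(fail);
	if (fd < 0) {
		pthread_mutex_lock(&lock);
		busy = 0;		/* roll back the claim on failure */
		pthread_mutex_unlock(&lock);
	}
	return fd;			/* success keeps the flag until release */
}

int main(void)
{
	int first = get_event_fd(1);	/* creation fails: flag rolled back */
	int second = get_event_fd(0);	/* succeeds because the flag was released */

	printf("first=%d second=%d\n", first, second);
	return 0;
}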
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig index 5cde96b2e6e1..5c2a15b42dfe 100644 --- a/drivers/staging/slicoss/Kconfig +++ b/drivers/staging/slicoss/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config SLICOSS | 1 | config SLICOSS |
2 | tristate "Alacritech Gigabit IS-NIC support" | 2 | tristate "Alacritech Gigabit IS-NIC support" |
3 | depends on PCI && X86 | 3 | depends on PCI && X86 && NET |
4 | default n | 4 | default n |
5 | help | 5 | help |
6 | This driver supports Alacritech's IS-NIC gigabit ethernet cards. | 6 | This driver supports Alacritech's IS-NIC gigabit ethernet cards. |
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c index 435f6facbc23..44fbebab5075 100644 --- a/drivers/tty/hvc/hvc_dcc.c +++ b/drivers/tty/hvc/hvc_dcc.c | |||
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void) | |||
46 | 46 | ||
47 | asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" | 47 | asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" |
48 | : "=r" (__c)); | 48 | : "=r" (__c)); |
49 | isb(); | ||
49 | 50 | ||
50 | return __c; | 51 | return __c; |
51 | } | 52 | } |
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c) | |||
55 | asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" | 56 | asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" |
56 | : /* no output register */ | 57 | : /* no output register */ |
57 | : "r" (c)); | 58 | : "r" (c)); |
59 | isb(); | ||
58 | } | 60 | } |
59 | 61 | ||
60 | static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) | 62 | static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 5f479dada6f2..925a1e547a83 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60 | |||
1560 | Support for the IFX6x60 modem devices on Intel MID platforms. | 1560 | Support for the IFX6x60 modem devices on Intel MID platforms. |
1561 | 1561 | ||
1562 | config SERIAL_PCH_UART | 1562 | config SERIAL_PCH_UART |
1563 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART" | 1563 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART" |
1564 | depends on PCI | 1564 | depends on PCI |
1565 | select SERIAL_CORE | 1565 | select SERIAL_CORE |
1566 | help | 1566 | help |
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART | |||
1568 | which is an IOH(Input/Output Hub) for x86 embedded processor. | 1568 | which is an IOH(Input/Output Hub) for x86 embedded processor. |
1569 | Enabling PCH_DMA, this PCH UART works as DMA mode. | 1569 | Enabling PCH_DMA, this PCH UART works as DMA mode. |
1570 | 1570 | ||
1571 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ | 1571 | This driver also can be used for LAPIS Semiconductor IOH(Input/ |
1572 | Output Hub), ML7213 and ML7223. | 1572 | Output Hub), ML7213, ML7223 and ML7831. |
1573 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is | 1573 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is |
1574 | for MP(Media Phone) use. | 1574 | for MP(Media Phone) use and ML7831 IOH is for general purpose use. |
1575 | ML7213/ML7223 is a companion chip for Intel Atom E6xx series. | 1575 | ML7213/ML7223/ML7831 is a companion chip for Intel Atom E6xx series. |
1576 | ML7213/ML7223 is completely compatible with Intel EG20T PCH. | 1576 | ML7213/ML7223/ML7831 is completely compatible with Intel EG20T PCH. |
1577 | 1577 | ||
1578 | config SERIAL_MSM_SMD | 1578 | config SERIAL_MSM_SMD |
1579 | bool "Enable tty device interface for some SMD ports" | 1579 | bool "Enable tty device interface for some SMD ports" |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 4a0f86fa1e90..4c823f341d98 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) | |||
228 | if (rs485conf->flags & SER_RS485_ENABLED) { | 228 | if (rs485conf->flags & SER_RS485_ENABLED) { |
229 | dev_dbg(port->dev, "Setting UART to RS485\n"); | 229 | dev_dbg(port->dev, "Setting UART to RS485\n"); |
230 | atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; | 230 | atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; |
231 | if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) | 231 | if ((rs485conf->delay_rts_after_send) > 0) |
232 | UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); | 232 | UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); |
233 | mode |= ATMEL_US_USMODE_RS485; | 233 | mode |= ATMEL_US_USMODE_RS485; |
234 | } else { | 234 | } else { |
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) | |||
304 | 304 | ||
305 | if (atmel_port->rs485.flags & SER_RS485_ENABLED) { | 305 | if (atmel_port->rs485.flags & SER_RS485_ENABLED) { |
306 | dev_dbg(port->dev, "Setting UART to RS485\n"); | 306 | dev_dbg(port->dev, "Setting UART to RS485\n"); |
307 | if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) | 307 | if ((atmel_port->rs485.delay_rts_after_send) > 0) |
308 | UART_PUT_TTGR(port, | 308 | UART_PUT_TTGR(port, |
309 | atmel_port->rs485.delay_rts_after_send); | 309 | atmel_port->rs485.delay_rts_after_send); |
310 | mode |= ATMEL_US_USMODE_RS485; | 310 | mode |= ATMEL_US_USMODE_RS485; |
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1228 | 1228 | ||
1229 | if (atmel_port->rs485.flags & SER_RS485_ENABLED) { | 1229 | if (atmel_port->rs485.flags & SER_RS485_ENABLED) { |
1230 | dev_dbg(port->dev, "Setting UART to RS485\n"); | 1230 | dev_dbg(port->dev, "Setting UART to RS485\n"); |
1231 | if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) | 1231 | if ((atmel_port->rs485.delay_rts_after_send) > 0) |
1232 | UART_PUT_TTGR(port, | 1232 | UART_PUT_TTGR(port, |
1233 | atmel_port->rs485.delay_rts_after_send); | 1233 | atmel_port->rs485.delay_rts_after_send); |
1234 | mode |= ATMEL_US_USMODE_RS485; | 1234 | mode |= ATMEL_US_USMODE_RS485; |
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port, | |||
1447 | rs485conf->delay_rts_after_send = rs485_delay[1]; | 1447 | rs485conf->delay_rts_after_send = rs485_delay[1]; |
1448 | rs485conf->flags = 0; | 1448 | rs485conf->flags = 0; |
1449 | 1449 | ||
1450 | if (rs485conf->delay_rts_before_send == 0 && | ||
1451 | rs485conf->delay_rts_after_send == 0) { | ||
1452 | rs485conf->flags |= SER_RS485_RTS_ON_SEND; | ||
1453 | } else { | ||
1454 | if (rs485conf->delay_rts_before_send) | ||
1455 | rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND; | ||
1456 | if (rs485conf->delay_rts_after_send) | ||
1457 | rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; | ||
1458 | } | ||
1459 | |||
1460 | if (of_get_property(np, "rs485-rx-during-tx", NULL)) | 1450 | if (of_get_property(np, "rs485-rx-during-tx", NULL)) |
1461 | rs485conf->flags |= SER_RS485_RX_DURING_TX; | 1451 | rs485conf->flags |= SER_RS485_RX_DURING_TX; |
1462 | 1452 | ||
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index b7435043f2fe..1dfba7b779c8 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c | |||
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty, | |||
3234 | e100_disable_rx(info); | 3234 | e100_disable_rx(info); |
3235 | e100_enable_rx_irq(info); | 3235 | e100_enable_rx_irq(info); |
3236 | #endif | 3236 | #endif |
3237 | if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) && | 3237 | if (info->rs485.delay_rts_before_send > 0) |
3238 | (info->rs485.delay_rts_before_send > 0)) | 3238 | msleep(info->rs485.delay_rts_before_send); |
3239 | msleep(info->rs485.delay_rts_before_send); | ||
3240 | } | 3239 | } |
3241 | #endif /* CONFIG_ETRAX_RS485 */ | 3240 | #endif /* CONFIG_ETRAX_RS485 */ |
3242 | 3241 | ||
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty, | |||
3693 | 3692 | ||
3694 | rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; | 3693 | rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; |
3695 | rs485data.flags = 0; | 3694 | rs485data.flags = 0; |
3696 | if (rs485data.delay_rts_before_send != 0) | ||
3697 | rs485data.flags |= SER_RS485_RTS_BEFORE_SEND; | ||
3698 | else | ||
3699 | rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND); | ||
3700 | 3695 | ||
3701 | if (rs485ctrl.enabled) | 3696 | if (rs485ctrl.enabled) |
3702 | rs485data.flags |= SER_RS485_ENABLED; | 3697 | rs485data.flags |= SER_RS485_ENABLED; |
@@ -4531,7 +4526,6 @@ static int __init rs_init(void) | |||
4531 | /* Set sane defaults */ | 4526 | /* Set sane defaults */ |
4532 | info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); | 4527 | info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); |
4533 | info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; | 4528 | info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; |
4534 | info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND); | ||
4535 | info->rs485.delay_rts_before_send = 0; | 4529 | info->rs485.delay_rts_before_send = 0; |
4536 | info->rs485.flags &= ~(SER_RS485_ENABLED); | 4530 | info->rs485.flags &= ~(SER_RS485_ENABLED); |
4537 | #endif | 4531 | #endif |
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c index 286c386d9c46..e272d3919c67 100644 --- a/drivers/tty/serial/mfd.c +++ b/drivers/tty/serial/mfd.c | |||
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, | |||
884 | { | 884 | { |
885 | struct uart_hsu_port *up = | 885 | struct uart_hsu_port *up = |
886 | container_of(port, struct uart_hsu_port, port); | 886 | container_of(port, struct uart_hsu_port, port); |
887 | struct tty_struct *tty = port->state->port.tty; | ||
888 | unsigned char cval, fcr = 0; | 887 | unsigned char cval, fcr = 0; |
889 | unsigned long flags; | 888 | unsigned long flags; |
890 | unsigned int baud, quot; | 889 | unsigned int baud, quot; |
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, | |||
907 | } | 906 | } |
908 | 907 | ||
909 | /* CMSPAR isn't supported by this driver */ | 908 | /* CMSPAR isn't supported by this driver */ |
910 | if (tty) | 909 | termios->c_cflag &= ~CMSPAR; |
911 | tty->termios->c_cflag &= ~CMSPAR; | ||
912 | 910 | ||
913 | if (termios->c_cflag & CSTOPB) | 911 | if (termios->c_cflag & CSTOPB) |
914 | cval |= UART_LCR_STOP; | 912 | cval |= UART_LCR_STOP; |
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 21febef926aa..d6aba8c087e4 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. | 2 | *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. |
3 | * | 3 | * |
4 | *This program is free software; you can redistribute it and/or modify | 4 | *This program is free software; you can redistribute it and/or modify |
5 | *it under the terms of the GNU General Public License as published by | 5 | *it under the terms of the GNU General Public License as published by |
@@ -46,8 +46,8 @@ enum { | |||
46 | 46 | ||
47 | /* Set the max number of UART port | 47 | /* Set the max number of UART port |
48 | * Intel EG20T PCH: 4 port | 48 | * Intel EG20T PCH: 4 port |
49 | * OKI SEMICONDUCTOR ML7213 IOH: 3 port | 49 | * LAPIS Semiconductor ML7213 IOH: 3 port |
50 | * OKI SEMICONDUCTOR ML7223 IOH: 2 port | 50 | * LAPIS Semiconductor ML7223 IOH: 2 port |
51 | */ | 51 | */ |
52 | #define PCH_UART_NR 4 | 52 | #define PCH_UART_NR 4 |
53 | 53 | ||
@@ -258,6 +258,8 @@ enum pch_uart_num_t { | |||
258 | pch_ml7213_uart2, | 258 | pch_ml7213_uart2, |
259 | pch_ml7223_uart0, | 259 | pch_ml7223_uart0, |
260 | pch_ml7223_uart1, | 260 | pch_ml7223_uart1, |
261 | pch_ml7831_uart0, | ||
262 | pch_ml7831_uart1, | ||
261 | }; | 263 | }; |
262 | 264 | ||
263 | static struct pch_uart_driver_data drv_dat[] = { | 265 | static struct pch_uart_driver_data drv_dat[] = { |
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = { | |||
270 | [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, | 272 | [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, |
271 | [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, | 273 | [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, |
272 | [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, | 274 | [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, |
275 | [pch_ml7831_uart0] = {PCH_UART_8LINE, 0}, | ||
276 | [pch_ml7831_uart1] = {PCH_UART_2LINE, 1}, | ||
273 | }; | 277 | }; |
274 | 278 | ||
275 | static unsigned int default_baud = 9600; | 279 | static unsigned int default_baud = 9600; |
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port) | |||
628 | dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", | 632 | dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", |
629 | __func__); | 633 | __func__); |
630 | dma_release_channel(priv->chan_tx); | 634 | dma_release_channel(priv->chan_tx); |
635 | priv->chan_tx = NULL; | ||
631 | return; | 636 | return; |
632 | } | 637 | } |
633 | 638 | ||
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port) | |||
1215 | dev_err(priv->port.dev, | 1220 | dev_err(priv->port.dev, |
1216 | "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); | 1221 | "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); |
1217 | 1222 | ||
1218 | if (priv->use_dma_flag) | 1223 | pch_free_dma(port); |
1219 | pch_free_dma(port); | ||
1220 | 1224 | ||
1221 | free_irq(priv->port.irq, priv); | 1225 | free_irq(priv->port.irq, priv); |
1222 | } | 1226 | } |
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port, | |||
1280 | if (rtn) | 1284 | if (rtn) |
1281 | goto out; | 1285 | goto out; |
1282 | 1286 | ||
1287 | pch_uart_set_mctrl(&priv->port, priv->port.mctrl); | ||
1283 | /* Don't rewrite B0 */ | 1288 | /* Don't rewrite B0 */ |
1284 | if (tty_termios_baud_rate(termios)) | 1289 | if (tty_termios_baud_rate(termios)) |
1285 | tty_termios_encode_baud_rate(termios, baud, baud); | 1290 | tty_termios_encode_baud_rate(termios, baud, baud); |
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = { | |||
1552 | .driver_data = pch_ml7223_uart0}, | 1557 | .driver_data = pch_ml7223_uart0}, |
1553 | {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), | 1558 | {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), |
1554 | .driver_data = pch_ml7223_uart1}, | 1559 | .driver_data = pch_ml7223_uart1}, |
1560 | {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811), | ||
1561 | .driver_data = pch_ml7831_uart0}, | ||
1562 | {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812), | ||
1563 | .driver_data = pch_ml7831_uart1}, | ||
1555 | {0,}, | 1564 | {0,}, |
1556 | }; | 1565 | }; |
1557 | 1566 | ||
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 512c49f98e85..8e0924f55446 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include <linux/kmod.h> | 37 | #include <linux/kmod.h> |
38 | #include <linux/nsproxy.h> | 38 | #include <linux/nsproxy.h> |
39 | #include <linux/ratelimit.h> | ||
39 | 40 | ||
40 | /* | 41 | /* |
41 | * This guards the refcounted line discipline lists. The lock | 42 | * This guards the refcounted line discipline lists. The lock |
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty) | |||
547 | /** | 548 | /** |
548 | * tty_ldisc_wait_idle - wait for the ldisc to become idle | 549 | * tty_ldisc_wait_idle - wait for the ldisc to become idle |
549 | * @tty: tty to wait for | 550 | * @tty: tty to wait for |
551 | * @timeout: for how long to wait at most | ||
550 | * | 552 | * |
551 | * Wait for the line discipline to become idle. The discipline must | 553 | * Wait for the line discipline to become idle. The discipline must |
552 | * have been halted for this to guarantee it remains idle. | 554 | * have been halted for this to guarantee it remains idle. |
553 | */ | 555 | */ |
554 | static int tty_ldisc_wait_idle(struct tty_struct *tty) | 556 | static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout) |
555 | { | 557 | { |
556 | int ret; | 558 | long ret; |
557 | ret = wait_event_timeout(tty_ldisc_idle, | 559 | ret = wait_event_timeout(tty_ldisc_idle, |
558 | atomic_read(&tty->ldisc->users) == 1, 5 * HZ); | 560 | atomic_read(&tty->ldisc->users) == 1, timeout); |
559 | if (ret < 0) | 561 | if (ret < 0) |
560 | return ret; | 562 | return ret; |
561 | return ret > 0 ? 0 : -EBUSY; | 563 | return ret > 0 ? 0 : -EBUSY; |
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
665 | 667 | ||
666 | tty_ldisc_flush_works(tty); | 668 | tty_ldisc_flush_works(tty); |
667 | 669 | ||
668 | retval = tty_ldisc_wait_idle(tty); | 670 | retval = tty_ldisc_wait_idle(tty, 5 * HZ); |
669 | 671 | ||
670 | tty_lock(); | 672 | tty_lock(); |
671 | mutex_lock(&tty->ldisc_mutex); | 673 | mutex_lock(&tty->ldisc_mutex); |
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc) | |||
762 | if (IS_ERR(ld)) | 764 | if (IS_ERR(ld)) |
763 | return -1; | 765 | return -1; |
764 | 766 | ||
765 | WARN_ON_ONCE(tty_ldisc_wait_idle(tty)); | ||
766 | |||
767 | tty_ldisc_close(tty, tty->ldisc); | 767 | tty_ldisc_close(tty, tty->ldisc); |
768 | tty_ldisc_put(tty->ldisc); | 768 | tty_ldisc_put(tty->ldisc); |
769 | tty->ldisc = NULL; | 769 | tty->ldisc = NULL; |
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty) | |||
838 | tty_unlock(); | 838 | tty_unlock(); |
839 | cancel_work_sync(&tty->buf.work); | 839 | cancel_work_sync(&tty->buf.work); |
840 | mutex_unlock(&tty->ldisc_mutex); | 840 | mutex_unlock(&tty->ldisc_mutex); |
841 | 841 | retry: | |
842 | tty_lock(); | 842 | tty_lock(); |
843 | mutex_lock(&tty->ldisc_mutex); | 843 | mutex_lock(&tty->ldisc_mutex); |
844 | 844 | ||
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty) | |||
847 | it means auditing a lot of other paths so this is | 847 | it means auditing a lot of other paths so this is |
848 | a FIXME */ | 848 | a FIXME */ |
849 | if (tty->ldisc) { /* Not yet closed */ | 849 | if (tty->ldisc) { /* Not yet closed */ |
850 | if (atomic_read(&tty->ldisc->users) != 1) { | ||
851 | char cur_n[TASK_COMM_LEN], tty_n[64]; | ||
852 | long timeout = 3 * HZ; | ||
853 | tty_unlock(); | ||
854 | |||
855 | while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) { | ||
856 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
857 | printk_ratelimited(KERN_WARNING | ||
858 | "%s: waiting (%s) for %s took too long, but we keep waiting...\n", | ||
859 | __func__, get_task_comm(cur_n, current), | ||
860 | tty_name(tty, tty_n)); | ||
861 | } | ||
862 | mutex_unlock(&tty->ldisc_mutex); | ||
863 | goto retry; | ||
864 | } | ||
865 | |||
850 | if (reset == 0) { | 866 | if (reset == 0) { |
851 | 867 | ||
852 | if (!tty_ldisc_reinit(tty, tty->termios->c_line)) | 868 | if (!tty_ldisc_reinit(tty, tty->termios->c_line)) |
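With the timeout now a parameter, the hangup path gains a retry loop: it first waits a bounded 3 * HZ for the ldisc user count to drop to one, and on timeout keeps waiting indefinitely while emitting a rate-limited warning, re-taking the locks once the ldisc is idle. The standalone sketch below models only that bounded-then-unbounded wait shape; wait_idle() is a stand-in for tty_ldisc_wait_idle() that returns 0 when idle and -EBUSY on timeout.

/* Bounded first wait, then wait forever with a warning on each miss. */
#include <errno.h>
#include <stdio.h>

#define MAX_TIMEOUT (-1L)	/* stand-in for MAX_SCHEDULE_TIMEOUT */

static int attempts;

static int wait_idle(long timeout)
{
	(void)timeout;
	return attempts++ < 2 ? -EBUSY : 0;	/* pretend it takes a while */
}

int main(void)
{
	long timeout = 3;	/* seconds here; the patch uses 3 * HZ jiffies */

	while (wait_idle(timeout) == -EBUSY) {
		timeout = MAX_TIMEOUT;	/* after the first miss, wait without bound */
		fprintf(stderr, "still waiting for line discipline to idle...\n");
	}
	puts("idle, safe to tear down");
	return 0;
}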
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 6960715c5063..e8c564a53346 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm) | |||
539 | { | 539 | { |
540 | int i; | 540 | int i; |
541 | 541 | ||
542 | mutex_lock(&open_mutex); | ||
543 | if (acm->dev) { | 542 | if (acm->dev) { |
544 | usb_autopm_get_interface(acm->control); | 543 | usb_autopm_get_interface(acm->control); |
545 | acm_set_control(acm, acm->ctrlout = 0); | 544 | acm_set_control(acm, acm->ctrlout = 0); |
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm) | |||
551 | acm->control->needs_remote_wakeup = 0; | 550 | acm->control->needs_remote_wakeup = 0; |
552 | usb_autopm_put_interface(acm->control); | 551 | usb_autopm_put_interface(acm->control); |
553 | } | 552 | } |
554 | mutex_unlock(&open_mutex); | ||
555 | } | 553 | } |
556 | 554 | ||
557 | static void acm_tty_hangup(struct tty_struct *tty) | 555 | static void acm_tty_hangup(struct tty_struct *tty) |
558 | { | 556 | { |
559 | struct acm *acm = tty->driver_data; | 557 | struct acm *acm = tty->driver_data; |
560 | tty_port_hangup(&acm->port); | 558 | tty_port_hangup(&acm->port); |
559 | mutex_lock(&open_mutex); | ||
561 | acm_port_down(acm); | 560 | acm_port_down(acm); |
561 | mutex_unlock(&open_mutex); | ||
562 | } | 562 | } |
563 | 563 | ||
564 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) | 564 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) |
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) | |||
569 | shutdown */ | 569 | shutdown */ |
570 | if (!acm) | 570 | if (!acm) |
571 | return; | 571 | return; |
572 | |||
573 | mutex_lock(&open_mutex); | ||
572 | if (tty_port_close_start(&acm->port, tty, filp) == 0) { | 574 | if (tty_port_close_start(&acm->port, tty, filp) == 0) { |
573 | mutex_lock(&open_mutex); | ||
574 | if (!acm->dev) { | 575 | if (!acm->dev) { |
575 | tty_port_tty_set(&acm->port, NULL); | 576 | tty_port_tty_set(&acm->port, NULL); |
576 | acm_tty_unregister(acm); | 577 | acm_tty_unregister(acm); |
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) | |||
582 | acm_port_down(acm); | 583 | acm_port_down(acm); |
583 | tty_port_close_end(&acm->port, tty); | 584 | tty_port_close_end(&acm->port, tty); |
584 | tty_port_tty_set(&acm->port, NULL); | 585 | tty_port_tty_set(&acm->port, NULL); |
586 | mutex_unlock(&open_mutex); | ||
585 | } | 587 | } |
586 | 588 | ||
587 | static int acm_tty_write(struct tty_struct *tty, | 589 | static int acm_tty_write(struct tty_struct *tty, |
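The cdc-acm hunks move open_mutex out of acm_port_down() and into its callers, so hangup and close each hold the mutex around the whole teardown instead of the helper re-taking it in the middle. A standalone sketch of that caller-holds-lock convention, with illustrative names rather than the driver's:

/* The helper assumes the lock is held; each caller owns one critical section. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;

/* must be called with open_lock held */
static void port_down_locked(void)
{
	puts("tearing down port state");
}

static void hangup(void)
{
	pthread_mutex_lock(&open_lock);
	port_down_locked();
	pthread_mutex_unlock(&open_lock);
}

static void close_port(void)
{
	pthread_mutex_lock(&open_lock);
	/* check device presence, unregister if gone, then tear down */
	port_down_locked();
	pthread_mutex_unlock(&open_lock);
}

int main(void)
{
	hangup();
	close_port();
	return 0;
}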
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 96f05b29c9ad..79781461eec9 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
813 | USB_PORT_FEAT_C_PORT_LINK_STATE); | 813 | USB_PORT_FEAT_C_PORT_LINK_STATE); |
814 | } | 814 | } |
815 | 815 | ||
816 | if ((portchange & USB_PORT_STAT_C_BH_RESET) && | ||
817 | hub_is_superspeed(hub->hdev)) { | ||
818 | need_debounce_delay = true; | ||
819 | clear_port_feature(hub->hdev, port1, | ||
820 | USB_PORT_FEAT_C_BH_PORT_RESET); | ||
821 | } | ||
816 | /* We can forget about a "removed" device when there's a | 822 | /* We can forget about a "removed" device when there's a |
817 | * physical disconnect or the connect status changes. | 823 | * physical disconnect or the connect status changes. |
818 | */ | 824 | */ |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d6a8d8269bfb..ecf12e15a7ef 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
50 | /* Logitech Webcam B/C500 */ | 50 | /* Logitech Webcam B/C500 */ |
51 | { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, |
52 | 52 | ||
53 | /* Logitech Webcam C600 */ | ||
54 | { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
55 | |||
53 | /* Logitech Webcam Pro 9000 */ | 56 | /* Logitech Webcam Pro 9000 */ |
54 | { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, | 57 | { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, |
55 | 58 | ||
59 | /* Logitech Webcam C905 */ | ||
60 | { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
61 | |||
62 | /* Logitech Webcam C210 */ | ||
63 | { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
64 | |||
65 | /* Logitech Webcam C260 */ | ||
66 | { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
67 | |||
56 | /* Logitech Webcam C310 */ | 68 | /* Logitech Webcam C310 */ |
57 | { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, | 69 | { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, |
58 | 70 | ||
71 | /* Logitech Webcam C910 */ | ||
72 | { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
73 | |||
74 | /* Logitech Webcam C160 */ | ||
75 | { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
76 | |||
59 | /* Logitech Webcam C270 */ | 77 | /* Logitech Webcam C270 */ |
60 | { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, | 78 | { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, |
61 | 79 | ||
80 | /* Logitech Quickcam Pro 9000 */ | ||
81 | { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
82 | |||
83 | /* Logitech Quickcam E3500 */ | ||
84 | { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
85 | |||
86 | /* Logitech Quickcam Vision Pro */ | ||
87 | { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
88 | |||
62 | /* Logitech Harmony 700-series */ | 89 | /* Logitech Harmony 700-series */ |
63 | { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, | 90 | { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, |
64 | 91 | ||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index fa824cfdd2eb..25dbd8614e72 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) | |||
1284 | int ret; | 1284 | int ret; |
1285 | 1285 | ||
1286 | dep->endpoint.maxpacket = 1024; | 1286 | dep->endpoint.maxpacket = 1024; |
1287 | dep->endpoint.max_streams = 15; | ||
1287 | dep->endpoint.ops = &dwc3_gadget_ep_ops; | 1288 | dep->endpoint.ops = &dwc3_gadget_ep_ops; |
1288 | list_add_tail(&dep->endpoint.ep_list, | 1289 | list_add_tail(&dep->endpoint.ep_list, |
1289 | &dwc->gadget.ep_list); | 1290 | &dwc->gadget.ep_list); |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index b21cd376c11a..23a447373c51 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -469,7 +469,7 @@ config USB_LANGWELL | |||
469 | gadget drivers to also be dynamically linked. | 469 | gadget drivers to also be dynamically linked. |
470 | 470 | ||
471 | config USB_EG20T | 471 | config USB_EG20T |
472 | tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" | 472 | tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" |
473 | depends on PCI | 473 | depends on PCI |
474 | select USB_GADGET_DUALSPEED | 474 | select USB_GADGET_DUALSPEED |
475 | help | 475 | help |
@@ -485,10 +485,11 @@ config USB_EG20T | |||
485 | This driver dose not support interrupt transfer or isochronous | 485 | This driver dose not support interrupt transfer or isochronous |
486 | transfer modes. | 486 | transfer modes. |
487 | 487 | ||
488 | This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is | 488 | This driver also can be used for LAPIS Semiconductor's ML7213 which is |
489 | for IVI(In-Vehicle Infotainment) use. | 489 | for IVI(In-Vehicle Infotainment) use. |
490 | ML7213 is companion chip for Intel Atom E6xx series. | 490 | ML7831 is for general purpose use. |
491 | ML7213 is completely compatible for Intel EG20T PCH. | 491 | ML7213/ML7831 is companion chip for Intel Atom E6xx series. |
492 | ML7213/ML7831 is completely compatible for Intel EG20T PCH. | ||
492 | 493 | ||
493 | config USB_CI13XXX_MSM | 494 | config USB_CI13XXX_MSM |
494 | tristate "MIPS USB CI13xxx for MSM" | 495 | tristate "MIPS USB CI13xxx for MSM" |
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c index 4eedfe557154..1fc612914c52 100644 --- a/drivers/usb/gadget/ci13xxx_msm.c +++ b/drivers/usb/gadget/ci13xxx_msm.c | |||
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void) | |||
122 | return platform_driver_register(&ci13xxx_msm_driver); | 122 | return platform_driver_register(&ci13xxx_msm_driver); |
123 | } | 123 | } |
124 | module_init(ci13xxx_msm_init); | 124 | module_init(ci13xxx_msm_init); |
125 | |||
126 | MODULE_LICENSE("GPL v2"); | ||
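The ci13xxx_msm change matters because a loadable module without a MODULE_LICENSE string taints the kernel on load and is refused access to EXPORT_SYMBOL_GPL symbols. Minimal module boilerplate for reference (names are illustrative):

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init demo_init(void)
	{
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL v2");	/* without this, loading taints the kernel */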
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c index 83428f56253b..9a0c3979ff43 100644 --- a/drivers/usb/gadget/ci13xxx_udc.c +++ b/drivers/usb/gadget/ci13xxx_udc.c | |||
@@ -71,6 +71,9 @@ | |||
71 | /****************************************************************************** | 71 | /****************************************************************************** |
72 | * DEFINE | 72 | * DEFINE |
73 | *****************************************************************************/ | 73 | *****************************************************************************/ |
74 | |||
75 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | ||
76 | |||
74 | /* ctrl register bank access */ | 77 | /* ctrl register bank access */ |
75 | static DEFINE_SPINLOCK(udc_lock); | 78 | static DEFINE_SPINLOCK(udc_lock); |
76 | 79 | ||
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) | |||
1434 | return -EALREADY; | 1437 | return -EALREADY; |
1435 | 1438 | ||
1436 | mReq->req.status = -EALREADY; | 1439 | mReq->req.status = -EALREADY; |
1437 | if (length && !mReq->req.dma) { | 1440 | if (length && mReq->req.dma == DMA_ADDR_INVALID) { |
1438 | mReq->req.dma = \ | 1441 | mReq->req.dma = \ |
1439 | dma_map_single(mEp->device, mReq->req.buf, | 1442 | dma_map_single(mEp->device, mReq->req.buf, |
1440 | length, mEp->dir ? DMA_TO_DEVICE : | 1443 | length, mEp->dir ? DMA_TO_DEVICE : |
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) | |||
1453 | dma_unmap_single(mEp->device, mReq->req.dma, | 1456 | dma_unmap_single(mEp->device, mReq->req.dma, |
1454 | length, mEp->dir ? DMA_TO_DEVICE : | 1457 | length, mEp->dir ? DMA_TO_DEVICE : |
1455 | DMA_FROM_DEVICE); | 1458 | DMA_FROM_DEVICE); |
1456 | mReq->req.dma = 0; | 1459 | mReq->req.dma = DMA_ADDR_INVALID; |
1457 | mReq->map = 0; | 1460 | mReq->map = 0; |
1458 | } | 1461 | } |
1459 | return -ENOMEM; | 1462 | return -ENOMEM; |
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) | |||
1549 | if (mReq->map) { | 1552 | if (mReq->map) { |
1550 | dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, | 1553 | dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, |
1551 | mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 1554 | mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
1552 | mReq->req.dma = 0; | 1555 | mReq->req.dma = DMA_ADDR_INVALID; |
1553 | mReq->map = 0; | 1556 | mReq->map = 0; |
1554 | } | 1557 | } |
1555 | 1558 | ||
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock) | |||
1610 | * @gadget: gadget | 1613 | * @gadget: gadget |
1611 | * | 1614 | * |
1612 | * This function returns an error code | 1615 | * This function returns an error code |
1613 | * Caller must hold lock | ||
1614 | */ | 1616 | */ |
1615 | static int _gadget_stop_activity(struct usb_gadget *gadget) | 1617 | static int _gadget_stop_activity(struct usb_gadget *gadget) |
1616 | { | 1618 | { |
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) | |||
2189 | mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); | 2191 | mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); |
2190 | if (mReq != NULL) { | 2192 | if (mReq != NULL) { |
2191 | INIT_LIST_HEAD(&mReq->queue); | 2193 | INIT_LIST_HEAD(&mReq->queue); |
2194 | mReq->req.dma = DMA_ADDR_INVALID; | ||
2192 | 2195 | ||
2193 | mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, | 2196 | mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, |
2194 | &mReq->dma); | 2197 | &mReq->dma); |
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) | |||
2328 | if (mReq->map) { | 2331 | if (mReq->map) { |
2329 | dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, | 2332 | dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, |
2330 | mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 2333 | mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
2331 | mReq->req.dma = 0; | 2334 | mReq->req.dma = DMA_ADDR_INVALID; |
2332 | mReq->map = 0; | 2335 | mReq->map = 0; |
2333 | } | 2336 | } |
2334 | req->status = -ECONNRESET; | 2337 | req->status = -ECONNRESET; |
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget) | |||
2500 | spin_lock_irqsave(udc->lock, flags); | 2503 | spin_lock_irqsave(udc->lock, flags); |
2501 | if (!udc->remote_wakeup) { | 2504 | if (!udc->remote_wakeup) { |
2502 | ret = -EOPNOTSUPP; | 2505 | ret = -EOPNOTSUPP; |
2503 | dbg_trace("remote wakeup feature is not enabled\n"); | 2506 | trace("remote wakeup feature is not enabled\n"); |
2504 | goto out; | 2507 | goto out; |
2505 | } | 2508 | } |
2506 | if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { | 2509 | if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { |
2507 | ret = -EINVAL; | 2510 | ret = -EINVAL; |
2508 | dbg_trace("port is not suspended\n"); | 2511 | trace("port is not suspended\n"); |
2509 | goto out; | 2512 | goto out; |
2510 | } | 2513 | } |
2511 | hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); | 2514 | hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); |
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver) | |||
2703 | if (udc->udc_driver->notify_event) | 2706 | if (udc->udc_driver->notify_event) |
2704 | udc->udc_driver->notify_event(udc, | 2707 | udc->udc_driver->notify_event(udc, |
2705 | CI13XXX_CONTROLLER_STOPPED_EVENT); | 2708 | CI13XXX_CONTROLLER_STOPPED_EVENT); |
2709 | spin_unlock_irqrestore(udc->lock, flags); | ||
2706 | _gadget_stop_activity(&udc->gadget); | 2710 | _gadget_stop_activity(&udc->gadget); |
2711 | spin_lock_irqsave(udc->lock, flags); | ||
2707 | pm_runtime_put(&udc->gadget.dev); | 2712 | pm_runtime_put(&udc->gadget.dev); |
2708 | } | 2713 | } |
2709 | 2714 | ||
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev, | |||
2850 | struct ci13xxx *udc; | 2855 | struct ci13xxx *udc; |
2851 | int retval = 0; | 2856 | int retval = 0; |
2852 | 2857 | ||
2853 | trace("%p, %p, %p", dev, regs, name); | 2858 | trace("%p, %p, %p", dev, regs, driver->name); |
2854 | 2859 | ||
2855 | if (dev == NULL || regs == NULL || driver == NULL || | 2860 | if (dev == NULL || regs == NULL || driver == NULL || |
2856 | driver->name == NULL) | 2861 | driver->name == NULL) |
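The DMA_ADDR_INVALID sentinel in the ci13xxx_udc hunks exists because 0 can be a legitimate bus address on some platforms, so "req.dma == 0" cannot reliably mean "not mapped". A small hypothetical map/unmap pair showing the intended use of the sentinel; the request structure and helper names are made up.

	#include <linux/dma-mapping.h>

	#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

	struct my_req {			/* hypothetical request wrapper */
		void		*buf;
		size_t		len;
		dma_addr_t	dma;	/* DMA_ADDR_INVALID while unmapped */
	};

	static int req_map(struct device *dev, struct my_req *r,
			   enum dma_data_direction dir)
	{
		if (r->dma != DMA_ADDR_INVALID)		/* already mapped */
			return 0;
		r->dma = dma_map_single(dev, r->buf, r->len, dir);
		return dma_mapping_error(dev, r->dma) ? -ENOMEM : 0;
	}

	static void req_unmap(struct device *dev, struct my_req *r,
			      enum dma_data_direction dir)
	{
		if (r->dma == DMA_ADDR_INVALID)		/* nothing to undo */
			return;
		dma_unmap_single(dev, r->dma, r->len, dir);
		r->dma = DMA_ADDR_INVALID;		/* back to the unmapped state */
	}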
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 52583a235330..c39d58860fa0 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c | |||
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f, | |||
624 | if (ctrl->bRequestType != | 624 | if (ctrl->bRequestType != |
625 | (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) | 625 | (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) |
626 | break; | 626 | break; |
627 | if (w_index != fsg->interface_number || w_value != 0) | 627 | if (w_index != fsg->interface_number || w_value != 0 || |
628 | w_length != 0) | ||
628 | return -EDOM; | 629 | return -EDOM; |
629 | 630 | ||
630 | /* | 631 | /* |
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f, | |||
639 | if (ctrl->bRequestType != | 640 | if (ctrl->bRequestType != |
640 | (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) | 641 | (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) |
641 | break; | 642 | break; |
642 | if (w_index != fsg->interface_number || w_value != 0) | 643 | if (w_index != fsg->interface_number || w_value != 0 || |
644 | w_length != 1) | ||
643 | return -EDOM; | 645 | return -EDOM; |
644 | VDBG(fsg, "get max LUN\n"); | 646 | VDBG(fsg, "get max LUN\n"); |
645 | *(u8 *)req->buf = fsg->common->nluns - 1; | 647 | *(u8 *)req->buf = fsg->common->nluns - 1; |
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c index 67b222908cf9..3797b3d6c622 100644 --- a/drivers/usb/gadget/f_midi.c +++ b/drivers/usb/gadget/f_midi.c | |||
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req); | |||
95 | 95 | ||
96 | DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); | 96 | DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); |
97 | DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); | 97 | DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); |
98 | DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16); | ||
99 | DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); | 98 | DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); |
100 | 99 | ||
101 | /* B.3.1 Standard AC Interface Descriptor */ | 100 | /* B.3.1 Standard AC Interface Descriptor */ |
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = { | |||
140 | /* .wTotalLength = DYNAMIC */ | 139 | /* .wTotalLength = DYNAMIC */ |
141 | }; | 140 | }; |
142 | 141 | ||
143 | /* B.4.3 Embedded MIDI IN Jack Descriptor */ | ||
144 | static struct usb_midi_in_jack_descriptor jack_in_emb_desc = { | ||
145 | .bLength = USB_DT_MIDI_IN_SIZE, | ||
146 | .bDescriptorType = USB_DT_CS_INTERFACE, | ||
147 | .bDescriptorSubtype = USB_MS_MIDI_IN_JACK, | ||
148 | .bJackType = USB_MS_EMBEDDED, | ||
149 | /* .bJackID = DYNAMIC */ | ||
150 | }; | ||
151 | |||
152 | /* B.4.4 Embedded MIDI OUT Jack Descriptor */ | ||
153 | static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = { | ||
154 | /* .bLength = DYNAMIC */ | ||
155 | .bDescriptorType = USB_DT_CS_INTERFACE, | ||
156 | .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK, | ||
157 | .bJackType = USB_MS_EMBEDDED, | ||
158 | /* .bJackID = DYNAMIC */ | ||
159 | /* .bNrInputPins = DYNAMIC */ | ||
160 | /* .pins = DYNAMIC */ | ||
161 | }; | ||
162 | |||
163 | /* B.5.1 Standard Bulk OUT Endpoint Descriptor */ | 142 | /* B.5.1 Standard Bulk OUT Endpoint Descriptor */ |
164 | static struct usb_endpoint_descriptor bulk_out_desc = { | 143 | static struct usb_endpoint_descriptor bulk_out_desc = { |
165 | .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, | 144 | .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, |
@@ -758,9 +737,11 @@ fail: | |||
758 | static int __init | 737 | static int __init |
759 | f_midi_bind(struct usb_configuration *c, struct usb_function *f) | 738 | f_midi_bind(struct usb_configuration *c, struct usb_function *f) |
760 | { | 739 | { |
761 | struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12]; | 740 | struct usb_descriptor_header **midi_function; |
762 | struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; | 741 | struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; |
742 | struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS]; | ||
763 | struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; | 743 | struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; |
744 | struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS]; | ||
764 | struct usb_composite_dev *cdev = c->cdev; | 745 | struct usb_composite_dev *cdev = c->cdev; |
765 | struct f_midi *midi = func_to_midi(f); | 746 | struct f_midi *midi = func_to_midi(f); |
766 | int status, n, jack = 1, i = 0; | 747 | int status, n, jack = 1, i = 0; |
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) | |||
798 | goto fail; | 779 | goto fail; |
799 | midi->out_ep->driver_data = cdev; /* claim */ | 780 | midi->out_ep->driver_data = cdev; /* claim */ |
800 | 781 | ||
782 | /* allocate temporary function list */ | ||
783 | midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function), | ||
784 | GFP_KERNEL); | ||
785 | if (!midi_function) { | ||
786 | status = -ENOMEM; | ||
787 | goto fail; | ||
788 | } | ||
789 | |||
801 | /* | 790 | /* |
802 | * construct the function's descriptor set. As the number of | 791 | * construct the function's descriptor set. As the number of |
803 | * input and output MIDI ports is configurable, we have to do | 792 | * input and output MIDI ports is configurable, we have to do |
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) | |||
811 | 800 | ||
812 | /* calculate the header's wTotalLength */ | 801 | /* calculate the header's wTotalLength */ |
813 | n = USB_DT_MS_HEADER_SIZE | 802 | n = USB_DT_MS_HEADER_SIZE |
814 | + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE | 803 | + (midi->in_ports + midi->out_ports) * |
815 | + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1); | 804 | (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1)); |
816 | ms_header_desc.wTotalLength = cpu_to_le16(n); | 805 | ms_header_desc.wTotalLength = cpu_to_le16(n); |
817 | 806 | ||
818 | midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; | 807 | midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; |
819 | 808 | ||
820 | /* we have one embedded IN jack */ | 809 | /* configure the external IN jacks, each linked to an embedded OUT jack */ |
821 | jack_in_emb_desc.bJackID = jack++; | ||
822 | midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc; | ||
823 | |||
824 | /* and a dynamic amount of external IN jacks */ | ||
825 | for (n = 0; n < midi->in_ports; n++) { | ||
826 | struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n]; | ||
827 | |||
828 | ext->bLength = USB_DT_MIDI_IN_SIZE; | ||
829 | ext->bDescriptorType = USB_DT_CS_INTERFACE; | ||
830 | ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; | ||
831 | ext->bJackType = USB_MS_EXTERNAL; | ||
832 | ext->bJackID = jack++; | ||
833 | ext->iJack = 0; | ||
834 | |||
835 | midi_function[i++] = (struct usb_descriptor_header *) ext; | ||
836 | } | ||
837 | |||
838 | /* one embedded OUT jack ... */ | ||
839 | jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports); | ||
840 | jack_out_emb_desc.bJackID = jack++; | ||
841 | jack_out_emb_desc.bNrInputPins = midi->in_ports; | ||
842 | /* ... which referencess all external IN jacks */ | ||
843 | for (n = 0; n < midi->in_ports; n++) { | 810 | for (n = 0; n < midi->in_ports; n++) { |
844 | jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID; | 811 | struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n]; |
845 | jack_out_emb_desc.pins[n].baSourcePin = 1; | 812 | struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n]; |
813 | |||
814 | in_ext->bLength = USB_DT_MIDI_IN_SIZE; | ||
815 | in_ext->bDescriptorType = USB_DT_CS_INTERFACE; | ||
816 | in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; | ||
817 | in_ext->bJackType = USB_MS_EXTERNAL; | ||
818 | in_ext->bJackID = jack++; | ||
819 | in_ext->iJack = 0; | ||
820 | midi_function[i++] = (struct usb_descriptor_header *) in_ext; | ||
821 | |||
822 | out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1); | ||
823 | out_emb->bDescriptorType = USB_DT_CS_INTERFACE; | ||
824 | out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; | ||
825 | out_emb->bJackType = USB_MS_EMBEDDED; | ||
826 | out_emb->bJackID = jack++; | ||
827 | out_emb->bNrInputPins = 1; | ||
828 | out_emb->pins[0].baSourcePin = 1; | ||
829 | out_emb->pins[0].baSourceID = in_ext->bJackID; | ||
830 | out_emb->iJack = 0; | ||
831 | midi_function[i++] = (struct usb_descriptor_header *) out_emb; | ||
832 | |||
833 | /* link it to the endpoint */ | ||
834 | ms_in_desc.baAssocJackID[n] = out_emb->bJackID; | ||
846 | } | 835 | } |
847 | 836 | ||
848 | midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc; | 837 | /* configure the external OUT jacks, each linked to an embedded IN jack */ |
849 | |||
850 | /* and multiple external OUT jacks ... */ | ||
851 | for (n = 0; n < midi->out_ports; n++) { | 838 | for (n = 0; n < midi->out_ports; n++) { |
852 | struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n]; | 839 | struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n]; |
853 | int m; | 840 | struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n]; |
854 | 841 | ||
855 | ext->bLength = USB_DT_MIDI_OUT_SIZE(1); | 842 | in_emb->bLength = USB_DT_MIDI_IN_SIZE; |
856 | ext->bDescriptorType = USB_DT_CS_INTERFACE; | 843 | in_emb->bDescriptorType = USB_DT_CS_INTERFACE; |
857 | ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; | 844 | in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; |
858 | ext->bJackType = USB_MS_EXTERNAL; | 845 | in_emb->bJackType = USB_MS_EMBEDDED; |
859 | ext->bJackID = jack++; | 846 | in_emb->bJackID = jack++; |
860 | ext->bNrInputPins = 1; | 847 | in_emb->iJack = 0; |
861 | ext->iJack = 0; | 848 | midi_function[i++] = (struct usb_descriptor_header *) in_emb; |
862 | /* ... which all reference the same embedded IN jack */ | 849 | |
863 | for (m = 0; m < midi->out_ports; m++) { | 850 | out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1); |
864 | ext->pins[m].baSourceID = jack_in_emb_desc.bJackID; | 851 | out_ext->bDescriptorType = USB_DT_CS_INTERFACE; |
865 | ext->pins[m].baSourcePin = 1; | 852 | out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; |
866 | } | 853 | out_ext->bJackType = USB_MS_EXTERNAL; |
867 | | 854 | out_ext->bJackID = jack++; |
868 | midi_function[i++] = (struct usb_descriptor_header *) ext; | 855 | out_ext->bNrInputPins = 1; |
856 | out_ext->iJack = 0; | ||
857 | out_ext->pins[0].baSourceID = in_emb->bJackID; | ||
858 | out_ext->pins[0].baSourcePin = 1; | ||
859 | midi_function[i++] = (struct usb_descriptor_header *) out_ext; | ||
860 | |||
861 | /* link it to the endpoint */ | ||
862 | ms_out_desc.baAssocJackID[n] = in_emb->bJackID; | ||
869 | } | 863 | } |
870 | 864 | ||
871 | /* configure the endpoint descriptors ... */ | 865 | /* configure the endpoint descriptors ... */ |
872 | ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); | 866 | ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); |
873 | ms_out_desc.bNumEmbMIDIJack = midi->in_ports; | 867 | ms_out_desc.bNumEmbMIDIJack = midi->in_ports; |
874 | for (n = 0; n < midi->in_ports; n++) | ||
875 | ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID; | ||
876 | 868 | ||
877 | ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); | 869 | ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); |
878 | ms_in_desc.bNumEmbMIDIJack = midi->out_ports; | 870 | ms_in_desc.bNumEmbMIDIJack = midi->out_ports; |
879 | for (n = 0; n < midi->out_ports; n++) | ||
880 | ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID; | ||
881 | 871 | ||
882 | /* ... and add them to the list */ | 872 | /* ... and add them to the list */ |
883 | midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; | 873 | midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; |
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) | |||
901 | f->descriptors = usb_copy_descriptors(midi_function); | 891 | f->descriptors = usb_copy_descriptors(midi_function); |
902 | } | 892 | } |
903 | 893 | ||
894 | kfree(midi_function); | ||
895 | |||
904 | return 0; | 896 | return 0; |
905 | 897 | ||
906 | fail: | 898 | fail: |
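Because the f_midi descriptor set now grows with the configured port count, the function list is built in a heap-allocated pointer array and released once usb_copy_descriptors() has made its deep copy. A compressed sketch of that pattern follows; the slot count and the fill-in step are illustrative only.

	#include <linux/slab.h>
	#include <linux/usb/composite.h>

	static int build_descriptor_list(struct usb_function *f, unsigned n_ports)
	{
		struct usb_descriptor_header **fn;
		unsigned i = 0;

		/* four jack descriptors per port plus the fixed part and the
		 * terminating NULL -- counts are illustrative */
		fn = kcalloc(4 * n_ports + 9, sizeof(*fn), GFP_KERNEL);
		if (!fn)
			return -ENOMEM;

		/* ... append header, jack and endpoint descriptors to fn[i++] ... */
		fn[i] = NULL;				/* usb_copy_descriptors() stops here */

		f->descriptors = usb_copy_descriptors(fn);	/* deep copy */
		kfree(fn);				/* temporary list no longer needed */
		return f->descriptors ? 0 : -ENOMEM;
	}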
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index f7e39b0365ce..11b5196284ae 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c | |||
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg, | |||
859 | if (ctrl->bRequestType != (USB_DIR_OUT | | 859 | if (ctrl->bRequestType != (USB_DIR_OUT | |
860 | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) | 860 | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) |
861 | break; | 861 | break; |
862 | if (w_index != 0 || w_value != 0) { | 862 | if (w_index != 0 || w_value != 0 || w_length != 0) { |
863 | value = -EDOM; | 863 | value = -EDOM; |
864 | break; | 864 | break; |
865 | } | 865 | } |
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg, | |||
875 | if (ctrl->bRequestType != (USB_DIR_IN | | 875 | if (ctrl->bRequestType != (USB_DIR_IN | |
876 | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) | 876 | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) |
877 | break; | 877 | break; |
878 | if (w_index != 0 || w_value != 0) { | 878 | if (w_index != 0 || w_value != 0 || w_length != 1) { |
879 | value = -EDOM; | 879 | value = -EDOM; |
880 | break; | 880 | break; |
881 | } | 881 | } |
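Both mass-storage hunks (f_mass_storage.c above and file_storage.c here) tighten the same check: Bulk-Only Mass Storage Reset carries no data stage and Get Max LUN returns exactly one byte, so any other wLength is a protocol error answered with -EDOM, which ends up stalling ep0. A standalone sketch of that validation, with the request codes defined locally rather than taken from a header:

	#include <linux/types.h>
	#include <linux/errno.h>

	#define BULK_RESET_REQUEST	0xff	/* Bulk-Only Mass Storage Reset */
	#define GET_MAX_LUN_REQUEST	0xfe	/* returns exactly one byte */

	/* Hypothetical helper: 0 if the class request is well formed. */
	static int bot_check_request(u8 request, u16 w_index, u16 w_value,
				     u16 w_length, u16 intf_num)
	{
		if (w_index != intf_num || w_value != 0)
			return -EDOM;

		switch (request) {
		case BULK_RESET_REQUEST:
			return w_length == 0 ? 0 : -EDOM;	/* no data stage allowed */
		case GET_MAX_LUN_REQUEST:
			return w_length == 1 ? 0 : -EDOM;	/* exactly one byte IN */
		}
		return -EOPNOTSUPP;
	}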
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index d786ba31fc07..b3b3d83b7c33 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c | |||
@@ -2480,8 +2480,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev) | |||
2480 | 2480 | ||
2481 | #ifndef CONFIG_ARCH_MXC | 2481 | #ifndef CONFIG_ARCH_MXC |
2482 | if (pdata->have_sysif_regs) | 2482 | if (pdata->have_sysif_regs) |
2483 | usb_sys_regs = (struct usb_sys_interface *) | 2483 | usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET; |
2484 | ((u32)dr_regs + USB_DR_SYS_OFFSET); | ||
2485 | #endif | 2484 | #endif |
2486 | 2485 | ||
2487 | /* Initialize USB clocks */ | 2486 | /* Initialize USB clocks */ |
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index a392ec0d2d51..6ccae2707e59 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c | |||
@@ -1730,8 +1730,9 @@ static void | |||
1730 | gadgetfs_disconnect (struct usb_gadget *gadget) | 1730 | gadgetfs_disconnect (struct usb_gadget *gadget) |
1731 | { | 1731 | { |
1732 | struct dev_data *dev = get_gadget_data (gadget); | 1732 | struct dev_data *dev = get_gadget_data (gadget); |
1733 | unsigned long flags; | ||
1733 | 1734 | ||
1734 | spin_lock (&dev->lock); | 1735 | spin_lock_irqsave (&dev->lock, flags); |
1735 | if (dev->state == STATE_DEV_UNCONNECTED) | 1736 | if (dev->state == STATE_DEV_UNCONNECTED) |
1736 | goto exit; | 1737 | goto exit; |
1737 | dev->state = STATE_DEV_UNCONNECTED; | 1738 | dev->state = STATE_DEV_UNCONNECTED; |
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget) | |||
1740 | next_event (dev, GADGETFS_DISCONNECT); | 1741 | next_event (dev, GADGETFS_DISCONNECT); |
1741 | ep0_readable (dev); | 1742 | ep0_readable (dev); |
1742 | exit: | 1743 | exit: |
1743 | spin_unlock (&dev->lock); | 1744 | spin_unlock_irqrestore (&dev->lock, flags); |
1744 | } | 1745 | } |
1745 | 1746 | ||
1746 | static void | 1747 | static void |
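The gadgetfs disconnect callback can run from the controller's interrupt path, so a plain spin_lock()/spin_unlock() pair risks re-enabling interrupts behind the caller's back or deadlocking against the irq handler on the same CPU; the irqsave variant records and restores the previous interrupt state. The skeleton below shows the shape of the fix with a made-up device structure.

	#include <linux/types.h>
	#include <linux/spinlock.h>

	struct demo_dev {		/* hypothetical */
		spinlock_t	lock;
		bool		connected;
	};

	static void demo_disconnect(struct demo_dev *dev)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);	/* disables local irqs, remembers prior state */
		dev->connected = false;
		spin_unlock_irqrestore(&dev->lock, flags);	/* restores exactly that state */
	}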
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c index 550d6dcdf104..5048a0c07640 100644 --- a/drivers/usb/gadget/pch_udc.c +++ b/drivers/usb/gadget/pch_udc.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. | 2 | * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -354,6 +354,7 @@ struct pch_udc_dev { | |||
354 | #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 | 354 | #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 |
355 | #define PCI_VENDOR_ID_ROHM 0x10DB | 355 | #define PCI_VENDOR_ID_ROHM 0x10DB |
356 | #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D | 356 | #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D |
357 | #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 | ||
357 | 358 | ||
358 | static const char ep0_string[] = "ep0in"; | 359 | static const char ep0_string[] = "ep0in"; |
359 | static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ | 360 | static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ |
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = { | |||
2970 | .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, | 2971 | .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, |
2971 | .class_mask = 0xffffffff, | 2972 | .class_mask = 0xffffffff, |
2972 | }, | 2973 | }, |
2974 | { | ||
2975 | PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC), | ||
2976 | .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, | ||
2977 | .class_mask = 0xffffffff, | ||
2978 | }, | ||
2973 | { 0 }, | 2979 | { 0 }, |
2974 | }; | 2980 | }; |
2975 | 2981 | ||
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void) | |||
2999 | module_exit(pch_udc_pci_exit); | 3005 | module_exit(pch_udc_pci_exit); |
3000 | 3006 | ||
3001 | MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); | 3007 | MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); |
3002 | MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); | 3008 | MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>"); |
3003 | MODULE_LICENSE("GPL"); | 3009 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 68a826a1b866..24f84b210ce1 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep) | |||
1718 | if (list_empty(&ep->queue) && !ep->busy) { | 1718 | if (list_empty(&ep->queue) && !ep->busy) { |
1719 | pipe_stop(ep->r8a66597, ep->pipenum); | 1719 | pipe_stop(ep->r8a66597, ep->pipenum); |
1720 | r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); | 1720 | r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); |
1721 | r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr); | ||
1722 | r8a66597_write(ep->r8a66597, 0, ep->pipectr); | ||
1721 | } | 1723 | } |
1722 | spin_unlock_irqrestore(&ep->r8a66597->lock, flags); | 1724 | spin_unlock_irqrestore(&ep->r8a66597->lock, flags); |
1723 | } | 1725 | } |
@@ -1742,7 +1744,6 @@ static int r8a66597_start(struct usb_gadget *gadget, | |||
1742 | struct usb_gadget_driver *driver) | 1744 | struct usb_gadget_driver *driver) |
1743 | { | 1745 | { |
1744 | struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); | 1746 | struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); |
1745 | int retval; | ||
1746 | 1747 | ||
1747 | if (!driver | 1748 | if (!driver |
1748 | || driver->speed != USB_SPEED_HIGH | 1749 | || driver->speed != USB_SPEED_HIGH |
@@ -1752,16 +1753,7 @@ static int r8a66597_start(struct usb_gadget *gadget, | |||
1752 | return -ENODEV; | 1753 | return -ENODEV; |
1753 | 1754 | ||
1754 | /* hook up the driver */ | 1755 | /* hook up the driver */ |
1755 | driver->driver.bus = NULL; | ||
1756 | r8a66597->driver = driver; | 1756 | r8a66597->driver = driver; |
1757 | r8a66597->gadget.dev.driver = &driver->driver; | ||
1758 | |||
1759 | retval = device_add(&r8a66597->gadget.dev); | ||
1760 | if (retval) { | ||
1761 | dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n", | ||
1762 | retval); | ||
1763 | goto error; | ||
1764 | } | ||
1765 | 1757 | ||
1766 | init_controller(r8a66597); | 1758 | init_controller(r8a66597); |
1767 | r8a66597_bset(r8a66597, VBSE, INTENB0); | 1759 | r8a66597_bset(r8a66597, VBSE, INTENB0); |
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget, | |||
1775 | } | 1767 | } |
1776 | 1768 | ||
1777 | return 0; | 1769 | return 0; |
1778 | |||
1779 | error: | ||
1780 | r8a66597->driver = NULL; | ||
1781 | r8a66597->gadget.dev.driver = NULL; | ||
1782 | |||
1783 | return retval; | ||
1784 | } | 1770 | } |
1785 | 1771 | ||
1786 | static int r8a66597_stop(struct usb_gadget *gadget, | 1772 | static int r8a66597_stop(struct usb_gadget *gadget, |
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget, | |||
1794 | disable_controller(r8a66597); | 1780 | disable_controller(r8a66597); |
1795 | spin_unlock_irqrestore(&r8a66597->lock, flags); | 1781 | spin_unlock_irqrestore(&r8a66597->lock, flags); |
1796 | 1782 | ||
1797 | device_del(&r8a66597->gadget.dev); | ||
1798 | r8a66597->driver = NULL; | 1783 | r8a66597->driver = NULL; |
1799 | return 0; | 1784 | return 0; |
1800 | } | 1785 | } |
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev) | |||
1845 | clk_put(r8a66597->clk); | 1830 | clk_put(r8a66597->clk); |
1846 | } | 1831 | } |
1847 | #endif | 1832 | #endif |
1833 | device_unregister(&r8a66597->gadget.dev); | ||
1848 | kfree(r8a66597); | 1834 | kfree(r8a66597); |
1849 | return 0; | 1835 | return 0; |
1850 | } | 1836 | } |
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev) | |||
1924 | r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; | 1910 | r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; |
1925 | 1911 | ||
1926 | r8a66597->gadget.ops = &r8a66597_gadget_ops; | 1912 | r8a66597->gadget.ops = &r8a66597_gadget_ops; |
1927 | device_initialize(&r8a66597->gadget.dev); | ||
1928 | dev_set_name(&r8a66597->gadget.dev, "gadget"); | 1913 | dev_set_name(&r8a66597->gadget.dev, "gadget"); |
1929 | r8a66597->gadget.is_dualspeed = 1; | 1914 | r8a66597->gadget.is_dualspeed = 1; |
1930 | r8a66597->gadget.dev.parent = &pdev->dev; | 1915 | r8a66597->gadget.dev.parent = &pdev->dev; |
1931 | r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; | 1916 | r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; |
1932 | r8a66597->gadget.dev.release = pdev->dev.release; | 1917 | r8a66597->gadget.dev.release = pdev->dev.release; |
1933 | r8a66597->gadget.name = udc_name; | 1918 | r8a66597->gadget.name = udc_name; |
1919 | ret = device_register(&r8a66597->gadget.dev); | ||
1920 | if (ret < 0) { | ||
1921 | dev_err(&pdev->dev, "device_register failed\n"); | ||
1922 | goto clean_up; | ||
1923 | } | ||
1934 | 1924 | ||
1935 | init_timer(&r8a66597->timer); | 1925 | init_timer(&r8a66597->timer); |
1936 | r8a66597->timer.function = r8a66597_timer; | 1926 | r8a66597->timer.function = r8a66597_timer; |
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev) | |||
1945 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", | 1935 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", |
1946 | clk_name); | 1936 | clk_name); |
1947 | ret = PTR_ERR(r8a66597->clk); | 1937 | ret = PTR_ERR(r8a66597->clk); |
1948 | goto clean_up; | 1938 | goto clean_up_dev; |
1949 | } | 1939 | } |
1950 | clk_enable(r8a66597->clk); | 1940 | clk_enable(r8a66597->clk); |
1951 | } | 1941 | } |
@@ -2014,7 +2004,9 @@ clean_up2: | |||
2014 | clk_disable(r8a66597->clk); | 2004 | clk_disable(r8a66597->clk); |
2015 | clk_put(r8a66597->clk); | 2005 | clk_put(r8a66597->clk); |
2016 | } | 2006 | } |
2007 | clean_up_dev: | ||
2017 | #endif | 2008 | #endif |
2009 | device_unregister(&r8a66597->gadget.dev); | ||
2018 | clean_up: | 2010 | clean_up: |
2019 | if (r8a66597) { | 2011 | if (r8a66597) { |
2020 | if (r8a66597->sudmac_reg) | 2012 | if (r8a66597->sudmac_reg) |
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 022baeca7c94..6939e17f4580 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c | |||
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) | |||
210 | kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); | 210 | kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); |
211 | 211 | ||
212 | if (udc_is_newstyle(udc)) { | 212 | if (udc_is_newstyle(udc)) { |
213 | usb_gadget_disconnect(udc->gadget); | 213 | udc->driver->disconnect(udc->gadget); |
214 | udc->driver->unbind(udc->gadget); | 214 | udc->driver->unbind(udc->gadget); |
215 | usb_gadget_udc_stop(udc->gadget, udc->driver); | 215 | usb_gadget_udc_stop(udc->gadget, udc->driver); |
216 | 216 | usb_gadget_disconnect(udc->gadget); | |
217 | } else { | 217 | } else { |
218 | usb_gadget_stop(udc->gadget, udc->driver); | 218 | usb_gadget_stop(udc->gadget, udc->driver); |
219 | } | 219 | } |
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver); | |||
344 | static ssize_t usb_udc_srp_store(struct device *dev, | 344 | static ssize_t usb_udc_srp_store(struct device *dev, |
345 | struct device_attribute *attr, const char *buf, size_t n) | 345 | struct device_attribute *attr, const char *buf, size_t n) |
346 | { | 346 | { |
347 | struct usb_udc *udc = dev_get_drvdata(dev); | 347 | struct usb_udc *udc = container_of(dev, struct usb_udc, dev); |
348 | 348 | ||
349 | if (sysfs_streq(buf, "1")) | 349 | if (sysfs_streq(buf, "1")) |
350 | usb_gadget_wakeup(udc->gadget); | 350 | usb_gadget_wakeup(udc->gadget); |
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev, | |||
378 | return snprintf(buf, PAGE_SIZE, "%s\n", | 378 | return snprintf(buf, PAGE_SIZE, "%s\n", |
379 | usb_speed_string(udc->gadget->speed)); | 379 | usb_speed_string(udc->gadget->speed)); |
380 | } | 380 | } |
381 | static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL); | 381 | static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL); |
382 | 382 | ||
383 | #define USB_UDC_ATTR(name) \ | 383 | #define USB_UDC_ATTR(name) \ |
384 | ssize_t usb_udc_##name##_show(struct device *dev, \ | 384 | ssize_t usb_udc_##name##_show(struct device *dev, \ |
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \ | |||
389 | \ | 389 | \ |
390 | return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ | 390 | return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ |
391 | } \ | 391 | } \ |
392 | static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL) | 392 | static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL) |
393 | 393 | ||
394 | static USB_UDC_ATTR(is_dualspeed); | 394 | static USB_UDC_ATTR(is_dualspeed); |
395 | static USB_UDC_ATTR(is_otg); | 395 | static USB_UDC_ATTR(is_otg); |
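The srp_store fix works because nothing ever sets drvdata on the udc device, so dev_get_drvdata() returned NULL; the struct device is embedded in the UDC wrapper, and container_of() recovers the wrapper directly from the attribute's device pointer. A reduced sketch with the wrapper abbreviated to the relevant members and renamed to keep it clearly hypothetical:

	#include <linux/device.h>
	#include <linux/string.h>
	#include <linux/usb/gadget.h>

	struct demo_udc {			/* stand-in for the driver's wrapper */
		struct usb_gadget	*gadget;
		struct device		dev;	/* embedded device, registered with sysfs */
	};

	static ssize_t demo_srp_store(struct device *dev, struct device_attribute *attr,
				      const char *buf, size_t n)
	{
		struct demo_udc *udc = container_of(dev, struct demo_udc, dev);

		if (sysfs_streq(buf, "1"))
			usb_gadget_wakeup(udc->gadget);	/* no drvdata lookup needed */
		return n;
	}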
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 2e829fae6482..56a32033adb3 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1479,10 +1479,15 @@ iso_stream_schedule ( | |||
1479 | 1479 | ||
1480 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ | 1480 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ |
1481 | 1481 | ||
1482 | /* find a uframe slot with enough bandwidth */ | 1482 | /* find a uframe slot with enough bandwidth. |
1483 | next = start + period; | 1483 | * Early uframes are more precious because full-speed |
1484 | for (; start < next; start++) { | 1484 | * iso IN transfers can't use late uframes, |
1485 | 1485 | * and therefore they should be allocated last. | |
1486 | */ | ||
1487 | next = start; | ||
1488 | start += period; | ||
1489 | do { | ||
1490 | start--; | ||
1486 | /* check schedule: enough space? */ | 1491 | /* check schedule: enough space? */ |
1487 | if (stream->highspeed) { | 1492 | if (stream->highspeed) { |
1488 | if (itd_slot_ok(ehci, mod, start, | 1493 | if (itd_slot_ok(ehci, mod, start, |
@@ -1495,7 +1500,7 @@ iso_stream_schedule ( | |||
1495 | start, sched, period)) | 1500 | start, sched, period)) |
1496 | break; | 1501 | break; |
1497 | } | 1502 | } |
1498 | } | 1503 | } while (start > next); |
1499 | 1504 | ||
1500 | /* no room in the schedule */ | 1505 | /* no room in the schedule */ |
1501 | if (start == next) { | 1506 | if (start == next) { |
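The ehci-sched loop now walks the candidate window backwards: the latest microframe with enough bandwidth is taken first, because full-speed iso IN transfers cannot use late microframes and would otherwise be starved of the early ones. Generic shape of that scan, with fits() standing in for the itd/sitd bandwidth checks and names chosen for illustration:

	/* Scan [base, base + period) from the top down; return the chosen
	 * slot or -1 if nothing in the window had room. */
	static int pick_uframe(unsigned base, unsigned period, int (*fits)(unsigned))
	{
		unsigned slot = base + period;

		do {
			slot--;				/* latest candidate first */
			if (fits(slot))
				return slot;		/* early slots stay free for FS iso IN */
		} while (slot > base);

		return -1;
	}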
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c index fe74bd676018..b4fb511d24bc 100644 --- a/drivers/usb/host/ehci-xls.c +++ b/drivers/usb/host/ehci-xls.c | |||
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd) | |||
19 | 19 | ||
20 | ehci->caps = hcd->regs; | 20 | ehci->caps = hcd->regs; |
21 | ehci->regs = hcd->regs + | 21 | ehci->regs = hcd->regs + |
22 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 22 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
23 | dbg_hcs_params(ehci, "reset"); | 23 | dbg_hcs_params(ehci, "reset"); |
24 | dbg_hcc_params(ehci, "reset"); | 24 | dbg_hcc_params(ehci, "reset"); |
25 | 25 | ||
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index ba3a46b78b75..95a9fec38e89 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int | |||
223 | if (port < 0 || port >= 2) | 223 | if (port < 0 || port >= 2) |
224 | return; | 224 | return; |
225 | 225 | ||
226 | if (pdata->vbus_pin[port] <= 0) | ||
227 | return; | ||
228 | |||
226 | gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); | 229 | gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); |
227 | } | 230 | } |
228 | 231 | ||
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) | |||
231 | if (port < 0 || port >= 2) | 234 | if (port < 0 || port >= 2) |
232 | return -EINVAL; | 235 | return -EINVAL; |
233 | 236 | ||
237 | if (pdata->vbus_pin[port] <= 0) | ||
238 | return -EINVAL; | ||
239 | |||
234 | return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; | 240 | return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; |
235 | } | 241 | } |
236 | 242 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 34efd479e068..b2639191549e 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd) | |||
389 | struct ohci_hcd *ohci; | 389 | struct ohci_hcd *ohci; |
390 | 390 | ||
391 | ohci = hcd_to_ohci (hcd); | 391 | ohci = hcd_to_ohci (hcd); |
392 | ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); | 392 | ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable); |
393 | ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); | ||
394 | 393 | ||
395 | /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ | 394 | /* Software reset, after which the controller goes into SUSPEND */ |
396 | ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? | 395 | ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus); |
397 | OHCI_CTRL_RWC | OHCI_CTRL_HCFS : | 396 | ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */ |
398 | OHCI_CTRL_RWC); | 397 | udelay(10); |
399 | ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); | ||
400 | 398 | ||
401 | /* flush the writes */ | 399 | ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval); |
402 | (void) ohci_readl (ohci, &ohci->regs->control); | ||
403 | } | 400 | } |
404 | 401 | ||
405 | static int check_ed(struct ohci_hcd *ohci, struct ed *ed) | 402 | static int check_ed(struct ohci_hcd *ohci, struct ed *ed) |
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index ad8166c681e2..bc01b064585a 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) | |||
175 | return 0; | 175 | return 0; |
176 | } | 176 | } |
177 | 177 | ||
178 | /* nVidia controllers continue to drive Reset signalling on the bus | ||
179 | * even after system shutdown, wasting power. This flag tells the | ||
180 | * shutdown routine to leave the controller OPERATIONAL instead of RESET. | ||
181 | */ | ||
182 | static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) | ||
183 | { | ||
184 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | ||
185 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | ||
186 | |||
187 | /* Evidently nVidia fixed their later hardware; this is a guess at | ||
188 | * the changeover point. | ||
189 | */ | ||
190 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d | ||
191 | |||
192 | if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { | ||
193 | ohci->flags |= OHCI_QUIRK_SHUTDOWN; | ||
194 | ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); | ||
195 | } | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static void sb800_prefetch(struct ohci_hcd *ohci, int on) | 178 | static void sb800_prefetch(struct ohci_hcd *ohci, int on) |
201 | { | 179 | { |
202 | struct pci_dev *pdev; | 180 | struct pci_dev *pdev; |
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = { | |||
260 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), | 238 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), |
261 | .driver_data = (unsigned long)ohci_quirk_amd700, | 239 | .driver_data = (unsigned long)ohci_quirk_amd700, |
262 | }, | 240 | }, |
263 | { | ||
264 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), | ||
265 | .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown, | ||
266 | }, | ||
267 | 241 | ||
268 | /* FIXME for some of the early AMD 760 southbridges, OHCI | 242 | /* FIXME for some of the early AMD 760 southbridges, OHCI |
269 | * won't work at all. blacklist them. | 243 | * won't work at all. blacklist them. |
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 35e5fd640ce7..0795b934d00c 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h | |||
@@ -403,7 +403,6 @@ struct ohci_hcd { | |||
403 | #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ | 403 | #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ |
404 | #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ | 404 | #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ |
405 | #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ | 405 | #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ |
406 | #define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */ | ||
407 | // there are also chip quirks/bugs in init logic | 406 | // there are also chip quirks/bugs in init logic |
408 | 407 | ||
409 | struct work_struct nec_work; /* Worker for NEC quirk */ | 408 | struct work_struct nec_work; /* Worker for NEC quirk */ |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 27a3dec32fa2..caf87428ca43 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define OHCI_INTRENABLE 0x10 | 37 | #define OHCI_INTRENABLE 0x10 |
38 | #define OHCI_INTRDISABLE 0x14 | 38 | #define OHCI_INTRDISABLE 0x14 |
39 | #define OHCI_FMINTERVAL 0x34 | 39 | #define OHCI_FMINTERVAL 0x34 |
40 | #define OHCI_HCFS (3 << 6) /* hc functional state */ | ||
40 | #define OHCI_HCR (1 << 0) /* host controller reset */ | 41 | #define OHCI_HCR (1 << 0) /* host controller reset */ |
41 | #define OHCI_OCR (1 << 3) /* ownership change request */ | 42 | #define OHCI_OCR (1 << 3) /* ownership change request */ |
42 | #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ | 43 | #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ |
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) | |||
466 | { | 467 | { |
467 | void __iomem *base; | 468 | void __iomem *base; |
468 | u32 control; | 469 | u32 control; |
470 | u32 fminterval; | ||
471 | int cnt; | ||
469 | 472 | ||
470 | if (!mmio_resource_enabled(pdev, 0)) | 473 | if (!mmio_resource_enabled(pdev, 0)) |
471 | return; | 474 | return; |
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) | |||
498 | } | 501 | } |
499 | #endif | 502 | #endif |
500 | 503 | ||
501 | /* reset controller, preserving RWC (and possibly IR) */ | 504 | /* disable interrupts */ |
502 | writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); | 505 | writel((u32) ~0, base + OHCI_INTRDISABLE); |
503 | readl(base + OHCI_CONTROL); | ||
504 | 506 | ||
505 | /* Some NVIDIA controllers stop working if kept in RESET for too long */ | 507 | /* Reset the USB bus, if the controller isn't already in RESET */ |
506 | if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { | 508 | if (control & OHCI_HCFS) { |
507 | u32 fminterval; | 509 | /* Go into RESET, preserving RWC (and possibly IR) */ |
508 | int cnt; | 510 | writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); |
511 | readl(base + OHCI_CONTROL); | ||
509 | 512 | ||
510 | /* drive reset for at least 50 ms (7.1.7.5) */ | 513 | /* drive bus reset for at least 50 ms (7.1.7.5) */ |
511 | msleep(50); | 514 | msleep(50); |
515 | } | ||
512 | 516 | ||
513 | /* software reset of the controller, preserving HcFmInterval */ | 517 | /* software reset of the controller, preserving HcFmInterval */ |
514 | fminterval = readl(base + OHCI_FMINTERVAL); | 518 | fminterval = readl(base + OHCI_FMINTERVAL); |
515 | writel(OHCI_HCR, base + OHCI_CMDSTATUS); | 519 | writel(OHCI_HCR, base + OHCI_CMDSTATUS); |
516 | 520 | ||
517 | /* reset requires max 10 us delay */ | 521 | /* reset requires max 10 us delay */ |
518 | for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ | 522 | for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ |
519 | if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) | 523 | if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) |
520 | break; | 524 | break; |
521 | udelay(1); | 525 | udelay(1); |
522 | } | ||
523 | writel(fminterval, base + OHCI_FMINTERVAL); | ||
524 | |||
525 | /* Now we're in the SUSPEND state with all devices reset | ||
526 | * and wakeups and interrupts disabled | ||
527 | */ | ||
528 | } | 526 | } |
527 | writel(fminterval, base + OHCI_FMINTERVAL); | ||
529 | 528 | ||
530 | /* | 529 | /* Now the controller is safely in SUSPEND and nothing can wake it up */ |
531 | * disable interrupts | ||
532 | */ | ||
533 | writel(~(u32)0, base + OHCI_INTRDISABLE); | ||
534 | writel(~(u32)0, base + OHCI_INTRSTATUS); | ||
535 | |||
536 | iounmap(base); | 530 | iounmap(base); |
537 | } | 531 | } |
538 | 532 | ||
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) | |||
627 | void __iomem *base, *op_reg_base; | 621 | void __iomem *base, *op_reg_base; |
628 | u32 hcc_params, cap, val; | 622 | u32 hcc_params, cap, val; |
629 | u8 offset, cap_length; | 623 | u8 offset, cap_length; |
630 | int wait_time, delta, count = 256/4; | 624 | int wait_time, count = 256/4; |
631 | 625 | ||
632 | if (!mmio_resource_enabled(pdev, 0)) | 626 | if (!mmio_resource_enabled(pdev, 0)) |
633 | return; | 627 | return; |
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) | |||
673 | writel(val, op_reg_base + EHCI_USBCMD); | 667 | writel(val, op_reg_base + EHCI_USBCMD); |
674 | 668 | ||
675 | wait_time = 2000; | 669 | wait_time = 2000; |
676 | delta = 100; | ||
677 | do { | 670 | do { |
678 | writel(0x3f, op_reg_base + EHCI_USBSTS); | 671 | writel(0x3f, op_reg_base + EHCI_USBSTS); |
679 | udelay(delta); | 672 | udelay(100); |
680 | wait_time -= delta; | 673 | wait_time -= 100; |
681 | val = readl(op_reg_base + EHCI_USBSTS); | 674 | val = readl(op_reg_base + EHCI_USBSTS); |
682 | if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { | 675 | if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { |
683 | break; | 676 | break; |
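The reworked handoff keeps the familiar bounded-poll idiom for both the OHCI software reset and the EHCI halt wait: spin on a status bit with a short fixed delay and a hard cap, never an unbounded loop. A generic helper in that shape, with illustrative names:

	#include <linux/types.h>
	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	static int wait_for_clear(void __iomem *reg, u32 mask, int max_us)
	{
		while (max_us > 0) {
			if (!(readl(reg) & mask))
				return 0;		/* bit cleared in time */
			udelay(10);
			max_us -= 10;
		}
		return -ETIMEDOUT;			/* controller did not respond */
	}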
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 42a22b8e6922..0e4b25fa3bcd 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
982 | struct xhci_virt_device *dev; | 982 | struct xhci_virt_device *dev; |
983 | struct xhci_ep_ctx *ep0_ctx; | 983 | struct xhci_ep_ctx *ep0_ctx; |
984 | struct xhci_slot_ctx *slot_ctx; | 984 | struct xhci_slot_ctx *slot_ctx; |
985 | struct xhci_input_control_ctx *ctrl_ctx; | ||
986 | u32 port_num; | 985 | u32 port_num; |
987 | struct usb_device *top_dev; | 986 | struct usb_device *top_dev; |
988 | 987 | ||
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
994 | return -EINVAL; | 993 | return -EINVAL; |
995 | } | 994 | } |
996 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); | 995 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
997 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | ||
998 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | 996 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); |
999 | 997 | ||
1000 | /* 2) New slot context and endpoint 0 context are valid*/ | ||
1001 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); | ||
1002 | |||
1003 | /* 3) Only the control endpoint is valid - one endpoint context */ | 998 | /* 3) Only the control endpoint is valid - one endpoint context */ |
1004 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); | 999 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); |
1005 | switch (udev->speed) { | 1000 | switch (udev->speed) { |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 940321b3ec68..9f1d4b15d818 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
816 | struct xhci_ring *ring; | 816 | struct xhci_ring *ring; |
817 | struct xhci_td *cur_td; | 817 | struct xhci_td *cur_td; |
818 | int ret, i, j; | 818 | int ret, i, j; |
819 | unsigned long flags; | ||
819 | 820 | ||
820 | ep = (struct xhci_virt_ep *) arg; | 821 | ep = (struct xhci_virt_ep *) arg; |
821 | xhci = ep->xhci; | 822 | xhci = ep->xhci; |
822 | 823 | ||
823 | spin_lock(&xhci->lock); | 824 | spin_lock_irqsave(&xhci->lock, flags); |
824 | 825 | ||
825 | ep->stop_cmds_pending--; | 826 | ep->stop_cmds_pending--; |
826 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 827 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
827 | xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " | 828 | xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " |
828 | "xHCI as DYING, exiting.\n"); | 829 | "xHCI as DYING, exiting.\n"); |
829 | spin_unlock(&xhci->lock); | 830 | spin_unlock_irqrestore(&xhci->lock, flags); |
830 | return; | 831 | return; |
831 | } | 832 | } |
832 | if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { | 833 | if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { |
833 | xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " | 834 | xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " |
834 | "exiting.\n"); | 835 | "exiting.\n"); |
835 | spin_unlock(&xhci->lock); | 836 | spin_unlock_irqrestore(&xhci->lock, flags); |
836 | return; | 837 | return; |
837 | } | 838 | } |
838 | 839 | ||
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
844 | xhci->xhc_state |= XHCI_STATE_DYING; | 845 | xhci->xhc_state |= XHCI_STATE_DYING; |
845 | /* Disable interrupts from the host controller and start halting it */ | 846 | /* Disable interrupts from the host controller and start halting it */ |
846 | xhci_quiesce(xhci); | 847 | xhci_quiesce(xhci); |
847 | spin_unlock(&xhci->lock); | 848 | spin_unlock_irqrestore(&xhci->lock, flags); |
848 | 849 | ||
849 | ret = xhci_halt(xhci); | 850 | ret = xhci_halt(xhci); |
850 | 851 | ||
851 | spin_lock(&xhci->lock); | 852 | spin_lock_irqsave(&xhci->lock, flags); |
852 | if (ret < 0) { | 853 | if (ret < 0) { |
853 | /* This is bad; the host is not responding to commands and it's | 854 | /* This is bad; the host is not responding to commands and it's |
854 | * not allowing itself to be halted. At least interrupts are | 855 | * not allowing itself to be halted. At least interrupts are |
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
896 | } | 897 | } |
897 | } | 898 | } |
898 | } | 899 | } |
899 | spin_unlock(&xhci->lock); | 900 | spin_unlock_irqrestore(&xhci->lock, flags); |
900 | xhci_dbg(xhci, "Calling usb_hc_died()\n"); | 901 | xhci_dbg(xhci, "Calling usb_hc_died()\n"); |
901 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); | 902 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); |
902 | xhci_dbg(xhci, "xHCI host controller is dead.\n"); | 903 | xhci_dbg(xhci, "xHCI host controller is dead.\n"); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1ff95a0df576..aa94c0195791 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -799,7 +799,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
799 | u32 command, temp = 0; | 799 | u32 command, temp = 0; |
800 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | 800 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
801 | struct usb_hcd *secondary_hcd; | 801 | struct usb_hcd *secondary_hcd; |
802 | int retval; | 802 | int retval = 0; |
803 | 803 | ||
804 | /* Wait a bit if either of the roothubs need to settle from the | 804 | /* Wait a bit if either of the roothubs need to settle from the |
805 | * transition into bus suspend. | 805 | * transition into bus suspend. |
@@ -809,6 +809,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
809 | xhci->bus_state[1].next_statechange)) | 809 | xhci->bus_state[1].next_statechange)) |
810 | msleep(100); | 810 | msleep(100); |
811 | 811 | ||
812 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
813 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | ||
814 | |||
812 | spin_lock_irq(&xhci->lock); | 815 | spin_lock_irq(&xhci->lock); |
813 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | 816 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
814 | hibernated = true; | 817 | hibernated = true; |
@@ -878,20 +881,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
878 | return retval; | 881 | return retval; |
879 | xhci_dbg(xhci, "Start the primary HCD\n"); | 882 | xhci_dbg(xhci, "Start the primary HCD\n"); |
880 | retval = xhci_run(hcd->primary_hcd); | 883 | retval = xhci_run(hcd->primary_hcd); |
881 | if (retval) | ||
882 | goto failed_restart; | ||
883 | |||
884 | xhci_dbg(xhci, "Start the secondary HCD\n"); | ||
885 | retval = xhci_run(secondary_hcd); | ||
886 | if (!retval) { | 884 | if (!retval) { |
887 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 885 | xhci_dbg(xhci, "Start the secondary HCD\n"); |
888 | set_bit(HCD_FLAG_HW_ACCESSIBLE, | 886 | retval = xhci_run(secondary_hcd); |
889 | &xhci->shared_hcd->flags); | ||
890 | } | 887 | } |
891 | failed_restart: | ||
892 | hcd->state = HC_STATE_SUSPENDED; | 888 | hcd->state = HC_STATE_SUSPENDED; |
893 | xhci->shared_hcd->state = HC_STATE_SUSPENDED; | 889 | xhci->shared_hcd->state = HC_STATE_SUSPENDED; |
894 | return retval; | 890 | goto done; |
895 | } | 891 | } |
896 | 892 | ||
897 | /* step 4: set Run/Stop bit */ | 893 | /* step 4: set Run/Stop bit */ |
@@ -910,11 +906,14 @@ failed_restart: | |||
910 | * Running endpoints by ringing their doorbells | 906 | * Running endpoints by ringing their doorbells |
911 | */ | 907 | */ |
912 | 908 | ||
913 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
914 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | ||
915 | |||
916 | spin_unlock_irq(&xhci->lock); | 909 | spin_unlock_irq(&xhci->lock); |
917 | return 0; | 910 | |
911 | done: | ||
912 | if (retval == 0) { | ||
913 | usb_hcd_resume_root_hub(hcd); | ||
914 | usb_hcd_resume_root_hub(xhci->shared_hcd); | ||
915 | } | ||
916 | return retval; | ||
918 | } | 917 | } |
919 | #endif /* CONFIG_PM */ | 918 | #endif /* CONFIG_PM */ |
920 | 919 | ||
@@ -3504,6 +3503,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3504 | /* Otherwise, update the control endpoint ring enqueue pointer. */ | 3503 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
3505 | else | 3504 | else |
3506 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | 3505 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
3506 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | ||
3507 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); | ||
3508 | ctrl_ctx->drop_flags = 0; | ||
3509 | |||
3507 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 3510 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
3508 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | 3511 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
3509 | 3512 | ||
@@ -3585,7 +3588,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3585 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) | 3588 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) |
3586 | + 1; | 3589 | + 1; |
3587 | /* Zero the input context control for later use */ | 3590 | /* Zero the input context control for later use */ |
3588 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | ||
3589 | ctrl_ctx->add_flags = 0; | 3591 | ctrl_ctx->add_flags = 0; |
3590 | ctrl_ctx->drop_flags = 0; | 3592 | ctrl_ctx->drop_flags = 0; |
3591 | 3593 | ||
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index fc34b8b11910..07a03460a598 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC | |||
11 | select TWL4030_USB if MACH_OMAP_3430SDP | 11 | select TWL4030_USB if MACH_OMAP_3430SDP |
12 | select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA | 12 | select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA |
13 | select USB_OTG_UTILS | 13 | select USB_OTG_UTILS |
14 | select USB_GADGET_DUALSPEED | ||
14 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' | 15 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' |
15 | help | 16 | help |
16 | Say Y here if your system has a dual role high speed USB | 17 | Say Y here if your system has a dual role high speed USB |
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN | |||
60 | 61 | ||
61 | config USB_MUSB_UX500 | 62 | config USB_MUSB_UX500 |
62 | tristate "U8500 and U5500" | 63 | tristate "U8500 and U5500" |
63 | depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) | 64 | depends on (ARCH_U8500 && AB8500_USB) |
64 | 65 | ||
65 | endchoice | 66 | endchoice |
66 | 67 | ||
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index 08f1d0b662a3..e233d2b7d335 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/module.h> | ||
30 | #include <linux/clk.h> | 31 | #include <linux/clk.h> |
31 | #include <linux/io.h> | 32 | #include <linux/io.h> |
32 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 4da7492ddbdb..2613bfdb09b6 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/module.h> | ||
30 | #include <linux/clk.h> | 31 | #include <linux/clk.h> |
31 | #include <linux/io.h> | 32 | #include <linux/io.h> |
32 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 20a28731c338..c1fa12ec7a9a 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1477 | /*-------------------------------------------------------------------------*/ | 1477 | /*-------------------------------------------------------------------------*/ |
1478 | 1478 | ||
1479 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ | 1479 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ |
1480 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ | 1480 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) |
1481 | defined(CONFIG_ARCH_U5500) | ||
1482 | 1481 | ||
1483 | static irqreturn_t generic_interrupt(int irq, void *__hci) | 1482 | static irqreturn_t generic_interrupt(int irq, void *__hci) |
1484 | { | 1483 | { |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ae4a20acef6c..d51043acfe1a 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | |||
1999 | nuke(&hw_ep->ep_out, -ESHUTDOWN); | 1999 | nuke(&hw_ep->ep_out, -ESHUTDOWN); |
2000 | } | 2000 | } |
2001 | } | 2001 | } |
2002 | |||
2003 | spin_unlock(&musb->lock); | ||
2004 | driver->disconnect(&musb->g); | ||
2005 | spin_lock(&musb->lock); | ||
2006 | } | 2002 | } |
2007 | } | 2003 | } |
2008 | 2004 | ||
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index d2e2efaba658..08c679c0dde5 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c | |||
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev) | |||
405 | /* | 405 | /* |
406 | * platform functions | 406 | * platform functions |
407 | */ | 407 | */ |
408 | static int __devinit usbhs_probe(struct platform_device *pdev) | 408 | static int usbhs_probe(struct platform_device *pdev) |
409 | { | 409 | { |
410 | struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; | 410 | struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; |
411 | struct renesas_usbhs_driver_callback *dfunc; | 411 | struct renesas_usbhs_driver_callback *dfunc; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 8da685e796d1..ffdf5d15085e 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) | |||
820 | if (len % 4) /* 32bit alignment */ | 820 | if (len % 4) /* 32bit alignment */ |
821 | goto usbhsf_pio_prepare_push; | 821 | goto usbhsf_pio_prepare_push; |
822 | 822 | ||
823 | if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ | 823 | if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ |
824 | goto usbhsf_pio_prepare_push; | 824 | goto usbhsf_pio_prepare_push; |
825 | 825 | ||
826 | /* get enable DMA fifo */ | 826 | /* get enable DMA fifo */ |
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done) | |||
897 | if (!fifo) | 897 | if (!fifo) |
898 | goto usbhsf_pio_prepare_pop; | 898 | goto usbhsf_pio_prepare_pop; |
899 | 899 | ||
900 | if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ | 900 | if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ |
901 | goto usbhsf_pio_prepare_pop; | 901 | goto usbhsf_pio_prepare_pop; |
902 | 902 | ||
903 | ret = usbhsf_fifo_select(pipe, fifo, 0); | 903 | ret = usbhsf_fifo_select(pipe, fifo, 0); |
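
The two fifo.c hunks fix the 8-byte alignment test: the old expression dereferenced the buffer and added the offset to the loaded value, while the DMA check needs the alignment of the address buf + actual. A standalone illustration of the difference; the struct is only there to mirror the driver's field names:

    #include <linux/types.h>

    struct demo_pkt {
            void *buf;      /* transfer buffer */
            int actual;     /* bytes already transferred */
    };

    /* Old, broken test: loads a u32 from the start of the buffer and
     * checks (value + actual) & 0x7 -- the data, not the address. */
    static int demo_unaligned_old(struct demo_pkt *pkt)
    {
            return (*(u32 *)pkt->buf + pkt->actual) & 0x7;
    }

    /* Fixed test: checks whether the address buf + actual itself is
     * 8-byte aligned, which is what the DMA engine requires. */
    static int demo_unaligned_new(struct demo_pkt *pkt)
    {
            return ((uintptr_t)pkt->buf + pkt->actual) & 0x7;
    }
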
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h index 8ae3733031cd..6c6875533f01 100644 --- a/drivers/usb/renesas_usbhs/mod.h +++ b/drivers/usb/renesas_usbhs/mod.h | |||
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod); | |||
143 | */ | 143 | */ |
144 | #if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ | 144 | #if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ |
145 | defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) | 145 | defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) |
146 | extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv); | 146 | extern int usbhs_mod_host_probe(struct usbhs_priv *priv); |
147 | extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv); | 147 | extern int usbhs_mod_host_remove(struct usbhs_priv *priv); |
148 | #else | 148 | #else |
149 | static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) | 149 | static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) |
150 | { | 150 | { |
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv) | |||
157 | 157 | ||
158 | #if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ | 158 | #if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ |
159 | defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) | 159 | defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) |
160 | extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv); | 160 | extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv); |
161 | extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv); | 161 | extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv); |
162 | #else | 162 | #else |
163 | static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) | 163 | static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) |
164 | { | 164 | { |
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 4cc7ee0babc6..d9717e0bc1ff 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
@@ -830,7 +830,7 @@ static int usbhsg_stop(struct usbhs_priv *priv) | |||
830 | return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); | 830 | return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); |
831 | } | 831 | } |
832 | 832 | ||
833 | int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) | 833 | int usbhs_mod_gadget_probe(struct usbhs_priv *priv) |
834 | { | 834 | { |
835 | struct usbhsg_gpriv *gpriv; | 835 | struct usbhsg_gpriv *gpriv; |
836 | struct usbhsg_uep *uep; | 836 | struct usbhsg_uep *uep; |
@@ -927,7 +927,7 @@ usbhs_mod_gadget_probe_err_gpriv: | |||
927 | return ret; | 927 | return ret; |
928 | } | 928 | } |
929 | 929 | ||
930 | void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv) | 930 | void usbhs_mod_gadget_remove(struct usbhs_priv *priv) |
931 | { | 931 | { |
932 | struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); | 932 | struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); |
933 | 933 | ||
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c index 1a7208a50afc..bade761a1e52 100644 --- a/drivers/usb/renesas_usbhs/mod_host.c +++ b/drivers/usb/renesas_usbhs/mod_host.c | |||
@@ -103,7 +103,7 @@ struct usbhsh_hpriv { | |||
103 | 103 | ||
104 | u32 port_stat; /* USB_PORT_STAT_xxx */ | 104 | u32 port_stat; /* USB_PORT_STAT_xxx */ |
105 | 105 | ||
106 | struct completion *done; | 106 | struct completion setup_ack_done; |
107 | 107 | ||
108 | /* see usbhsh_req_alloc/free */ | 108 | /* see usbhsh_req_alloc/free */ |
109 | struct list_head ureq_link_active; | 109 | struct list_head ureq_link_active; |
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv, | |||
355 | struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, | 355 | struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, |
356 | struct usbhsh_device *udev, | 356 | struct usbhsh_device *udev, |
357 | struct usb_host_endpoint *ep, | 357 | struct usb_host_endpoint *ep, |
358 | int dir_in_req, | ||
358 | gfp_t mem_flags) | 359 | gfp_t mem_flags) |
359 | { | 360 | { |
360 | struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); | 361 | struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); |
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, | |||
364 | struct usbhs_pipe *pipe, *best_pipe; | 365 | struct usbhs_pipe *pipe, *best_pipe; |
365 | struct device *dev = usbhsh_hcd_to_dev(hcd); | 366 | struct device *dev = usbhsh_hcd_to_dev(hcd); |
366 | struct usb_endpoint_descriptor *desc = &ep->desc; | 367 | struct usb_endpoint_descriptor *desc = &ep->desc; |
367 | int type, i; | 368 | int type, i, dir_in; |
368 | unsigned int min_usr; | 369 | unsigned int min_usr; |
369 | 370 | ||
371 | dir_in_req = !!dir_in_req; | ||
372 | |||
370 | uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); | 373 | uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); |
371 | if (!uep) { | 374 | if (!uep) { |
372 | dev_err(dev, "usbhsh_ep alloc fail\n"); | 375 | dev_err(dev, "usbhsh_ep alloc fail\n"); |
373 | return NULL; | 376 | return NULL; |
374 | } | 377 | } |
375 | type = usb_endpoint_type(desc); | 378 | |
379 | if (usb_endpoint_xfer_control(desc)) { | ||
380 | best_pipe = usbhsh_hpriv_to_dcp(hpriv); | ||
381 | goto usbhsh_endpoint_alloc_find_pipe; | ||
382 | } | ||
376 | 383 | ||
377 | /* | 384 | /* |
378 | * find best pipe for endpoint | 385 | * find best pipe for endpoint |
379 | * see | 386 | * see |
380 | * HARDWARE LIMITATION | 387 | * HARDWARE LIMITATION |
381 | */ | 388 | */ |
389 | type = usb_endpoint_type(desc); | ||
382 | min_usr = ~0; | 390 | min_usr = ~0; |
383 | best_pipe = NULL; | 391 | best_pipe = NULL; |
384 | usbhs_for_each_pipe_with_dcp(pipe, priv, i) { | 392 | usbhs_for_each_pipe(pipe, priv, i) { |
385 | if (!usbhs_pipe_type_is(pipe, type)) | 393 | if (!usbhs_pipe_type_is(pipe, type)) |
386 | continue; | 394 | continue; |
387 | 395 | ||
396 | dir_in = !!usbhs_pipe_is_dir_in(pipe); | ||
397 | if (0 != (dir_in - dir_in_req)) | ||
398 | continue; | ||
399 | |||
388 | info = usbhsh_pipe_info(pipe); | 400 | info = usbhsh_pipe_info(pipe); |
389 | 401 | ||
390 | if (min_usr > info->usr_cnt) { | 402 | if (min_usr > info->usr_cnt) { |
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, | |||
398 | kfree(uep); | 410 | kfree(uep); |
399 | return NULL; | 411 | return NULL; |
400 | } | 412 | } |
401 | 413 | usbhsh_endpoint_alloc_find_pipe: | |
402 | /* | 414 | /* |
403 | * init uep | 415 | * init uep |
404 | */ | 416 | */ |
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, | |||
423 | * see | 435 | * see |
424 | * DCPMAXP/PIPEMAXP | 436 | * DCPMAXP/PIPEMAXP |
425 | */ | 437 | */ |
438 | usbhs_pipe_sequence_data0(uep->pipe); | ||
426 | usbhs_pipe_config_update(uep->pipe, | 439 | usbhs_pipe_config_update(uep->pipe, |
427 | usbhsh_device_number(hpriv, udev), | 440 | usbhsh_device_number(hpriv, udev), |
428 | usb_endpoint_num(desc), | 441 | usb_endpoint_num(desc), |
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, | |||
430 | 443 | ||
431 | dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, | 444 | dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, |
432 | usbhsh_device_number(hpriv, udev), | 445 | usbhsh_device_number(hpriv, udev), |
433 | usbhs_pipe_name(pipe), uep); | 446 | usbhs_pipe_name(uep->pipe), uep); |
434 | 447 | ||
435 | return uep; | 448 | return uep; |
436 | } | 449 | } |
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv, | |||
549 | * usbhsh_irq_setup_ack() | 562 | * usbhsh_irq_setup_ack() |
550 | * usbhsh_irq_setup_err() | 563 | * usbhsh_irq_setup_err() |
551 | */ | 564 | */ |
552 | DECLARE_COMPLETION(done); | 565 | init_completion(&hpriv->setup_ack_done); |
553 | hpriv->done = &done; | ||
554 | 566 | ||
555 | /* copy original request */ | 567 | /* copy original request */ |
556 | memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); | 568 | memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); |
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv, | |||
572 | /* | 584 | /* |
573 | * wait setup packet ACK | 585 | * wait setup packet ACK |
574 | */ | 586 | */ |
575 | wait_for_completion(&done); | 587 | wait_for_completion(&hpriv->setup_ack_done); |
576 | hpriv->done = NULL; | ||
577 | 588 | ||
578 | dev_dbg(dev, "%s done\n", __func__); | 589 | dev_dbg(dev, "%s done\n", __func__); |
579 | } | 590 | } |
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd, | |||
724 | struct usbhsh_device *udev, *new_udev = NULL; | 735 | struct usbhsh_device *udev, *new_udev = NULL; |
725 | struct usbhs_pipe *pipe; | 736 | struct usbhs_pipe *pipe; |
726 | struct usbhsh_ep *uep; | 737 | struct usbhsh_ep *uep; |
738 | int is_dir_in = usb_pipein(urb->pipe); | ||
727 | 739 | ||
728 | int ret; | 740 | int ret; |
729 | 741 | ||
730 | dev_dbg(dev, "%s (%s)\n", | 742 | dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out"); |
731 | __func__, usb_pipein(urb->pipe) ? "in" : "out"); | ||
732 | 743 | ||
733 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | 744 | ret = usb_hcd_link_urb_to_ep(hcd, urb); |
734 | if (ret) | 745 | if (ret) |
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd, | |||
751 | */ | 762 | */ |
752 | uep = usbhsh_ep_to_uep(ep); | 763 | uep = usbhsh_ep_to_uep(ep); |
753 | if (!uep) { | 764 | if (!uep) { |
754 | uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags); | 765 | uep = usbhsh_endpoint_alloc(hpriv, udev, ep, |
766 | is_dir_in, mem_flags); | ||
755 | if (!uep) | 767 | if (!uep) |
756 | goto usbhsh_urb_enqueue_error_free_device; | 768 | goto usbhsh_urb_enqueue_error_free_device; |
757 | } | 769 | } |
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv, | |||
1095 | 1107 | ||
1096 | dev_dbg(dev, "setup packet OK\n"); | 1108 | dev_dbg(dev, "setup packet OK\n"); |
1097 | 1109 | ||
1098 | if (unlikely(!hpriv->done)) | 1110 | complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */ |
1099 | dev_err(dev, "setup ack happen without necessary data\n"); | ||
1100 | else | ||
1101 | complete(hpriv->done); /* see usbhsh_urb_enqueue() */ | ||
1102 | 1111 | ||
1103 | return 0; | 1112 | return 0; |
1104 | } | 1113 | } |
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv, | |||
1111 | 1120 | ||
1112 | dev_dbg(dev, "setup packet Err\n"); | 1121 | dev_dbg(dev, "setup packet Err\n"); |
1113 | 1122 | ||
1114 | if (unlikely(!hpriv->done)) | 1123 | complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */ |
1115 | dev_err(dev, "setup err happen without necessary data\n"); | ||
1116 | else | ||
1117 | complete(hpriv->done); /* see usbhsh_urb_enqueue() */ | ||
1118 | 1124 | ||
1119 | return 0; | 1125 | return 0; |
1120 | } | 1126 | } |
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv) | |||
1221 | { | 1227 | { |
1222 | struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); | 1228 | struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); |
1223 | struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); | 1229 | struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); |
1230 | struct usbhs_mod *mod = usbhs_mod_get_current(priv); | ||
1224 | struct device *dev = usbhs_priv_to_dev(priv); | 1231 | struct device *dev = usbhs_priv_to_dev(priv); |
1225 | 1232 | ||
1233 | /* | ||
1234 | * disable irq callback | ||
1235 | */ | ||
1236 | mod->irq_attch = NULL; | ||
1237 | mod->irq_dtch = NULL; | ||
1238 | mod->irq_sack = NULL; | ||
1239 | mod->irq_sign = NULL; | ||
1240 | usbhs_irq_callback_update(priv, mod); | ||
1241 | |||
1226 | usb_remove_hcd(hcd); | 1242 | usb_remove_hcd(hcd); |
1227 | 1243 | ||
1228 | /* disable sys */ | 1244 | /* disable sys */ |
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv) | |||
1235 | return 0; | 1251 | return 0; |
1236 | } | 1252 | } |
1237 | 1253 | ||
1238 | int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) | 1254 | int usbhs_mod_host_probe(struct usbhs_priv *priv) |
1239 | { | 1255 | { |
1240 | struct usbhsh_hpriv *hpriv; | 1256 | struct usbhsh_hpriv *hpriv; |
1241 | struct usb_hcd *hcd; | 1257 | struct usb_hcd *hcd; |
@@ -1279,7 +1295,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) | |||
1279 | hpriv->mod.stop = usbhsh_stop; | 1295 | hpriv->mod.stop = usbhsh_stop; |
1280 | hpriv->pipe_info = pipe_info; | 1296 | hpriv->pipe_info = pipe_info; |
1281 | hpriv->pipe_size = pipe_size; | 1297 | hpriv->pipe_size = pipe_size; |
1282 | hpriv->done = NULL; | ||
1283 | usbhsh_req_list_init(hpriv); | 1298 | usbhsh_req_list_init(hpriv); |
1284 | usbhsh_port_stat_init(hpriv); | 1299 | usbhsh_port_stat_init(hpriv); |
1285 | 1300 | ||
@@ -1299,7 +1314,7 @@ usbhs_mod_host_probe_err: | |||
1299 | return -ENOMEM; | 1314 | return -ENOMEM; |
1300 | } | 1315 | } |
1301 | 1316 | ||
1302 | int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv) | 1317 | int usbhs_mod_host_remove(struct usbhs_priv *priv) |
1303 | { | 1318 | { |
1304 | struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); | 1319 | struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); |
1305 | struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); | 1320 | struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); |
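
Several of the mod_host.c hunks replace the on-stack DECLARE_COMPLETION() plus hpriv->done pointer with a completion embedded in the private data, which removes the window where the IRQ handler could see a NULL or stale pointer. A minimal sketch of that pattern, with placeholder structure and function names:

    #include <linux/completion.h>

    struct demo_hpriv {
            struct completion setup_ack_done;       /* embedded, not a pointer */
    };

    /* Submitter: re-arm the completion, kick the hardware, then block
     * until the ACK/err interrupt signals it. */
    static void demo_send_setup(struct demo_hpriv *hpriv)
    {
            init_completion(&hpriv->setup_ack_done);
            /* ... push the setup packet and enable the ACK/err irqs ... */
            wait_for_completion(&hpriv->setup_ack_done);
    }

    /* IRQ side: the completion always exists, so no NULL check is needed. */
    static void demo_irq_setup_ack(struct demo_hpriv *hpriv)
    {
            complete(&hpriv->setup_ack_done);
    }
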
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 5cdb9d912275..18e875b92e00 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c | |||
@@ -42,7 +42,7 @@ static int debug; | |||
42 | * Version information | 42 | * Version information |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #define DRIVER_VERSION "v0.6" | 45 | #define DRIVER_VERSION "v0.7" |
46 | #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" | 46 | #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" |
47 | #define DRIVER_DESC "USB ARK3116 serial/IrDA driver" | 47 | #define DRIVER_DESC "USB ARK3116 serial/IrDA driver" |
48 | #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" | 48 | #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" |
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
380 | goto err_out; | 380 | goto err_out; |
381 | } | 381 | } |
382 | 382 | ||
383 | /* setup termios */ | ||
384 | if (tty) | ||
385 | ark3116_set_termios(tty, port, NULL); | ||
386 | |||
387 | /* remove any data still left: also clears error state */ | 383 | /* remove any data still left: also clears error state */ |
388 | ark3116_read_reg(serial, UART_RX, buf); | 384 | ark3116_read_reg(serial, UART_RX, buf); |
389 | 385 | ||
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
406 | /* enable DMA */ | 402 | /* enable DMA */ |
407 | ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); | 403 | ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); |
408 | 404 | ||
405 | /* setup termios */ | ||
406 | if (tty) | ||
407 | ark3116_set_termios(tty, port, NULL); | ||
408 | |||
409 | err_out: | 409 | err_out: |
410 | kfree(buf); | 410 | kfree(buf); |
411 | return result; | 411 | return result; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8fe034d2d3e7..bd4298bb6750 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -2104,13 +2104,19 @@ static void ftdi_set_termios(struct tty_struct *tty, | |||
2104 | 2104 | ||
2105 | cflag = termios->c_cflag; | 2105 | cflag = termios->c_cflag; |
2106 | 2106 | ||
2107 | /* FIXME -For this cut I don't care if the line is really changing or | 2107 | if (old_termios->c_cflag == termios->c_cflag |
2108 | not - so just do the change regardless - should be able to | 2108 | && old_termios->c_ispeed == termios->c_ispeed |
2109 | compare old_termios and tty->termios */ | 2109 | && old_termios->c_ospeed == termios->c_ospeed) |
2110 | goto no_c_cflag_changes; | ||
2111 | |||
2110 | /* NOTE These routines can get interrupted by | 2112 | /* NOTE These routines can get interrupted by |
2111 | ftdi_sio_read_bulk_callback - need to examine what this means - | 2113 | ftdi_sio_read_bulk_callback - need to examine what this means - |
2112 | don't see any problems yet */ | 2114 | don't see any problems yet */ |
2113 | 2115 | ||
2116 | if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) == | ||
2117 | (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB))) | ||
2118 | goto no_data_parity_stop_changes; | ||
2119 | |||
2114 | /* Set number of data bits, parity, stop bits */ | 2120 | /* Set number of data bits, parity, stop bits */ |
2115 | 2121 | ||
2116 | urb_value = 0; | 2122 | urb_value = 0; |
@@ -2151,6 +2157,7 @@ static void ftdi_set_termios(struct tty_struct *tty, | |||
2151 | } | 2157 | } |
2152 | 2158 | ||
2153 | /* Now do the baudrate */ | 2159 | /* Now do the baudrate */ |
2160 | no_data_parity_stop_changes: | ||
2154 | if ((cflag & CBAUD) == B0) { | 2161 | if ((cflag & CBAUD) == B0) { |
2155 | /* Disable flow control */ | 2162 | /* Disable flow control */ |
2156 | if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 2163 | if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
@@ -2178,6 +2185,7 @@ static void ftdi_set_termios(struct tty_struct *tty, | |||
2178 | 2185 | ||
2179 | /* Set flow control */ | 2186 | /* Set flow control */ |
2180 | /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ | 2187 | /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ |
2188 | no_c_cflag_changes: | ||
2181 | if (cflag & CRTSCTS) { | 2189 | if (cflag & CRTSCTS) { |
2182 | dbg("%s Setting to CRTSCTS flow control", __func__); | 2190 | dbg("%s Setting to CRTSCTS flow control", __func__); |
2183 | if (usb_control_msg(dev, | 2191 | if (usb_control_msg(dev, |
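
The ftdi_sio change above replaces the "always reprogram" FIXME with early-out checks so the device is only reconfigured when the relevant termios fields actually changed. The comparison it introduces amounts to the helper below; this is illustrative only, the driver open-codes it:

    #include <linux/tty.h>

    /* True when nothing the chip cares about has changed, so the
     * set_termios path can skip the control transfers entirely. */
    static bool demo_termios_unchanged(const struct ktermios *old,
                                       const struct ktermios *new)
    {
            return old->c_cflag  == new->c_cflag &&
                   old->c_ispeed == new->c_ispeed &&
                   old->c_ospeed == new->c_ospeed;
    }
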
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 89ae1f65e1b1..d865878c9f97 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb); | |||
156 | #define HUAWEI_PRODUCT_K4511 0x14CC | 156 | #define HUAWEI_PRODUCT_K4511 0x14CC |
157 | #define HUAWEI_PRODUCT_ETS1220 0x1803 | 157 | #define HUAWEI_PRODUCT_ETS1220 0x1803 |
158 | #define HUAWEI_PRODUCT_E353 0x1506 | 158 | #define HUAWEI_PRODUCT_E353 0x1506 |
159 | #define HUAWEI_PRODUCT_E173S 0x1C05 | ||
159 | 160 | ||
160 | #define QUANTA_VENDOR_ID 0x0408 | 161 | #define QUANTA_VENDOR_ID 0x0408 |
161 | #define QUANTA_PRODUCT_Q101 0xEA02 | 162 | #define QUANTA_PRODUCT_Q101 0xEA02 |
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb); | |||
316 | #define ZTE_PRODUCT_AC8710 0xfff1 | 317 | #define ZTE_PRODUCT_AC8710 0xfff1 |
317 | #define ZTE_PRODUCT_AC2726 0xfff5 | 318 | #define ZTE_PRODUCT_AC2726 0xfff5 |
318 | #define ZTE_PRODUCT_AC8710T 0xffff | 319 | #define ZTE_PRODUCT_AC8710T 0xffff |
320 | #define ZTE_PRODUCT_MC2718 0xffe8 | ||
321 | #define ZTE_PRODUCT_AD3812 0xffeb | ||
322 | #define ZTE_PRODUCT_MC2716 0xffed | ||
319 | 323 | ||
320 | #define BENQ_VENDOR_ID 0x04a5 | 324 | #define BENQ_VENDOR_ID 0x04a5 |
321 | #define BENQ_PRODUCT_H10 0x4068 | 325 | #define BENQ_PRODUCT_H10 0x4068 |
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb); | |||
468 | #define YUGA_PRODUCT_CLU528 0x260D | 472 | #define YUGA_PRODUCT_CLU528 0x260D |
469 | #define YUGA_PRODUCT_CLU526 0x260F | 473 | #define YUGA_PRODUCT_CLU526 0x260F |
470 | 474 | ||
475 | /* Viettel products */ | ||
476 | #define VIETTEL_VENDOR_ID 0x2262 | ||
477 | #define VIETTEL_PRODUCT_VT1000 0x0002 | ||
478 | |||
471 | /* some devices interfaces need special handling due to a number of reasons */ | 479 | /* some devices interfaces need special handling due to a number of reasons */ |
472 | enum option_blacklist_reason { | 480 | enum option_blacklist_reason { |
473 | OPTION_BLACKLIST_NONE = 0, | 481 | OPTION_BLACKLIST_NONE = 0, |
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = { | |||
500 | .reserved = BIT(4), | 508 | .reserved = BIT(4), |
501 | }; | 509 | }; |
502 | 510 | ||
511 | static const struct option_blacklist_info zte_ad3812_z_blacklist = { | ||
512 | .sendsetup = BIT(0) | BIT(1) | BIT(2), | ||
513 | }; | ||
514 | |||
515 | static const struct option_blacklist_info zte_mc2718_z_blacklist = { | ||
516 | .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), | ||
517 | }; | ||
518 | |||
519 | static const struct option_blacklist_info zte_mc2716_z_blacklist = { | ||
520 | .sendsetup = BIT(1) | BIT(2) | BIT(3), | ||
521 | }; | ||
522 | |||
503 | static const struct option_blacklist_info huawei_cdc12_blacklist = { | 523 | static const struct option_blacklist_info huawei_cdc12_blacklist = { |
504 | .reserved = BIT(1) | BIT(2), | 524 | .reserved = BIT(1) | BIT(2), |
505 | }; | 525 | }; |
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = { | |||
622 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, | 642 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, |
623 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, | 643 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, |
624 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, | 644 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, |
645 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, | ||
625 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), | 646 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), |
626 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, | 647 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, |
627 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), | 648 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), |
@@ -1043,6 +1064,12 @@ static const struct usb_device_id option_ids[] = { | |||
1043 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, | 1064 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, |
1044 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, | 1065 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, |
1045 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, | 1066 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, |
1067 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), | ||
1068 | .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, | ||
1069 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), | ||
1070 | .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, | ||
1071 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), | ||
1072 | .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, | ||
1046 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 1073 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
1047 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 1074 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
1048 | { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ | 1075 | { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ |
@@ -1141,6 +1168,7 @@ static const struct usb_device_id option_ids[] = { | |||
1141 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, | 1168 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, |
1142 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, | 1169 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, |
1143 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, | 1170 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, |
1171 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, | ||
1144 | { } /* Terminating entry */ | 1172 | { } /* Terminating entry */ |
1145 | }; | 1173 | }; |
1146 | MODULE_DEVICE_TABLE(usb, option_ids); | 1174 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 9083d1e616b4..fc2d66f7f4eb 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = { | |||
91 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | 91 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
92 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, | 92 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
93 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, | 93 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, |
94 | { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) }, | ||
95 | { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, | 94 | { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, |
96 | { } /* Terminating entry */ | 95 | { } /* Terminating entry */ |
97 | }; | 96 | }; |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3d10d7f02072..c38b8c00c06f 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -145,10 +145,6 @@ | |||
145 | #define ADLINK_VENDOR_ID 0x0b63 | 145 | #define ADLINK_VENDOR_ID 0x0b63 |
146 | #define ADLINK_ND6530_PRODUCT_ID 0x6530 | 146 | #define ADLINK_ND6530_PRODUCT_ID 0x6530 |
147 | 147 | ||
148 | /* WinChipHead USB->RS 232 adapter */ | ||
149 | #define WINCHIPHEAD_VENDOR_ID 0x4348 | ||
150 | #define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523 | ||
151 | |||
152 | /* SMART USB Serial Adapter */ | 148 | /* SMART USB Serial Adapter */ |
153 | #define SMART_VENDOR_ID 0x0b8c | 149 | #define SMART_VENDOR_ID 0x0b8c |
154 | #define SMART_PRODUCT_ID 0x2303 | 150 | #define SMART_PRODUCT_ID 0x2303 |
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 4dca3ef0668c..9fbe742343c6 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c | |||
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) | |||
1762 | result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); | 1762 | result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); |
1763 | } else { | 1763 | } else { |
1764 | void *buf; | 1764 | void *buf; |
1765 | int offset; | 1765 | int offset = 0; |
1766 | u16 PhyBlockAddr; | 1766 | u16 PhyBlockAddr; |
1767 | u8 PageNum; | 1767 | u8 PageNum; |
1768 | u32 result; | ||
1769 | u16 len, oldphy, newphy; | 1768 | u16 len, oldphy, newphy; |
1770 | 1769 | ||
1771 | buf = kmalloc(blenByte, GFP_KERNEL); | 1770 | buf = kmalloc(blenByte, GFP_KERNEL); |
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 93c1a4d86f51..82dd834709c7 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c | |||
@@ -59,7 +59,9 @@ | |||
59 | 59 | ||
60 | void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) | 60 | void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) |
61 | { | 61 | { |
62 | /* Pad the SCSI command with zeros out to 12 bytes | 62 | /* |
63 | * Pad the SCSI command with zeros out to 12 bytes. If the | ||
64 | * command already is 12 bytes or longer, leave it alone. | ||
63 | * | 65 | * |
64 | * NOTE: This only works because a scsi_cmnd struct field contains | 66 | * NOTE: This only works because a scsi_cmnd struct field contains |
65 | * a unsigned char cmnd[16], so we know we have storage available | 67 | * a unsigned char cmnd[16], so we know we have storage available |
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) | |||
67 | for (; srb->cmd_len<12; srb->cmd_len++) | 69 | for (; srb->cmd_len<12; srb->cmd_len++) |
68 | srb->cmnd[srb->cmd_len] = 0; | 70 | srb->cmnd[srb->cmd_len] = 0; |
69 | 71 | ||
70 | /* set command length to 12 bytes */ | ||
71 | srb->cmd_len = 12; | ||
72 | |||
73 | /* send the command to the transport layer */ | 72 | /* send the command to the transport layer */ |
74 | usb_stor_invoke_transport(srb, us); | 73 | usb_stor_invoke_transport(srb, us); |
75 | } | 74 | } |
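
The protocol.c change clarifies that the padding helper only pads commands shorter than 12 bytes and no longer clamps cmd_len down to 12 for longer ones. Shown standalone for clarity (the real code operates on srb->cmnd, which has room for 16 bytes), the remaining behaviour is just this loop:

    /* Zero-pad a SCSI CDB out to 12 bytes; anything already 12 bytes
     * or longer is passed through untouched. */
    static void demo_pad12(unsigned char *cmnd, unsigned int *cmd_len)
    {
            for (; *cmd_len < 12; (*cmd_len)++)
                    cmnd[*cmd_len] = 0;
    }
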
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 55f91d9ab00b..29577bf1f559 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c | |||
@@ -116,6 +116,7 @@ | |||
116 | /* Clock registers available only on Version 2 */ | 116 | /* Clock registers available only on Version 2 */ |
117 | #define LCD_CLK_ENABLE_REG 0x6c | 117 | #define LCD_CLK_ENABLE_REG 0x6c |
118 | #define LCD_CLK_RESET_REG 0x70 | 118 | #define LCD_CLK_RESET_REG 0x70 |
119 | #define LCD_CLK_MAIN_RESET BIT(3) | ||
119 | 120 | ||
120 | #define LCD_NUM_BUFFERS 2 | 121 | #define LCD_NUM_BUFFERS 2 |
121 | 122 | ||
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void) | |||
244 | { | 245 | { |
245 | u32 reg; | 246 | u32 reg; |
246 | 247 | ||
248 | /* Bring LCDC out of reset */ | ||
249 | if (lcd_revision == LCD_VERSION_2) | ||
250 | lcdc_write(0, LCD_CLK_RESET_REG); | ||
251 | |||
247 | reg = lcdc_read(LCD_RASTER_CTRL_REG); | 252 | reg = lcdc_read(LCD_RASTER_CTRL_REG); |
248 | if (!(reg & LCD_RASTER_ENABLE)) | 253 | if (!(reg & LCD_RASTER_ENABLE)) |
249 | lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); | 254 | lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); |
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void) | |||
257 | reg = lcdc_read(LCD_RASTER_CTRL_REG); | 262 | reg = lcdc_read(LCD_RASTER_CTRL_REG); |
258 | if (reg & LCD_RASTER_ENABLE) | 263 | if (reg & LCD_RASTER_ENABLE) |
259 | lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); | 264 | lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); |
265 | |||
266 | if (lcd_revision == LCD_VERSION_2) | ||
267 | /* Write 1 to reset LCDC */ | ||
268 | lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); | ||
260 | } | 269 | } |
261 | 270 | ||
262 | static void lcd_blit(int load_mode, struct da8xx_fb_par *par) | 271 | static void lcd_blit(int load_mode, struct da8xx_fb_par *par) |
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par) | |||
584 | lcdc_write(0, LCD_DMA_CTRL_REG); | 593 | lcdc_write(0, LCD_DMA_CTRL_REG); |
585 | lcdc_write(0, LCD_RASTER_CTRL_REG); | 594 | lcdc_write(0, LCD_RASTER_CTRL_REG); |
586 | 595 | ||
587 | if (lcd_revision == LCD_VERSION_2) | 596 | if (lcd_revision == LCD_VERSION_2) { |
588 | lcdc_write(0, LCD_INT_ENABLE_SET_REG); | 597 | lcdc_write(0, LCD_INT_ENABLE_SET_REG); |
598 | /* Write 1 to reset */ | ||
599 | lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); | ||
600 | lcdc_write(0, LCD_CLK_RESET_REG); | ||
601 | } | ||
589 | } | 602 | } |
590 | 603 | ||
591 | static void lcd_calc_clk_divider(struct da8xx_fb_par *par) | 604 | static void lcd_calc_clk_divider(struct da8xx_fb_par *par) |
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index 0ccd7adf47bb..6f61e781f15a 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
20 | */ | 20 | */ |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | ||
22 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
23 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
24 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 3532782551cb..5c81533eacaa 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, | |||
1720 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); | 1720 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); |
1721 | unsigned long fclk = 0; | 1721 | unsigned long fclk = 0; |
1722 | 1722 | ||
1723 | if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { | 1723 | if (width == out_width && height == out_height) |
1724 | if (width != out_width || height != out_height) | 1724 | return 0; |
1725 | return -EINVAL; | 1725 | |
1726 | else | 1726 | if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) |
1727 | return 0; | 1727 | return -EINVAL; |
1728 | } | ||
1729 | 1728 | ||
1730 | if (out_width < width / maxdownscale || | 1729 | if (out_width < width / maxdownscale || |
1731 | out_width > width * 8) | 1730 | out_width > width * 8) |
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 3262f0f1fa35..c56378c555b0 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c | |||
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg, | |||
269 | unsigned long hdmi_get_pixel_clock(void) | 269 | unsigned long hdmi_get_pixel_clock(void) |
270 | { | 270 | { |
271 | /* HDMI Pixel Clock in Mhz */ | 271 | /* HDMI Pixel Clock in Mhz */ |
272 | return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000; | 272 | return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000; |
273 | } | 273 | } |
274 | 274 | ||
275 | static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, | 275 | static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, |
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h index 69d882cbe709..c01c1c162726 100644 --- a/drivers/video/via/share.h +++ b/drivers/video/via/share.h | |||
@@ -559,8 +559,8 @@ | |||
559 | #define M1200X720_R60_VSP POSITIVE | 559 | #define M1200X720_R60_VSP POSITIVE |
560 | 560 | ||
561 | /* 1200x900@60 Sync Polarity (DCON) */ | 561 | /* 1200x900@60 Sync Polarity (DCON) */ |
562 | #define M1200X900_R60_HSP NEGATIVE | 562 | #define M1200X900_R60_HSP POSITIVE |
563 | #define M1200X900_R60_VSP NEGATIVE | 563 | #define M1200X900_R60_VSP POSITIVE |
564 | 564 | ||
565 | /* 1280x600@60 Sync Polarity (GTF Mode) */ | 565 | /* 1280x600@60 Sync Polarity (GTF Mode) */ |
566 | #define M1280x600_R60_HSP NEGATIVE | 566 | #define M1280x600_R60_HSP NEGATIVE |
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 816ed08e7cf3..1a61939b85fc 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig | |||
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON | |||
37 | 37 | ||
38 | config VIRTIO_MMIO | 38 | config VIRTIO_MMIO |
39 | tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" | 39 | tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" |
40 | depends on EXPERIMENTAL | 40 | depends on HAS_IOMEM && EXPERIMENTAL |
41 | select VIRTIO | 41 | select VIRTIO |
42 | select VIRTIO_RING | 42 | select VIRTIO_RING |
43 | ---help--- | 43 | ---help--- |
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 2f57380d7ed4..0269717436af 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev) | |||
118 | vring_transport_features(vdev); | 118 | vring_transport_features(vdev); |
119 | 119 | ||
120 | for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { | 120 | for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { |
121 | writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); | 121 | writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); |
122 | writel(vdev->features[i], | 122 | writel(vdev->features[i], |
123 | vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); | 123 | vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); |
124 | } | 124 | } |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 91683e6e7af5..baabb7937ec2 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status) | |||
169 | iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | 169 | iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); |
170 | } | 170 | } |
171 | 171 | ||
172 | /* wait for pending irq handlers */ | ||
173 | static void vp_synchronize_vectors(struct virtio_device *vdev) | ||
174 | { | ||
175 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
176 | int i; | ||
177 | |||
178 | if (vp_dev->intx_enabled) | ||
179 | synchronize_irq(vp_dev->pci_dev->irq); | ||
180 | |||
181 | for (i = 0; i < vp_dev->msix_vectors; ++i) | ||
182 | synchronize_irq(vp_dev->msix_entries[i].vector); | ||
183 | } | ||
184 | |||
172 | static void vp_reset(struct virtio_device *vdev) | 185 | static void vp_reset(struct virtio_device *vdev) |
173 | { | 186 | { |
174 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 187 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
175 | /* 0 status means a reset. */ | 188 | /* 0 status means a reset. */ |
176 | iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | 189 | iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); |
190 | /* Flush out the status write, and flush in device writes, | ||
191 | * including MSi-X interrupts, if any. */ | ||
192 | ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); | ||
193 | /* Flush pending VQ/configuration callbacks. */ | ||
194 | vp_synchronize_vectors(vdev); | ||
177 | } | 195 | } |
178 | 196 | ||
179 | /* the notify function used when creating a virt queue */ | 197 | /* the notify function used when creating a virt queue */ |
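
The virtio_pci hunk adds a flush step after writing the reset: read the status register back so the posted write reaches the device, then synchronize_irq() on the legacy line and every MSI-X vector so no interrupt handler is still running against the reset device. Reduced to its essentials; the register wiring and the irq list are placeholders, not the real vp_dev layout:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    static void demo_reset_and_quiesce(void __iomem *status_reg,
                                       const unsigned int *irqs, int nr_irqs)
    {
            int i;

            iowrite8(0, status_reg);        /* writing 0 means reset */
            ioread8(status_reg);            /* flush the posted write */

            /* Wait until any in-flight handlers for these vectors finish. */
            for (i = 0; i < nr_irqs; i++)
                    synchronize_irq(irqs[i]);
    }
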
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 6285867a9356..79fd606b7cd5 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG | |||
314 | To compile this driver as a module, choose M here: the | 314 | To compile this driver as a module, choose M here: the |
315 | module will be called nuc900_wdt. | 315 | module will be called nuc900_wdt. |
316 | 316 | ||
317 | config ADX_WATCHDOG | ||
318 | tristate "Avionic Design Xanthos watchdog" | ||
319 | depends on ARCH_PXA_ADX | ||
320 | help | ||
321 | Say Y here if you want support for the watchdog timer on Avionic | ||
322 | Design Xanthos boards. | ||
323 | |||
324 | config TS72XX_WATCHDOG | 317 | config TS72XX_WATCHDOG |
325 | tristate "TS-72XX SBC Watchdog" | 318 | tristate "TS-72XX SBC Watchdog" |
326 | depends on MACH_TS72XX | 319 | depends on MACH_TS72XX |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 55bd5740e910..fe893e91935b 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o | |||
51 | obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o | 51 | obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o |
52 | obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o | 52 | obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o |
53 | obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o | 53 | obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o |
54 | obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o | ||
55 | obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o | 54 | obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o |
56 | obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o | 55 | obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o |
57 | 56 | ||
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c deleted file mode 100644 index af6e6b16475a..000000000000 --- a/drivers/watchdog/adx_wdt.c +++ /dev/null | |||
@@ -1,355 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Avionic Design GmbH | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/fs.h> | ||
10 | #include <linux/gfp.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <linux/watchdog.h> | ||
18 | |||
19 | #define WATCHDOG_NAME "adx-wdt" | ||
20 | |||
21 | /* register offsets */ | ||
22 | #define ADX_WDT_CONTROL 0x00 | ||
23 | #define ADX_WDT_CONTROL_ENABLE (1 << 0) | ||
24 | #define ADX_WDT_CONTROL_nRESET (1 << 1) | ||
25 | #define ADX_WDT_TIMEOUT 0x08 | ||
26 | |||
27 | static struct platform_device *adx_wdt_dev; | ||
28 | static unsigned long driver_open; | ||
29 | |||
30 | #define WDT_STATE_STOP 0 | ||
31 | #define WDT_STATE_START 1 | ||
32 | |||
33 | struct adx_wdt { | ||
34 | void __iomem *base; | ||
35 | unsigned long timeout; | ||
36 | unsigned int state; | ||
37 | unsigned int wake; | ||
38 | spinlock_t lock; | ||
39 | }; | ||
40 | |||
41 | static const struct watchdog_info adx_wdt_info = { | ||
42 | .identity = "Avionic Design Xanthos Watchdog", | ||
43 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | ||
44 | }; | ||
45 | |||
46 | static void adx_wdt_start_locked(struct adx_wdt *wdt) | ||
47 | { | ||
48 | u32 ctrl; | ||
49 | |||
50 | ctrl = readl(wdt->base + ADX_WDT_CONTROL); | ||
51 | ctrl |= ADX_WDT_CONTROL_ENABLE; | ||
52 | writel(ctrl, wdt->base + ADX_WDT_CONTROL); | ||
53 | wdt->state = WDT_STATE_START; | ||
54 | } | ||
55 | |||
56 | static void adx_wdt_start(struct adx_wdt *wdt) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | |||
60 | spin_lock_irqsave(&wdt->lock, flags); | ||
61 | adx_wdt_start_locked(wdt); | ||
62 | spin_unlock_irqrestore(&wdt->lock, flags); | ||
63 | } | ||
64 | |||
65 | static void adx_wdt_stop_locked(struct adx_wdt *wdt) | ||
66 | { | ||
67 | u32 ctrl; | ||
68 | |||
69 | ctrl = readl(wdt->base + ADX_WDT_CONTROL); | ||
70 | ctrl &= ~ADX_WDT_CONTROL_ENABLE; | ||
71 | writel(ctrl, wdt->base + ADX_WDT_CONTROL); | ||
72 | wdt->state = WDT_STATE_STOP; | ||
73 | } | ||
74 | |||
75 | static void adx_wdt_stop(struct adx_wdt *wdt) | ||
76 | { | ||
77 | unsigned long flags; | ||
78 | |||
79 | spin_lock_irqsave(&wdt->lock, flags); | ||
80 | adx_wdt_stop_locked(wdt); | ||
81 | spin_unlock_irqrestore(&wdt->lock, flags); | ||
82 | } | ||
83 | |||
84 | static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds) | ||
85 | { | ||
86 | unsigned long timeout = seconds * 1000; | ||
87 | unsigned long flags; | ||
88 | unsigned int state; | ||
89 | |||
90 | spin_lock_irqsave(&wdt->lock, flags); | ||
91 | state = wdt->state; | ||
92 | adx_wdt_stop_locked(wdt); | ||
93 | writel(timeout, wdt->base + ADX_WDT_TIMEOUT); | ||
94 | |||
95 | if (state == WDT_STATE_START) | ||
96 | adx_wdt_start_locked(wdt); | ||
97 | |||
98 | wdt->timeout = timeout; | ||
99 | spin_unlock_irqrestore(&wdt->lock, flags); | ||
100 | } | ||
101 | |||
102 | static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds) | ||
103 | { | ||
104 | *seconds = wdt->timeout / 1000; | ||
105 | } | ||
106 | |||
107 | static void adx_wdt_keepalive(struct adx_wdt *wdt) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | |||
111 | spin_lock_irqsave(&wdt->lock, flags); | ||
112 | writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT); | ||
113 | spin_unlock_irqrestore(&wdt->lock, flags); | ||
114 | } | ||
115 | |||
116 | static int adx_wdt_open(struct inode *inode, struct file *file) | ||
117 | { | ||
118 | struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev); | ||
119 | |||
120 | if (test_and_set_bit(0, &driver_open)) | ||
121 | return -EBUSY; | ||
122 | |||
123 | file->private_data = wdt; | ||
124 | adx_wdt_set_timeout(wdt, 30); | ||
125 | adx_wdt_start(wdt); | ||
126 | |||
127 | return nonseekable_open(inode, file); | ||
128 | } | ||
129 | |||
130 | static int adx_wdt_release(struct inode *inode, struct file *file) | ||
131 | { | ||
132 | struct adx_wdt *wdt = file->private_data; | ||
133 | |||
134 | adx_wdt_stop(wdt); | ||
135 | clear_bit(0, &driver_open); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
141 | { | ||
142 | struct adx_wdt *wdt = file->private_data; | ||
143 | void __user *argp = (void __user *)arg; | ||
144 | unsigned long __user *p = argp; | ||
145 | unsigned long seconds = 0; | ||
146 | unsigned int options; | ||
147 | long ret = -EINVAL; | ||
148 | |||
149 | switch (cmd) { | ||
150 | case WDIOC_GETSUPPORT: | ||
151 | if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info))) | ||
152 | return -EFAULT; | ||
153 | else | ||
154 | return 0; | ||
155 | |||
156 | case WDIOC_GETSTATUS: | ||
157 | case WDIOC_GETBOOTSTATUS: | ||
158 | return put_user(0, p); | ||
159 | |||
160 | case WDIOC_KEEPALIVE: | ||
161 | adx_wdt_keepalive(wdt); | ||
162 | return 0; | ||
163 | |||
164 | case WDIOC_SETTIMEOUT: | ||
165 | if (get_user(seconds, p)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | adx_wdt_set_timeout(wdt, seconds); | ||
169 | |||
170 | /* fallthrough */ | ||
171 | case WDIOC_GETTIMEOUT: | ||
172 | adx_wdt_get_timeout(wdt, &seconds); | ||
173 | return put_user(seconds, p); | ||
174 | |||
175 | case WDIOC_SETOPTIONS: | ||
176 | if (copy_from_user(&options, argp, sizeof(options))) | ||
177 | return -EFAULT; | ||
178 | |||
179 | if (options & WDIOS_DISABLECARD) { | ||
180 | adx_wdt_stop(wdt); | ||
181 | ret = 0; | ||
182 | } | ||
183 | |||
184 | if (options & WDIOS_ENABLECARD) { | ||
185 | adx_wdt_start(wdt); | ||
186 | ret = 0; | ||
187 | } | ||
188 | |||
189 | return ret; | ||
190 | |||
191 | default: | ||
192 | break; | ||
193 | } | ||
194 | |||
195 | return -ENOTTY; | ||
196 | } | ||
197 | |||
198 | static ssize_t adx_wdt_write(struct file *file, const char __user *data, | ||
199 | size_t len, loff_t *ppos) | ||
200 | { | ||
201 | struct adx_wdt *wdt = file->private_data; | ||
202 | |||
203 | if (len) | ||
204 | adx_wdt_keepalive(wdt); | ||
205 | |||
206 | return len; | ||
207 | } | ||
208 | |||
209 | static const struct file_operations adx_wdt_fops = { | ||
210 | .owner = THIS_MODULE, | ||
211 | .llseek = no_llseek, | ||
212 | .open = adx_wdt_open, | ||
213 | .release = adx_wdt_release, | ||
214 | .unlocked_ioctl = adx_wdt_ioctl, | ||
215 | .write = adx_wdt_write, | ||
216 | }; | ||
217 | |||
218 | static struct miscdevice adx_wdt_miscdev = { | ||
219 | .minor = WATCHDOG_MINOR, | ||
220 | .name = "watchdog", | ||
221 | .fops = &adx_wdt_fops, | ||
222 | }; | ||
223 | |||
224 | static int __devinit adx_wdt_probe(struct platform_device *pdev) | ||
225 | { | ||
226 | struct resource *res; | ||
227 | struct adx_wdt *wdt; | ||
228 | int ret = 0; | ||
229 | u32 ctrl; | ||
230 | |||
231 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); | ||
232 | if (!wdt) { | ||
233 | dev_err(&pdev->dev, "cannot allocate WDT structure\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | |||
237 | spin_lock_init(&wdt->lock); | ||
238 | |||
239 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
240 | if (!res) { | ||
241 | dev_err(&pdev->dev, "cannot obtain I/O memory region\n"); | ||
242 | return -ENXIO; | ||
243 | } | ||
244 | |||
245 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
246 | resource_size(res), res->name); | ||
247 | if (!res) { | ||
248 | dev_err(&pdev->dev, "cannot request I/O memory region\n"); | ||
249 | return -ENXIO; | ||
250 | } | ||
251 | |||
252 | wdt->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
253 | resource_size(res)); | ||
254 | if (!wdt->base) { | ||
255 | dev_err(&pdev->dev, "cannot remap I/O memory region\n"); | ||
256 | return -ENXIO; | ||
257 | } | ||
258 | |||
259 | /* disable watchdog and reboot on timeout */ | ||
260 | ctrl = readl(wdt->base + ADX_WDT_CONTROL); | ||
261 | ctrl &= ~ADX_WDT_CONTROL_ENABLE; | ||
262 | ctrl &= ~ADX_WDT_CONTROL_nRESET; | ||
263 | writel(ctrl, wdt->base + ADX_WDT_CONTROL); | ||
264 | |||
265 | platform_set_drvdata(pdev, wdt); | ||
266 | adx_wdt_dev = pdev; | ||
267 | |||
268 | ret = misc_register(&adx_wdt_miscdev); | ||
269 | if (ret) { | ||
270 | dev_err(&pdev->dev, "cannot register miscdev on minor %d " | ||
271 | "(err=%d)\n", WATCHDOG_MINOR, ret); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int __devexit adx_wdt_remove(struct platform_device *pdev) | ||
279 | { | ||
280 | struct adx_wdt *wdt = platform_get_drvdata(pdev); | ||
281 | |||
282 | misc_deregister(&adx_wdt_miscdev); | ||
283 | adx_wdt_stop(wdt); | ||
284 | platform_set_drvdata(pdev, NULL); | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static void adx_wdt_shutdown(struct platform_device *pdev) | ||
290 | { | ||
291 | struct adx_wdt *wdt = platform_get_drvdata(pdev); | ||
292 | adx_wdt_stop(wdt); | ||
293 | } | ||
294 | |||
295 | #ifdef CONFIG_PM | ||
296 | static int adx_wdt_suspend(struct device *dev) | ||
297 | { | ||
298 | struct platform_device *pdev = to_platform_device(dev); | ||
299 | struct adx_wdt *wdt = platform_get_drvdata(pdev); | ||
300 | |||
301 | wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0; | ||
302 | adx_wdt_stop(wdt); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int adx_wdt_resume(struct device *dev) | ||
308 | { | ||
309 | struct platform_device *pdev = to_platform_device(dev); | ||
310 | struct adx_wdt *wdt = platform_get_drvdata(pdev); | ||
311 | |||
312 | if (wdt->wake) | ||
313 | adx_wdt_start(wdt); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static const struct dev_pm_ops adx_wdt_pm_ops = { | ||
319 | .suspend = adx_wdt_suspend, | ||
320 | .resume = adx_wdt_resume, | ||
321 | }; | ||
322 | |||
323 | # define ADX_WDT_PM_OPS (&adx_wdt_pm_ops) | ||
324 | #else | ||
325 | # define ADX_WDT_PM_OPS NULL | ||
326 | #endif | ||
327 | |||
328 | static struct platform_driver adx_wdt_driver = { | ||
329 | .probe = adx_wdt_probe, | ||
330 | .remove = __devexit_p(adx_wdt_remove), | ||
331 | .shutdown = adx_wdt_shutdown, | ||
332 | .driver = { | ||
333 | .name = WATCHDOG_NAME, | ||
334 | .owner = THIS_MODULE, | ||
335 | .pm = ADX_WDT_PM_OPS, | ||
336 | }, | ||
337 | }; | ||
338 | |||
339 | static int __init adx_wdt_init(void) | ||
340 | { | ||
341 | return platform_driver_register(&adx_wdt_driver); | ||
342 | } | ||
343 | |||
344 | static void __exit adx_wdt_exit(void) | ||
345 | { | ||
346 | platform_driver_unregister(&adx_wdt_driver); | ||
347 | } | ||
348 | |||
349 | module_init(adx_wdt_init); | ||
350 | module_exit(adx_wdt_exit); | ||
351 | |||
352 | MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver"); | ||
353 | MODULE_LICENSE("GPL v2"); | ||
354 | MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); | ||
355 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
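For context on the adx_wdt interface shown above: like other miscdev watchdogs, the driver is armed and fed from userspace through /dev/watchdog, where any write counts as a keepalive and WDIOC_SETOPTIONS starts or stops the card. A minimal userspace sketch (illustrative only, not part of this commit):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int options = WDIOS_ENABLECARD;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETOPTIONS, &options);	/* start the watchdog */
	for (;;) {
		write(fd, "k", 1);		/* any write is a keepalive */
		sleep(10);
	}
}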
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 5de7e4fa5b8a..a79e3840782a 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
401 | 401 | ||
402 | dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", | 402 | dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", |
403 | (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", | 403 | (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", |
404 | (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis", | 404 | (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis", |
405 | (wtcon & S3C2410_WTCON_INTEN) ? "" : "en"); | 405 | (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis"); |
406 | 406 | ||
407 | return 0; | 407 | return 0; |
408 | 408 | ||
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index 7be38556aed0..e789a47db41f 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c | |||
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev, | |||
150 | if (wm831x_wdt_cfgs[i].time == timeout) | 150 | if (wm831x_wdt_cfgs[i].time == timeout) |
151 | break; | 151 | break; |
152 | if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) | 152 | if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) |
153 | ret = -EINVAL; | 153 | return -EINVAL; |
154 | 154 | ||
155 | ret = wm831x_reg_unlock(wm831x); | 155 | ret = wm831x_reg_unlock(wm831x); |
156 | if (ret == 0) { | 156 | if (ret == 0) { |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 8855aad3929c..22c64fff1bd5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref, | |||
683 | return PTR_ERR(fspath); | 683 | return PTR_ERR(fspath); |
684 | 684 | ||
685 | if (fspath > fspath_min) { | 685 | if (fspath > fspath_min) { |
686 | ipath->fspath->val[i] = (u64)fspath; | 686 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; |
687 | ++ipath->fspath->elem_cnt; | 687 | ++ipath->fspath->elem_cnt; |
688 | ipath->fspath->bytes_left = fspath - fspath_min; | 688 | ipath->fspath->bytes_left = fspath - fspath_min; |
689 | } else { | 689 | } else { |
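Several hunks in this merge (backref.c above, plus ioctl.c and scrub.c further down) fix the same class of problem: a kernel pointer stored in a fixed-width u64 ioctl field has to be converted through unsigned long so the cast is well defined on both 32-bit and 64-bit builds. Stripped of the btrfs specifics, the idiom is (sketch; get_some_buffer() is a hypothetical helper):

char *buf = get_some_buffer();		/* hypothetical source of a kernel pointer */
u64 stash;

/* widen via unsigned long so the conversion is exact on 32- and 64-bit */
stash = (u64)(unsigned long)buf;

/* and narrow the same way when reading it back */
buf = (char *)(unsigned long)stash;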
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 0fe615e4ea38..dede441bdeee 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, | |||
514 | struct btrfs_root *root, | 514 | struct btrfs_root *root, |
515 | struct extent_buffer *buf) | 515 | struct extent_buffer *buf) |
516 | { | 516 | { |
517 | /* ensure we can see the force_cow */ | ||
518 | smp_rmb(); | ||
519 | |||
520 | /* | ||
521 | * We do not need to cow a block if | ||
522 | * 1) this block is not created or changed in this transaction; | ||
523 | * 2) this block does not belong to TREE_RELOC tree; | ||
524 | * 3) the root is not forced COW. | ||
525 | * | ||
526 | * What is forced COW: | ||
527 | * when we create a snapshot while committing the transaction, | ||
528 | * after we've finished copying the src root, we must COW the shared | ||
529 | * block to ensure the metadata consistency. | ||
530 | */ | ||
517 | if (btrfs_header_generation(buf) == trans->transid && | 531 | if (btrfs_header_generation(buf) == trans->transid && |
518 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && | 532 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && |
519 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && | 533 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
520 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) | 534 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && |
535 | !root->force_cow) | ||
521 | return 0; | 536 | return 0; |
522 | return 1; | 537 | return 1; |
523 | } | 538 | } |
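The smp_rmb() added to should_cow_block() pairs with the smp_wmb() that the transaction code later in this merge issues after updating root->force_cow, so a reader that sees the new tree state also sees the flag. Reduced to the bare publish/observe pattern mirrored from the ctree.c and transaction.c hunks (sketch):

/* writer (snapshot creation / commit): update the flag, then publish */
root->force_cow = 1;
smp_wmb();

/* reader (should_cow_block): order the read, then act on the flag */
smp_rmb();
if (root->force_cow)
	return 1;	/* must COW this block */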
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b9ba59ff9292..50634abef9b4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -848,7 +848,8 @@ struct btrfs_free_cluster { | |||
848 | enum btrfs_caching_type { | 848 | enum btrfs_caching_type { |
849 | BTRFS_CACHE_NO = 0, | 849 | BTRFS_CACHE_NO = 0, |
850 | BTRFS_CACHE_STARTED = 1, | 850 | BTRFS_CACHE_STARTED = 1, |
851 | BTRFS_CACHE_FINISHED = 2, | 851 | BTRFS_CACHE_FAST = 2, |
852 | BTRFS_CACHE_FINISHED = 3, | ||
852 | }; | 853 | }; |
853 | 854 | ||
854 | enum btrfs_disk_cache_state { | 855 | enum btrfs_disk_cache_state { |
@@ -1271,6 +1272,8 @@ struct btrfs_root { | |||
1271 | * for stat. It may be used for more later | 1272 | * for stat. It may be used for more later |
1272 | */ | 1273 | */ |
1273 | dev_t anon_dev; | 1274 | dev_t anon_dev; |
1275 | |||
1276 | int force_cow; | ||
1274 | }; | 1277 | }; |
1275 | 1278 | ||
1276 | struct btrfs_ioctl_defrag_range_args { | 1279 | struct btrfs_ioctl_defrag_range_args { |
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root, | |||
2366 | int btrfs_block_rsv_refill(struct btrfs_root *root, | 2369 | int btrfs_block_rsv_refill(struct btrfs_root *root, |
2367 | struct btrfs_block_rsv *block_rsv, | 2370 | struct btrfs_block_rsv *block_rsv, |
2368 | u64 min_reserved); | 2371 | u64 min_reserved); |
2372 | int btrfs_block_rsv_refill_noflush(struct btrfs_root *root, | ||
2373 | struct btrfs_block_rsv *block_rsv, | ||
2374 | u64 min_reserved); | ||
2369 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | 2375 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, |
2370 | struct btrfs_block_rsv *dst_rsv, | 2376 | struct btrfs_block_rsv *dst_rsv, |
2371 | u64 num_bytes); | 2377 | u64 num_bytes); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 62afe5c5694e..632f8f3cc9db 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -620,7 +620,7 @@ out: | |||
620 | 620 | ||
621 | static int btree_io_failed_hook(struct bio *failed_bio, | 621 | static int btree_io_failed_hook(struct bio *failed_bio, |
622 | struct page *page, u64 start, u64 end, | 622 | struct page *page, u64 start, u64 end, |
623 | u64 mirror_num, struct extent_state *state) | 623 | int mirror_num, struct extent_state *state) |
624 | { | 624 | { |
625 | struct extent_io_tree *tree; | 625 | struct extent_io_tree *tree; |
626 | unsigned long len; | 626 | unsigned long len; |
@@ -2573,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device, | |||
2573 | int errors = 0; | 2573 | int errors = 0; |
2574 | u32 crc; | 2574 | u32 crc; |
2575 | u64 bytenr; | 2575 | u64 bytenr; |
2576 | int last_barrier = 0; | ||
2577 | 2576 | ||
2578 | if (max_mirrors == 0) | 2577 | if (max_mirrors == 0) |
2579 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | 2578 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
2580 | 2579 | ||
2581 | /* make sure only the last submit_bh does a barrier */ | ||
2582 | if (do_barriers) { | ||
2583 | for (i = 0; i < max_mirrors; i++) { | ||
2584 | bytenr = btrfs_sb_offset(i); | ||
2585 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | ||
2586 | device->total_bytes) | ||
2587 | break; | ||
2588 | last_barrier = i; | ||
2589 | } | ||
2590 | } | ||
2591 | |||
2592 | for (i = 0; i < max_mirrors; i++) { | 2580 | for (i = 0; i < max_mirrors; i++) { |
2593 | bytenr = btrfs_sb_offset(i); | 2581 | bytenr = btrfs_sb_offset(i); |
2594 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) | 2582 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) |
@@ -2634,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device, | |||
2634 | bh->b_end_io = btrfs_end_buffer_write_sync; | 2622 | bh->b_end_io = btrfs_end_buffer_write_sync; |
2635 | } | 2623 | } |
2636 | 2624 | ||
2637 | if (i == last_barrier && do_barriers) | 2625 | /* |
2638 | ret = submit_bh(WRITE_FLUSH_FUA, bh); | 2626 | * we fua the first super. The others we allow |
2639 | else | 2627 | * to go down lazy. |
2640 | ret = submit_bh(WRITE_SYNC, bh); | 2628 | */ |
2641 | 2629 | ret = submit_bh(WRITE_FUA, bh); | |
2642 | if (ret) | 2630 | if (ret) |
2643 | errors++; | 2631 | errors++; |
2644 | } | 2632 | } |
2645 | return errors < i ? 0 : -1; | 2633 | return errors < i ? 0 : -1; |
2646 | } | 2634 | } |
2647 | 2635 | ||
2636 | /* | ||
2637 | * endio for write_dev_flush; this will wake anyone waiting | ||

2638 | * for the barrier when it is done | ||
2639 | */ | ||
2640 | static void btrfs_end_empty_barrier(struct bio *bio, int err) | ||
2641 | { | ||
2642 | if (err) { | ||
2643 | if (err == -EOPNOTSUPP) | ||
2644 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); | ||
2645 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
2646 | } | ||
2647 | if (bio->bi_private) | ||
2648 | complete(bio->bi_private); | ||
2649 | bio_put(bio); | ||
2650 | } | ||
2651 | |||
2652 | /* | ||
2653 | * trigger flushes for one of the devices. If you pass wait == 0, the flushes are | ||
2654 | * sent down. With wait == 1, it waits for the previous flush. | ||
2655 | * | ||
2656 | * any device where the flush fails with eopnotsupp is flagged as not-barrier | ||
2657 | * capable | ||
2658 | */ | ||
2659 | static int write_dev_flush(struct btrfs_device *device, int wait) | ||
2660 | { | ||
2661 | struct bio *bio; | ||
2662 | int ret = 0; | ||
2663 | |||
2664 | if (device->nobarriers) | ||
2665 | return 0; | ||
2666 | |||
2667 | if (wait) { | ||
2668 | bio = device->flush_bio; | ||
2669 | if (!bio) | ||
2670 | return 0; | ||
2671 | |||
2672 | wait_for_completion(&device->flush_wait); | ||
2673 | |||
2674 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | ||
2675 | printk("btrfs: disabling barriers on dev %s\n", | ||
2676 | device->name); | ||
2677 | device->nobarriers = 1; | ||
2678 | } | ||
2679 | if (!bio_flagged(bio, BIO_UPTODATE)) { | ||
2680 | ret = -EIO; | ||
2681 | } | ||
2682 | |||
2683 | /* drop the reference from the wait == 0 run */ | ||
2684 | bio_put(bio); | ||
2685 | device->flush_bio = NULL; | ||
2686 | |||
2687 | return ret; | ||
2688 | } | ||
2689 | |||
2690 | /* | ||
2691 | * one reference for us, and we leave it for the | ||
2692 | * caller | ||
2693 | */ | ||
2694 | device->flush_bio = NULL; | ||
2695 | bio = bio_alloc(GFP_NOFS, 0); | ||
2696 | if (!bio) | ||
2697 | return -ENOMEM; | ||
2698 | |||
2699 | bio->bi_end_io = btrfs_end_empty_barrier; | ||
2700 | bio->bi_bdev = device->bdev; | ||
2701 | init_completion(&device->flush_wait); | ||
2702 | bio->bi_private = &device->flush_wait; | ||
2703 | device->flush_bio = bio; | ||
2704 | |||
2705 | bio_get(bio); | ||
2706 | submit_bio(WRITE_FLUSH, bio); | ||
2707 | |||
2708 | return 0; | ||
2709 | } | ||
2710 | |||
2711 | /* | ||
2712 | * send an empty flush down to each device in parallel, | ||
2713 | * then wait for them | ||
2714 | */ | ||
2715 | static int barrier_all_devices(struct btrfs_fs_info *info) | ||
2716 | { | ||
2717 | struct list_head *head; | ||
2718 | struct btrfs_device *dev; | ||
2719 | int errors = 0; | ||
2720 | int ret; | ||
2721 | |||
2722 | /* send down all the barriers */ | ||
2723 | head = &info->fs_devices->devices; | ||
2724 | list_for_each_entry_rcu(dev, head, dev_list) { | ||
2725 | if (!dev->bdev) { | ||
2726 | errors++; | ||
2727 | continue; | ||
2728 | } | ||
2729 | if (!dev->in_fs_metadata || !dev->writeable) | ||
2730 | continue; | ||
2731 | |||
2732 | ret = write_dev_flush(dev, 0); | ||
2733 | if (ret) | ||
2734 | errors++; | ||
2735 | } | ||
2736 | |||
2737 | /* wait for all the barriers */ | ||
2738 | list_for_each_entry_rcu(dev, head, dev_list) { | ||
2739 | if (!dev->bdev) { | ||
2740 | errors++; | ||
2741 | continue; | ||
2742 | } | ||
2743 | if (!dev->in_fs_metadata || !dev->writeable) | ||
2744 | continue; | ||
2745 | |||
2746 | ret = write_dev_flush(dev, 1); | ||
2747 | if (ret) | ||
2748 | errors++; | ||
2749 | } | ||
2750 | if (errors) | ||
2751 | return -EIO; | ||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
2648 | int write_all_supers(struct btrfs_root *root, int max_mirrors) | 2755 | int write_all_supers(struct btrfs_root *root, int max_mirrors) |
2649 | { | 2756 | { |
2650 | struct list_head *head; | 2757 | struct list_head *head; |
@@ -2666,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
2666 | 2773 | ||
2667 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 2774 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
2668 | head = &root->fs_info->fs_devices->devices; | 2775 | head = &root->fs_info->fs_devices->devices; |
2776 | |||
2777 | if (do_barriers) | ||
2778 | barrier_all_devices(root->fs_info); | ||
2779 | |||
2669 | list_for_each_entry_rcu(dev, head, dev_list) { | 2780 | list_for_each_entry_rcu(dev, head, dev_list) { |
2670 | if (!dev->bdev) { | 2781 | if (!dev->bdev) { |
2671 | total_errors++; | 2782 | total_errors++; |
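The write_dev_supers()/barrier_all_devices() rework above replaces the old last-barrier bookkeeping with a two-phase scheme: submit an empty WRITE_FLUSH bio to every usable device first, then make a second pass that waits on each completion, so the device caches drain in parallel before the supers go down with WRITE_FUA. The shape of the pattern, with hypothetical start_flush()/wait_flush() helpers standing in for write_dev_flush(dev, 0) and write_dev_flush(dev, 1):

int errors = 0;

/* phase 1: kick off a flush on every device, do not wait yet */
list_for_each_entry_rcu(dev, head, dev_list)
	if (start_flush(dev))		/* allocates and submits an empty flush bio */
		errors++;

/* phase 2: wait for every flush we started, collecting failures */
list_for_each_entry_rcu(dev, head, dev_list)
	if (wait_flush(dev))		/* wait_for_completion() + BIO_UPTODATE check */
		errors++;

return errors ? -EIO : 0;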
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b232150b5b6b..f0d5718d2587 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
467 | struct btrfs_root *root, | 467 | struct btrfs_root *root, |
468 | int load_cache_only) | 468 | int load_cache_only) |
469 | { | 469 | { |
470 | DEFINE_WAIT(wait); | ||
470 | struct btrfs_fs_info *fs_info = cache->fs_info; | 471 | struct btrfs_fs_info *fs_info = cache->fs_info; |
471 | struct btrfs_caching_control *caching_ctl; | 472 | struct btrfs_caching_control *caching_ctl; |
472 | int ret = 0; | 473 | int ret = 0; |
473 | 474 | ||
474 | smp_mb(); | 475 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); |
475 | if (cache->cached != BTRFS_CACHE_NO) | 476 | BUG_ON(!caching_ctl); |
477 | |||
478 | INIT_LIST_HEAD(&caching_ctl->list); | ||
479 | mutex_init(&caching_ctl->mutex); | ||
480 | init_waitqueue_head(&caching_ctl->wait); | ||
481 | caching_ctl->block_group = cache; | ||
482 | caching_ctl->progress = cache->key.objectid; | ||
483 | atomic_set(&caching_ctl->count, 1); | ||
484 | caching_ctl->work.func = caching_thread; | ||
485 | |||
486 | spin_lock(&cache->lock); | ||
487 | /* | ||
488 | * This should be a rare occasion, but this could happen I think in the | ||
489 | * case where one thread starts to load the space cache info, and then | ||
490 | * some other thread starts a transaction commit which tries to do an | ||
491 | * allocation while the other thread is still loading the space cache | ||
492 | * info. The previous loop should have kept us from choosing this block | ||
493 | * group, but if we've moved to the state where we will wait on caching | ||
494 | * block groups we need to first check if we're doing a fast load here, | ||
495 | * so we can wait for it to finish; otherwise we could end up allocating | ||
496 | * from a block group whose cache gets evicted for one reason or | ||
497 | * another. | ||
498 | */ | ||
499 | while (cache->cached == BTRFS_CACHE_FAST) { | ||
500 | struct btrfs_caching_control *ctl; | ||
501 | |||
502 | ctl = cache->caching_ctl; | ||
503 | atomic_inc(&ctl->count); | ||
504 | prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); | ||
505 | spin_unlock(&cache->lock); | ||
506 | |||
507 | schedule(); | ||
508 | |||
509 | finish_wait(&ctl->wait, &wait); | ||
510 | put_caching_control(ctl); | ||
511 | spin_lock(&cache->lock); | ||
512 | } | ||
513 | |||
514 | if (cache->cached != BTRFS_CACHE_NO) { | ||
515 | spin_unlock(&cache->lock); | ||
516 | kfree(caching_ctl); | ||
476 | return 0; | 517 | return 0; |
518 | } | ||
519 | WARN_ON(cache->caching_ctl); | ||
520 | cache->caching_ctl = caching_ctl; | ||
521 | cache->cached = BTRFS_CACHE_FAST; | ||
522 | spin_unlock(&cache->lock); | ||
477 | 523 | ||
478 | /* | 524 | /* |
479 | * We can't do the read from on-disk cache during a commit since we need | 525 | * We can't do the read from on-disk cache during a commit since we need |
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
484 | if (trans && (!trans->transaction->in_commit) && | 530 | if (trans && (!trans->transaction->in_commit) && |
485 | (root && root != root->fs_info->tree_root) && | 531 | (root && root != root->fs_info->tree_root) && |
486 | btrfs_test_opt(root, SPACE_CACHE)) { | 532 | btrfs_test_opt(root, SPACE_CACHE)) { |
487 | spin_lock(&cache->lock); | ||
488 | if (cache->cached != BTRFS_CACHE_NO) { | ||
489 | spin_unlock(&cache->lock); | ||
490 | return 0; | ||
491 | } | ||
492 | cache->cached = BTRFS_CACHE_STARTED; | ||
493 | spin_unlock(&cache->lock); | ||
494 | |||
495 | ret = load_free_space_cache(fs_info, cache); | 533 | ret = load_free_space_cache(fs_info, cache); |
496 | 534 | ||
497 | spin_lock(&cache->lock); | 535 | spin_lock(&cache->lock); |
498 | if (ret == 1) { | 536 | if (ret == 1) { |
537 | cache->caching_ctl = NULL; | ||
499 | cache->cached = BTRFS_CACHE_FINISHED; | 538 | cache->cached = BTRFS_CACHE_FINISHED; |
500 | cache->last_byte_to_unpin = (u64)-1; | 539 | cache->last_byte_to_unpin = (u64)-1; |
501 | } else { | 540 | } else { |
502 | cache->cached = BTRFS_CACHE_NO; | 541 | if (load_cache_only) { |
542 | cache->caching_ctl = NULL; | ||
543 | cache->cached = BTRFS_CACHE_NO; | ||
544 | } else { | ||
545 | cache->cached = BTRFS_CACHE_STARTED; | ||
546 | } | ||
503 | } | 547 | } |
504 | spin_unlock(&cache->lock); | 548 | spin_unlock(&cache->lock); |
549 | wake_up(&caching_ctl->wait); | ||
505 | if (ret == 1) { | 550 | if (ret == 1) { |
551 | put_caching_control(caching_ctl); | ||
506 | free_excluded_extents(fs_info->extent_root, cache); | 552 | free_excluded_extents(fs_info->extent_root, cache); |
507 | return 0; | 553 | return 0; |
508 | } | 554 | } |
555 | } else { | ||
556 | /* | ||
557 | * We are not going to do the fast caching, so set cached to the | ||
558 | * appropriate value and wake up any waiters. | ||
559 | */ | ||
560 | spin_lock(&cache->lock); | ||
561 | if (load_cache_only) { | ||
562 | cache->caching_ctl = NULL; | ||
563 | cache->cached = BTRFS_CACHE_NO; | ||
564 | } else { | ||
565 | cache->cached = BTRFS_CACHE_STARTED; | ||
566 | } | ||
567 | spin_unlock(&cache->lock); | ||
568 | wake_up(&caching_ctl->wait); | ||
509 | } | 569 | } |
510 | 570 | ||
511 | if (load_cache_only) | 571 | if (load_cache_only) { |
512 | return 0; | 572 | put_caching_control(caching_ctl); |
513 | |||
514 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); | ||
515 | BUG_ON(!caching_ctl); | ||
516 | |||
517 | INIT_LIST_HEAD(&caching_ctl->list); | ||
518 | mutex_init(&caching_ctl->mutex); | ||
519 | init_waitqueue_head(&caching_ctl->wait); | ||
520 | caching_ctl->block_group = cache; | ||
521 | caching_ctl->progress = cache->key.objectid; | ||
522 | /* one for caching kthread, one for caching block group list */ | ||
523 | atomic_set(&caching_ctl->count, 2); | ||
524 | caching_ctl->work.func = caching_thread; | ||
525 | |||
526 | spin_lock(&cache->lock); | ||
527 | if (cache->cached != BTRFS_CACHE_NO) { | ||
528 | spin_unlock(&cache->lock); | ||
529 | kfree(caching_ctl); | ||
530 | return 0; | 573 | return 0; |
531 | } | 574 | } |
532 | cache->caching_ctl = caching_ctl; | ||
533 | cache->cached = BTRFS_CACHE_STARTED; | ||
534 | spin_unlock(&cache->lock); | ||
535 | 575 | ||
536 | down_write(&fs_info->extent_commit_sem); | 576 | down_write(&fs_info->extent_commit_sem); |
577 | atomic_inc(&caching_ctl->count); | ||
537 | list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); | 578 | list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); |
538 | up_write(&fs_info->extent_commit_sem); | 579 | up_write(&fs_info->extent_commit_sem); |
539 | 580 | ||
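The BTRFS_CACHE_FAST wait loop added to cache_block_group() is the standard prepare_to_wait()/finish_wait() idiom: re-check the condition under the lock, drop the lock, sleep, and re-take the lock before testing again. In isolation it looks like this (sketch; lock, wq and condition are placeholders):

DEFINE_WAIT(wait);

spin_lock(&lock);
while (!condition) {
	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&lock);
	schedule();
	finish_wait(&wq, &wait);
	spin_lock(&lock);
}
spin_unlock(&lock);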
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root, | |||
3847 | return ret; | 3888 | return ret; |
3848 | } | 3889 | } |
3849 | 3890 | ||
3850 | int btrfs_block_rsv_refill(struct btrfs_root *root, | 3891 | static inline int __btrfs_block_rsv_refill(struct btrfs_root *root, |
3851 | struct btrfs_block_rsv *block_rsv, | 3892 | struct btrfs_block_rsv *block_rsv, |
3852 | u64 min_reserved) | 3893 | u64 min_reserved, int flush) |
3853 | { | 3894 | { |
3854 | u64 num_bytes = 0; | 3895 | u64 num_bytes = 0; |
3855 | int ret = -ENOSPC; | 3896 | int ret = -ENOSPC; |
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root, | |||
3868 | if (!ret) | 3909 | if (!ret) |
3869 | return 0; | 3910 | return 0; |
3870 | 3911 | ||
3871 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); | 3912 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); |
3872 | if (!ret) { | 3913 | if (!ret) { |
3873 | block_rsv_add_bytes(block_rsv, num_bytes, 0); | 3914 | block_rsv_add_bytes(block_rsv, num_bytes, 0); |
3874 | return 0; | 3915 | return 0; |
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root, | |||
3877 | return ret; | 3918 | return ret; |
3878 | } | 3919 | } |
3879 | 3920 | ||
3921 | int btrfs_block_rsv_refill(struct btrfs_root *root, | ||
3922 | struct btrfs_block_rsv *block_rsv, | ||
3923 | u64 min_reserved) | ||
3924 | { | ||
3925 | return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1); | ||
3926 | } | ||
3927 | |||
3928 | int btrfs_block_rsv_refill_noflush(struct btrfs_root *root, | ||
3929 | struct btrfs_block_rsv *block_rsv, | ||
3930 | u64 min_reserved) | ||
3931 | { | ||
3932 | return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0); | ||
3933 | } | ||
3934 | |||
3880 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | 3935 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, |
3881 | struct btrfs_block_rsv *dst_rsv, | 3936 | struct btrfs_block_rsv *dst_rsv, |
3882 | u64 num_bytes) | 3937 | u64 num_bytes) |
@@ -5178,13 +5233,15 @@ search: | |||
5178 | } | 5233 | } |
5179 | 5234 | ||
5180 | have_block_group: | 5235 | have_block_group: |
5181 | if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { | 5236 | cached = block_group_cache_done(block_group); |
5237 | if (unlikely(!cached)) { | ||
5182 | u64 free_percent; | 5238 | u64 free_percent; |
5183 | 5239 | ||
5240 | found_uncached_bg = true; | ||
5184 | ret = cache_block_group(block_group, trans, | 5241 | ret = cache_block_group(block_group, trans, |
5185 | orig_root, 1); | 5242 | orig_root, 1); |
5186 | if (block_group->cached == BTRFS_CACHE_FINISHED) | 5243 | if (block_group->cached == BTRFS_CACHE_FINISHED) |
5187 | goto have_block_group; | 5244 | goto alloc; |
5188 | 5245 | ||
5189 | free_percent = btrfs_block_group_used(&block_group->item); | 5246 | free_percent = btrfs_block_group_used(&block_group->item); |
5190 | free_percent *= 100; | 5247 | free_percent *= 100; |
@@ -5206,7 +5263,6 @@ have_block_group: | |||
5206 | orig_root, 0); | 5263 | orig_root, 0); |
5207 | BUG_ON(ret); | 5264 | BUG_ON(ret); |
5208 | } | 5265 | } |
5209 | found_uncached_bg = true; | ||
5210 | 5266 | ||
5211 | /* | 5267 | /* |
5212 | * If loop is set for cached only, try the next block | 5268 | * If loop is set for cached only, try the next block |
@@ -5216,17 +5272,14 @@ have_block_group: | |||
5216 | goto loop; | 5272 | goto loop; |
5217 | } | 5273 | } |
5218 | 5274 | ||
5219 | cached = block_group_cache_done(block_group); | 5275 | alloc: |
5220 | if (unlikely(!cached)) | ||
5221 | found_uncached_bg = true; | ||
5222 | |||
5223 | if (unlikely(block_group->ro)) | 5276 | if (unlikely(block_group->ro)) |
5224 | goto loop; | 5277 | goto loop; |
5225 | 5278 | ||
5226 | spin_lock(&block_group->free_space_ctl->tree_lock); | 5279 | spin_lock(&block_group->free_space_ctl->tree_lock); |
5227 | if (cached && | 5280 | if (cached && |
5228 | block_group->free_space_ctl->free_space < | 5281 | block_group->free_space_ctl->free_space < |
5229 | num_bytes + empty_size) { | 5282 | num_bytes + empty_cluster + empty_size) { |
5230 | spin_unlock(&block_group->free_space_ctl->tree_lock); | 5283 | spin_unlock(&block_group->free_space_ctl->tree_lock); |
5231 | goto loop; | 5284 | goto loop; |
5232 | } | 5285 | } |
@@ -5247,12 +5300,10 @@ have_block_group: | |||
5247 | * people trying to start a new cluster | 5300 | * people trying to start a new cluster |
5248 | */ | 5301 | */ |
5249 | spin_lock(&last_ptr->refill_lock); | 5302 | spin_lock(&last_ptr->refill_lock); |
5250 | if (last_ptr->block_group && | 5303 | if (!last_ptr->block_group || |
5251 | (last_ptr->block_group->ro || | 5304 | last_ptr->block_group->ro || |
5252 | !block_group_bits(last_ptr->block_group, data))) { | 5305 | !block_group_bits(last_ptr->block_group, data)) |
5253 | offset = 0; | ||
5254 | goto refill_cluster; | 5306 | goto refill_cluster; |
5255 | } | ||
5256 | 5307 | ||
5257 | offset = btrfs_alloc_from_cluster(block_group, last_ptr, | 5308 | offset = btrfs_alloc_from_cluster(block_group, last_ptr, |
5258 | num_bytes, search_start); | 5309 | num_bytes, search_start); |
@@ -5303,7 +5354,7 @@ refill_cluster: | |||
5303 | /* allocate a cluster in this block group */ | 5354 | /* allocate a cluster in this block group */ |
5304 | ret = btrfs_find_space_cluster(trans, root, | 5355 | ret = btrfs_find_space_cluster(trans, root, |
5305 | block_group, last_ptr, | 5356 | block_group, last_ptr, |
5306 | offset, num_bytes, | 5357 | search_start, num_bytes, |
5307 | empty_cluster + empty_size); | 5358 | empty_cluster + empty_size); |
5308 | if (ret == 0) { | 5359 | if (ret == 0) { |
5309 | /* | 5360 | /* |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1f87c4d0e7a0..be1bf627a14b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2285,16 +2285,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2285 | clean_io_failure(start, page); | 2285 | clean_io_failure(start, page); |
2286 | } | 2286 | } |
2287 | if (!uptodate) { | 2287 | if (!uptodate) { |
2288 | u64 failed_mirror; | 2288 | int failed_mirror; |
2289 | failed_mirror = (u64)bio->bi_bdev; | 2289 | failed_mirror = (int)(unsigned long)bio->bi_bdev; |
2290 | if (tree->ops && tree->ops->readpage_io_failed_hook) | 2290 | /* |
2291 | ret = tree->ops->readpage_io_failed_hook( | 2291 | * The generic bio_readpage_error handles errors the |
2292 | bio, page, start, end, | 2292 | * following way: If possible, new read requests are |
2293 | failed_mirror, state); | 2293 | * created and submitted and will end up in |
2294 | else | 2294 | * end_bio_extent_readpage as well (if we're lucky, not |
2295 | ret = bio_readpage_error(bio, page, start, end, | 2295 | * in the !uptodate case). In that case it returns 0 and |
2296 | failed_mirror, NULL); | 2296 | * we just go on with the next page in our bio. If it |
2297 | * can't handle the error it will return -EIO and we | ||
2298 | * remain responsible for that page. | ||
2299 | */ | ||
2300 | ret = bio_readpage_error(bio, page, start, end, | ||
2301 | failed_mirror, NULL); | ||
2297 | if (ret == 0) { | 2302 | if (ret == 0) { |
2303 | error_handled: | ||
2298 | uptodate = | 2304 | uptodate = |
2299 | test_bit(BIO_UPTODATE, &bio->bi_flags); | 2305 | test_bit(BIO_UPTODATE, &bio->bi_flags); |
2300 | if (err) | 2306 | if (err) |
@@ -2302,6 +2308,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2302 | uncache_state(&cached); | 2308 | uncache_state(&cached); |
2303 | continue; | 2309 | continue; |
2304 | } | 2310 | } |
2311 | if (tree->ops && tree->ops->readpage_io_failed_hook) { | ||
2312 | ret = tree->ops->readpage_io_failed_hook( | ||
2313 | bio, page, start, end, | ||
2314 | failed_mirror, state); | ||
2315 | if (ret == 0) | ||
2316 | goto error_handled; | ||
2317 | } | ||
2305 | } | 2318 | } |
2306 | 2319 | ||
2307 | if (uptodate) { | 2320 | if (uptodate) { |
@@ -3366,6 +3379,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3366 | return -ENOMEM; | 3379 | return -ENOMEM; |
3367 | path->leave_spinning = 1; | 3380 | path->leave_spinning = 1; |
3368 | 3381 | ||
3382 | start = ALIGN(start, BTRFS_I(inode)->root->sectorsize); | ||
3383 | len = ALIGN(len, BTRFS_I(inode)->root->sectorsize); | ||
3384 | |||
3369 | /* | 3385 | /* |
3370 | * lookup the last file extent. We're not using i_size here | 3386 | * lookup the last file extent. We're not using i_size here |
3371 | * because there might be preallocation past i_size | 3387 | * because there might be preallocation past i_size |
@@ -3413,7 +3429,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3413 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3429 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, |
3414 | &cached_state, GFP_NOFS); | 3430 | &cached_state, GFP_NOFS); |
3415 | 3431 | ||
3416 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | 3432 | em = get_extent_skip_holes(inode, start, last_for_get_extent, |
3417 | get_extent); | 3433 | get_extent); |
3418 | if (!em) | 3434 | if (!em) |
3419 | goto out; | 3435 | goto out; |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index feb9be0e23bc..7604c3001322 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -70,7 +70,7 @@ struct extent_io_ops { | |||
70 | unsigned long bio_flags); | 70 | unsigned long bio_flags); |
71 | int (*readpage_io_hook)(struct page *page, u64 start, u64 end); | 71 | int (*readpage_io_hook)(struct page *page, u64 start, u64 end); |
72 | int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, | 72 | int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, |
73 | u64 start, u64 end, u64 failed_mirror, | 73 | u64 start, u64 end, int failed_mirror, |
74 | struct extent_state *state); | 74 | struct extent_state *state); |
75 | int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, | 75 | int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, |
76 | u64 start, u64 end, | 76 | u64 start, u64 end, |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 181760f9d2ab..ec23d43d0c35 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, | |||
351 | } | 351 | } |
352 | } | 352 | } |
353 | 353 | ||
354 | for (i = 0; i < io_ctl->num_pages; i++) { | ||
355 | clear_page_dirty_for_io(io_ctl->pages[i]); | ||
356 | set_page_extent_mapped(io_ctl->pages[i]); | ||
357 | } | ||
358 | |||
354 | return 0; | 359 | return 0; |
355 | } | 360 | } |
356 | 361 | ||
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, | |||
1465 | { | 1470 | { |
1466 | info->offset = offset_to_bitmap(ctl, offset); | 1471 | info->offset = offset_to_bitmap(ctl, offset); |
1467 | info->bytes = 0; | 1472 | info->bytes = 0; |
1473 | INIT_LIST_HEAD(&info->list); | ||
1468 | link_free_space(ctl, info); | 1474 | link_free_space(ctl, info); |
1469 | ctl->total_bitmaps++; | 1475 | ctl->total_bitmaps++; |
1470 | 1476 | ||
@@ -1844,7 +1850,13 @@ again: | |||
1844 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | 1850 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
1845 | 1, 0); | 1851 | 1, 0); |
1846 | if (!info) { | 1852 | if (!info) { |
1847 | WARN_ON(1); | 1853 | /* the tree logging code might be calling us before we |
1854 | * have fully loaded the free space rbtree for this | ||
1855 | * block group. So it is possible the entry won't | ||
1856 | * be in the rbtree yet at all. The caching code | ||
1857 | * will make sure not to put it in the rbtree if | ||
1858 | * the logging code has pinned it. | ||
1859 | */ | ||
1848 | goto out_lock; | 1860 | goto out_lock; |
1849 | } | 1861 | } |
1850 | } | 1862 | } |
@@ -2308,6 +2320,7 @@ again: | |||
2308 | 2320 | ||
2309 | if (!found) { | 2321 | if (!found) { |
2310 | start = i; | 2322 | start = i; |
2323 | cluster->max_size = 0; | ||
2311 | found = true; | 2324 | found = true; |
2312 | } | 2325 | } |
2313 | 2326 | ||
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2451 | { | 2464 | { |
2452 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2465 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
2453 | struct btrfs_free_space *entry; | 2466 | struct btrfs_free_space *entry; |
2454 | struct rb_node *node; | ||
2455 | int ret = -ENOSPC; | 2467 | int ret = -ENOSPC; |
2468 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); | ||
2456 | 2469 | ||
2457 | if (ctl->total_bitmaps == 0) | 2470 | if (ctl->total_bitmaps == 0) |
2458 | return -ENOSPC; | 2471 | return -ENOSPC; |
2459 | 2472 | ||
2460 | /* | 2473 | /* |
2461 | * First check our cached list of bitmaps and see if there is an entry | 2474 | * The bitmap that covers offset won't be in the list unless offset |
2462 | * here that will work. | 2475 | * is just its start offset. |
2463 | */ | 2476 | */ |
2477 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | ||
2478 | if (entry->offset != bitmap_offset) { | ||
2479 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); | ||
2480 | if (entry && list_empty(&entry->list)) | ||
2481 | list_add(&entry->list, bitmaps); | ||
2482 | } | ||
2483 | |||
2464 | list_for_each_entry(entry, bitmaps, list) { | 2484 | list_for_each_entry(entry, bitmaps, list) { |
2465 | if (entry->bytes < min_bytes) | 2485 | if (entry->bytes < min_bytes) |
2466 | continue; | 2486 | continue; |
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2471 | } | 2491 | } |
2472 | 2492 | ||
2473 | /* | 2493 | /* |
2474 | * If we do have entries on our list and we are here then we didn't find | 2494 | * The bitmaps list has all the bitmaps that record free space |
2475 | * anything, so go ahead and get the next entry after the last entry in | 2495 | * starting after offset, so no more search is required. |
2476 | * this list and start the search from there. | ||
2477 | */ | 2496 | */ |
2478 | if (!list_empty(bitmaps)) { | 2497 | return -ENOSPC; |
2479 | entry = list_entry(bitmaps->prev, struct btrfs_free_space, | ||
2480 | list); | ||
2481 | node = rb_next(&entry->offset_index); | ||
2482 | if (!node) | ||
2483 | return -ENOSPC; | ||
2484 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2485 | goto search; | ||
2486 | } | ||
2487 | |||
2488 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); | ||
2489 | if (!entry) | ||
2490 | return -ENOSPC; | ||
2491 | |||
2492 | search: | ||
2493 | node = &entry->offset_index; | ||
2494 | do { | ||
2495 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2496 | node = rb_next(&entry->offset_index); | ||
2497 | if (!entry->bitmap) | ||
2498 | continue; | ||
2499 | if (entry->bytes < min_bytes) | ||
2500 | continue; | ||
2501 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | ||
2502 | bytes, min_bytes); | ||
2503 | } while (ret && node); | ||
2504 | |||
2505 | return ret; | ||
2506 | } | 2498 | } |
2507 | 2499 | ||
2508 | /* | 2500 | /* |
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2520 | u64 offset, u64 bytes, u64 empty_size) | 2512 | u64 offset, u64 bytes, u64 empty_size) |
2521 | { | 2513 | { |
2522 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2514 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
2523 | struct list_head bitmaps; | ||
2524 | struct btrfs_free_space *entry, *tmp; | 2515 | struct btrfs_free_space *entry, *tmp; |
2516 | LIST_HEAD(bitmaps); | ||
2525 | u64 min_bytes; | 2517 | u64 min_bytes; |
2526 | int ret; | 2518 | int ret; |
2527 | 2519 | ||
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2560 | goto out; | 2552 | goto out; |
2561 | } | 2553 | } |
2562 | 2554 | ||
2563 | INIT_LIST_HEAD(&bitmaps); | ||
2564 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, | 2555 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, |
2565 | bytes, min_bytes); | 2556 | bytes, min_bytes); |
2566 | if (ret) | 2557 | if (ret) |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 116ab67a06df..2c984f7d4c2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
3490 | * doing the truncate. | 3490 | * doing the truncate. |
3491 | */ | 3491 | */ |
3492 | while (1) { | 3492 | while (1) { |
3493 | ret = btrfs_block_rsv_refill(root, rsv, min_size); | 3493 | ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); |
3494 | 3494 | ||
3495 | /* | 3495 | /* |
3496 | * Try and steal from the global reserve since we will | 3496 | * Try and steal from the global reserve since we will |
@@ -6794,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt, | |||
6794 | struct dentry *dentry, struct kstat *stat) | 6794 | struct dentry *dentry, struct kstat *stat) |
6795 | { | 6795 | { |
6796 | struct inode *inode = dentry->d_inode; | 6796 | struct inode *inode = dentry->d_inode; |
6797 | u32 blocksize = inode->i_sb->s_blocksize; | ||
6798 | |||
6797 | generic_fillattr(inode, stat); | 6799 | generic_fillattr(inode, stat); |
6798 | stat->dev = BTRFS_I(inode)->root->anon_dev; | 6800 | stat->dev = BTRFS_I(inode)->root->anon_dev; |
6799 | stat->blksize = PAGE_CACHE_SIZE; | 6801 | stat->blksize = PAGE_CACHE_SIZE; |
6800 | stat->blocks = (inode_get_bytes(inode) + | 6802 | stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + |
6801 | BTRFS_I(inode)->delalloc_bytes) >> 9; | 6803 | ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9; |
6802 | return 0; | 6804 | return 0; |
6803 | } | 6805 | } |
6804 | 6806 | ||
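The btrfs_getattr() change rounds both the on-disk bytes and the delalloc bytes up to the filesystem block size before shifting down to 512-byte sectors, so small buffered writes report a full block in st_blocks instead of under-counting. With a 4096-byte blocksize, for example (illustrative numbers only):

u32 blocksize = 4096;
u64 bytes = 1000;				/* 1000 dirty bytes, not yet flushed */

/* ALIGN() rounds up to the next multiple of blocksize (a power of two) */
u64 sectors = ALIGN(bytes, blocksize) >> 9;	/* 4096 >> 9 == 8 sectors */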
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4a34c472f126..72d461656f60 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1216 | *devstr = '\0'; | 1216 | *devstr = '\0'; |
1217 | devstr = vol_args->name; | 1217 | devstr = vol_args->name; |
1218 | devid = simple_strtoull(devstr, &end, 10); | 1218 | devid = simple_strtoull(devstr, &end, 10); |
1219 | printk(KERN_INFO "resizing devid %llu\n", | 1219 | printk(KERN_INFO "btrfs: resizing devid %llu\n", |
1220 | (unsigned long long)devid); | 1220 | (unsigned long long)devid); |
1221 | } | 1221 | } |
1222 | device = btrfs_find_device(root, devid, NULL, NULL); | 1222 | device = btrfs_find_device(root, devid, NULL, NULL); |
1223 | if (!device) { | 1223 | if (!device) { |
1224 | printk(KERN_INFO "resizer unable to find device %llu\n", | 1224 | printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", |
1225 | (unsigned long long)devid); | 1225 | (unsigned long long)devid); |
1226 | ret = -EINVAL; | 1226 | ret = -EINVAL; |
1227 | goto out_unlock; | 1227 | goto out_unlock; |
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1267 | do_div(new_size, root->sectorsize); | 1267 | do_div(new_size, root->sectorsize); |
1268 | new_size *= root->sectorsize; | 1268 | new_size *= root->sectorsize; |
1269 | 1269 | ||
1270 | printk(KERN_INFO "new size for %s is %llu\n", | 1270 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", |
1271 | device->name, (unsigned long long)new_size); | 1271 | device->name, (unsigned long long)new_size); |
1272 | 1272 | ||
1273 | if (new_size > old_size) { | 1273 | if (new_size > old_size) { |
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1278 | } | 1278 | } |
1279 | ret = btrfs_grow_device(trans, device, new_size); | 1279 | ret = btrfs_grow_device(trans, device, new_size); |
1280 | btrfs_commit_transaction(trans, root); | 1280 | btrfs_commit_transaction(trans, root); |
1281 | } else { | 1281 | } else if (new_size < old_size) { |
1282 | ret = btrfs_shrink_device(device, new_size); | 1282 | ret = btrfs_shrink_device(device, new_size); |
1283 | } | 1283 | } |
1284 | 1284 | ||
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) | |||
2930 | goto out; | 2930 | goto out; |
2931 | 2931 | ||
2932 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) { | 2932 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) { |
2933 | rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val; | 2933 | rel_ptr = ipath->fspath->val[i] - |
2934 | (u64)(unsigned long)ipath->fspath->val; | ||
2934 | ipath->fspath->val[i] = rel_ptr; | 2935 | ipath->fspath->val[i] = rel_ptr; |
2935 | } | 2936 | } |
2936 | 2937 | ||
2937 | ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size); | 2938 | ret = copy_to_user((void *)(unsigned long)ipa->fspath, |
2939 | (void *)(unsigned long)ipath->fspath, size); | ||
2938 | if (ret) { | 2940 | if (ret) { |
2939 | ret = -EFAULT; | 2941 | ret = -EFAULT; |
2940 | goto out; | 2942 | goto out; |
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
3017 | if (ret < 0) | 3019 | if (ret < 0) |
3018 | goto out; | 3020 | goto out; |
3019 | 3021 | ||
3020 | ret = copy_to_user((void *)loi->inodes, (void *)inodes, size); | 3022 | ret = copy_to_user((void *)(unsigned long)loi->inodes, |
3023 | (void *)(unsigned long)inodes, size); | ||
3021 | if (ret) | 3024 | if (ret) |
3022 | ret = -EFAULT; | 3025 | ret = -EFAULT; |
3023 | 3026 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f4190f22edfb..c27bcb67f330 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
256 | btrfs_release_path(swarn->path); | 256 | btrfs_release_path(swarn->path); |
257 | 257 | ||
258 | ipath = init_ipath(4096, local_root, swarn->path); | 258 | ipath = init_ipath(4096, local_root, swarn->path); |
259 | if (IS_ERR(ipath)) { | ||
260 | ret = PTR_ERR(ipath); | ||
261 | ipath = NULL; | ||
262 | goto err; | ||
263 | } | ||
259 | ret = paths_from_inode(inum, ipath); | 264 | ret = paths_from_inode(inum, ipath); |
260 | 265 | ||
261 | if (ret < 0) | 266 | if (ret < 0) |
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
272 | swarn->logical, swarn->dev->name, | 277 | swarn->logical, swarn->dev->name, |
273 | (unsigned long long)swarn->sector, root, inum, offset, | 278 | (unsigned long long)swarn->sector, root, inum, offset, |
274 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 279 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
275 | (char *)ipath->fspath->val[i]); | 280 | (char *)(unsigned long)ipath->fspath->val[i]); |
276 | 281 | ||
277 | free_ipath(ipath); | 282 | free_ipath(ipath); |
278 | return 0; | 283 | return 0; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 17ee7fc5e64e..e28ad4baf483 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1057 | int i = 0, nr_devices; | 1057 | int i = 0, nr_devices; |
1058 | int ret; | 1058 | int ret; |
1059 | 1059 | ||
1060 | nr_devices = fs_info->fs_devices->rw_devices; | 1060 | nr_devices = fs_info->fs_devices->open_devices; |
1061 | BUG_ON(!nr_devices); | 1061 | BUG_ON(!nr_devices); |
1062 | 1062 | ||
1063 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, | 1063 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, |
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1079 | else | 1079 | else |
1080 | min_stripe_size = BTRFS_STRIPE_LEN; | 1080 | min_stripe_size = BTRFS_STRIPE_LEN; |
1081 | 1081 | ||
1082 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { | 1082 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
1083 | if (!device->in_fs_metadata) | 1083 | if (!device->in_fs_metadata || !device->bdev) |
1084 | continue; | 1084 | continue; |
1085 | 1085 | ||
1086 | avail_space = device->total_bytes - device->bytes_used; | 1086 | avail_space = device->total_bytes - device->bytes_used; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6a0574e923bc..81376d94cd3c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
785 | 785 | ||
786 | btrfs_save_ino_cache(root, trans); | 786 | btrfs_save_ino_cache(root, trans); |
787 | 787 | ||
788 | /* see comments in should_cow_block() */ | ||
789 | root->force_cow = 0; | ||
790 | smp_wmb(); | ||
791 | |||
788 | if (root->commit_root != root->node) { | 792 | if (root->commit_root != root->node) { |
789 | mutex_lock(&root->fs_commit_mutex); | 793 | mutex_lock(&root->fs_commit_mutex); |
790 | switch_commit_root(root); | 794 | switch_commit_root(root); |
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
947 | btrfs_tree_unlock(old); | 951 | btrfs_tree_unlock(old); |
948 | free_extent_buffer(old); | 952 | free_extent_buffer(old); |
949 | 953 | ||
954 | /* see comments in should_cow_block() */ | ||
955 | root->force_cow = 1; | ||
956 | smp_wmb(); | ||
957 | |||
950 | btrfs_set_root_node(new_root_item, tmp); | 958 | btrfs_set_root_node(new_root_item, tmp); |
951 | /* record when the snapshot was created in key.offset */ | 959 | /* record when the snapshot was created in key.offset */ |
952 | key.offset = trans->transid; | 960 | key.offset = trans->transid; |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ab5b1c49f352..78f2d4d4f37f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -100,6 +100,12 @@ struct btrfs_device { | |||
100 | struct reada_zone *reada_curr_zone; | 100 | struct reada_zone *reada_curr_zone; |
101 | struct radix_tree_root reada_zones; | 101 | struct radix_tree_root reada_zones; |
102 | struct radix_tree_root reada_extents; | 102 | struct radix_tree_root reada_extents; |
103 | |||
104 | /* for sending down flush barriers */ | ||
105 | struct bio *flush_bio; | ||
106 | struct completion flush_wait; | ||
107 | int nobarriers; | ||
108 | |||
103 | }; | 109 | }; |
104 | 110 | ||
105 | struct btrfs_fs_devices { | 111 | struct btrfs_fs_devices { |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 2abd0dfad7f8..bca3948e9dbf 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -1143,7 +1143,7 @@ static void ceph_d_prune(struct dentry *dentry) | |||
1143 | { | 1143 | { |
1144 | struct ceph_dentry_info *di; | 1144 | struct ceph_dentry_info *di; |
1145 | 1145 | ||
1146 | dout("d_release %p\n", dentry); | 1146 | dout("ceph_d_prune %p\n", dentry); |
1147 | 1147 | ||
1148 | /* do we have a valid parent? */ | 1148 | /* do we have a valid parent? */ |
1149 | if (!dentry->d_parent || IS_ROOT(dentry)) | 1149 | if (!dentry->d_parent || IS_ROOT(dentry)) |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e392bfce84a3..116f36502f17 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -1328,12 +1328,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size) | |||
1328 | */ | 1328 | */ |
1329 | void ceph_queue_writeback(struct inode *inode) | 1329 | void ceph_queue_writeback(struct inode *inode) |
1330 | { | 1330 | { |
1331 | ihold(inode); | ||
1331 | if (queue_work(ceph_inode_to_client(inode)->wb_wq, | 1332 | if (queue_work(ceph_inode_to_client(inode)->wb_wq, |
1332 | &ceph_inode(inode)->i_wb_work)) { | 1333 | &ceph_inode(inode)->i_wb_work)) { |
1333 | dout("ceph_queue_writeback %p\n", inode); | 1334 | dout("ceph_queue_writeback %p\n", inode); |
1334 | ihold(inode); | ||
1335 | } else { | 1335 | } else { |
1336 | dout("ceph_queue_writeback %p failed\n", inode); | 1336 | dout("ceph_queue_writeback %p failed\n", inode); |
1337 | iput(inode); | ||
1337 | } | 1338 | } |
1338 | } | 1339 | } |
1339 | 1340 | ||
@@ -1353,12 +1354,13 @@ static void ceph_writeback_work(struct work_struct *work) | |||
1353 | */ | 1354 | */ |
1354 | void ceph_queue_invalidate(struct inode *inode) | 1355 | void ceph_queue_invalidate(struct inode *inode) |
1355 | { | 1356 | { |
1357 | ihold(inode); | ||
1356 | if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, | 1358 | if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, |
1357 | &ceph_inode(inode)->i_pg_inv_work)) { | 1359 | &ceph_inode(inode)->i_pg_inv_work)) { |
1358 | dout("ceph_queue_invalidate %p\n", inode); | 1360 | dout("ceph_queue_invalidate %p\n", inode); |
1359 | ihold(inode); | ||
1360 | } else { | 1361 | } else { |
1361 | dout("ceph_queue_invalidate %p failed\n", inode); | 1362 | dout("ceph_queue_invalidate %p failed\n", inode); |
1363 | iput(inode); | ||
1362 | } | 1364 | } |
1363 | } | 1365 | } |
1364 | 1366 | ||
@@ -1434,13 +1436,14 @@ void ceph_queue_vmtruncate(struct inode *inode) | |||
1434 | { | 1436 | { |
1435 | struct ceph_inode_info *ci = ceph_inode(inode); | 1437 | struct ceph_inode_info *ci = ceph_inode(inode); |
1436 | 1438 | ||
1439 | ihold(inode); | ||
1437 | if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, | 1440 | if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, |
1438 | &ci->i_vmtruncate_work)) { | 1441 | &ci->i_vmtruncate_work)) { |
1439 | dout("ceph_queue_vmtruncate %p\n", inode); | 1442 | dout("ceph_queue_vmtruncate %p\n", inode); |
1440 | ihold(inode); | ||
1441 | } else { | 1443 | } else { |
1442 | dout("ceph_queue_vmtruncate %p failed, pending=%d\n", | 1444 | dout("ceph_queue_vmtruncate %p failed, pending=%d\n", |
1443 | inode, ci->i_truncate_pending); | 1445 | inode, ci->i_truncate_pending); |
1446 | iput(inode); | ||
1444 | } | 1447 | } |
1445 | } | 1448 | } |
1446 | 1449 | ||
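All three ceph queue helpers get the same ordering fix: take the inode reference before queue_work() and drop it on the failure path, rather than taking it only after a successful queue, where the queued work could already be running and dropping a reference the caller never took. The resulting pattern, condensed from the hunks above:

ihold(inode);				/* reference owned by the queued work */
if (queue_work(wq, &ci->i_wb_work)) {
	/* queued: the work function drops the reference when it finishes */
} else {
	iput(inode);			/* already queued elsewhere, drop ours */
}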
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a90846fac759..8dc73a594a90 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, | |||
638 | if (err == 0) { | 638 | if (err == 0) { |
639 | dout("open_root_inode success\n"); | 639 | dout("open_root_inode success\n"); |
640 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && | 640 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && |
641 | fsc->sb->s_root == NULL) | 641 | fsc->sb->s_root == NULL) { |
642 | root = d_alloc_root(req->r_target_inode); | 642 | root = d_alloc_root(req->r_target_inode); |
643 | else | 643 | ceph_init_dentry(root); |
644 | } else { | ||
644 | root = d_obtain_alias(req->r_target_inode); | 645 | root = d_obtain_alias(req->r_target_inode); |
646 | } | ||
645 | req->r_target_inode = NULL; | 647 | req->r_target_inode = NULL; |
646 | dout("open_root_inode success, root dentry is %p\n", root); | 648 | dout("open_root_inode success, root dentry is %p\n", root); |
647 | } else { | 649 | } else { |
diff --git a/fs/dcache.c b/fs/dcache.c index a901c6901bce..10ba92def3f6 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/bit_spinlock.h> | 36 | #include <linux/bit_spinlock.h> |
37 | #include <linux/rculist_bl.h> | 37 | #include <linux/rculist_bl.h> |
38 | #include <linux/prefetch.h> | 38 | #include <linux/prefetch.h> |
39 | #include <linux/ratelimit.h> | ||
39 | #include "internal.h" | 40 | #include "internal.h" |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2383 | actual = __d_unalias(inode, dentry, alias); | 2384 | actual = __d_unalias(inode, dentry, alias); |
2384 | } | 2385 | } |
2385 | write_sequnlock(&rename_lock); | 2386 | write_sequnlock(&rename_lock); |
2386 | if (IS_ERR(actual)) | 2387 | if (IS_ERR(actual)) { |
2388 | if (PTR_ERR(actual) == -ELOOP) | ||
2389 | pr_warn_ratelimited( | ||
2390 | "VFS: Lookup of '%s' in %s %s" | ||
2391 | " would have caused loop\n", | ||
2392 | dentry->d_name.name, | ||
2393 | inode->i_sb->s_type->name, | ||
2394 | inode->i_sb->s_id); | ||
2387 | dput(alias); | 2395 | dput(alias); |
2396 | } | ||
2388 | goto out_nolock; | 2397 | goto out_nolock; |
2389 | } | 2398 | } |
2390 | } | 2399 | } |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 58609bde3b9f..2a834255c75d 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals( | |||
967 | 967 | ||
968 | /** | 968 | /** |
969 | * ecryptfs_new_file_context | 969 | * ecryptfs_new_file_context |
970 | * @ecryptfs_dentry: The eCryptfs dentry | 970 | * @ecryptfs_inode: The eCryptfs inode |
971 | * | 971 | * |
972 | * If the crypto context for the file has not yet been established, | 972 | * If the crypto context for the file has not yet been established, |
973 | * this is where we do that. Establishing a new crypto context | 973 | * this is where we do that. Establishing a new crypto context |
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals( | |||
984 | * | 984 | * |
985 | * Returns zero on success; non-zero otherwise | 985 | * Returns zero on success; non-zero otherwise |
986 | */ | 986 | */ |
987 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry) | 987 | int ecryptfs_new_file_context(struct inode *ecryptfs_inode) |
988 | { | 988 | { |
989 | struct ecryptfs_crypt_stat *crypt_stat = | 989 | struct ecryptfs_crypt_stat *crypt_stat = |
990 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 990 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
991 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | 991 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = |
992 | &ecryptfs_superblock_to_private( | 992 | &ecryptfs_superblock_to_private( |
993 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 993 | ecryptfs_inode->i_sb)->mount_crypt_stat; |
994 | int cipher_name_len; | 994 | int cipher_name_len; |
995 | int rc = 0; | 995 | int rc = 0; |
996 | 996 | ||
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, | |||
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | static int | 1301 | static int |
1302 | ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry, | 1302 | ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode, |
1303 | char *virt, size_t virt_len) | 1303 | char *virt, size_t virt_len) |
1304 | { | 1304 | { |
1305 | int rc; | 1305 | int rc; |
1306 | 1306 | ||
1307 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, | 1307 | rc = ecryptfs_write_lower(ecryptfs_inode, virt, |
1308 | 0, virt_len); | 1308 | 0, virt_len); |
1309 | if (rc < 0) | 1309 | if (rc < 0) |
1310 | printk(KERN_ERR "%s: Error attempting to write header " | 1310 | printk(KERN_ERR "%s: Error attempting to write header " |
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, | |||
1338 | 1338 | ||
1339 | /** | 1339 | /** |
1340 | * ecryptfs_write_metadata | 1340 | * ecryptfs_write_metadata |
1341 | * @ecryptfs_dentry: The eCryptfs dentry | 1341 | * @ecryptfs_dentry: The eCryptfs dentry, which should be negative |
1342 | * @ecryptfs_inode: The newly created eCryptfs inode | ||
1342 | * | 1343 | * |
1343 | * Write the file headers out. This will likely involve a userspace | 1344 | * Write the file headers out. This will likely involve a userspace |
1344 | * callout, in which the session key is encrypted with one or more | 1345 | * callout, in which the session key is encrypted with one or more |
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, | |||
1348 | * | 1349 | * |
1349 | * Returns zero on success; non-zero on error | 1350 | * Returns zero on success; non-zero on error |
1350 | */ | 1351 | */ |
1351 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | 1352 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, |
1353 | struct inode *ecryptfs_inode) | ||
1352 | { | 1354 | { |
1353 | struct ecryptfs_crypt_stat *crypt_stat = | 1355 | struct ecryptfs_crypt_stat *crypt_stat = |
1354 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 1356 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
1355 | unsigned int order; | 1357 | unsigned int order; |
1356 | char *virt; | 1358 | char *virt; |
1357 | size_t virt_len; | 1359 | size_t virt_len; |
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1391 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, | 1393 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, |
1392 | size); | 1394 | size); |
1393 | else | 1395 | else |
1394 | rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt, | 1396 | rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, |
1395 | virt_len); | 1397 | virt_len); |
1396 | if (rc) { | 1398 | if (rc) { |
1397 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " | 1399 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " |
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD" | |||
1943 | 1945 | ||
1944 | /* We could either offset on every reverse map or just pad some 0x00's | 1946 | /* We could either offset on every reverse map or just pad some 0x00's |
1945 | * at the front here */ | 1947 | * at the front here */ |
1946 | static const unsigned char filename_rev_map[] = { | 1948 | static const unsigned char filename_rev_map[256] = { |
1947 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ | 1949 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ |
1948 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ | 1950 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ |
1949 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ | 1951 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ |
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = { | |||
1959 | 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ | 1961 | 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ |
1960 | 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ | 1962 | 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ |
1961 | 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ | 1963 | 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ |
1962 | 0x3D, 0x3E, 0x3F | 1964 | 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */ |
1963 | }; | 1965 | }; |
1964 | 1966 | ||
1965 | /** | 1967 | /** |
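
Editor's note: the switch to an explicitly sized filename_rev_map[256] is what makes the added trailing comment true; the decode path indexes this table with raw bytes taken from an encoded filename, and only a full 256-entry array guarantees that bytes beyond 'z' land on zero-initialized slots instead of memory past the end of the table. A rough userspace sketch of the same sizing trick; the table entries below are illustrative, not the real eCryptfs alphabet:

#include <stdio.h>

/* Explicitly sized reverse map: every possible byte value has a slot, and
 * slots not listed here are zero-initialized, so characters outside the
 * alphabet decode to 0x00 rather than indexing out of bounds. */
static const unsigned char rev_map[256] = {
	['-'] = 0x01, ['.'] = 0x02, ['0'] = 0x03, ['9'] = 0x0C,
	['A'] = 0x0D, ['Z'] = 0x26, ['a'] = 0x27, ['z'] = 0x3F,
};

int main(void)
{
	const unsigned char in[] = { 'A', 0xF7, 'z' };	/* 0xF7 is not in the alphabet */
	unsigned i;

	for (i = 0; i < sizeof(in); i++)
		printf("0x%02X -> 0x%02X\n", in[i], rev_map[in[i]]);
	return 0;
}
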
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 54481a3b2c79..a9f29b12fbf2 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat); | |||
584 | int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); | 584 | int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); |
585 | int ecryptfs_encrypt_page(struct page *page); | 585 | int ecryptfs_encrypt_page(struct page *page); |
586 | int ecryptfs_decrypt_page(struct page *page); | 586 | int ecryptfs_decrypt_page(struct page *page); |
587 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); | 587 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, |
588 | struct inode *ecryptfs_inode); | ||
588 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); | 589 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); |
589 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); | 590 | int ecryptfs_new_file_context(struct inode *ecryptfs_inode); |
590 | void ecryptfs_write_crypt_stat_flags(char *page_virt, | 591 | void ecryptfs_write_crypt_stat_flags(char *page_virt, |
591 | struct ecryptfs_crypt_stat *crypt_stat, | 592 | struct ecryptfs_crypt_stat *crypt_stat, |
592 | size_t *written); | 593 | size_t *written); |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c6ac98cf9baa..d3f95f941c47 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -139,6 +139,27 @@ out: | |||
139 | return rc; | 139 | return rc; |
140 | } | 140 | } |
141 | 141 | ||
142 | static void ecryptfs_vma_close(struct vm_area_struct *vma) | ||
143 | { | ||
144 | filemap_write_and_wait(vma->vm_file->f_mapping); | ||
145 | } | ||
146 | |||
147 | static const struct vm_operations_struct ecryptfs_file_vm_ops = { | ||
148 | .close = ecryptfs_vma_close, | ||
149 | .fault = filemap_fault, | ||
150 | }; | ||
151 | |||
152 | static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma) | ||
153 | { | ||
154 | int rc; | ||
155 | |||
156 | rc = generic_file_mmap(file, vma); | ||
157 | if (!rc) | ||
158 | vma->vm_ops = &ecryptfs_file_vm_ops; | ||
159 | |||
160 | return rc; | ||
161 | } | ||
162 | |||
142 | struct kmem_cache *ecryptfs_file_info_cache; | 163 | struct kmem_cache *ecryptfs_file_info_cache; |
143 | 164 | ||
144 | /** | 165 | /** |
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = { | |||
349 | #ifdef CONFIG_COMPAT | 370 | #ifdef CONFIG_COMPAT |
350 | .compat_ioctl = ecryptfs_compat_ioctl, | 371 | .compat_ioctl = ecryptfs_compat_ioctl, |
351 | #endif | 372 | #endif |
352 | .mmap = generic_file_mmap, | 373 | .mmap = ecryptfs_file_mmap, |
353 | .open = ecryptfs_open, | 374 | .open = ecryptfs_open, |
354 | .flush = ecryptfs_flush, | 375 | .flush = ecryptfs_flush, |
355 | .release = ecryptfs_release, | 376 | .release = ecryptfs_release, |
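
Editor's note: the new ecryptfs_file_mmap() keeps generic_file_mmap() but installs vm_ops whose ->close flushes the file's mapping, so data written through a shared mapping is pushed toward the lower file when the mapping goes away instead of waiting for a later sync. A hypothetical userspace analogue of the same habit, flushing a shared mapping before it disappears; the file named by path is assumed to already exist and be at least one page long:

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Write into a file through a shared mapping and flush it explicitly before
 * unmapping, so the data is on its way to the backing file by the time the
 * mapping is gone -- the effect the ->close hook above gets from
 * filemap_write_and_wait(). */
static int write_via_mmap(const char *path, const char *msg)
{
	int fd = open(path, O_RDWR);
	char *p;

	if (fd < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return -1;
	}
	memcpy(p, msg, strlen(msg));
	msync(p, 4096, MS_SYNC);	/* flush before the mapping goes away */
	munmap(p, 4096);
	close(fd);
	return 0;
}
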
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index a36d327f1521..32f90a3ae63e 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, | |||
172 | * it. It will also update the eCryptfs directory inode to mimic the | 172 | * it. It will also update the eCryptfs directory inode to mimic the |
173 | * stat of the lower directory inode. | 173 | * stat of the lower directory inode. |
174 | * | 174 | * |
175 | * Returns zero on success; non-zero on error condition | 175 | * Returns the new eCryptfs inode on success; an ERR_PTR on error condition |
176 | */ | 176 | */ |
177 | static int | 177 | static struct inode * |
178 | ecryptfs_do_create(struct inode *directory_inode, | 178 | ecryptfs_do_create(struct inode *directory_inode, |
179 | struct dentry *ecryptfs_dentry, int mode) | 179 | struct dentry *ecryptfs_dentry, int mode) |
180 | { | 180 | { |
181 | int rc; | 181 | int rc; |
182 | struct dentry *lower_dentry; | 182 | struct dentry *lower_dentry; |
183 | struct dentry *lower_dir_dentry; | 183 | struct dentry *lower_dir_dentry; |
184 | struct inode *inode; | ||
184 | 185 | ||
185 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); | 186 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); |
186 | lower_dir_dentry = lock_parent(lower_dentry); | 187 | lower_dir_dentry = lock_parent(lower_dentry); |
187 | if (IS_ERR(lower_dir_dentry)) { | 188 | if (IS_ERR(lower_dir_dentry)) { |
188 | ecryptfs_printk(KERN_ERR, "Error locking directory of " | 189 | ecryptfs_printk(KERN_ERR, "Error locking directory of " |
189 | "dentry\n"); | 190 | "dentry\n"); |
190 | rc = PTR_ERR(lower_dir_dentry); | 191 | inode = ERR_CAST(lower_dir_dentry); |
191 | goto out; | 192 | goto out; |
192 | } | 193 | } |
193 | rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, | 194 | rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, |
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode, | |||
195 | if (rc) { | 196 | if (rc) { |
196 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " | 197 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " |
197 | "rc = [%d]\n", __func__, rc); | 198 | "rc = [%d]\n", __func__, rc); |
199 | inode = ERR_PTR(rc); | ||
198 | goto out_lock; | 200 | goto out_lock; |
199 | } | 201 | } |
200 | rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, | 202 | inode = __ecryptfs_get_inode(lower_dentry->d_inode, |
201 | directory_inode->i_sb); | 203 | directory_inode->i_sb); |
202 | if (rc) { | 204 | if (IS_ERR(inode)) |
203 | ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n"); | ||
204 | goto out_lock; | 205 | goto out_lock; |
205 | } | ||
206 | fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); | 206 | fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); |
207 | fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); | 207 | fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); |
208 | out_lock: | 208 | out_lock: |
209 | unlock_dir(lower_dir_dentry); | 209 | unlock_dir(lower_dir_dentry); |
210 | out: | 210 | out: |
211 | return rc; | 211 | return inode; |
212 | } | 212 | } |
213 | 213 | ||
214 | /** | 214 | /** |
@@ -219,26 +219,26 @@ out: | |||
219 | * | 219 | * |
220 | * Returns zero on success | 220 | * Returns zero on success |
221 | */ | 221 | */ |
222 | static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | 222 | static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, |
223 | struct inode *ecryptfs_inode) | ||
223 | { | 224 | { |
224 | struct ecryptfs_crypt_stat *crypt_stat = | 225 | struct ecryptfs_crypt_stat *crypt_stat = |
225 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 226 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
226 | int rc = 0; | 227 | int rc = 0; |
227 | 228 | ||
228 | if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { | 229 | if (S_ISDIR(ecryptfs_inode->i_mode)) { |
229 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); | 230 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); |
230 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); | 231 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); |
231 | goto out; | 232 | goto out; |
232 | } | 233 | } |
233 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); | 234 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); |
234 | rc = ecryptfs_new_file_context(ecryptfs_dentry); | 235 | rc = ecryptfs_new_file_context(ecryptfs_inode); |
235 | if (rc) { | 236 | if (rc) { |
236 | ecryptfs_printk(KERN_ERR, "Error creating new file " | 237 | ecryptfs_printk(KERN_ERR, "Error creating new file " |
237 | "context; rc = [%d]\n", rc); | 238 | "context; rc = [%d]\n", rc); |
238 | goto out; | 239 | goto out; |
239 | } | 240 | } |
240 | rc = ecryptfs_get_lower_file(ecryptfs_dentry, | 241 | rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode); |
241 | ecryptfs_dentry->d_inode); | ||
242 | if (rc) { | 242 | if (rc) { |
243 | printk(KERN_ERR "%s: Error attempting to initialize " | 243 | printk(KERN_ERR "%s: Error attempting to initialize " |
244 | "the lower file for the dentry with name " | 244 | "the lower file for the dentry with name " |
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | |||
246 | ecryptfs_dentry->d_name.name, rc); | 246 | ecryptfs_dentry->d_name.name, rc); |
247 | goto out; | 247 | goto out; |
248 | } | 248 | } |
249 | rc = ecryptfs_write_metadata(ecryptfs_dentry); | 249 | rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode); |
250 | if (rc) | 250 | if (rc) |
251 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); | 251 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); |
252 | ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); | 252 | ecryptfs_put_lower_file(ecryptfs_inode); |
253 | out: | 253 | out: |
254 | return rc; | 254 | return rc; |
255 | } | 255 | } |
@@ -269,18 +269,28 @@ static int | |||
269 | ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, | 269 | ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, |
270 | int mode, struct nameidata *nd) | 270 | int mode, struct nameidata *nd) |
271 | { | 271 | { |
272 | struct inode *ecryptfs_inode; | ||
272 | int rc; | 273 | int rc; |
273 | 274 | ||
274 | /* ecryptfs_do_create() calls ecryptfs_interpose() */ | 275 | ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry, |
275 | rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); | 276 | mode); |
276 | if (unlikely(rc)) { | 277 | if (unlikely(IS_ERR(ecryptfs_inode))) { |
277 | ecryptfs_printk(KERN_WARNING, "Failed to create file in" | 278 | ecryptfs_printk(KERN_WARNING, "Failed to create file in" |
278 | "lower filesystem\n"); | 279 | "lower filesystem\n"); |
280 | rc = PTR_ERR(ecryptfs_inode); | ||
279 | goto out; | 281 | goto out; |
280 | } | 282 | } |
281 | /* At this point, a file exists on "disk"; we need to make sure | 283 | /* At this point, a file exists on "disk"; we need to make sure |
282 | * that this on disk file is prepared to be an ecryptfs file */ | 284 | * that this on disk file is prepared to be an ecryptfs file */ |
283 | rc = ecryptfs_initialize_file(ecryptfs_dentry); | 285 | rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); |
286 | if (rc) { | ||
287 | drop_nlink(ecryptfs_inode); | ||
288 | unlock_new_inode(ecryptfs_inode); | ||
289 | iput(ecryptfs_inode); | ||
290 | goto out; | ||
291 | } | ||
292 | d_instantiate(ecryptfs_dentry, ecryptfs_inode); | ||
293 | unlock_new_inode(ecryptfs_inode); | ||
284 | out: | 294 | out: |
285 | return rc; | 295 | return rc; |
286 | } | 296 | } |
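
Editor's note: taken together, these inode.c hunks restructure file creation. ecryptfs_do_create() now returns the new, still-locked eCryptfs inode instead of interposing the dentry itself, and ecryptfs_create() either instantiates the dentry or tears the inode back down if crypto initialization fails. A condensed paraphrase of the resulting flow, not compilable on its own, with the goto labels flattened into early returns:

static int ecryptfs_create(struct inode *directory_inode,
			   struct dentry *ecryptfs_dentry,
			   int mode, struct nameidata *nd)
{
	struct inode *ecryptfs_inode;
	int rc;

	/* creates the lower file and builds the eCryptfs inode for it */
	ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode);
	if (IS_ERR(ecryptfs_inode))
		return PTR_ERR(ecryptfs_inode);

	/* set up the crypto context and write the file headers */
	rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
	if (rc) {
		/* the lower file exists, but the eCryptfs inode never
		 * becomes visible through the dcache */
		drop_nlink(ecryptfs_inode);
		unlock_new_inode(ecryptfs_inode);
		iput(ecryptfs_inode);
		return rc;
	}

	d_instantiate(ecryptfs_dentry, ecryptfs_inode);
	unlock_new_inode(ecryptfs_inode);
	return 0;
}
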
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index f6dba4505f1c..12ccacda44e0 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) | |||
565 | brelse(bitmap_bh); | 565 | brelse(bitmap_bh); |
566 | printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" | 566 | printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" |
567 | ", computed = %llu, %llu\n", | 567 | ", computed = %llu, %llu\n", |
568 | EXT4_B2C(sbi, ext4_free_blocks_count(es)), | 568 | EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), |
569 | desc_count, bitmap_count); | 569 | desc_count, bitmap_count); |
570 | return bitmap_count; | 570 | return bitmap_count; |
571 | #else | 571 | #else |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 240f6e2dc7ee..848f436df29f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -2270,6 +2270,7 @@ retry: | |||
2270 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " | 2270 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " |
2271 | "%ld pages, ino %lu; err %d", __func__, | 2271 | "%ld pages, ino %lu; err %d", __func__, |
2272 | wbc->nr_to_write, inode->i_ino, ret); | 2272 | wbc->nr_to_write, inode->i_ino, ret); |
2273 | blk_finish_plug(&plug); | ||
2273 | goto out_writepages; | 2274 | goto out_writepages; |
2274 | } | 2275 | } |
2275 | 2276 | ||
@@ -2806,8 +2807,8 @@ out: | |||
2806 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 2807 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
2807 | 2808 | ||
2808 | /* queue the work to convert unwritten extents to written */ | 2809 | /* queue the work to convert unwritten extents to written */ |
2809 | queue_work(wq, &io_end->work); | ||
2810 | iocb->private = NULL; | 2810 | iocb->private = NULL; |
2811 | queue_work(wq, &io_end->work); | ||
2811 | 2812 | ||
2812 | /* XXX: probably should move into the real I/O completion handler */ | 2813 | /* XXX: probably should move into the real I/O completion handler */ |
2813 | inode_dio_done(inode); | 2814 | inode_dio_done(inode); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9953d80145ad..3858767ec672 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1683,7 +1683,9 @@ static int parse_options(char *options, struct super_block *sb, | |||
1683 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; | 1683 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; |
1684 | datacheck: | 1684 | datacheck: |
1685 | if (is_remount) { | 1685 | if (is_remount) { |
1686 | if (test_opt(sb, DATA_FLAGS) != data_opt) { | 1686 | if (!sbi->s_journal) |
1687 | ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); | ||
1688 | else if (test_opt(sb, DATA_FLAGS) != data_opt) { | ||
1687 | ext4_msg(sb, KERN_ERR, | 1689 | ext4_msg(sb, KERN_ERR, |
1688 | "Cannot change data mode on remount"); | 1690 | "Cannot change data mode on remount"); |
1689 | return 0; | 1691 | return 0; |
@@ -3099,8 +3101,6 @@ static void ext4_destroy_lazyinit_thread(void) | |||
3099 | } | 3101 | } |
3100 | 3102 | ||
3101 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) | 3103 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
3102 | __releases(kernel_lock) | ||
3103 | __acquires(kernel_lock) | ||
3104 | { | 3104 | { |
3105 | char *orig_data = kstrdup(data, GFP_KERNEL); | 3105 | char *orig_data = kstrdup(data, GFP_KERNEL); |
3106 | struct buffer_head *bh; | 3106 | struct buffer_head *bh; |
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c index 3f32bcb0d9bd..ef175cb8cfd8 100644 --- a/fs/minix/bitmap.c +++ b/fs/minix/bitmap.c | |||
@@ -16,38 +16,26 @@ | |||
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | 18 | ||
19 | static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 }; | ||
20 | |||
21 | static DEFINE_SPINLOCK(bitmap_lock); | 19 | static DEFINE_SPINLOCK(bitmap_lock); |
22 | 20 | ||
23 | static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) | 21 | /* |
22 | * bitmap consists of blocks filled with 16bit words | ||
23 | * bit set == busy, bit clear == free | ||
24 | * endianness is a mess, but for counting zero bits it really doesn't matter... | ||
25 | */ | ||
26 | static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits) | ||
24 | { | 27 | { |
25 | unsigned i, j, sum = 0; | 28 | __u32 sum = 0; |
26 | struct buffer_head *bh; | 29 | unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8); |
27 | |||
28 | for (i=0; i<numblocks-1; i++) { | ||
29 | if (!(bh=map[i])) | ||
30 | return(0); | ||
31 | for (j=0; j<bh->b_size; j++) | ||
32 | sum += nibblemap[bh->b_data[j] & 0xf] | ||
33 | + nibblemap[(bh->b_data[j]>>4) & 0xf]; | ||
34 | } | ||
35 | 30 | ||
36 | if (numblocks==0 || !(bh=map[numblocks-1])) | 31 | while (blocks--) { |
37 | return(0); | 32 | unsigned words = blocksize / 2; |
38 | i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2; | 33 | __u16 *p = (__u16 *)(*map++)->b_data; |
39 | for (j=0; j<i; j++) { | 34 | while (words--) |
40 | sum += nibblemap[bh->b_data[j] & 0xf] | 35 | sum += 16 - hweight16(*p++); |
41 | + nibblemap[(bh->b_data[j]>>4) & 0xf]; | ||
42 | } | 36 | } |
43 | 37 | ||
44 | i = numbits%16; | 38 | return sum; |
45 | if (i!=0) { | ||
46 | i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1); | ||
47 | sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf]; | ||
48 | sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf]; | ||
49 | } | ||
50 | return(sum); | ||
51 | } | 39 | } |
52 | 40 | ||
53 | void minix_free_block(struct inode *inode, unsigned long block) | 41 | void minix_free_block(struct inode *inode, unsigned long block) |
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode) | |||
105 | return 0; | 93 | return 0; |
106 | } | 94 | } |
107 | 95 | ||
108 | unsigned long minix_count_free_blocks(struct minix_sb_info *sbi) | 96 | unsigned long minix_count_free_blocks(struct super_block *sb) |
109 | { | 97 | { |
110 | return (count_free(sbi->s_zmap, sbi->s_zmap_blocks, | 98 | struct minix_sb_info *sbi = minix_sb(sb); |
111 | sbi->s_nzones - sbi->s_firstdatazone + 1) | 99 | u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1); |
100 | |||
101 | return (count_free(sbi->s_zmap, sb->s_blocksize, bits) | ||
112 | << sbi->s_log_zone_size); | 102 | << sbi->s_log_zone_size); |
113 | } | 103 | } |
114 | 104 | ||
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error) | |||
273 | return inode; | 263 | return inode; |
274 | } | 264 | } |
275 | 265 | ||
276 | unsigned long minix_count_free_inodes(struct minix_sb_info *sbi) | 266 | unsigned long minix_count_free_inodes(struct super_block *sb) |
277 | { | 267 | { |
278 | return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1); | 268 | struct minix_sb_info *sbi = minix_sb(sb); |
269 | u32 bits = sbi->s_ninodes + 1; | ||
270 | |||
271 | return count_free(sbi->s_imap, sb->s_blocksize, bits); | ||
279 | } | 272 | } |
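
Editor's note: the rewritten count_free() drops the nibble lookup table and simply counts clear bits 16 bits at a time across however many bitmap blocks cover numbits, which assumes that padding bits past the end of the map are kept set (busy) on disk, otherwise they would be counted as free. A standalone userspace approximation, with __builtin_popcount standing in for hweight16() and a flat array standing in for the buffer_head vector:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Walk the bitmap one 16-bit word at a time and add the number of clear
 * (free) bits per word. */
static uint32_t count_free(const uint16_t *map, unsigned blocksize,
			   uint32_t numbits)
{
	uint32_t sum = 0;
	unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);

	while (blocks--) {
		unsigned words = blocksize / 2;
		while (words--)
			sum += 16 - __builtin_popcount(*map++);
	}
	return sum;
}

int main(void)
{
	/* one 16-byte "block" = 128 bits; bits 0..2 busy, the rest free */
	uint16_t bitmap[8] = { 0x0007 };

	printf("%u free bits\n", count_free(bitmap, 16, 128));	/* prints 125 */
	return 0;
}
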
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 64cdcd662ffc..1d9e33966db0 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
279 | else if (sbi->s_mount_state & MINIX_ERROR_FS) | 279 | else if (sbi->s_mount_state & MINIX_ERROR_FS) |
280 | printk("MINIX-fs: mounting file system with errors, " | 280 | printk("MINIX-fs: mounting file system with errors, " |
281 | "running fsck is recommended\n"); | 281 | "running fsck is recommended\n"); |
282 | |||
283 | /* Apparently minix can create filesystems that allocate more blocks for | ||
284 | * the bitmaps than needed. We simply ignore that, but verify it didn't | ||
285 | * create one with not enough blocks and bail out if so. | ||
286 | */ | ||
287 | block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); | ||
288 | if (sbi->s_imap_blocks < block) { | ||
289 | printk("MINIX-fs: file system does not have enough " | ||
290 | "imap blocks allocated. Refusing to mount\n"); | ||
291 | goto out_iput; | ||
292 | } | ||
293 | |||
294 | block = minix_blocks_needed( | ||
295 | (sbi->s_nzones - (sbi->s_firstdatazone + 1)), | ||
296 | s->s_blocksize); | ||
297 | if (sbi->s_zmap_blocks < block) { | ||
298 | printk("MINIX-fs: file system does not have enough " | ||
299 | "zmap blocks allocated. Refusing to mount.\n"); | ||
300 | goto out_iput; | ||
301 | } | ||
302 | |||
282 | return 0; | 303 | return 0; |
283 | 304 | ||
284 | out_iput: | 305 | out_iput: |
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
339 | buf->f_type = sb->s_magic; | 360 | buf->f_type = sb->s_magic; |
340 | buf->f_bsize = sb->s_blocksize; | 361 | buf->f_bsize = sb->s_blocksize; |
341 | buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; | 362 | buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; |
342 | buf->f_bfree = minix_count_free_blocks(sbi); | 363 | buf->f_bfree = minix_count_free_blocks(sb); |
343 | buf->f_bavail = buf->f_bfree; | 364 | buf->f_bavail = buf->f_bfree; |
344 | buf->f_files = sbi->s_ninodes; | 365 | buf->f_files = sbi->s_ninodes; |
345 | buf->f_ffree = minix_count_free_inodes(sbi); | 366 | buf->f_ffree = minix_count_free_inodes(sb); |
346 | buf->f_namelen = sbi->s_namelen; | 367 | buf->f_namelen = sbi->s_namelen; |
347 | buf->f_fsid.val[0] = (u32)id; | 368 | buf->f_fsid.val[0] = (u32)id; |
348 | buf->f_fsid.val[1] = (u32)(id >> 32); | 369 | buf->f_fsid.val[1] = (u32)(id >> 32); |
diff --git a/fs/minix/minix.h b/fs/minix/minix.h index 341e2122879a..26bbd55e82ea 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h | |||
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru | |||
48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); | 49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); |
50 | extern void minix_free_inode(struct inode * inode); | 50 | extern void minix_free_inode(struct inode * inode); |
51 | extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); | 51 | extern unsigned long minix_count_free_inodes(struct super_block *sb); |
52 | extern int minix_new_block(struct inode * inode); | 52 | extern int minix_new_block(struct inode * inode); |
53 | extern void minix_free_block(struct inode *inode, unsigned long block); | 53 | extern void minix_free_block(struct inode *inode, unsigned long block); |
54 | extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); | 54 | extern unsigned long minix_count_free_blocks(struct super_block *sb); |
55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); | 56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); |
57 | 57 | ||
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode) | |||
88 | return list_entry(inode, struct minix_inode_info, vfs_inode); | 88 | return list_entry(inode, struct minix_inode_info, vfs_inode); |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize) | ||
92 | { | ||
93 | return DIV_ROUND_UP(bits, blocksize * 8); | ||
94 | } | ||
95 | |||
91 | #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ | 96 | #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ |
92 | defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) | 97 | defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) |
93 | 98 | ||
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | |||
125 | if (!size) | 130 | if (!size) |
126 | return 0; | 131 | return 0; |
127 | 132 | ||
128 | size = (size >> 4) + ((size & 15) > 0); | 133 | size >>= 4; |
129 | while (*p++ == 0xffff) { | 134 | while (*p++ == 0xffff) { |
130 | if (--size == 0) | 135 | if (--size == 0) |
131 | return (p - addr) << 4; | 136 | return (p - addr) << 4; |
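
Editor's note: minix_blocks_needed(), added above, is a plain ceiling division of a bit count by the bits held per block, and the new checks in minix_fill_super() use it to refuse superblocks whose s_imap_blocks or s_zmap_blocks are smaller than the inode and zone counts demand. A quick worked example of the arithmetic; the numbers are made up:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same computation as minix_blocks_needed(): how many bitmap blocks are
 * needed to hold one bit per object, given the filesystem block size. */
static unsigned blocks_needed(unsigned bits, unsigned blocksize)
{
	return DIV_ROUND_UP(bits, blocksize * 8);
}

int main(void)
{
	/* e.g. 30000 inodes with a 1 KiB block size: 8192 bits fit per block,
	 * so mkfs must have allocated at least 4 imap blocks. */
	printf("%u\n", blocks_needed(30000, 1024));	/* prints 4 */
	return 0;
}
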
diff --git a/fs/namespace.c b/fs/namespace.c index 50ee30345b4f..6d3a1963879b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -2493,6 +2493,7 @@ EXPORT_SYMBOL(create_mnt_ns); | |||
2493 | struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) | 2493 | struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) |
2494 | { | 2494 | { |
2495 | struct mnt_namespace *ns; | 2495 | struct mnt_namespace *ns; |
2496 | struct super_block *s; | ||
2496 | struct path path; | 2497 | struct path path; |
2497 | int err; | 2498 | int err; |
2498 | 2499 | ||
@@ -2509,10 +2510,11 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) | |||
2509 | return ERR_PTR(err); | 2510 | return ERR_PTR(err); |
2510 | 2511 | ||
2511 | /* trade a vfsmount reference for active sb one */ | 2512 | /* trade a vfsmount reference for active sb one */ |
2512 | atomic_inc(&path.mnt->mnt_sb->s_active); | 2513 | s = path.mnt->mnt_sb; |
2514 | atomic_inc(&s->s_active); | ||
2513 | mntput(path.mnt); | 2515 | mntput(path.mnt); |
2514 | /* lock the sucker */ | 2516 | /* lock the sucker */ |
2515 | down_write(&path.mnt->mnt_sb->s_umount); | 2517 | down_write(&s->s_umount); |
2516 | /* ... and return the root of (sub)tree on it */ | 2518 | /* ... and return the root of (sub)tree on it */ |
2517 | return path.dentry; | 2519 | return path.dentry; |
2518 | } | 2520 | } |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index b238d95ac48c..ac2899098147 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1468 | res = NULL; | 1468 | res = NULL; |
1469 | goto out; | 1469 | goto out; |
1470 | /* This turned out not to be a regular file */ | 1470 | /* This turned out not to be a regular file */ |
1471 | case -EISDIR: | ||
1471 | case -ENOTDIR: | 1472 | case -ENOTDIR: |
1472 | goto no_open; | 1473 | goto no_open; |
1473 | case -ELOOP: | 1474 | case -ELOOP: |
1474 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1475 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1475 | goto no_open; | 1476 | goto no_open; |
1476 | /* case -EISDIR: */ | ||
1477 | /* case -EINVAL: */ | 1477 | /* case -EINVAL: */ |
1478 | default: | 1478 | default: |
1479 | res = ERR_CAST(inode); | 1479 | res = ERR_CAST(inode); |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 0a1f8312b4dc..eca56d4b39c0 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -40,48 +40,8 @@ | |||
40 | 40 | ||
41 | #define NFSDBG_FACILITY NFSDBG_FILE | 41 | #define NFSDBG_FACILITY NFSDBG_FILE |
42 | 42 | ||
43 | static int nfs_file_open(struct inode *, struct file *); | ||
44 | static int nfs_file_release(struct inode *, struct file *); | ||
45 | static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin); | ||
46 | static int nfs_file_mmap(struct file *, struct vm_area_struct *); | ||
47 | static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos, | ||
48 | struct pipe_inode_info *pipe, | ||
49 | size_t count, unsigned int flags); | ||
50 | static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov, | ||
51 | unsigned long nr_segs, loff_t pos); | ||
52 | static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, | ||
53 | struct file *filp, loff_t *ppos, | ||
54 | size_t count, unsigned int flags); | ||
55 | static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, | ||
56 | unsigned long nr_segs, loff_t pos); | ||
57 | static int nfs_file_flush(struct file *, fl_owner_t id); | ||
58 | static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync); | ||
59 | static int nfs_check_flags(int flags); | ||
60 | static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); | ||
61 | static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); | ||
62 | static int nfs_setlease(struct file *file, long arg, struct file_lock **fl); | ||
63 | |||
64 | static const struct vm_operations_struct nfs_file_vm_ops; | 43 | static const struct vm_operations_struct nfs_file_vm_ops; |
65 | 44 | ||
66 | const struct file_operations nfs_file_operations = { | ||
67 | .llseek = nfs_file_llseek, | ||
68 | .read = do_sync_read, | ||
69 | .write = do_sync_write, | ||
70 | .aio_read = nfs_file_read, | ||
71 | .aio_write = nfs_file_write, | ||
72 | .mmap = nfs_file_mmap, | ||
73 | .open = nfs_file_open, | ||
74 | .flush = nfs_file_flush, | ||
75 | .release = nfs_file_release, | ||
76 | .fsync = nfs_file_fsync, | ||
77 | .lock = nfs_lock, | ||
78 | .flock = nfs_flock, | ||
79 | .splice_read = nfs_file_splice_read, | ||
80 | .splice_write = nfs_file_splice_write, | ||
81 | .check_flags = nfs_check_flags, | ||
82 | .setlease = nfs_setlease, | ||
83 | }; | ||
84 | |||
85 | const struct inode_operations nfs_file_inode_operations = { | 45 | const struct inode_operations nfs_file_inode_operations = { |
86 | .permission = nfs_permission, | 46 | .permission = nfs_permission, |
87 | .getattr = nfs_getattr, | 47 | .getattr = nfs_getattr, |
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl) | |||
886 | file->f_path.dentry->d_name.name, arg); | 846 | file->f_path.dentry->d_name.name, arg); |
887 | return -EINVAL; | 847 | return -EINVAL; |
888 | } | 848 | } |
849 | |||
850 | const struct file_operations nfs_file_operations = { | ||
851 | .llseek = nfs_file_llseek, | ||
852 | .read = do_sync_read, | ||
853 | .write = do_sync_write, | ||
854 | .aio_read = nfs_file_read, | ||
855 | .aio_write = nfs_file_write, | ||
856 | .mmap = nfs_file_mmap, | ||
857 | .open = nfs_file_open, | ||
858 | .flush = nfs_file_flush, | ||
859 | .release = nfs_file_release, | ||
860 | .fsync = nfs_file_fsync, | ||
861 | .lock = nfs_lock, | ||
862 | .flock = nfs_flock, | ||
863 | .splice_read = nfs_file_splice_read, | ||
864 | .splice_write = nfs_file_splice_write, | ||
865 | .check_flags = nfs_check_flags, | ||
866 | .setlease = nfs_setlease, | ||
867 | }; | ||
868 | |||
869 | #ifdef CONFIG_NFS_V4 | ||
870 | static int | ||
871 | nfs4_file_open(struct inode *inode, struct file *filp) | ||
872 | { | ||
873 | /* | ||
874 | * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to | ||
875 | * this point, then something is very wrong | ||
876 | */ | ||
877 | dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp); | ||
878 | return -ENOTDIR; | ||
879 | } | ||
880 | |||
881 | const struct file_operations nfs4_file_operations = { | ||
882 | .llseek = nfs_file_llseek, | ||
883 | .read = do_sync_read, | ||
884 | .write = do_sync_write, | ||
885 | .aio_read = nfs_file_read, | ||
886 | .aio_write = nfs_file_write, | ||
887 | .mmap = nfs_file_mmap, | ||
888 | .open = nfs4_file_open, | ||
889 | .flush = nfs_file_flush, | ||
890 | .release = nfs_file_release, | ||
891 | .fsync = nfs_file_fsync, | ||
892 | .lock = nfs_lock, | ||
893 | .flock = nfs_flock, | ||
894 | .splice_read = nfs_file_splice_read, | ||
895 | .splice_write = nfs_file_splice_write, | ||
896 | .check_flags = nfs_check_flags, | ||
897 | .setlease = nfs_setlease, | ||
898 | }; | ||
899 | #endif /* CONFIG_NFS_V4 */ | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index c07a55aec838..50a15fa8cf98 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) | |||
291 | */ | 291 | */ |
292 | inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; | 292 | inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; |
293 | if (S_ISREG(inode->i_mode)) { | 293 | if (S_ISREG(inode->i_mode)) { |
294 | inode->i_fop = &nfs_file_operations; | 294 | inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops; |
295 | inode->i_data.a_ops = &nfs_file_aops; | 295 | inode->i_data.a_ops = &nfs_file_aops; |
296 | inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; | 296 | inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; |
297 | } else if (S_ISDIR(inode->i_mode)) { | 297 | } else if (S_ISDIR(inode->i_mode)) { |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index c1a1bd8ddf1c..3f4d95751d52 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata); | |||
299 | extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, | 299 | extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, |
300 | struct list_head *head); | 300 | struct list_head *head); |
301 | 301 | ||
302 | extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, | ||
303 | struct inode *inode); | ||
302 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); | 304 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); |
303 | extern void nfs_readdata_release(struct nfs_read_data *rdata); | 305 | extern void nfs_readdata_release(struct nfs_read_data *rdata); |
304 | 306 | ||
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 85f1690ca08c..d4bc9ed91748 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { | |||
853 | .dentry_ops = &nfs_dentry_operations, | 853 | .dentry_ops = &nfs_dentry_operations, |
854 | .dir_inode_ops = &nfs3_dir_inode_operations, | 854 | .dir_inode_ops = &nfs3_dir_inode_operations, |
855 | .file_inode_ops = &nfs3_file_inode_operations, | 855 | .file_inode_ops = &nfs3_file_inode_operations, |
856 | .file_ops = &nfs_file_operations, | ||
856 | .getroot = nfs3_proc_get_root, | 857 | .getroot = nfs3_proc_get_root, |
857 | .getattr = nfs3_proc_getattr, | 858 | .getattr = nfs3_proc_getattr, |
858 | .setattr = nfs3_proc_setattr, | 859 | .setattr = nfs3_proc_setattr, |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b60fddf606f7..be2bbac13817 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2464,8 +2464,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst | |||
2464 | case -NFS4ERR_BADNAME: | 2464 | case -NFS4ERR_BADNAME: |
2465 | return -ENOENT; | 2465 | return -ENOENT; |
2466 | case -NFS4ERR_MOVED: | 2466 | case -NFS4ERR_MOVED: |
2467 | err = nfs4_get_referral(dir, name, fattr, fhandle); | 2467 | return nfs4_get_referral(dir, name, fattr, fhandle); |
2468 | break; | ||
2469 | case -NFS4ERR_WRONGSEC: | 2468 | case -NFS4ERR_WRONGSEC: |
2470 | nfs_fixup_secinfo_attributes(fattr, fhandle); | 2469 | nfs_fixup_secinfo_attributes(fattr, fhandle); |
2471 | } | 2470 | } |
@@ -6253,6 +6252,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { | |||
6253 | .dentry_ops = &nfs4_dentry_operations, | 6252 | .dentry_ops = &nfs4_dentry_operations, |
6254 | .dir_inode_ops = &nfs4_dir_inode_operations, | 6253 | .dir_inode_ops = &nfs4_dir_inode_operations, |
6255 | .file_inode_ops = &nfs4_file_inode_operations, | 6254 | .file_inode_ops = &nfs4_file_inode_operations, |
6255 | .file_ops = &nfs4_file_operations, | ||
6256 | .getroot = nfs4_proc_get_root, | 6256 | .getroot = nfs4_proc_get_root, |
6257 | .getattr = nfs4_proc_getattr, | 6257 | .getattr = nfs4_proc_getattr, |
6258 | .setattr = nfs4_proc_setattr, | 6258 | .setattr = nfs4_proc_setattr, |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index baf73536bc04..8e672a2b2d69 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) | |||
1260 | } | 1260 | } |
1261 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); | 1261 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); |
1262 | 1262 | ||
1263 | static void pnfs_ld_handle_read_error(struct nfs_read_data *data) | ||
1264 | { | ||
1265 | struct nfs_pageio_descriptor pgio; | ||
1266 | |||
1267 | put_lseg(data->lseg); | ||
1268 | data->lseg = NULL; | ||
1269 | dprintk("pnfs write error = %d\n", data->pnfs_error); | ||
1270 | |||
1271 | nfs_pageio_init_read_mds(&pgio, data->inode); | ||
1272 | |||
1273 | while (!list_empty(&data->pages)) { | ||
1274 | struct nfs_page *req = nfs_list_entry(data->pages.next); | ||
1275 | |||
1276 | nfs_list_remove_request(req); | ||
1277 | nfs_pageio_add_request(&pgio, req); | ||
1278 | } | ||
1279 | nfs_pageio_complete(&pgio); | ||
1280 | } | ||
1281 | |||
1263 | /* | 1282 | /* |
1264 | * Called by non rpc-based layout drivers | 1283 | * Called by non rpc-based layout drivers |
1265 | */ | 1284 | */ |
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data) | |||
1268 | if (likely(!data->pnfs_error)) { | 1287 | if (likely(!data->pnfs_error)) { |
1269 | __nfs4_read_done_cb(data); | 1288 | __nfs4_read_done_cb(data); |
1270 | data->mds_ops->rpc_call_done(&data->task, data); | 1289 | data->mds_ops->rpc_call_done(&data->task, data); |
1271 | } else { | 1290 | } else |
1272 | put_lseg(data->lseg); | 1291 | pnfs_ld_handle_read_error(data); |
1273 | data->lseg = NULL; | ||
1274 | dprintk("pnfs write error = %d\n", data->pnfs_error); | ||
1275 | } | ||
1276 | data->mds_ops->rpc_release(data); | 1292 | data->mds_ops->rpc_release(data); |
1277 | } | 1293 | } |
1278 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); | 1294 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); |
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index ac40b8535d7e..f48125da198a 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { | |||
710 | .dentry_ops = &nfs_dentry_operations, | 710 | .dentry_ops = &nfs_dentry_operations, |
711 | .dir_inode_ops = &nfs_dir_inode_operations, | 711 | .dir_inode_ops = &nfs_dir_inode_operations, |
712 | .file_inode_ops = &nfs_file_inode_operations, | 712 | .file_inode_ops = &nfs_file_inode_operations, |
713 | .file_ops = &nfs_file_operations, | ||
713 | .getroot = nfs_proc_get_root, | 714 | .getroot = nfs_proc_get_root, |
714 | .getattr = nfs_proc_getattr, | 715 | .getattr = nfs_proc_getattr, |
715 | .setattr = nfs_proc_setattr, | 716 | .setattr = nfs_proc_setattr, |
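
Editor's note: the thread running through the nfs hunks above is that each protocol version's nfs_rpc_ops table gains a ->file_ops pointer, nfs_fhget() assigns it to the inode's i_fop, and NFSv4 supplies nfs4_file_operations whose ->open refuses to run because v4 opens are resolved during lookup. A hypothetical, self-contained miniature of that per-version ops-table pattern; all names below are invented for illustration:

#include <stdio.h>

struct file_ops {
	int (*open)(const char *name);
};

static int v3_open(const char *name)
{
	printf("v3: opening %s\n", name);
	return 0;
}

static int v4_open(const char *name)
{
	/* v4 "opens" are resolved earlier; reaching this path is a bug */
	printf("v4: open of %s reached the wrong path\n", name);
	return -1;
}

static const struct file_ops v3_file_ops = { .open = v3_open };
static const struct file_ops v4_file_ops = { .open = v4_open };

/* analogous to struct nfs_rpc_ops gaining a .file_ops member */
struct rpc_ops {
	const struct file_ops *file_ops;
};

int main(void)
{
	const struct rpc_ops v3 = { .file_ops = &v3_file_ops };
	const struct rpc_ops v4 = { .file_ops = &v4_file_ops };

	/* nfs_fhget() now effectively does: inode->i_fop = rpc_ops->file_ops */
	v3.file_ops->open("data.txt");
	v4.file_ops->open("data.txt");
	return 0;
}
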
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 8b48ec63f722..cfa175c223dc 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
112 | static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, | 112 | void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, |
113 | struct inode *inode) | 113 | struct inode *inode) |
114 | { | 114 | { |
115 | nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, | 115 | nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, |
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata) | |||
534 | static void nfs_readpage_release_full(void *calldata) | 534 | static void nfs_readpage_release_full(void *calldata) |
535 | { | 535 | { |
536 | struct nfs_read_data *data = calldata; | 536 | struct nfs_read_data *data = calldata; |
537 | struct nfs_pageio_descriptor pgio; | ||
538 | 537 | ||
539 | if (data->pnfs_error) { | ||
540 | nfs_pageio_init_read_mds(&pgio, data->inode); | ||
541 | pgio.pg_recoalesce = 1; | ||
542 | } | ||
543 | while (!list_empty(&data->pages)) { | 538 | while (!list_empty(&data->pages)) { |
544 | struct nfs_page *req = nfs_list_entry(data->pages.next); | 539 | struct nfs_page *req = nfs_list_entry(data->pages.next); |
545 | 540 | ||
546 | nfs_list_remove_request(req); | 541 | nfs_list_remove_request(req); |
547 | if (!data->pnfs_error) | 542 | nfs_readpage_release(req); |
548 | nfs_readpage_release(req); | ||
549 | else | ||
550 | nfs_pageio_add_request(&pgio, req); | ||
551 | } | 543 | } |
552 | if (data->pnfs_error) | ||
553 | nfs_pageio_complete(&pgio); | ||
554 | nfs_readdata_release(calldata); | 544 | nfs_readdata_release(calldata); |
555 | } | 545 | } |
556 | 546 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index ed553c60de82..3165aebb43c8 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
5699 | OCFS2_JOURNAL_ACCESS_WRITE); | 5699 | OCFS2_JOURNAL_ACCESS_WRITE); |
5700 | if (ret) { | 5700 | if (ret) { |
5701 | mlog_errno(ret); | 5701 | mlog_errno(ret); |
5702 | goto out; | 5702 | goto out_commit; |
5703 | } | 5703 | } |
5704 | 5704 | ||
5705 | dquot_free_space_nodirty(inode, | 5705 | dquot_free_space_nodirty(inode, |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c1efe939c774..78b68af3b0e3 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page) | |||
290 | } | 290 | } |
291 | 291 | ||
292 | if (down_read_trylock(&oi->ip_alloc_sem) == 0) { | 292 | if (down_read_trylock(&oi->ip_alloc_sem) == 0) { |
293 | /* | ||
294 | * Unlock the page and cycle ip_alloc_sem so that we don't | ||
295 | * busyloop waiting for ip_alloc_sem to unlock | ||
296 | */ | ||
293 | ret = AOP_TRUNCATED_PAGE; | 297 | ret = AOP_TRUNCATED_PAGE; |
298 | unlock_page(page); | ||
299 | unlock = 0; | ||
300 | down_read(&oi->ip_alloc_sem); | ||
301 | up_read(&oi->ip_alloc_sem); | ||
294 | goto out_inode_unlock; | 302 | goto out_inode_unlock; |
295 | } | 303 | } |
296 | 304 | ||
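
Editor's note: the readpage hunk's comment is the key. Returning AOP_TRUNCATED_PAGE with the page unlocked makes the caller retry, and taking then immediately releasing ip_alloc_sem ensures the retry happens only after the current holder is done, instead of spinning on the trylock. A small pthread sketch of that lock-cycling idiom; the lock and function names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Returns 0 on success, 1 to ask the caller to retry.  When the trylock
 * fails we take and drop the lock once, so the retry only happens after the
 * current holder has finished, instead of busy-looping on trylock. */
static int read_page(void)
{
	if (pthread_rwlock_tryrdlock(&alloc_sem) != 0) {
		pthread_rwlock_rdlock(&alloc_sem);	/* wait for the holder */
		pthread_rwlock_unlock(&alloc_sem);	/* ...then give it back */
		return 1;				/* caller retries */
	}

	puts("page read under alloc_sem");
	pthread_rwlock_unlock(&alloc_sem);
	return 0;
}

int main(void)
{
	while (read_page())
		;					/* retry until it succeeds */
	return 0;
}
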
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
563 | { | 571 | { |
564 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; | 572 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; |
565 | int level; | 573 | int level; |
574 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
566 | 575 | ||
567 | /* this io's submitter should not have unlocked this before we could */ | 576 | /* this io's submitter should not have unlocked this before we could */ |
568 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); | 577 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); |
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
570 | if (ocfs2_iocb_is_sem_locked(iocb)) | 579 | if (ocfs2_iocb_is_sem_locked(iocb)) |
571 | ocfs2_iocb_clear_sem_locked(iocb); | 580 | ocfs2_iocb_clear_sem_locked(iocb); |
572 | 581 | ||
582 | if (ocfs2_iocb_is_unaligned_aio(iocb)) { | ||
583 | ocfs2_iocb_clear_unaligned_aio(iocb); | ||
584 | |||
585 | if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) && | ||
586 | waitqueue_active(wq)) { | ||
587 | wake_up_all(wq); | ||
588 | } | ||
589 | } | ||
590 | |||
573 | ocfs2_iocb_clear_rw_locked(iocb); | 591 | ocfs2_iocb_clear_rw_locked(iocb); |
574 | 592 | ||
575 | level = ocfs2_iocb_rw_locked_level(iocb); | 593 | level = ocfs2_iocb_rw_locked_level(iocb); |
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt { | |||
863 | struct page *w_target_page; | 881 | struct page *w_target_page; |
864 | 882 | ||
865 | /* | 883 | /* |
884 | * w_target_locked is used for page_mkwrite path indicating no unlocking | ||
885 | * against w_target_page in ocfs2_write_end_nolock. | ||
886 | */ | ||
887 | unsigned int w_target_locked:1; | ||
888 | |||
889 | /* | ||
866 | * ocfs2_write_end() uses this to know what the real range to | 890 | * ocfs2_write_end() uses this to know what the real range to |
867 | * write in the target should be. | 891 | * write in the target should be. |
868 | */ | 892 | */ |
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) | |||
895 | 919 | ||
896 | static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) | 920 | static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) |
897 | { | 921 | { |
922 | int i; | ||
923 | |||
924 | /* | ||
925 | * w_target_locked is only set to true in the page_mkwrite() case. | ||
926 | * The intent is to allow us to lock the target page from write_begin() | ||
927 | * to write_end(). The caller must hold a ref on w_target_page. | ||
928 | */ | ||
929 | if (wc->w_target_locked) { | ||
930 | BUG_ON(!wc->w_target_page); | ||
931 | for (i = 0; i < wc->w_num_pages; i++) { | ||
932 | if (wc->w_target_page == wc->w_pages[i]) { | ||
933 | wc->w_pages[i] = NULL; | ||
934 | break; | ||
935 | } | ||
936 | } | ||
937 | mark_page_accessed(wc->w_target_page); | ||
938 | page_cache_release(wc->w_target_page); | ||
939 | } | ||
898 | ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); | 940 | ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); |
899 | 941 | ||
900 | brelse(wc->w_di_bh); | 942 | brelse(wc->w_di_bh); |
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, | |||
1132 | */ | 1174 | */ |
1133 | lock_page(mmap_page); | 1175 | lock_page(mmap_page); |
1134 | 1176 | ||
1177 | /* Exit and let the caller retry */ | ||
1135 | if (mmap_page->mapping != mapping) { | 1178 | if (mmap_page->mapping != mapping) { |
1179 | WARN_ON(mmap_page->mapping); | ||
1136 | unlock_page(mmap_page); | 1180 | unlock_page(mmap_page); |
1137 | /* | 1181 | ret = -EAGAIN; |
1138 | * Sanity check - the locking in | ||
1139 | * ocfs2_pagemkwrite() should ensure | ||
1140 | * that this code doesn't trigger. | ||
1141 | */ | ||
1142 | ret = -EINVAL; | ||
1143 | mlog_errno(ret); | ||
1144 | goto out; | 1182 | goto out; |
1145 | } | 1183 | } |
1146 | 1184 | ||
1147 | page_cache_get(mmap_page); | 1185 | page_cache_get(mmap_page); |
1148 | wc->w_pages[i] = mmap_page; | 1186 | wc->w_pages[i] = mmap_page; |
1187 | wc->w_target_locked = true; | ||
1149 | } else { | 1188 | } else { |
1150 | wc->w_pages[i] = find_or_create_page(mapping, index, | 1189 | wc->w_pages[i] = find_or_create_page(mapping, index, |
1151 | GFP_NOFS); | 1190 | GFP_NOFS); |
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, | |||
1160 | wc->w_target_page = wc->w_pages[i]; | 1199 | wc->w_target_page = wc->w_pages[i]; |
1161 | } | 1200 | } |
1162 | out: | 1201 | out: |
1202 | if (ret) | ||
1203 | wc->w_target_locked = false; | ||
1163 | return ret; | 1204 | return ret; |
1164 | } | 1205 | } |
1165 | 1206 | ||
@@ -1817,11 +1858,23 @@ try_again: | |||
1817 | */ | 1858 | */ |
1818 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, | 1859 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, |
1819 | cluster_of_pages, mmap_page); | 1860 | cluster_of_pages, mmap_page); |
1820 | if (ret) { | 1861 | if (ret && ret != -EAGAIN) { |
1821 | mlog_errno(ret); | 1862 | mlog_errno(ret); |
1822 | goto out_quota; | 1863 | goto out_quota; |
1823 | } | 1864 | } |
1824 | 1865 | ||
1866 | /* | ||
1867 | * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock | ||
1868 | * the target page. In this case, we exit with no error and no target | ||
1869 | * page. This will trigger the caller, page_mkwrite(), to re-try | ||
1870 | * the operation. | ||
1871 | */ | ||
1872 | if (ret == -EAGAIN) { | ||
1873 | BUG_ON(wc->w_target_page); | ||
1874 | ret = 0; | ||
1875 | goto out_quota; | ||
1876 | } | ||
1877 | |||
1825 | ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, | 1878 | ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, |
1826 | len); | 1879 | len); |
1827 | if (ret) { | 1880 | if (ret) { |
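
Editor's note: read together, the ocfs2_grab_pages_for_write() and write-begin hunks define a small retry contract for the mmap write path. If the target page's mapping changed underneath us, grab_pages unlocks it and returns -EAGAIN; ocfs2_write_begin_nolock() then reports success with no target page, which (per the added comment) page_mkwrite() takes as a cue to retry the fault. A condensed paraphrase of the two ends of that contract, not compilable on its own:

	/* inside ocfs2_grab_pages_for_write(), mmap case */
	lock_page(mmap_page);
	if (mmap_page->mapping != mapping) {	/* page was truncated/replaced */
		WARN_ON(mmap_page->mapping);
		unlock_page(mmap_page);
		ret = -EAGAIN;			/* bail out without pinning anything */
		goto out;
	}

	/* inside ocfs2_write_begin_nolock() */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret == -EAGAIN) {
		BUG_ON(wc->w_target_page);	/* nothing was pinned */
		ret = 0;			/* no error, no page: page_mkwrite retries */
		goto out_quota;
	}
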
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 75cf3ad987a6..ffb2da370a99 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h | |||
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits { | |||
78 | OCFS2_IOCB_RW_LOCK = 0, | 78 | OCFS2_IOCB_RW_LOCK = 0, |
79 | OCFS2_IOCB_RW_LOCK_LEVEL, | 79 | OCFS2_IOCB_RW_LOCK_LEVEL, |
80 | OCFS2_IOCB_SEM, | 80 | OCFS2_IOCB_SEM, |
81 | OCFS2_IOCB_UNALIGNED_IO, | ||
81 | OCFS2_IOCB_NUM_LOCKS | 82 | OCFS2_IOCB_NUM_LOCKS |
82 | }; | 83 | }; |
83 | 84 | ||
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits { | |||
91 | clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) | 92 | clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) |
92 | #define ocfs2_iocb_is_sem_locked(iocb) \ | 93 | #define ocfs2_iocb_is_sem_locked(iocb) \ |
93 | test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) | 94 | test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) |
95 | |||
96 | #define ocfs2_iocb_set_unaligned_aio(iocb) \ | ||
97 | set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
98 | #define ocfs2_iocb_clear_unaligned_aio(iocb) \ | ||
99 | clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
100 | #define ocfs2_iocb_is_unaligned_aio(iocb) \ | ||
101 | test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
102 | |||
103 | #define OCFS2_IOEND_WQ_HASH_SZ 37 | ||
104 | #define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\ | ||
105 | OCFS2_IOEND_WQ_HASH_SZ]) | ||
106 | extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
107 | |||
94 | #endif /* OCFS2_FILE_H */ | 108 | #endif /* OCFS2_FILE_H */ |
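
Editor's note: ocfs2_ioend_wq() hashes the inode pointer into a fixed table of 37 wait queues instead of embedding a wait queue in every inode; unrelated inodes can share a bucket, which merely costs the occasional spurious wakeup since each waiter re-checks its own condition. A hypothetical userspace miniature of that pointer-hashed bucket scheme, with pthread condition variables standing in for wait_queue_head_t:

#include <pthread.h>
#include <stdio.h>

#define IOEND_WQ_HASH_SZ 37	/* same bucket count as the ocfs2 macro */

static pthread_cond_t ioend_wq[IOEND_WQ_HASH_SZ];

/* Map an object's address to one of a small, fixed set of wait queues.
 * Distinct objects may collide on a bucket; waiters simply re-check their
 * own condition after a wakeup, so collisions are harmless. */
static pthread_cond_t *ioend_wq_for(const void *obj)
{
	return &ioend_wq[(unsigned long)obj % IOEND_WQ_HASH_SZ];
}

int main(void)
{
	int a, b;

	printf("a -> bucket %ld\n", (long)(ioend_wq_for(&a) - ioend_wq));
	printf("b -> bucket %ld\n", (long)(ioend_wq_for(&b) - ioend_wq));
	return 0;
}
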
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9a3e6bbff27b..a4e855e3690e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -216,6 +216,7 @@ struct o2hb_region { | |||
216 | 216 | ||
217 | struct list_head hr_all_item; | 217 | struct list_head hr_all_item; |
218 | unsigned hr_unclean_stop:1, | 218 | unsigned hr_unclean_stop:1, |
219 | hr_aborted_start:1, | ||
219 | hr_item_pinned:1, | 220 | hr_item_pinned:1, |
220 | hr_item_dropped:1; | 221 | hr_item_dropped:1; |
221 | 222 | ||
@@ -254,6 +255,10 @@ struct o2hb_region { | |||
254 | * a more complete api that doesn't lead to this sort of fragility. */ | 255 | * a more complete api that doesn't lead to this sort of fragility. */ |
255 | atomic_t hr_steady_iterations; | 256 | atomic_t hr_steady_iterations; |
256 | 257 | ||
258 | /* terminate o2hb thread if it does not reach steady state | ||
259 | * (hr_steady_iterations == 0) within hr_unsteady_iterations */ | ||
260 | atomic_t hr_unsteady_iterations; | ||
261 | |||
257 | char hr_dev_name[BDEVNAME_SIZE]; | 262 | char hr_dev_name[BDEVNAME_SIZE]; |
258 | 263 | ||
259 | unsigned int hr_timeout_ms; | 264 | unsigned int hr_timeout_ms; |
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work) | |||
324 | 329 | ||
325 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) | 330 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) |
326 | { | 331 | { |
332 | /* Arm writeout only after thread reaches steady state */ | ||
333 | if (atomic_read(&reg->hr_steady_iterations) != 0) | ||
334 | return; | ||
335 | |||
327 | mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", | 336 | mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", |
328 | O2HB_MAX_WRITE_TIMEOUT_MS); | 337 | O2HB_MAX_WRITE_TIMEOUT_MS); |
329 | 338 | ||
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg, | |||
537 | return read == computed; | 546 | return read == computed; |
538 | } | 547 | } |
539 | 548 | ||
540 | /* We want to make sure that nobody is heartbeating on top of us -- | 549 | /* |
541 | * this will help detect an invalid configuration. */ | 550 | * Compare the slot data with what we wrote in the last iteration. |
542 | static void o2hb_check_last_timestamp(struct o2hb_region *reg) | 551 | * If the match fails, print an appropriate error message. This is to |
552 | * detect errors like... another node hearting on the same slot, | ||
553 | * flaky device that is losing writes, etc. | ||
554 | * Returns 1 if check succeeds, 0 otherwise. | ||
555 | */ | ||
556 | static int o2hb_check_own_slot(struct o2hb_region *reg) | ||
543 | { | 557 | { |
544 | struct o2hb_disk_slot *slot; | 558 | struct o2hb_disk_slot *slot; |
545 | struct o2hb_disk_heartbeat_block *hb_block; | 559 | struct o2hb_disk_heartbeat_block *hb_block; |
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) | |||
548 | slot = &reg->hr_slots[o2nm_this_node()]; | 562 | slot = &reg->hr_slots[o2nm_this_node()]; |
549 | /* Don't check on our 1st timestamp */ | 563 | /* Don't check on our 1st timestamp */ |
550 | if (!slot->ds_last_time) | 564 | if (!slot->ds_last_time) |
551 | return; | 565 | return 0; |
552 | 566 | ||
553 | hb_block = slot->ds_raw_block; | 567 | hb_block = slot->ds_raw_block; |
554 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && | 568 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && |
555 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && | 569 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && |
556 | hb_block->hb_node == slot->ds_node_num) | 570 | hb_block->hb_node == slot->ds_node_num) |
557 | return; | 571 | return 1; |
558 | 572 | ||
559 | #define ERRSTR1 "Another node is heartbeating on device" | 573 | #define ERRSTR1 "Another node is heartbeating on device" |
560 | #define ERRSTR2 "Heartbeat generation mismatch on device" | 574 | #define ERRSTR2 "Heartbeat generation mismatch on device" |
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) | |||
574 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, | 588 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, |
575 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), | 589 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), |
576 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); | 590 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); |
591 | |||
592 | return 0; | ||
577 | } | 593 | } |
578 | 594 | ||
579 | static inline void o2hb_prepare_block(struct o2hb_region *reg, | 595 | static inline void o2hb_prepare_block(struct o2hb_region *reg, |
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) | |||
719 | o2nm_node_put(node); | 735 | o2nm_node_put(node); |
720 | } | 736 | } |
721 | 737 | ||
722 | static void o2hb_set_quorum_device(struct o2hb_region *reg, | 738 | static void o2hb_set_quorum_device(struct o2hb_region *reg) |
723 | struct o2hb_disk_slot *slot) | ||
724 | { | 739 | { |
725 | assert_spin_locked(&o2hb_live_lock); | ||
726 | |||
727 | if (!o2hb_global_heartbeat_active()) | 740 | if (!o2hb_global_heartbeat_active()) |
728 | return; | 741 | return; |
729 | 742 | ||
730 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | 743 | /* Prevent race with o2hb_heartbeat_group_drop_item() */ |
744 | if (kthread_should_stop()) | ||
745 | return; | ||
746 | |||
747 | /* Tag region as quorum only after thread reaches steady state */ | ||
748 | if (atomic_read(&reg->hr_steady_iterations) != 0) | ||
731 | return; | 749 | return; |
732 | 750 | ||
751 | spin_lock(&o2hb_live_lock); | ||
752 | |||
753 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
754 | goto unlock; | ||
755 | |||
733 | /* | 756 | /* |
734 | * A region can be added to the quorum only when it sees all | 757 | * A region can be added to the quorum only when it sees all |
735 | * live nodes heartbeat on it. In other words, the region has been | 758 | * live nodes heartbeat on it. In other words, the region has been |
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, | |||
737 | */ | 760 | */ |
738 | if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, | 761 | if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, |
739 | sizeof(o2hb_live_node_bitmap))) | 762 | sizeof(o2hb_live_node_bitmap))) |
740 | return; | 763 | goto unlock; |
741 | |||
742 | if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD) | ||
743 | return; | ||
744 | 764 | ||
745 | printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n", | 765 | printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n", |
746 | config_item_name(®->hr_item)); | 766 | config_item_name(®->hr_item), reg->hr_dev_name); |
747 | 767 | ||
748 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | 768 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); |
749 | 769 | ||
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, | |||
754 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | 774 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, |
755 | O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) | 775 | O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) |
756 | o2hb_region_unpin(NULL); | 776 | o2hb_region_unpin(NULL); |
777 | unlock: | ||
778 | spin_unlock(&o2hb_live_lock); | ||
757 | } | 779 | } |
758 | 780 | ||
759 | static int o2hb_check_slot(struct o2hb_region *reg, | 781 | static int o2hb_check_slot(struct o2hb_region *reg, |
@@ -925,8 +947,6 @@ fire_callbacks: | |||
925 | slot->ds_equal_samples = 0; | 947 | slot->ds_equal_samples = 0; |
926 | } | 948 | } |
927 | out: | 949 | out: |
928 | o2hb_set_quorum_device(reg, slot); | ||
929 | |||
930 | spin_unlock(&o2hb_live_lock); | 950 | spin_unlock(&o2hb_live_lock); |
931 | 951 | ||
932 | o2hb_run_event_list(&event); | 952 | o2hb_run_event_list(&event); |
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes, | |||
957 | 977 | ||
958 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | 978 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) |
959 | { | 979 | { |
960 | int i, ret, highest_node, change = 0; | 980 | int i, ret, highest_node; |
981 | int membership_change = 0, own_slot_ok = 0; | ||
961 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 982 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
962 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 983 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
963 | struct o2hb_bio_wait_ctxt write_wc; | 984 | struct o2hb_bio_wait_ctxt write_wc; |
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
966 | sizeof(configured_nodes)); | 987 | sizeof(configured_nodes)); |
967 | if (ret) { | 988 | if (ret) { |
968 | mlog_errno(ret); | 989 | mlog_errno(ret); |
969 | return ret; | 990 | goto bail; |
970 | } | 991 | } |
971 | 992 | ||
972 | /* | 993 | /* |
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
982 | 1003 | ||
983 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); | 1004 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); |
984 | if (highest_node >= O2NM_MAX_NODES) { | 1005 | if (highest_node >= O2NM_MAX_NODES) { |
985 | mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n"); | 1006 | mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); |
986 | return -EINVAL; | 1007 | ret = -EINVAL; |
1008 | goto bail; | ||
987 | } | 1009 | } |
988 | 1010 | ||
989 | /* No sense in reading the slots of nodes that don't exist | 1011 | /* No sense in reading the slots of nodes that don't exist |
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
993 | ret = o2hb_read_slots(reg, highest_node + 1); | 1015 | ret = o2hb_read_slots(reg, highest_node + 1); |
994 | if (ret < 0) { | 1016 | if (ret < 0) { |
995 | mlog_errno(ret); | 1017 | mlog_errno(ret); |
996 | return ret; | 1018 | goto bail; |
997 | } | 1019 | } |
998 | 1020 | ||
999 | /* With an up to date view of the slots, we can check that no | 1021 | /* With an up to date view of the slots, we can check that no |
1000 | * other node has been improperly configured to heartbeat in | 1022 | * other node has been improperly configured to heartbeat in |
1001 | * our slot. */ | 1023 | * our slot. */ |
1002 | o2hb_check_last_timestamp(reg); | 1024 | own_slot_ok = o2hb_check_own_slot(reg); |
1003 | 1025 | ||
1004 | /* fill in the proper info for our next heartbeat */ | 1026 | /* fill in the proper info for our next heartbeat */ |
1005 | o2hb_prepare_block(reg, reg->hr_generation); | 1027 | o2hb_prepare_block(reg, reg->hr_generation); |
1006 | 1028 | ||
1007 | /* And fire off the write. Note that we don't wait on this I/O | ||
1008 | * until later. */ | ||
1009 | ret = o2hb_issue_node_write(reg, &write_wc); | 1029 | ret = o2hb_issue_node_write(reg, &write_wc); |
1010 | if (ret < 0) { | 1030 | if (ret < 0) { |
1011 | mlog_errno(ret); | 1031 | mlog_errno(ret); |
1012 | return ret; | 1032 | goto bail; |
1013 | } | 1033 | } |
1014 | 1034 | ||
1015 | i = -1; | 1035 | i = -1; |
1016 | while((i = find_next_bit(configured_nodes, | 1036 | while((i = find_next_bit(configured_nodes, |
1017 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | 1037 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { |
1018 | change |= o2hb_check_slot(reg, ®->hr_slots[i]); | 1038 | membership_change |= o2hb_check_slot(reg, ®->hr_slots[i]); |
1019 | } | 1039 | } |
1020 | 1040 | ||
1021 | /* | 1041 | /* |
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
1030 | * disk */ | 1050 | * disk */ |
1031 | mlog(ML_ERROR, "Write error %d on device \"%s\"\n", | 1051 | mlog(ML_ERROR, "Write error %d on device \"%s\"\n", |
1032 | write_wc.wc_error, reg->hr_dev_name); | 1052 | write_wc.wc_error, reg->hr_dev_name); |
1033 | return write_wc.wc_error; | 1053 | ret = write_wc.wc_error; |
1054 | goto bail; | ||
1034 | } | 1055 | } |
1035 | 1056 | ||
1036 | o2hb_arm_write_timeout(reg); | 1057 | /* Skip disarming the timeout if own slot has stale/bad data */ |
1058 | if (own_slot_ok) { | ||
1059 | o2hb_set_quorum_device(reg); | ||
1060 | o2hb_arm_write_timeout(reg); | ||
1061 | } | ||
1037 | 1062 | ||
1063 | bail: | ||
1038 | /* let the person who launched us know when things are steady */ | 1064 | /* let the person who launched us know when things are steady */ |
1039 | if (!change && (atomic_read(®->hr_steady_iterations) != 0)) { | 1065 | if (atomic_read(®->hr_steady_iterations) != 0) { |
1040 | if (atomic_dec_and_test(®->hr_steady_iterations)) | 1066 | if (!ret && own_slot_ok && !membership_change) { |
1067 | if (atomic_dec_and_test(®->hr_steady_iterations)) | ||
1068 | wake_up(&o2hb_steady_queue); | ||
1069 | } | ||
1070 | } | ||
1071 | |||
1072 | if (atomic_read(®->hr_steady_iterations) != 0) { | ||
1073 | if (atomic_dec_and_test(®->hr_unsteady_iterations)) { | ||
1074 | printk(KERN_NOTICE "o2hb: Unable to stabilize " | ||
1075 | "heartbeart on region %s (%s)\n", | ||
1076 | config_item_name(®->hr_item), | ||
1077 | reg->hr_dev_name); | ||
1078 | atomic_set(®->hr_steady_iterations, 0); | ||
1079 | reg->hr_aborted_start = 1; | ||
1041 | wake_up(&o2hb_steady_queue); | 1080 | wake_up(&o2hb_steady_queue); |
1081 | ret = -EIO; | ||
1082 | } | ||
1042 | } | 1083 | } |
1043 | 1084 | ||
1044 | return 0; | 1085 | return ret; |
1045 | } | 1086 | } |
1046 | 1087 | ||
1047 | /* Subtract b from a, storing the result in a. a *must* have a larger | 1088 | /* Subtract b from a, storing the result in a. a *must* have a larger |
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data) | |||
1095 | /* Pin node */ | 1136 | /* Pin node */ |
1096 | o2nm_depend_this_node(); | 1137 | o2nm_depend_this_node(); |
1097 | 1138 | ||
1098 | while (!kthread_should_stop() && !reg->hr_unclean_stop) { | 1139 | while (!kthread_should_stop() && |
1140 | !reg->hr_unclean_stop && !reg->hr_aborted_start) { | ||
1099 | /* We track the time spent inside | 1141 | /* We track the time spent inside |
1100 | * o2hb_do_disk_heartbeat so that we avoid more than | 1142 | * o2hb_do_disk_heartbeat so that we avoid more than |
1101 | * hr_timeout_ms between disk writes. On busy systems | 1143 | * hr_timeout_ms between disk writes. On busy systems |
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data) | |||
1103 | * likely to time itself out. */ | 1145 | * likely to time itself out. */ |
1104 | do_gettimeofday(&before_hb); | 1146 | do_gettimeofday(&before_hb); |
1105 | 1147 | ||
1106 | i = 0; | 1148 | ret = o2hb_do_disk_heartbeat(reg); |
1107 | do { | ||
1108 | ret = o2hb_do_disk_heartbeat(reg); | ||
1109 | } while (ret && ++i < 2); | ||
1110 | 1149 | ||
1111 | do_gettimeofday(&after_hb); | 1150 | do_gettimeofday(&after_hb); |
1112 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); | 1151 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); |
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data) | |||
1117 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, | 1156 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, |
1118 | elapsed_msec); | 1157 | elapsed_msec); |
1119 | 1158 | ||
1120 | if (elapsed_msec < reg->hr_timeout_ms) { | 1159 | if (!kthread_should_stop() && |
1160 | elapsed_msec < reg->hr_timeout_ms) { | ||
1121 | /* the kthread api has blocked signals for us so no | 1161 | /* the kthread api has blocked signals for us so no |
1122 | * need to record the return value. */ | 1162 | * need to record the return value. */ |
1123 | msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); | 1163 | msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); |
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data) | |||
1134 | * to timeout on this region when we could just as easily | 1174 | * to timeout on this region when we could just as easily |
1135 | * write a clear generation - thus indicating to them that | 1175 | * write a clear generation - thus indicating to them that |
1136 | * this node has left this region. | 1176 | * this node has left this region. |
1137 | * | 1177 | */ |
1138 | * XXX: Should we skip this on unclean_stop? */ | 1178 | if (!reg->hr_unclean_stop && !reg->hr_aborted_start) { |
1139 | o2hb_prepare_block(reg, 0); | 1179 | o2hb_prepare_block(reg, 0); |
1140 | ret = o2hb_issue_node_write(reg, &write_wc); | 1180 | ret = o2hb_issue_node_write(reg, &write_wc); |
1141 | if (ret == 0) { | 1181 | if (ret == 0) |
1142 | o2hb_wait_on_io(reg, &write_wc); | 1182 | o2hb_wait_on_io(reg, &write_wc); |
1143 | } else { | 1183 | else |
1144 | mlog_errno(ret); | 1184 | mlog_errno(ret); |
1145 | } | 1185 | } |
1146 | 1186 | ||
1147 | /* Unpin node */ | 1187 | /* Unpin node */ |
1148 | o2nm_undepend_this_node(); | 1188 | o2nm_undepend_this_node(); |
1149 | 1189 | ||
1150 | mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); | 1190 | mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n"); |
1151 | 1191 | ||
1152 | return 0; | 1192 | return 0; |
1153 | } | 1193 | } |
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) | |||
1158 | struct o2hb_debug_buf *db = inode->i_private; | 1198 | struct o2hb_debug_buf *db = inode->i_private; |
1159 | struct o2hb_region *reg; | 1199 | struct o2hb_region *reg; |
1160 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 1200 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
1201 | unsigned long lts; | ||
1161 | char *buf = NULL; | 1202 | char *buf = NULL; |
1162 | int i = -1; | 1203 | int i = -1; |
1163 | int out = 0; | 1204 | int out = 0; |
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) | |||
1194 | 1235 | ||
1195 | case O2HB_DB_TYPE_REGION_ELAPSED_TIME: | 1236 | case O2HB_DB_TYPE_REGION_ELAPSED_TIME: |
1196 | reg = (struct o2hb_region *)db->db_data; | 1237 | reg = (struct o2hb_region *)db->db_data; |
1197 | out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", | 1238 | lts = reg->hr_last_timeout_start; |
1198 | jiffies_to_msecs(jiffies - | 1239 | /* If 0, it has never been set before */ |
1199 | reg->hr_last_timeout_start)); | 1240 | if (lts) |
1241 | lts = jiffies_to_msecs(jiffies - lts); | ||
1242 | out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts); | ||
1200 | goto done; | 1243 | goto done; |
1201 | 1244 | ||
1202 | case O2HB_DB_TYPE_REGION_PINNED: | 1245 | case O2HB_DB_TYPE_REGION_PINNED: |
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item) | |||
1426 | struct page *page; | 1469 | struct page *page; |
1427 | struct o2hb_region *reg = to_o2hb_region(item); | 1470 | struct o2hb_region *reg = to_o2hb_region(item); |
1428 | 1471 | ||
1472 | mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name); | ||
1473 | |||
1429 | if (reg->hr_tmp_block) | 1474 | if (reg->hr_tmp_block) |
1430 | kfree(reg->hr_tmp_block); | 1475 | kfree(reg->hr_tmp_block); |
1431 | 1476 | ||
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1792 | live_threshold <<= 1; | 1837 | live_threshold <<= 1; |
1793 | spin_unlock(&o2hb_live_lock); | 1838 | spin_unlock(&o2hb_live_lock); |
1794 | } | 1839 | } |
1795 | atomic_set(®->hr_steady_iterations, live_threshold + 1); | 1840 | ++live_threshold; |
1841 | atomic_set(®->hr_steady_iterations, live_threshold); | ||
1842 | /* unsteady_iterations is double the steady_iterations */ | ||
1843 | atomic_set(®->hr_unsteady_iterations, (live_threshold << 1)); | ||
1796 | 1844 | ||
1797 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", | 1845 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", |
1798 | reg->hr_item.ci_name); | 1846 | reg->hr_item.ci_name); |
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1809 | ret = wait_event_interruptible(o2hb_steady_queue, | 1857 | ret = wait_event_interruptible(o2hb_steady_queue, |
1810 | atomic_read(®->hr_steady_iterations) == 0); | 1858 | atomic_read(®->hr_steady_iterations) == 0); |
1811 | if (ret) { | 1859 | if (ret) { |
1812 | /* We got interrupted (hello ptrace!). Clean up */ | 1860 | atomic_set(®->hr_steady_iterations, 0); |
1813 | spin_lock(&o2hb_live_lock); | 1861 | reg->hr_aborted_start = 1; |
1814 | hb_task = reg->hr_task; | 1862 | } |
1815 | reg->hr_task = NULL; | ||
1816 | spin_unlock(&o2hb_live_lock); | ||
1817 | 1863 | ||
1818 | if (hb_task) | 1864 | if (reg->hr_aborted_start) { |
1819 | kthread_stop(hb_task); | 1865 | ret = -EIO; |
1820 | goto out; | 1866 | goto out; |
1821 | } | 1867 | } |
1822 | 1868 | ||
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1833 | ret = -EIO; | 1879 | ret = -EIO; |
1834 | 1880 | ||
1835 | if (hb_task && o2hb_global_heartbeat_active()) | 1881 | if (hb_task && o2hb_global_heartbeat_active()) |
1836 | printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n", | 1882 | printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n", |
1837 | config_item_name(®->hr_item)); | 1883 | config_item_name(®->hr_item), reg->hr_dev_name); |
1838 | 1884 | ||
1839 | out: | 1885 | out: |
1840 | if (filp) | 1886 | if (filp) |
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2092 | 2138 | ||
2093 | /* stop the thread when the user removes the region dir */ | 2139 | /* stop the thread when the user removes the region dir */ |
2094 | spin_lock(&o2hb_live_lock); | 2140 | spin_lock(&o2hb_live_lock); |
2095 | if (o2hb_global_heartbeat_active()) { | ||
2096 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | ||
2097 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | ||
2098 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
2099 | quorum_region = 1; | ||
2100 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | ||
2101 | } | ||
2102 | hb_task = reg->hr_task; | 2141 | hb_task = reg->hr_task; |
2103 | reg->hr_task = NULL; | 2142 | reg->hr_task = NULL; |
2104 | reg->hr_item_dropped = 1; | 2143 | reg->hr_item_dropped = 1; |
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2107 | if (hb_task) | 2146 | if (hb_task) |
2108 | kthread_stop(hb_task); | 2147 | kthread_stop(hb_task); |
2109 | 2148 | ||
2149 | if (o2hb_global_heartbeat_active()) { | ||
2150 | spin_lock(&o2hb_live_lock); | ||
2151 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | ||
2152 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | ||
2153 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
2154 | quorum_region = 1; | ||
2155 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | ||
2156 | spin_unlock(&o2hb_live_lock); | ||
2157 | printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n", | ||
2158 | ((atomic_read(®->hr_steady_iterations) == 0) ? | ||
2159 | "stopped" : "start aborted"), config_item_name(item), | ||
2160 | reg->hr_dev_name); | ||
2161 | } | ||
2162 | |||
2110 | /* | 2163 | /* |
2111 | * If we're racing a dev_write(), we need to wake them. They will | 2164 | * If we're racing a dev_write(), we need to wake them. They will |
2112 | * check reg->hr_task | 2165 | * check reg->hr_task |
2113 | */ | 2166 | */ |
2114 | if (atomic_read(®->hr_steady_iterations) != 0) { | 2167 | if (atomic_read(®->hr_steady_iterations) != 0) { |
2168 | reg->hr_aborted_start = 1; | ||
2115 | atomic_set(®->hr_steady_iterations, 0); | 2169 | atomic_set(®->hr_steady_iterations, 0); |
2116 | wake_up(&o2hb_steady_queue); | 2170 | wake_up(&o2hb_steady_queue); |
2117 | } | 2171 | } |
2118 | 2172 | ||
2119 | if (o2hb_global_heartbeat_active()) | ||
2120 | printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", | ||
2121 | config_item_name(®->hr_item)); | ||
2122 | |||
2123 | config_item_put(item); | 2173 | config_item_put(item); |
2124 | 2174 | ||
2125 | if (!o2hb_global_heartbeat_active() || !quorum_region) | 2175 | if (!o2hb_global_heartbeat_active() || !quorum_region) |
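The heartbeat.c hunks above replace the old retry loop with an explicit budget: hr_steady_iterations counts down only on clean passes (no error, own slot ok, no membership change), while the new hr_unsteady_iterations caps how many passes a start may take before it is aborted with -EIO. A minimal userspace sketch of that accounting follows; it is plain C with illustrative field and function names, not the kernel code.

#include <stdio.h>

struct hb_region {
	int steady_iterations;    /* must reach 0 for a successful start */
	int unsteady_iterations;  /* hard cap, set to twice the steady budget */
	int aborted_start;
};

/* One simulated heartbeat pass; returns 0, or -1 for an aborted start. */
static int hb_iteration(struct hb_region *reg, int own_slot_ok,
			int membership_change)
{
	if (reg->steady_iterations == 0)
		return 0;			/* already steady */

	if (own_slot_ok && !membership_change)
		--reg->steady_iterations;	/* clean pass: making progress */

	if (reg->steady_iterations != 0 &&
	    --reg->unsteady_iterations == 0) {
		reg->steady_iterations = 0;	/* give up on this start */
		reg->aborted_start = 1;
		return -1;			/* stands in for -EIO */
	}
	return 0;
}

int main(void)
{
	struct hb_region reg = { .steady_iterations = 3,
				 .unsteady_iterations = 6 };
	int i;

	/* A region that never sees its own writes never stabilizes. */
	for (i = 0; i < 10 && !reg.aborted_start; i++)
		hb_iteration(&reg, 0, 0);

	printf("aborted_start=%d\n", reg.aborted_start);	/* prints 1 */
	return 0;
}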
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 3a5835904b3d..dc45deb19e68 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #define SC_DEBUG_NAME "sock_containers" | 47 | #define SC_DEBUG_NAME "sock_containers" |
48 | #define NST_DEBUG_NAME "send_tracking" | 48 | #define NST_DEBUG_NAME "send_tracking" |
49 | #define STATS_DEBUG_NAME "stats" | 49 | #define STATS_DEBUG_NAME "stats" |
50 | #define NODES_DEBUG_NAME "connected_nodes" | ||
50 | 51 | ||
51 | #define SHOW_SOCK_CONTAINERS 0 | 52 | #define SHOW_SOCK_CONTAINERS 0 |
52 | #define SHOW_SOCK_STATS 1 | 53 | #define SHOW_SOCK_STATS 1 |
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry; | |||
55 | static struct dentry *sc_dentry; | 56 | static struct dentry *sc_dentry; |
56 | static struct dentry *nst_dentry; | 57 | static struct dentry *nst_dentry; |
57 | static struct dentry *stats_dentry; | 58 | static struct dentry *stats_dentry; |
59 | static struct dentry *nodes_dentry; | ||
58 | 60 | ||
59 | static DEFINE_SPINLOCK(o2net_debug_lock); | 61 | static DEFINE_SPINLOCK(o2net_debug_lock); |
60 | 62 | ||
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = { | |||
491 | .release = sc_fop_release, | 493 | .release = sc_fop_release, |
492 | }; | 494 | }; |
493 | 495 | ||
494 | int o2net_debugfs_init(void) | 496 | static int o2net_fill_bitmap(char *buf, int len) |
495 | { | 497 | { |
496 | o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); | 498 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
497 | if (!o2net_dentry) { | 499 | int i = -1, out = 0; |
498 | mlog_errno(-ENOMEM); | ||
499 | goto bail; | ||
500 | } | ||
501 | 500 | ||
502 | nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR, | 501 | o2net_fill_node_map(map, sizeof(map)); |
503 | o2net_dentry, NULL, | ||
504 | &nst_seq_fops); | ||
505 | if (!nst_dentry) { | ||
506 | mlog_errno(-ENOMEM); | ||
507 | goto bail; | ||
508 | } | ||
509 | 502 | ||
510 | sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR, | 503 | while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) |
511 | o2net_dentry, NULL, | 504 | out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); |
512 | &sc_seq_fops); | 505 | out += snprintf(buf + out, PAGE_SIZE - out, "\n"); |
513 | if (!sc_dentry) { | ||
514 | mlog_errno(-ENOMEM); | ||
515 | goto bail; | ||
516 | } | ||
517 | 506 | ||
518 | stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, | 507 | return out; |
519 | o2net_dentry, NULL, | 508 | } |
520 | &stats_seq_fops); | 509 | |
521 | if (!stats_dentry) { | 510 | static int nodes_fop_open(struct inode *inode, struct file *file) |
522 | mlog_errno(-ENOMEM); | 511 | { |
523 | goto bail; | 512 | char *buf; |
524 | } | 513 | |
514 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
515 | if (!buf) | ||
516 | return -ENOMEM; | ||
517 | |||
518 | i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE)); | ||
519 | |||
520 | file->private_data = buf; | ||
525 | 521 | ||
526 | return 0; | 522 | return 0; |
527 | bail: | ||
528 | debugfs_remove(stats_dentry); | ||
529 | debugfs_remove(sc_dentry); | ||
530 | debugfs_remove(nst_dentry); | ||
531 | debugfs_remove(o2net_dentry); | ||
532 | return -ENOMEM; | ||
533 | } | 523 | } |
534 | 524 | ||
525 | static int o2net_debug_release(struct inode *inode, struct file *file) | ||
526 | { | ||
527 | kfree(file->private_data); | ||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | static ssize_t o2net_debug_read(struct file *file, char __user *buf, | ||
532 | size_t nbytes, loff_t *ppos) | ||
533 | { | ||
534 | return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, | ||
535 | i_size_read(file->f_mapping->host)); | ||
536 | } | ||
537 | |||
538 | static const struct file_operations nodes_fops = { | ||
539 | .open = nodes_fop_open, | ||
540 | .release = o2net_debug_release, | ||
541 | .read = o2net_debug_read, | ||
542 | .llseek = generic_file_llseek, | ||
543 | }; | ||
544 | |||
535 | void o2net_debugfs_exit(void) | 545 | void o2net_debugfs_exit(void) |
536 | { | 546 | { |
547 | debugfs_remove(nodes_dentry); | ||
537 | debugfs_remove(stats_dentry); | 548 | debugfs_remove(stats_dentry); |
538 | debugfs_remove(sc_dentry); | 549 | debugfs_remove(sc_dentry); |
539 | debugfs_remove(nst_dentry); | 550 | debugfs_remove(nst_dentry); |
540 | debugfs_remove(o2net_dentry); | 551 | debugfs_remove(o2net_dentry); |
541 | } | 552 | } |
542 | 553 | ||
554 | int o2net_debugfs_init(void) | ||
555 | { | ||
556 | mode_t mode = S_IFREG|S_IRUSR; | ||
557 | |||
558 | o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); | ||
559 | if (o2net_dentry) | ||
560 | nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode, | ||
561 | o2net_dentry, NULL, &nst_seq_fops); | ||
562 | if (nst_dentry) | ||
563 | sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode, | ||
564 | o2net_dentry, NULL, &sc_seq_fops); | ||
565 | if (sc_dentry) | ||
566 | stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode, | ||
567 | o2net_dentry, NULL, &stats_seq_fops); | ||
568 | if (stats_dentry) | ||
569 | nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode, | ||
570 | o2net_dentry, NULL, &nodes_fops); | ||
571 | if (nodes_dentry) | ||
572 | return 0; | ||
573 | |||
574 | o2net_debugfs_exit(); | ||
575 | mlog_errno(-ENOMEM); | ||
576 | return -ENOMEM; | ||
577 | } | ||
578 | |||
543 | #endif /* CONFIG_DEBUG_FS */ | 579 | #endif /* CONFIG_DEBUG_FS */ |
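The netdebug.c hunk adds a read-only "connected_nodes" file alongside the existing o2net debugfs entries. A hedged userspace reader is sketched below; it assumes debugfs is mounted at /sys/kernel/debug and that the directory is named o2net (the value of O2NET_DEBUG_DIR is not shown in this hunk).

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/o2net/connected_nodes";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The file is a single line of space-separated node numbers. */
	if (fgets(line, sizeof(line), f))
		printf("connected o2net nodes: %s", line);
	fclose(f);
	return 0;
}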
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ad7d0c155de4..044e7b58d31c 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, | |||
546 | } | 546 | } |
547 | 547 | ||
548 | if (was_valid && !valid) { | 548 | if (was_valid && !valid) { |
549 | printk(KERN_NOTICE "o2net: no longer connected to " | 549 | printk(KERN_NOTICE "o2net: No longer connected to " |
550 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); | 550 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); |
551 | o2net_complete_nodes_nsw(nn); | 551 | o2net_complete_nodes_nsw(nn); |
552 | } | 552 | } |
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, | |||
556 | cancel_delayed_work(&nn->nn_connect_expired); | 556 | cancel_delayed_work(&nn->nn_connect_expired); |
557 | printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", | 557 | printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", |
558 | o2nm_this_node() > sc->sc_node->nd_num ? | 558 | o2nm_this_node() > sc->sc_node->nd_num ? |
559 | "connected to" : "accepted connection from", | 559 | "Connected to" : "Accepted connection from", |
560 | SC_NODEF_ARGS(sc)); | 560 | SC_NODEF_ARGS(sc)); |
561 | } | 561 | } |
562 | 562 | ||
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk) | |||
644 | o2net_sc_queue_work(sc, &sc->sc_connect_work); | 644 | o2net_sc_queue_work(sc, &sc->sc_connect_work); |
645 | break; | 645 | break; |
646 | default: | 646 | default: |
647 | printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT | 647 | printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT |
648 | " shutdown, state %d\n", | 648 | " shutdown, state %d\n", |
649 | SC_NODEF_ARGS(sc), sk->sk_state); | 649 | SC_NODEF_ARGS(sc), sk->sk_state); |
650 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); | 650 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); |
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn, | |||
1035 | return ret; | 1035 | return ret; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | /* Get a map of all nodes to which this node is currently connected */ | ||
1039 | void o2net_fill_node_map(unsigned long *map, unsigned bytes) | ||
1040 | { | ||
1041 | struct o2net_sock_container *sc; | ||
1042 | int node, ret; | ||
1043 | |||
1044 | BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))); | ||
1045 | |||
1046 | memset(map, 0, bytes); | ||
1047 | for (node = 0; node < O2NM_MAX_NODES; ++node) { | ||
1048 | o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret); | ||
1049 | if (!ret) { | ||
1050 | set_bit(node, map); | ||
1051 | sc_put(sc); | ||
1052 | } | ||
1053 | } | ||
1054 | } | ||
1055 | EXPORT_SYMBOL_GPL(o2net_fill_node_map); | ||
1056 | |||
1038 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, | 1057 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
1039 | size_t caller_veclen, u8 target_node, int *status) | 1058 | size_t caller_veclen, u8 target_node, int *status) |
1040 | { | 1059 | { |
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) | |||
1285 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 1304 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1286 | 1305 | ||
1287 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { | 1306 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { |
1288 | mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " | 1307 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net " |
1289 | "version %llu but %llu is required, disconnecting\n", | 1308 | "protocol version %llu but %llu is required. " |
1290 | SC_NODEF_ARGS(sc), | 1309 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1291 | (unsigned long long)be64_to_cpu(hand->protocol_version), | 1310 | (unsigned long long)be64_to_cpu(hand->protocol_version), |
1292 | O2NET_PROTOCOL_VERSION); | 1311 | O2NET_PROTOCOL_VERSION); |
1293 | 1312 | ||
1294 | /* don't bother reconnecting if it's the wrong version. */ | 1313 | /* don't bother reconnecting if it's the wrong version. */ |
1295 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1314 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) | |||
1303 | */ | 1322 | */ |
1304 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != | 1323 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != |
1305 | o2net_idle_timeout()) { | 1324 | o2net_idle_timeout()) { |
1306 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " | 1325 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network " |
1307 | "%u ms, but we use %u ms locally. disconnecting\n", | 1326 | "idle timeout of %u ms, but we use %u ms locally. " |
1308 | SC_NODEF_ARGS(sc), | 1327 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1309 | be32_to_cpu(hand->o2net_idle_timeout_ms), | 1328 | be32_to_cpu(hand->o2net_idle_timeout_ms), |
1310 | o2net_idle_timeout()); | 1329 | o2net_idle_timeout()); |
1311 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1330 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1312 | return -1; | 1331 | return -1; |
1313 | } | 1332 | } |
1314 | 1333 | ||
1315 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != | 1334 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != |
1316 | o2net_keepalive_delay()) { | 1335 | o2net_keepalive_delay()) { |
1317 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " | 1336 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive " |
1318 | "%u ms, but we use %u ms locally. disconnecting\n", | 1337 | "delay of %u ms, but we use %u ms locally. " |
1319 | SC_NODEF_ARGS(sc), | 1338 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1320 | be32_to_cpu(hand->o2net_keepalive_delay_ms), | 1339 | be32_to_cpu(hand->o2net_keepalive_delay_ms), |
1321 | o2net_keepalive_delay()); | 1340 | o2net_keepalive_delay()); |
1322 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1341 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1323 | return -1; | 1342 | return -1; |
1324 | } | 1343 | } |
1325 | 1344 | ||
1326 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != | 1345 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != |
1327 | O2HB_MAX_WRITE_TIMEOUT_MS) { | 1346 | O2HB_MAX_WRITE_TIMEOUT_MS) { |
1328 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " | 1347 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat " |
1329 | "%u ms, but we use %u ms locally. disconnecting\n", | 1348 | "timeout of %u ms, but we use %u ms locally. " |
1330 | SC_NODEF_ARGS(sc), | 1349 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1331 | be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), | 1350 | be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), |
1332 | O2HB_MAX_WRITE_TIMEOUT_MS); | 1351 | O2HB_MAX_WRITE_TIMEOUT_MS); |
1333 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1352 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1334 | return -1; | 1353 | return -1; |
1335 | } | 1354 | } |
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data) | |||
1540 | { | 1559 | { |
1541 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; | 1560 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; |
1542 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 1561 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1543 | |||
1544 | #ifdef CONFIG_DEBUG_FS | 1562 | #ifdef CONFIG_DEBUG_FS |
1545 | ktime_t now = ktime_get(); | 1563 | unsigned long msecs = ktime_to_ms(ktime_get()) - |
1564 | ktime_to_ms(sc->sc_tv_timer); | ||
1565 | #else | ||
1566 | unsigned long msecs = o2net_idle_timeout(); | ||
1546 | #endif | 1567 | #endif |
1547 | 1568 | ||
1548 | printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " | 1569 | printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been " |
1549 | "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), | 1570 | "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc), |
1550 | o2net_idle_timeout() / 1000, | 1571 | msecs / 1000, msecs % 1000); |
1551 | o2net_idle_timeout() % 1000); | ||
1552 | |||
1553 | #ifdef CONFIG_DEBUG_FS | ||
1554 | mlog(ML_NOTICE, "Here are some times that might help debug the " | ||
1555 | "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, " | ||
1556 | "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n", | ||
1557 | (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now), | ||
1558 | (long long)ktime_to_us(sc->sc_tv_data_ready), | ||
1559 | (long long)ktime_to_us(sc->sc_tv_advance_start), | ||
1560 | (long long)ktime_to_us(sc->sc_tv_advance_stop), | ||
1561 | sc->sc_msg_key, sc->sc_msg_type, | ||
1562 | (long long)ktime_to_us(sc->sc_tv_func_start), | ||
1563 | (long long)ktime_to_us(sc->sc_tv_func_stop)); | ||
1564 | #endif | ||
1565 | 1572 | ||
1566 | /* | 1573 | /* |
1567 | * Initialize the nn_timeout so that the next connection attempt | 1574 | * Initialize the nn_timeout so that the next connection attempt |
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work) | |||
1694 | 1701 | ||
1695 | out: | 1702 | out: |
1696 | if (ret) { | 1703 | if (ret) { |
1697 | mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " | 1704 | printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT |
1698 | "with errno %d\n", SC_NODEF_ARGS(sc), ret); | 1705 | " failed with errno %d\n", SC_NODEF_ARGS(sc), ret); |
1699 | /* 0 err so that another will be queued and attempted | 1706 | /* 0 err so that another will be queued and attempted |
1700 | * from set_nn_state */ | 1707 | * from set_nn_state */ |
1701 | if (sc) | 1708 | if (sc) |
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work) | |||
1718 | 1725 | ||
1719 | spin_lock(&nn->nn_lock); | 1726 | spin_lock(&nn->nn_lock); |
1720 | if (!nn->nn_sc_valid) { | 1727 | if (!nn->nn_sc_valid) { |
1721 | mlog(ML_ERROR, "no connection established with node %u after " | 1728 | printk(KERN_NOTICE "o2net: No connection established with " |
1722 | "%u.%u seconds, giving up and returning errors.\n", | 1729 | "node %u after %u.%u seconds, giving up.\n", |
1723 | o2net_num_from_nn(nn), | 1730 | o2net_num_from_nn(nn), |
1724 | o2net_idle_timeout() / 1000, | 1731 | o2net_idle_timeout() / 1000, |
1725 | o2net_idle_timeout() % 1000); | 1732 | o2net_idle_timeout() % 1000); |
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock) | |||
1862 | 1869 | ||
1863 | node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); | 1870 | node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); |
1864 | if (node == NULL) { | 1871 | if (node == NULL) { |
1865 | mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", | 1872 | printk(KERN_NOTICE "o2net: Attempt to connect from unknown " |
1866 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); | 1873 | "node at %pI4:%d\n", &sin.sin_addr.s_addr, |
1874 | ntohs(sin.sin_port)); | ||
1867 | ret = -EINVAL; | 1875 | ret = -EINVAL; |
1868 | goto out; | 1876 | goto out; |
1869 | } | 1877 | } |
1870 | 1878 | ||
1871 | if (o2nm_this_node() >= node->nd_num) { | 1879 | if (o2nm_this_node() >= node->nd_num) { |
1872 | local_node = o2nm_get_node_by_num(o2nm_this_node()); | 1880 | local_node = o2nm_get_node_by_num(o2nm_this_node()); |
1873 | mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' (" | 1881 | printk(KERN_NOTICE "o2net: Unexpected connect attempt seen " |
1874 | "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n", | 1882 | "at node '%s' (%u, %pI4:%d) from node '%s' (%u, " |
1875 | local_node->nd_name, local_node->nd_num, | 1883 | "%pI4:%d)\n", local_node->nd_name, local_node->nd_num, |
1876 | &(local_node->nd_ipv4_address), | 1884 | &(local_node->nd_ipv4_address), |
1877 | ntohs(local_node->nd_ipv4_port), | 1885 | ntohs(local_node->nd_ipv4_port), node->nd_name, |
1878 | node->nd_name, node->nd_num, &sin.sin_addr.s_addr, | 1886 | node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); |
1879 | ntohs(sin.sin_port)); | ||
1880 | ret = -EINVAL; | 1887 | ret = -EINVAL; |
1881 | goto out; | 1888 | goto out; |
1882 | } | 1889 | } |
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock) | |||
1901 | ret = 0; | 1908 | ret = 0; |
1902 | spin_unlock(&nn->nn_lock); | 1909 | spin_unlock(&nn->nn_lock); |
1903 | if (ret) { | 1910 | if (ret) { |
1904 | mlog(ML_NOTICE, "attempt to connect from node '%s' at " | 1911 | printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' " |
1905 | "%pI4:%d but it already has an open connection\n", | 1912 | "at %pI4:%d but it already has an open connection\n", |
1906 | node->nd_name, &sin.sin_addr.s_addr, | 1913 | node->nd_name, &sin.sin_addr.s_addr, |
1907 | ntohs(sin.sin_port)); | 1914 | ntohs(sin.sin_port)); |
1908 | goto out; | 1915 | goto out; |
1909 | } | 1916 | } |
1910 | 1917 | ||
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) | |||
1984 | 1991 | ||
1985 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | 1992 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); |
1986 | if (ret < 0) { | 1993 | if (ret < 0) { |
1987 | mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); | 1994 | printk(KERN_ERR "o2net: Error %d while creating socket\n", ret); |
1988 | goto out; | 1995 | goto out; |
1989 | } | 1996 | } |
1990 | 1997 | ||
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) | |||
2001 | sock->sk->sk_reuse = 1; | 2008 | sock->sk->sk_reuse = 1; |
2002 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | 2009 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
2003 | if (ret < 0) { | 2010 | if (ret < 0) { |
2004 | mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " | 2011 | printk(KERN_ERR "o2net: Error %d while binding socket at " |
2005 | "ret=%d\n", &addr, ntohs(port), ret); | 2012 | "%pI4:%u\n", ret, &addr, ntohs(port)); |
2006 | goto out; | 2013 | goto out; |
2007 | } | 2014 | } |
2008 | 2015 | ||
2009 | ret = sock->ops->listen(sock, 64); | 2016 | ret = sock->ops->listen(sock, 64); |
2010 | if (ret < 0) { | 2017 | if (ret < 0) |
2011 | mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", | 2018 | printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n", |
2012 | &addr, ntohs(port), ret); | 2019 | ret, &addr, ntohs(port)); |
2013 | } | ||
2014 | 2020 | ||
2015 | out: | 2021 | out: |
2016 | if (ret) { | 2022 | if (ret) { |
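The new o2net_fill_node_map() sets one bit per peer with an open connection, and o2net_fill_bitmap() in netdebug.c renders the set bits as a space-separated list. A self-contained sketch of that bitmap-to-text pattern is below; the 255-node limit mirrors O2NM_MAX_NODES only by assumption, and the bit helpers stand in for the kernel's set_bit/find_next_bit.

#include <stdio.h>
#include <string.h>

#define MAX_NODES	255
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define MAP_LONGS	((MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_node(unsigned long *map, int n)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_node(const unsigned long *map, int n)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long map[MAP_LONGS];
	char buf[4096];
	int i, out = 0;

	memset(map, 0, sizeof(map));
	set_node(map, 0);
	set_node(map, 3);
	set_node(map, 7);

	/* Render every set bit as a node number, then terminate the line. */
	for (i = 0; i < MAX_NODES; i++)
		if (test_node(map, i))
			out += snprintf(buf + out, sizeof(buf) - out, "%d ", i);
	snprintf(buf + out, sizeof(buf) - out, "\n");

	fputs(buf, stdout);	/* prints: 0 3 7 */
	return 0;
}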
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h index fd6179eb26d4..5bada2a69b50 100644 --- a/fs/ocfs2/cluster/tcp.h +++ b/fs/ocfs2/cluster/tcp.h | |||
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, | |||
106 | struct list_head *unreg_list); | 106 | struct list_head *unreg_list); |
107 | void o2net_unregister_handler_list(struct list_head *list); | 107 | void o2net_unregister_handler_list(struct list_head *list); |
108 | 108 | ||
109 | void o2net_fill_node_map(unsigned long *map, unsigned bytes); | ||
110 | |||
109 | struct o2nm_node; | 111 | struct o2nm_node; |
110 | int o2net_register_hb_callbacks(void); | 112 | int o2net_register_hb_callbacks(void); |
111 | void o2net_unregister_hb_callbacks(void); | 113 | void o2net_unregister_hb_callbacks(void); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index e2878b5895fb..8fe4e2892ab9 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, | |||
1184 | if (pde) | 1184 | if (pde) |
1185 | le16_add_cpu(&pde->rec_len, | 1185 | le16_add_cpu(&pde->rec_len, |
1186 | le16_to_cpu(de->rec_len)); | 1186 | le16_to_cpu(de->rec_len)); |
1187 | else | 1187 | de->inode = 0; |
1188 | de->inode = 0; | ||
1189 | dir->i_version++; | 1188 | dir->i_version++; |
1190 | ocfs2_journal_dirty(handle, bh); | 1189 | ocfs2_journal_dirty(handle, bh); |
1191 | goto bail; | 1190 | goto bail; |
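The dir.c hunk makes __ocfs2_delete_entry() clear de->inode unconditionally rather than only when there is no previous entry, so a deleted record never retains a stale inode number even after its length has been folded into its predecessor. A simplified userspace model of that behaviour follows; the structures are stand-ins, not the on-disk ocfs2 format.

#include <stdio.h>
#include <stdint.h>

struct dirent_sketch {
	uint64_t inode;		/* 0 means "unused" */
	uint16_t rec_len;	/* bytes covered by this record */
};

static void delete_entry(struct dirent_sketch *prev, struct dirent_sketch *de)
{
	if (prev)
		prev->rec_len += de->rec_len;	/* fold space into the previous record */
	de->inode = 0;				/* always clear, even with no previous entry */
}

int main(void)
{
	struct dirent_sketch a = { .inode = 11, .rec_len = 16 };
	struct dirent_sketch b = { .inode = 22, .rec_len = 24 };

	delete_entry(&a, &b);
	printf("a.rec_len=%u b.inode=%llu\n",
	       a.rec_len, (unsigned long long)b.inode);	/* 40 and 0 */
	return 0;
}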
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index d602abb51b61..a5952ceecba5 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | |||
859 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); | 859 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); |
860 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | 860 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); |
861 | int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); | 861 | int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); |
862 | int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); | 862 | void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); |
863 | int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); | 863 | void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); |
864 | 864 | ||
865 | void dlm_put(struct dlm_ctxt *dlm); | 865 | void dlm_put(struct dlm_ctxt *dlm); |
866 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); | 866 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); |
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res) | |||
877 | kref_get(&res->refs); | 877 | kref_get(&res->refs); |
878 | } | 878 | } |
879 | void dlm_lockres_put(struct dlm_lock_resource *res); | 879 | void dlm_lockres_put(struct dlm_lock_resource *res); |
880 | void __dlm_unhash_lockres(struct dlm_lock_resource *res); | 880 | void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); |
881 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | 881 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); |
882 | struct dlm_lock_resource *res); | ||
883 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | 882 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, |
884 | const char *name, | 883 | const char *name, |
885 | unsigned int len, | 884 | unsigned int len, |
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | |||
902 | const char *name, | 901 | const char *name, |
903 | unsigned int namelen); | 902 | unsigned int namelen); |
904 | 903 | ||
905 | #define dlm_lockres_set_refmap_bit(bit,res) \ | 904 | void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, |
906 | __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) | 905 | struct dlm_lock_resource *res, int bit); |
907 | #define dlm_lockres_clear_refmap_bit(bit,res) \ | 906 | void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, |
908 | __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) | 907 | struct dlm_lock_resource *res, int bit); |
909 | 908 | ||
910 | static inline void __dlm_lockres_set_refmap_bit(int bit, | 909 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
911 | struct dlm_lock_resource *res, | 910 | struct dlm_lock_resource *res); |
912 | const char *file, | 911 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, |
913 | int line) | 912 | struct dlm_lock_resource *res); |
914 | { | ||
915 | //printk("%s:%d:%.*s: setting bit %d\n", file, line, | ||
916 | // res->lockname.len, res->lockname.name, bit); | ||
917 | set_bit(bit, res->refmap); | ||
918 | } | ||
919 | |||
920 | static inline void __dlm_lockres_clear_refmap_bit(int bit, | ||
921 | struct dlm_lock_resource *res, | ||
922 | const char *file, | ||
923 | int line) | ||
924 | { | ||
925 | //printk("%s:%d:%.*s: clearing bit %d\n", file, line, | ||
926 | // res->lockname.len, res->lockname.name, bit); | ||
927 | clear_bit(bit, res->refmap); | ||
928 | } | ||
929 | |||
930 | void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | ||
931 | struct dlm_lock_resource *res, | ||
932 | const char *file, | ||
933 | int line); | ||
934 | void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
935 | struct dlm_lock_resource *res, | ||
936 | int new_lockres, | ||
937 | const char *file, | ||
938 | int line); | ||
939 | #define dlm_lockres_drop_inflight_ref(d,r) \ | ||
940 | __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__) | ||
941 | #define dlm_lockres_grab_inflight_ref(d,r) \ | ||
942 | __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__) | ||
943 | #define dlm_lockres_grab_inflight_ref_new(d,r) \ | ||
944 | __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__) | ||
945 | 913 | ||
946 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 914 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
947 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 915 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
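The dlmcommon.h hunk retires the refmap and inflight-ref macros that threaded __FILE__/__LINE__ through every caller; the dlmmaster.c side further below identifies the caller with __builtin_return_address(0) instead. A GCC/Clang-only sketch of that convention is shown here, printing the raw address with %p where the kernel would resolve it to a symbol with %ps.

#include <stdio.h>

static void __attribute__((noinline))
set_refmap_bit(unsigned long *refmap, int bit)
{
	/* Log which caller set the bit, without any macro plumbing. */
	printf("set bit %d, caller %p\n", bit, __builtin_return_address(0));
	*refmap |= 1UL << bit;
}

int main(void)
{
	unsigned long refmap = 0;

	set_refmap_bit(&refmap, 3);
	printf("refmap=%#lx\n", refmap);	/* 0x8 */
	return 0;
}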
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 6ed6b95dcf93..92f2ead0fab6 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing, | |||
157 | 157 | ||
158 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); | 158 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); |
159 | 159 | ||
160 | void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) | 160 | void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
161 | { | 161 | { |
162 | if (!hlist_unhashed(&lockres->hash_node)) { | 162 | if (hlist_unhashed(&res->hash_node)) |
163 | hlist_del_init(&lockres->hash_node); | 163 | return; |
164 | dlm_lockres_put(lockres); | 164 | |
165 | } | 165 | mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len, |
166 | res->lockname.name); | ||
167 | hlist_del_init(&res->hash_node); | ||
168 | dlm_lockres_put(res); | ||
166 | } | 169 | } |
167 | 170 | ||
168 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | 171 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
169 | struct dlm_lock_resource *res) | ||
170 | { | 172 | { |
171 | struct hlist_head *bucket; | 173 | struct hlist_head *bucket; |
172 | struct qstr *q; | 174 | struct qstr *q; |
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm, | |||
180 | dlm_lockres_get(res); | 182 | dlm_lockres_get(res); |
181 | 183 | ||
182 | hlist_add_head(&res->hash_node, bucket); | 184 | hlist_add_head(&res->hash_node, bucket); |
185 | |||
186 | mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len, | ||
187 | res->lockname.name); | ||
183 | } | 188 | } |
184 | 189 | ||
185 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | 190 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, |
@@ -539,17 +544,17 @@ again: | |||
539 | 544 | ||
540 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) | 545 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) |
541 | { | 546 | { |
542 | int node = -1; | 547 | int node = -1, num = 0; |
543 | 548 | ||
544 | assert_spin_locked(&dlm->spinlock); | 549 | assert_spin_locked(&dlm->spinlock); |
545 | 550 | ||
546 | printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name); | 551 | printk("( "); |
547 | |||
548 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | 552 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, |
549 | node + 1)) < O2NM_MAX_NODES) { | 553 | node + 1)) < O2NM_MAX_NODES) { |
550 | printk("%d ", node); | 554 | printk("%d ", node); |
555 | ++num; | ||
551 | } | 556 | } |
552 | printk("\n"); | 557 | printk(") %u nodes\n", num); |
553 | } | 558 | } |
554 | 559 | ||
555 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | 560 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, |
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | |||
566 | 571 | ||
567 | node = exit_msg->node_idx; | 572 | node = exit_msg->node_idx; |
568 | 573 | ||
569 | printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name); | ||
570 | |||
571 | spin_lock(&dlm->spinlock); | 574 | spin_lock(&dlm->spinlock); |
572 | clear_bit(node, dlm->domain_map); | 575 | clear_bit(node, dlm->domain_map); |
573 | clear_bit(node, dlm->exit_domain_map); | 576 | clear_bit(node, dlm->exit_domain_map); |
577 | printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name); | ||
574 | __dlm_print_nodes(dlm); | 578 | __dlm_print_nodes(dlm); |
575 | 579 | ||
576 | /* notify anything attached to the heartbeat events */ | 580 | /* notify anything attached to the heartbeat events */ |
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
755 | 759 | ||
756 | dlm_mark_domain_leaving(dlm); | 760 | dlm_mark_domain_leaving(dlm); |
757 | dlm_leave_domain(dlm); | 761 | dlm_leave_domain(dlm); |
762 | printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name); | ||
758 | dlm_force_free_mles(dlm); | 763 | dlm_force_free_mles(dlm); |
759 | dlm_complete_dlm_shutdown(dlm); | 764 | dlm_complete_dlm_shutdown(dlm); |
760 | } | 765 | } |
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, | |||
970 | clear_bit(assert->node_idx, dlm->exit_domain_map); | 975 | clear_bit(assert->node_idx, dlm->exit_domain_map); |
971 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 976 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); |
972 | 977 | ||
973 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", | 978 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", |
974 | assert->node_idx, dlm->name); | 979 | assert->node_idx, dlm->name); |
975 | __dlm_print_nodes(dlm); | 980 | __dlm_print_nodes(dlm); |
976 | 981 | ||
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1701 | bail: | 1706 | bail: |
1702 | spin_lock(&dlm->spinlock); | 1707 | spin_lock(&dlm->spinlock); |
1703 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 1708 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); |
1704 | if (!status) | 1709 | if (!status) { |
1710 | printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name); | ||
1705 | __dlm_print_nodes(dlm); | 1711 | __dlm_print_nodes(dlm); |
1712 | } | ||
1706 | spin_unlock(&dlm->spinlock); | 1713 | spin_unlock(&dlm->spinlock); |
1707 | 1714 | ||
1708 | if (ctxt) { | 1715 | if (ctxt) { |
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain, | |||
2131 | goto leave; | 2138 | goto leave; |
2132 | } | 2139 | } |
2133 | 2140 | ||
2134 | if (!o2hb_check_local_node_heartbeating()) { | ||
2135 | mlog(ML_ERROR, "the local node has not been configured, or is " | ||
2136 | "not heartbeating\n"); | ||
2137 | ret = -EPROTO; | ||
2138 | goto leave; | ||
2139 | } | ||
2140 | |||
2141 | mlog(0, "register called for domain \"%s\"\n", domain); | 2141 | mlog(0, "register called for domain \"%s\"\n", domain); |
2142 | 2142 | ||
2143 | retry: | 2143 | retry: |
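With the dlmdomain.c changes the join/leave printk now ends without a newline and __dlm_print_nodes() finishes the same line with the node list and a count. A tiny userspace mock of the resulting output format follows; the domain name and node numbers are made up, and the kernel side relies on printk line continuation rather than separate calls.

#include <stdio.h>

static void print_nodes(const int *nodes, int n)
{
	int i;

	printf("( ");
	for (i = 0; i < n; i++)
		printf("%d ", nodes[i]);
	printf(") %d nodes\n", n);
}

int main(void)
{
	int domain_map[] = { 0, 1, 2 };

	/* Produces: o2dlm: Node 2 joins domain webdlm ( 0 1 2 ) 3 nodes */
	printf("o2dlm: Node %u joins domain %s ", 2, "webdlm");
	print_nodes(domain_map, 3);
	return 0;
}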
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 8d39e0fd66f7..975810b98492 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | |||
183 | kick_thread = 1; | 183 | kick_thread = 1; |
184 | } | 184 | } |
185 | } | 185 | } |
186 | /* reduce the inflight count, this may result in the lockres | ||
187 | * being purged below during calc_usage */ | ||
188 | if (lock->ml.node == dlm->node_num) | ||
189 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
190 | 186 | ||
191 | spin_unlock(&res->spinlock); | 187 | spin_unlock(&res->spinlock); |
192 | wake_up(&res->wq); | 188 | wake_up(&res->wq); |
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, | |||
231 | lock->ml.type, res->lockname.len, | 227 | lock->ml.type, res->lockname.len, |
232 | res->lockname.name, flags); | 228 | res->lockname.name, flags); |
233 | 229 | ||
230 | /* | ||
231 | * Wait if resource is getting recovered, remastered, etc. | ||
232 | * If the resource was remastered and new owner is self, then exit. | ||
233 | */ | ||
234 | spin_lock(&res->spinlock); | 234 | spin_lock(&res->spinlock); |
235 | |||
236 | /* will exit this call with spinlock held */ | ||
237 | __dlm_wait_on_lockres(res); | 235 | __dlm_wait_on_lockres(res); |
236 | if (res->owner == dlm->node_num) { | ||
237 | spin_unlock(&res->spinlock); | ||
238 | return DLM_RECOVERING; | ||
239 | } | ||
238 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | 240 | res->state |= DLM_LOCK_RES_IN_PROGRESS; |
239 | 241 | ||
240 | /* add lock to local (secondary) queue */ | 242 | /* add lock to local (secondary) queue */ |
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | |||
319 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, | 321 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, |
320 | sizeof(create), res->owner, &status); | 322 | sizeof(create), res->owner, &status); |
321 | if (tmpret >= 0) { | 323 | if (tmpret >= 0) { |
322 | // successfully sent and received | 324 | ret = status; |
323 | ret = status; // this is already a dlm_status | ||
324 | if (ret == DLM_REJECTED) { | 325 | if (ret == DLM_REJECTED) { |
325 | mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " | 326 | mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer " |
326 | "no longer owned by %u. that node is coming back " | 327 | "owned by node %u. That node is coming back up " |
327 | "up currently.\n", dlm->name, create.namelen, | 328 | "currently.\n", dlm->name, create.namelen, |
328 | create.name, res->owner); | 329 | create.name, res->owner); |
329 | dlm_print_one_lock_resource(res); | 330 | dlm_print_one_lock_resource(res); |
330 | BUG(); | 331 | BUG(); |
331 | } | 332 | } |
332 | } else { | 333 | } else { |
333 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 334 | mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to " |
334 | "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, | 335 | "node %u\n", dlm->name, create.namelen, create.name, |
335 | res->owner); | 336 | tmpret, res->owner); |
336 | if (dlm_is_host_down(tmpret)) { | 337 | if (dlm_is_host_down(tmpret)) |
337 | ret = DLM_RECOVERING; | 338 | ret = DLM_RECOVERING; |
338 | mlog(0, "node %u died so returning DLM_RECOVERING " | 339 | else |
339 | "from lock message!\n", res->owner); | ||
340 | } else { | ||
341 | ret = dlm_err_to_dlm_status(tmpret); | 340 | ret = dlm_err_to_dlm_status(tmpret); |
342 | } | ||
343 | } | 341 | } |
344 | 342 | ||
345 | return ret; | 343 | return ret; |
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, | |||
440 | /* zero memory only if kernel-allocated */ | 438 | /* zero memory only if kernel-allocated */ |
441 | lksb = kzalloc(sizeof(*lksb), GFP_NOFS); | 439 | lksb = kzalloc(sizeof(*lksb), GFP_NOFS); |
442 | if (!lksb) { | 440 | if (!lksb) { |
443 | kfree(lock); | 441 | kmem_cache_free(dlm_lock_cache, lock); |
444 | return NULL; | 442 | return NULL; |
445 | } | 443 | } |
446 | kernel_allocated = 1; | 444 | kernel_allocated = 1; |
@@ -718,18 +716,10 @@ retry_lock: | |||
718 | 716 | ||
719 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | 717 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || |
720 | status == DLM_FORWARD) { | 718 | status == DLM_FORWARD) { |
721 | mlog(0, "retrying lock with migration/" | ||
722 | "recovery/in progress\n"); | ||
723 | msleep(100); | 719 | msleep(100); |
724 | /* no waiting for dlm_reco_thread */ | ||
725 | if (recovery) { | 720 | if (recovery) { |
726 | if (status != DLM_RECOVERING) | 721 | if (status != DLM_RECOVERING) |
727 | goto retry_lock; | 722 | goto retry_lock; |
728 | |||
729 | mlog(0, "%s: got RECOVERING " | ||
730 | "for $RECOVERY lock, master " | ||
731 | "was %u\n", dlm->name, | ||
732 | res->owner); | ||
733 | /* wait to see the node go down, then | 723 | /* wait to see the node go down, then |
734 | * drop down and allow the lockres to | 724 | * drop down and allow the lockres to |
735 | * get cleaned up. need to remaster. */ | 725 | * get cleaned up. need to remaster. */ |
@@ -741,6 +731,14 @@ retry_lock: | |||
741 | } | 731 | } |
742 | } | 732 | } |
743 | 733 | ||
734 | /* Inflight taken in dlm_get_lock_resource() is dropped here */ | ||
735 | spin_lock(&res->spinlock); | ||
736 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
737 | spin_unlock(&res->spinlock); | ||
738 | |||
739 | dlm_lockres_calc_usage(dlm, res); | ||
740 | dlm_kick_thread(dlm, res); | ||
741 | |||
744 | if (status != DLM_NORMAL) { | 742 | if (status != DLM_NORMAL) { |
745 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | 743 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; |
746 | if (status != DLM_NOTQUEUED) | 744 | if (status != DLM_NOTQUEUED) |
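One small fix in the dlmlock.c hunk above: dlm_new_lock() allocates the lock from dlm_lock_cache, so its error path must release the object with kmem_cache_free() rather than kfree(). A userspace analogue of that allocator-pairing rule is sketched below with a toy object cache; the names are illustrative and the cache is not the slab allocator.

#include <stdio.h>
#include <stdlib.h>

/* Toy object cache; objects taken from it must go back to it. */
struct obj_cache {
	void *slots[8];
	int nfree;
};

static void *cache_alloc(struct obj_cache *c)
{
	return c->nfree ? c->slots[--c->nfree] : malloc(64);
}

static void cache_free(struct obj_cache *c, void *obj)
{
	if (c->nfree < 8)
		c->slots[c->nfree++] = obj;	/* back to the cache, not free() */
	else
		free(obj);
}

int main(void)
{
	struct obj_cache lock_cache = { .nfree = 0 };
	void *lock = cache_alloc(&lock_cache);
	void *lksb = calloc(1, 32);

	if (!lock)
		return 1;
	if (!lksb) {
		/* Error path: release with the cache's own helper. */
		cache_free(&lock_cache, lock);
		return 1;
	}
	puts("allocated lock + lksb");
	free(lksb);
	cache_free(&lock_cache, lock);
	return 0;
}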
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 11eefb8c12e9..005261c333b0 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -631,39 +631,54 @@ error: | |||
631 | return NULL; | 631 | return NULL; |
632 | } | 632 | } |
633 | 633 | ||
634 | void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | 634 | void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, |
635 | struct dlm_lock_resource *res, | 635 | struct dlm_lock_resource *res, int bit) |
636 | int new_lockres, | ||
637 | const char *file, | ||
638 | int line) | ||
639 | { | 636 | { |
640 | if (!new_lockres) | 637 | assert_spin_locked(&res->spinlock); |
641 | assert_spin_locked(&res->spinlock); | 638 | |
639 | mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, | ||
640 | res->lockname.name, bit, __builtin_return_address(0)); | ||
641 | |||
642 | set_bit(bit, res->refmap); | ||
643 | } | ||
644 | |||
645 | void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, | ||
646 | struct dlm_lock_resource *res, int bit) | ||
647 | { | ||
648 | assert_spin_locked(&res->spinlock); | ||
649 | |||
650 | mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, | ||
651 | res->lockname.name, bit, __builtin_return_address(0)); | ||
652 | |||
653 | clear_bit(bit, res->refmap); | ||
654 | } | ||
655 | |||
656 | |||
657 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
658 | struct dlm_lock_resource *res) | ||
659 | { | ||
660 | assert_spin_locked(&res->spinlock); | ||
642 | 661 | ||
643 | if (!test_bit(dlm->node_num, res->refmap)) { | ||
644 | BUG_ON(res->inflight_locks != 0); | ||
645 | dlm_lockres_set_refmap_bit(dlm->node_num, res); | ||
646 | } | ||
647 | res->inflight_locks++; | 662 | res->inflight_locks++; |
648 | mlog(0, "%s:%.*s: inflight++: now %u\n", | 663 | |
649 | dlm->name, res->lockname.len, res->lockname.name, | 664 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, |
650 | res->inflight_locks); | 665 | res->lockname.len, res->lockname.name, res->inflight_locks, |
666 | __builtin_return_address(0)); | ||
651 | } | 667 | } |
652 | 668 | ||
653 | void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | 669 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
654 | struct dlm_lock_resource *res, | 670 | struct dlm_lock_resource *res) |
655 | const char *file, | ||
656 | int line) | ||
657 | { | 671 | { |
658 | assert_spin_locked(&res->spinlock); | 672 | assert_spin_locked(&res->spinlock); |
659 | 673 | ||
660 | BUG_ON(res->inflight_locks == 0); | 674 | BUG_ON(res->inflight_locks == 0); |
675 | |||
661 | res->inflight_locks--; | 676 | res->inflight_locks--; |
662 | mlog(0, "%s:%.*s: inflight--: now %u\n", | 677 | |
663 | dlm->name, res->lockname.len, res->lockname.name, | 678 | mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, |
664 | res->inflight_locks); | 679 | res->lockname.len, res->lockname.name, res->inflight_locks, |
665 | if (res->inflight_locks == 0) | 680 | __builtin_return_address(0)); |
666 | dlm_lockres_clear_refmap_bit(dlm->node_num, res); | 681 | |
667 | wake_up(&res->wq); | 682 | wake_up(&res->wq); |
668 | } | 683 | } |
669 | 684 | ||
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | |||
697 | unsigned int hash; | 712 | unsigned int hash; |
698 | int tries = 0; | 713 | int tries = 0; |
699 | int bit, wait_on_recovery = 0; | 714 | int bit, wait_on_recovery = 0; |
700 | int drop_inflight_if_nonlocal = 0; | ||
701 | 715 | ||
702 | BUG_ON(!lockid); | 716 | BUG_ON(!lockid); |
703 | 717 | ||
@@ -709,36 +723,33 @@ lookup: | |||
709 | spin_lock(&dlm->spinlock); | 723 | spin_lock(&dlm->spinlock); |
710 | tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); | 724 | tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); |
711 | if (tmpres) { | 725 | if (tmpres) { |
712 | int dropping_ref = 0; | ||
713 | |||
714 | spin_unlock(&dlm->spinlock); | 726 | spin_unlock(&dlm->spinlock); |
715 | |||
716 | spin_lock(&tmpres->spinlock); | 727 | spin_lock(&tmpres->spinlock); |
717 | /* We wait for the other thread that is mastering the resource */ | 728 | /* Wait on the thread that is mastering the resource */ |
718 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | 729 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { |
719 | __dlm_wait_on_lockres(tmpres); | 730 | __dlm_wait_on_lockres(tmpres); |
720 | BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); | 731 | BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); |
732 | spin_unlock(&tmpres->spinlock); | ||
733 | dlm_lockres_put(tmpres); | ||
734 | tmpres = NULL; | ||
735 | goto lookup; | ||
721 | } | 736 | } |
722 | 737 | ||
723 | if (tmpres->owner == dlm->node_num) { | 738 | /* Wait on the resource purge to complete before continuing */ |
724 | BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); | 739 | if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { |
725 | dlm_lockres_grab_inflight_ref(dlm, tmpres); | 740 | BUG_ON(tmpres->owner == dlm->node_num); |
726 | } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) | 741 | __dlm_wait_on_lockres_flags(tmpres, |
727 | dropping_ref = 1; | 742 | DLM_LOCK_RES_DROPPING_REF); |
728 | spin_unlock(&tmpres->spinlock); | ||
729 | |||
730 | /* wait until done messaging the master, drop our ref to allow | ||
731 | * the lockres to be purged, start over. */ | ||
732 | if (dropping_ref) { | ||
733 | spin_lock(&tmpres->spinlock); | ||
734 | __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); | ||
735 | spin_unlock(&tmpres->spinlock); | 743 | spin_unlock(&tmpres->spinlock); |
736 | dlm_lockres_put(tmpres); | 744 | dlm_lockres_put(tmpres); |
737 | tmpres = NULL; | 745 | tmpres = NULL; |
738 | goto lookup; | 746 | goto lookup; |
739 | } | 747 | } |
740 | 748 | ||
741 | mlog(0, "found in hash!\n"); | 749 | /* Grab inflight ref to pin the resource */ |
750 | dlm_lockres_grab_inflight_ref(dlm, tmpres); | ||
751 | |||
752 | spin_unlock(&tmpres->spinlock); | ||
742 | if (res) | 753 | if (res) |
743 | dlm_lockres_put(res); | 754 | dlm_lockres_put(res); |
744 | res = tmpres; | 755 | res = tmpres; |
@@ -829,8 +840,8 @@ lookup: | |||
829 | * but they might own this lockres. wait on them. */ | 840 | * but they might own this lockres. wait on them. */ |
830 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | 841 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); |
831 | if (bit < O2NM_MAX_NODES) { | 842 | if (bit < O2NM_MAX_NODES) { |
832 | mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " | 843 | mlog(0, "%s: res %.*s, At least one node (%d) " |
833 | "recover before lock mastery can begin\n", | 844 | "to recover before lock mastery can begin\n", |
834 | dlm->name, namelen, (char *)lockid, bit); | 845 | dlm->name, namelen, (char *)lockid, bit); |
835 | wait_on_recovery = 1; | 846 | wait_on_recovery = 1; |
836 | } | 847 | } |
@@ -843,12 +854,11 @@ lookup: | |||
843 | 854 | ||
844 | /* finally add the lockres to its hash bucket */ | 855 | /* finally add the lockres to its hash bucket */ |
845 | __dlm_insert_lockres(dlm, res); | 856 | __dlm_insert_lockres(dlm, res); |
846 | /* since this lockres is new it doesn't not require the spinlock */ | ||
847 | dlm_lockres_grab_inflight_ref_new(dlm, res); | ||
848 | 857 | ||
849 | /* if this node does not become the master make sure to drop | 858 | /* Grab inflight ref to pin the resource */ |
850 | * this inflight reference below */ | 859 | spin_lock(&res->spinlock); |
851 | drop_inflight_if_nonlocal = 1; | 860 | dlm_lockres_grab_inflight_ref(dlm, res); |
861 | spin_unlock(&res->spinlock); | ||
852 | 862 | ||
853 | /* get an extra ref on the mle in case this is a BLOCK | 863 | /* get an extra ref on the mle in case this is a BLOCK |
854 | * if so, the creator of the BLOCK may try to put the last | 864 | * if so, the creator of the BLOCK may try to put the last |
@@ -864,8 +874,8 @@ redo_request: | |||
864 | * dlm spinlock would be detectable be a change on the mle, | 874 | * dlm spinlock would be detectable be a change on the mle, |
865 | * so we only need to clear out the recovery map once. */ | 875 | * so we only need to clear out the recovery map once. */ |
866 | if (dlm_is_recovery_lock(lockid, namelen)) { | 876 | if (dlm_is_recovery_lock(lockid, namelen)) { |
867 | mlog(ML_NOTICE, "%s: recovery map is not empty, but " | 877 | mlog(0, "%s: Recovery map is not empty, but must " |
868 | "must master $RECOVERY lock now\n", dlm->name); | 878 | "master $RECOVERY lock now\n", dlm->name); |
869 | if (!dlm_pre_master_reco_lockres(dlm, res)) | 879 | if (!dlm_pre_master_reco_lockres(dlm, res)) |
870 | wait_on_recovery = 0; | 880 | wait_on_recovery = 0; |
871 | else { | 881 | else { |
@@ -883,8 +893,8 @@ redo_request: | |||
883 | spin_lock(&dlm->spinlock); | 893 | spin_lock(&dlm->spinlock); |
884 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | 894 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); |
885 | if (bit < O2NM_MAX_NODES) { | 895 | if (bit < O2NM_MAX_NODES) { |
886 | mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " | 896 | mlog(0, "%s: res %.*s, At least one node (%d) " |
887 | "recover before lock mastery can begin\n", | 897 | "to recover before lock mastery can begin\n", |
888 | dlm->name, namelen, (char *)lockid, bit); | 898 | dlm->name, namelen, (char *)lockid, bit); |
889 | wait_on_recovery = 1; | 899 | wait_on_recovery = 1; |
890 | } else | 900 | } else |
@@ -913,8 +923,8 @@ redo_request: | |||
913 | * yet, keep going until it does. this is how the | 923 | * yet, keep going until it does. this is how the |
914 | * master will know that asserts are needed back to | 924 | * master will know that asserts are needed back to |
915 | * the lower nodes. */ | 925 | * the lower nodes. */ |
916 | mlog(0, "%s:%.*s: requests only up to %u but master " | 926 | mlog(0, "%s: res %.*s, Requests only up to %u but " |
917 | "is %u, keep going\n", dlm->name, namelen, | 927 | "master is %u, keep going\n", dlm->name, namelen, |
918 | lockid, nodenum, mle->master); | 928 | lockid, nodenum, mle->master); |
919 | } | 929 | } |
920 | } | 930 | } |
@@ -924,13 +934,12 @@ wait: | |||
924 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); | 934 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); |
925 | if (ret < 0) { | 935 | if (ret < 0) { |
926 | wait_on_recovery = 1; | 936 | wait_on_recovery = 1; |
927 | mlog(0, "%s:%.*s: node map changed, redo the " | 937 | mlog(0, "%s: res %.*s, Node map changed, redo the master " |
928 | "master request now, blocked=%d\n", | 938 | "request now, blocked=%d\n", dlm->name, res->lockname.len, |
929 | dlm->name, res->lockname.len, | ||
930 | res->lockname.name, blocked); | 939 | res->lockname.name, blocked); |
931 | if (++tries > 20) { | 940 | if (++tries > 20) { |
932 | mlog(ML_ERROR, "%s:%.*s: spinning on " | 941 | mlog(ML_ERROR, "%s: res %.*s, Spinning on " |
933 | "dlm_wait_for_lock_mastery, blocked=%d\n", | 942 | "dlm_wait_for_lock_mastery, blocked = %d\n", |
934 | dlm->name, res->lockname.len, | 943 | dlm->name, res->lockname.len, |
935 | res->lockname.name, blocked); | 944 | res->lockname.name, blocked); |
936 | dlm_print_one_lock_resource(res); | 945 | dlm_print_one_lock_resource(res); |
@@ -940,7 +949,8 @@ wait: | |||
940 | goto redo_request; | 949 | goto redo_request; |
941 | } | 950 | } |
942 | 951 | ||
943 | mlog(0, "lockres mastered by %u\n", res->owner); | 952 | mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, |
953 | res->lockname.name, res->owner); | ||
944 | /* make sure we never continue without this */ | 954 | /* make sure we never continue without this */ |
945 | BUG_ON(res->owner == O2NM_MAX_NODES); | 955 | BUG_ON(res->owner == O2NM_MAX_NODES); |
946 | 956 | ||
@@ -952,8 +962,6 @@ wait: | |||
952 | 962 | ||
953 | wake_waiters: | 963 | wake_waiters: |
954 | spin_lock(&res->spinlock); | 964 | spin_lock(&res->spinlock); |
955 | if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) | ||
956 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
957 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | 965 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; |
958 | spin_unlock(&res->spinlock); | 966 | spin_unlock(&res->spinlock); |
959 | wake_up(&res->wq); | 967 | wake_up(&res->wq); |
@@ -1426,9 +1434,7 @@ way_up_top: | |||
1426 | } | 1434 | } |
1427 | 1435 | ||
1428 | if (res->owner == dlm->node_num) { | 1436 | if (res->owner == dlm->node_num) { |
1429 | mlog(0, "%s:%.*s: setting bit %u in refmap\n", | 1437 | dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); |
1430 | dlm->name, namelen, name, request->node_idx); | ||
1431 | dlm_lockres_set_refmap_bit(request->node_idx, res); | ||
1432 | spin_unlock(&res->spinlock); | 1438 | spin_unlock(&res->spinlock); |
1433 | response = DLM_MASTER_RESP_YES; | 1439 | response = DLM_MASTER_RESP_YES; |
1434 | if (mle) | 1440 | if (mle) |
@@ -1493,10 +1499,8 @@ way_up_top: | |||
1493 | * go back and clean the mles on any | 1499 | * go back and clean the mles on any |
1494 | * other nodes */ | 1500 | * other nodes */ |
1495 | dispatch_assert = 1; | 1501 | dispatch_assert = 1; |
1496 | dlm_lockres_set_refmap_bit(request->node_idx, res); | 1502 | dlm_lockres_set_refmap_bit(dlm, res, |
1497 | mlog(0, "%s:%.*s: setting bit %u in refmap\n", | 1503 | request->node_idx); |
1498 | dlm->name, namelen, name, | ||
1499 | request->node_idx); | ||
1500 | } else | 1504 | } else |
1501 | response = DLM_MASTER_RESP_NO; | 1505 | response = DLM_MASTER_RESP_NO; |
1502 | } else { | 1506 | } else { |
@@ -1702,7 +1706,7 @@ again: | |||
1702 | "lockres, set the bit in the refmap\n", | 1706 | "lockres, set the bit in the refmap\n", |
1703 | namelen, lockname, to); | 1707 | namelen, lockname, to); |
1704 | spin_lock(&res->spinlock); | 1708 | spin_lock(&res->spinlock); |
1705 | dlm_lockres_set_refmap_bit(to, res); | 1709 | dlm_lockres_set_refmap_bit(dlm, res, to); |
1706 | spin_unlock(&res->spinlock); | 1710 | spin_unlock(&res->spinlock); |
1707 | } | 1711 | } |
1708 | } | 1712 | } |
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
2187 | namelen = res->lockname.len; | 2191 | namelen = res->lockname.len; |
2188 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | 2192 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); |
2189 | 2193 | ||
2190 | mlog(0, "%s:%.*s: sending deref to %d\n", | ||
2191 | dlm->name, namelen, lockname, res->owner); | ||
2192 | memset(&deref, 0, sizeof(deref)); | 2194 | memset(&deref, 0, sizeof(deref)); |
2193 | deref.node_idx = dlm->node_num; | 2195 | deref.node_idx = dlm->node_num; |
2194 | deref.namelen = namelen; | 2196 | deref.namelen = namelen; |
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
2197 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, | 2199 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, |
2198 | &deref, sizeof(deref), res->owner, &r); | 2200 | &deref, sizeof(deref), res->owner, &r); |
2199 | if (ret < 0) | 2201 | if (ret < 0) |
2200 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 2202 | mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", |
2201 | "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key, | 2203 | dlm->name, namelen, lockname, ret, res->owner); |
2202 | res->owner); | ||
2203 | else if (r < 0) { | 2204 | else if (r < 0) { |
2204 | /* BAD. other node says I did not have a ref. */ | 2205 | /* BAD. other node says I did not have a ref. */ |
2205 | mlog(ML_ERROR,"while dropping ref on %s:%.*s " | 2206 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", |
2206 | "(master=%u) got %d.\n", dlm->name, namelen, | 2207 | dlm->name, namelen, lockname, res->owner, r); |
2207 | lockname, res->owner, r); | ||
2208 | dlm_print_one_lock_resource(res); | 2208 | dlm_print_one_lock_resource(res); |
2209 | BUG(); | 2209 | BUG(); |
2210 | } | 2210 | } |
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, | |||
2260 | else { | 2260 | else { |
2261 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | 2261 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); |
2262 | if (test_bit(node, res->refmap)) { | 2262 | if (test_bit(node, res->refmap)) { |
2263 | dlm_lockres_clear_refmap_bit(node, res); | 2263 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
2264 | cleared = 1; | 2264 | cleared = 1; |
2265 | } | 2265 | } |
2266 | } | 2266 | } |
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) | |||
2320 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | 2320 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); |
2321 | if (test_bit(node, res->refmap)) { | 2321 | if (test_bit(node, res->refmap)) { |
2322 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); | 2322 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); |
2323 | dlm_lockres_clear_refmap_bit(node, res); | 2323 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
2324 | cleared = 1; | 2324 | cleared = 1; |
2325 | } | 2325 | } |
2326 | spin_unlock(&res->spinlock); | 2326 | spin_unlock(&res->spinlock); |
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |||
2802 | BUG_ON(!list_empty(&lock->bast_list)); | 2802 | BUG_ON(!list_empty(&lock->bast_list)); |
2803 | BUG_ON(lock->ast_pending); | 2803 | BUG_ON(lock->ast_pending); |
2804 | BUG_ON(lock->bast_pending); | 2804 | BUG_ON(lock->bast_pending); |
2805 | dlm_lockres_clear_refmap_bit(lock->ml.node, res); | 2805 | dlm_lockres_clear_refmap_bit(dlm, res, |
2806 | lock->ml.node); | ||
2806 | list_del_init(&lock->list); | 2807 | list_del_init(&lock->list); |
2807 | dlm_lock_put(lock); | 2808 | dlm_lock_put(lock); |
2808 | /* In a normal unlock, we would have added a | 2809 | /* In a normal unlock, we would have added a |
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |||
2823 | mlog(0, "%s:%.*s: node %u had a ref to this " | 2824 | mlog(0, "%s:%.*s: node %u had a ref to this " |
2824 | "migrating lockres, clearing\n", dlm->name, | 2825 | "migrating lockres, clearing\n", dlm->name, |
2825 | res->lockname.len, res->lockname.name, bit); | 2826 | res->lockname.len, res->lockname.name, bit); |
2826 | dlm_lockres_clear_refmap_bit(bit, res); | 2827 | dlm_lockres_clear_refmap_bit(dlm, res, bit); |
2827 | } | 2828 | } |
2828 | bit++; | 2829 | bit++; |
2829 | } | 2830 | } |
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |||
2916 | &migrate, sizeof(migrate), nodenum, | 2917 | &migrate, sizeof(migrate), nodenum, |
2917 | &status); | 2918 | &status); |
2918 | if (ret < 0) { | 2919 | if (ret < 0) { |
2919 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 2920 | mlog(ML_ERROR, "%s: res %.*s, Error %d send " |
2920 | "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG, | 2921 | "MIGRATE_REQUEST to node %u\n", dlm->name, |
2921 | dlm->key, nodenum); | 2922 | migrate.namelen, migrate.name, ret, nodenum); |
2922 | if (!dlm_is_host_down(ret)) { | 2923 | if (!dlm_is_host_down(ret)) { |
2923 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); | 2924 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); |
2924 | BUG(); | 2925 | BUG(); |
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |||
2937 | dlm->name, res->lockname.len, res->lockname.name, | 2938 | dlm->name, res->lockname.len, res->lockname.name, |
2938 | nodenum); | 2939 | nodenum); |
2939 | spin_lock(&res->spinlock); | 2940 | spin_lock(&res->spinlock); |
2940 | dlm_lockres_set_refmap_bit(nodenum, res); | 2941 | dlm_lockres_set_refmap_bit(dlm, res, nodenum); |
2941 | spin_unlock(&res->spinlock); | 2942 | spin_unlock(&res->spinlock); |
2942 | } | 2943 | } |
2943 | } | 2944 | } |
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
3271 | * mastery reference here since old_master will briefly have | 3272 | * mastery reference here since old_master will briefly have |
3272 | * a reference after the migration completes */ | 3273 | * a reference after the migration completes */ |
3273 | spin_lock(&res->spinlock); | 3274 | spin_lock(&res->spinlock); |
3274 | dlm_lockres_set_refmap_bit(old_master, res); | 3275 | dlm_lockres_set_refmap_bit(dlm, res, old_master); |
3275 | spin_unlock(&res->spinlock); | 3276 | spin_unlock(&res->spinlock); |
3276 | 3277 | ||
3277 | mlog(0, "now time to do a migrate request to other nodes\n"); | 3278 | mlog(0, "now time to do a migrate request to other nodes\n"); |
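
Most of the dlmmaster.c messages above are reworked onto a common "%s: res %.*s, ..." prefix; the "%.*s" pair prints a lock name that is length-counted rather than NUL-terminated. A tiny sketch of that format usage, with an invented name buffer:

    #include <stdio.h>

    int main(void)
    {
        /* o2dlm lock names are length-counted, not NUL-terminated. */
        const char name[] = { 'M', '0', '0', '0', '0', '0', '0', '1' };
        int namelen = (int)sizeof(name);

        /* "%.*s" consumes the length first, then the pointer. */
        printf("domain: res %.*s, owner %u\n", namelen, name, 2u);
        return 0;
    }
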
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 7efab6d28a21..01ebfd0bdad7 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) | |||
362 | } | 362 | } |
363 | 363 | ||
364 | 364 | ||
365 | int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) | 365 | void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) |
366 | { | 366 | { |
367 | if (timeout) { | 367 | if (dlm_is_node_dead(dlm, node)) |
368 | mlog(ML_NOTICE, "%s: waiting %dms for notification of " | 368 | return; |
369 | "death of node %u\n", dlm->name, timeout, node); | 369 | |
370 | printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " | ||
371 | "domain %s\n", node, dlm->name); | ||
372 | |||
373 | if (timeout) | ||
370 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 374 | wait_event_timeout(dlm->dlm_reco_thread_wq, |
371 | dlm_is_node_dead(dlm, node), | 375 | dlm_is_node_dead(dlm, node), |
372 | msecs_to_jiffies(timeout)); | 376 | msecs_to_jiffies(timeout)); |
373 | } else { | 377 | else |
374 | mlog(ML_NOTICE, "%s: waiting indefinitely for notification " | ||
375 | "of death of node %u\n", dlm->name, node); | ||
376 | wait_event(dlm->dlm_reco_thread_wq, | 378 | wait_event(dlm->dlm_reco_thread_wq, |
377 | dlm_is_node_dead(dlm, node)); | 379 | dlm_is_node_dead(dlm, node)); |
378 | } | ||
379 | /* for now, return 0 */ | ||
380 | return 0; | ||
381 | } | 380 | } |
382 | 381 | ||
383 | int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) | 382 | void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) |
384 | { | 383 | { |
385 | if (timeout) { | 384 | if (dlm_is_node_recovered(dlm, node)) |
386 | mlog(0, "%s: waiting %dms for notification of " | 385 | return; |
387 | "recovery of node %u\n", dlm->name, timeout, node); | 386 | |
387 | printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " | ||
388 | "domain %s\n", node, dlm->name); | ||
389 | |||
390 | if (timeout) | ||
388 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 391 | wait_event_timeout(dlm->dlm_reco_thread_wq, |
389 | dlm_is_node_recovered(dlm, node), | 392 | dlm_is_node_recovered(dlm, node), |
390 | msecs_to_jiffies(timeout)); | 393 | msecs_to_jiffies(timeout)); |
391 | } else { | 394 | else |
392 | mlog(0, "%s: waiting indefinitely for notification " | ||
393 | "of recovery of node %u\n", dlm->name, node); | ||
394 | wait_event(dlm->dlm_reco_thread_wq, | 395 | wait_event(dlm->dlm_reco_thread_wq, |
395 | dlm_is_node_recovered(dlm, node)); | 396 | dlm_is_node_recovered(dlm, node)); |
396 | } | ||
397 | /* for now, return 0 */ | ||
398 | return 0; | ||
399 | } | 397 | } |
400 | 398 | ||
401 | /* callers of the top-level api calls (dlmlock/dlmunlock) should | 399 | /* callers of the top-level api calls (dlmlock/dlmunlock) should |
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm) | |||
430 | { | 428 | { |
431 | spin_lock(&dlm->spinlock); | 429 | spin_lock(&dlm->spinlock); |
432 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | 430 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); |
431 | printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n", | ||
432 | dlm->name, dlm->reco.dead_node); | ||
433 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; | 433 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; |
434 | spin_unlock(&dlm->spinlock); | 434 | spin_unlock(&dlm->spinlock); |
435 | } | 435 | } |
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm) | |||
440 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); | 440 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); |
441 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; | 441 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; |
442 | spin_unlock(&dlm->spinlock); | 442 | spin_unlock(&dlm->spinlock); |
443 | printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name); | ||
443 | wake_up(&dlm->reco.event); | 444 | wake_up(&dlm->reco.event); |
444 | } | 445 | } |
445 | 446 | ||
447 | static void dlm_print_recovery_master(struct dlm_ctxt *dlm) | ||
448 | { | ||
449 | printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the " | ||
450 | "dead node %u in domain %s\n", dlm->reco.new_master, | ||
451 | (dlm->node_num == dlm->reco.new_master ? "me" : "he"), | ||
452 | dlm->reco.dead_node, dlm->name); | ||
453 | } | ||
454 | |||
446 | static int dlm_do_recovery(struct dlm_ctxt *dlm) | 455 | static int dlm_do_recovery(struct dlm_ctxt *dlm) |
447 | { | 456 | { |
448 | int status = 0; | 457 | int status = 0; |
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) | |||
505 | } | 514 | } |
506 | mlog(0, "another node will master this recovery session.\n"); | 515 | mlog(0, "another node will master this recovery session.\n"); |
507 | } | 516 | } |
508 | mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", | 517 | |
509 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, | 518 | dlm_print_recovery_master(dlm); |
510 | dlm->node_num, dlm->reco.dead_node); | ||
511 | 519 | ||
512 | /* it is safe to start everything back up here | 520 | /* it is safe to start everything back up here |
513 | * because all of the dead node's lock resources | 521 | * because all of the dead node's lock resources |
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) | |||
518 | return 0; | 526 | return 0; |
519 | 527 | ||
520 | master_here: | 528 | master_here: |
521 | mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " | 529 | dlm_print_recovery_master(dlm); |
522 | "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), | ||
523 | dlm->node_num, dlm->reco.dead_node, dlm->name); | ||
524 | 530 | ||
525 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); | 531 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); |
526 | if (status < 0) { | 532 | if (status < 0) { |
527 | /* we should never hit this anymore */ | 533 | /* we should never hit this anymore */ |
528 | mlog(ML_ERROR, "error %d remastering locks for node %u, " | 534 | mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, " |
529 | "retrying.\n", status, dlm->reco.dead_node); | 535 | "retrying.\n", dlm->name, status, dlm->reco.dead_node); |
530 | /* yield a bit to allow any final network messages | 536 | /* yield a bit to allow any final network messages |
531 | * to get handled on remaining nodes */ | 537 | * to get handled on remaining nodes */ |
532 | msleep(100); | 538 | msleep(100); |
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
567 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); | 573 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); |
568 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; | 574 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; |
569 | 575 | ||
570 | mlog(0, "requesting lock info from node %u\n", | 576 | mlog(0, "%s: Requesting lock info from node %u\n", dlm->name, |
571 | ndata->node_num); | 577 | ndata->node_num); |
572 | 578 | ||
573 | if (ndata->node_num == dlm->node_num) { | 579 | if (ndata->node_num == dlm->node_num) { |
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
640 | spin_unlock(&dlm_reco_state_lock); | 646 | spin_unlock(&dlm_reco_state_lock); |
641 | } | 647 | } |
642 | 648 | ||
643 | mlog(0, "done requesting all lock info\n"); | 649 | mlog(0, "%s: Done requesting all lock info\n", dlm->name); |
644 | 650 | ||
645 | /* nodes should be sending reco data now | 651 | /* nodes should be sending reco data now |
646 | * just need to wait */ | 652 | * just need to wait */ |
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
802 | 808 | ||
803 | /* negative status is handled by caller */ | 809 | /* negative status is handled by caller */ |
804 | if (ret < 0) | 810 | if (ret < 0) |
805 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 811 | mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " |
806 | "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, | 812 | "to recover dead node %u\n", dlm->name, ret, |
807 | dlm->key, request_from); | 813 | request_from, dead_node); |
808 | |||
809 | // return from here, then | 814 | // return from here, then |
810 | // sleep until all received or error | 815 | // sleep until all received or error |
811 | return ret; | 816 | return ret; |
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) | |||
956 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, | 961 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, |
957 | sizeof(done_msg), send_to, &tmpret); | 962 | sizeof(done_msg), send_to, &tmpret); |
958 | if (ret < 0) { | 963 | if (ret < 0) { |
959 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 964 | mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u " |
960 | "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, | 965 | "to recover dead node %u\n", dlm->name, ret, send_to, |
961 | dlm->key, send_to); | 966 | dead_node); |
962 | if (!dlm_is_host_down(ret)) { | 967 | if (!dlm_is_host_down(ret)) { |
963 | BUG(); | 968 | BUG(); |
964 | } | 969 | } |
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | |||
1127 | if (ret < 0) { | 1132 | if (ret < 0) { |
1128 | /* XXX: negative status is not handled. | 1133 | /* XXX: negative status is not handled. |
1129 | * this will end up killing this node. */ | 1134 | * this will end up killing this node. */ |
1130 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 1135 | mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to " |
1131 | "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, | 1136 | "node %u (%s)\n", dlm->name, mres->lockname_len, |
1132 | dlm->key, send_to); | 1137 | mres->lockname, ret, send_to, |
1138 | (orig_flags & DLM_MRES_MIGRATION ? | ||
1139 | "migration" : "recovery")); | ||
1133 | } else { | 1140 | } else { |
1134 | /* might get an -ENOMEM back here */ | 1141 | /* might get an -ENOMEM back here */ |
1135 | ret = status; | 1142 | ret = status; |
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | |||
1767 | dlm->name, mres->lockname_len, mres->lockname, | 1774 | dlm->name, mres->lockname_len, mres->lockname, |
1768 | from); | 1775 | from); |
1769 | spin_lock(&res->spinlock); | 1776 | spin_lock(&res->spinlock); |
1770 | dlm_lockres_set_refmap_bit(from, res); | 1777 | dlm_lockres_set_refmap_bit(dlm, res, from); |
1771 | spin_unlock(&res->spinlock); | 1778 | spin_unlock(&res->spinlock); |
1772 | added++; | 1779 | added++; |
1773 | break; | 1780 | break; |
@@ -1965,7 +1972,7 @@ skip_lvb: | |||
1965 | mlog(0, "%s:%.*s: added lock for node %u, " | 1972 | mlog(0, "%s:%.*s: added lock for node %u, " |
1966 | "setting refmap bit\n", dlm->name, | 1973 | "setting refmap bit\n", dlm->name, |
1967 | res->lockname.len, res->lockname.name, ml->node); | 1974 | res->lockname.len, res->lockname.name, ml->node); |
1968 | dlm_lockres_set_refmap_bit(ml->node, res); | 1975 | dlm_lockres_set_refmap_bit(dlm, res, ml->node); |
1969 | added++; | 1976 | added++; |
1970 | } | 1977 | } |
1971 | spin_unlock(&res->spinlock); | 1978 | spin_unlock(&res->spinlock); |
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |||
2084 | 2091 | ||
2085 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { | 2092 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { |
2086 | if (res->owner == dead_node) { | 2093 | if (res->owner == dead_node) { |
2094 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", | ||
2095 | dlm->name, res->lockname.len, res->lockname.name, | ||
2096 | res->owner, new_master); | ||
2087 | list_del_init(&res->recovering); | 2097 | list_del_init(&res->recovering); |
2088 | spin_lock(&res->spinlock); | 2098 | spin_lock(&res->spinlock); |
2089 | /* new_master has our reference from | 2099 | /* new_master has our reference from |
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |||
2105 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 2115 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
2106 | bucket = dlm_lockres_hash(dlm, i); | 2116 | bucket = dlm_lockres_hash(dlm, i); |
2107 | hlist_for_each_entry(res, hash_iter, bucket, hash_node) { | 2117 | hlist_for_each_entry(res, hash_iter, bucket, hash_node) { |
2108 | if (res->state & DLM_LOCK_RES_RECOVERING) { | 2118 | if (!(res->state & DLM_LOCK_RES_RECOVERING)) |
2109 | if (res->owner == dead_node) { | 2119 | continue; |
2110 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
2111 | "was not on recovering list, but " | ||
2112 | "clearing state anyway\n", | ||
2113 | dlm->node_num, res->lockname.len, | ||
2114 | res->lockname.name, new_master); | ||
2115 | } else if (res->owner == dlm->node_num) { | ||
2116 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
2117 | "was not on recovering list, " | ||
2118 | "owner is THIS node, clearing\n", | ||
2119 | dlm->node_num, res->lockname.len, | ||
2120 | res->lockname.name, new_master); | ||
2121 | } else | ||
2122 | continue; | ||
2123 | 2120 | ||
2124 | if (!list_empty(&res->recovering)) { | 2121 | if (res->owner != dead_node && |
2125 | mlog(0, "%s:%.*s: lockres was " | 2122 | res->owner != dlm->node_num) |
2126 | "marked RECOVERING, owner=%u\n", | 2123 | continue; |
2127 | dlm->name, res->lockname.len, | 2124 | |
2128 | res->lockname.name, res->owner); | 2125 | if (!list_empty(&res->recovering)) { |
2129 | list_del_init(&res->recovering); | 2126 | list_del_init(&res->recovering); |
2130 | dlm_lockres_put(res); | 2127 | dlm_lockres_put(res); |
2131 | } | ||
2132 | spin_lock(&res->spinlock); | ||
2133 | /* new_master has our reference from | ||
2134 | * the lock state sent during recovery */ | ||
2135 | dlm_change_lockres_owner(dlm, res, new_master); | ||
2136 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
2137 | if (__dlm_lockres_has_locks(res)) | ||
2138 | __dlm_dirty_lockres(dlm, res); | ||
2139 | spin_unlock(&res->spinlock); | ||
2140 | wake_up(&res->wq); | ||
2141 | } | 2128 | } |
2129 | |||
2130 | /* new_master has our reference from | ||
2131 | * the lock state sent during recovery */ | ||
2132 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", | ||
2133 | dlm->name, res->lockname.len, res->lockname.name, | ||
2134 | res->owner, new_master); | ||
2135 | spin_lock(&res->spinlock); | ||
2136 | dlm_change_lockres_owner(dlm, res, new_master); | ||
2137 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
2138 | if (__dlm_lockres_has_locks(res)) | ||
2139 | __dlm_dirty_lockres(dlm, res); | ||
2140 | spin_unlock(&res->spinlock); | ||
2141 | wake_up(&res->wq); | ||
2142 | } | 2142 | } |
2143 | } | 2143 | } |
2144 | } | 2144 | } |
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | |||
2252 | res->lockname.len, res->lockname.name, freed, dead_node); | 2252 | res->lockname.len, res->lockname.name, freed, dead_node); |
2253 | __dlm_print_one_lock_resource(res); | 2253 | __dlm_print_one_lock_resource(res); |
2254 | } | 2254 | } |
2255 | dlm_lockres_clear_refmap_bit(dead_node, res); | 2255 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
2256 | } else if (test_bit(dead_node, res->refmap)) { | 2256 | } else if (test_bit(dead_node, res->refmap)) { |
2257 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " | 2257 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " |
2258 | "no locks and had not purged before dying\n", dlm->name, | 2258 | "no locks and had not purged before dying\n", dlm->name, |
2259 | res->lockname.len, res->lockname.name, dead_node); | 2259 | res->lockname.len, res->lockname.name, dead_node); |
2260 | dlm_lockres_clear_refmap_bit(dead_node, res); | 2260 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
2261 | } | 2261 | } |
2262 | 2262 | ||
2263 | /* do not kick thread yet */ | 2263 | /* do not kick thread yet */ |
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2324 | dlm_revalidate_lvb(dlm, res, dead_node); | 2324 | dlm_revalidate_lvb(dlm, res, dead_node); |
2325 | if (res->owner == dead_node) { | 2325 | if (res->owner == dead_node) { |
2326 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { | 2326 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { |
2327 | mlog(ML_NOTICE, "Ignore %.*s for " | 2327 | mlog(ML_NOTICE, "%s: res %.*s, Skip " |
2328 | "recovery as it is being freed\n", | 2328 | "recovery as it is being freed\n", |
2329 | res->lockname.len, | 2329 | dlm->name, res->lockname.len, |
2330 | res->lockname.name); | 2330 | res->lockname.name); |
2331 | } else | 2331 | } else |
2332 | dlm_move_lockres_to_recovery_list(dlm, | 2332 | dlm_move_lockres_to_recovery_list(dlm, |
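
The dlm_finish_local_lockres_recovery() hunk above flattens a nested if/else ladder into early "continue" guards so the owner change happens once at the bottom of the loop. A sketch of the same shape over a toy array; the field and constant names are hypothetical stand-ins for the lockres state seen in the diff.

    #include <stdio.h>

    struct res {
        const char *name;
        int recovering;  /* DLM_LOCK_RES_RECOVERING stand-in */
        int owner;
    };

    int main(void)
    {
        enum { DEAD = 7, ME = 1, NEW_MASTER = 3, OTHER = 5 };
        struct res table[] = {
            { "A", 1, DEAD },   /* reassigned */
            { "B", 0, DEAD },   /* skipped: not recovering */
            { "C", 1, OTHER },  /* skipped: someone else's resource */
            { "D", 1, ME },     /* reassigned */
        };

        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            struct res *r = &table[i];

            /* Guard clauses replace the old nested if/else ladder. */
            if (!r->recovering)
                continue;
            if (r->owner != DEAD && r->owner != ME)
                continue;

            printf("res %s: owner %d -> %d\n", r->name, r->owner, NEW_MASTER);
            r->owner = NEW_MASTER;
            r->recovering = 0;
        }
        return 0;
    }
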
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 1d6d1d22c471..e73c833fc2a1 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res) | |||
94 | { | 94 | { |
95 | int bit; | 95 | int bit; |
96 | 96 | ||
97 | assert_spin_locked(&res->spinlock); | ||
98 | |||
97 | if (__dlm_lockres_has_locks(res)) | 99 | if (__dlm_lockres_has_locks(res)) |
98 | return 0; | 100 | return 0; |
99 | 101 | ||
102 | /* Locks are in the process of being created */ | ||
103 | if (res->inflight_locks) | ||
104 | return 0; | ||
105 | |||
100 | if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) | 106 | if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) |
101 | return 0; | 107 | return 0; |
102 | 108 | ||
103 | if (res->state & DLM_LOCK_RES_RECOVERING) | 109 | if (res->state & DLM_LOCK_RES_RECOVERING) |
104 | return 0; | 110 | return 0; |
105 | 111 | ||
112 | /* Another node has this resource with this node as the master */ | ||
106 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | 113 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); |
107 | if (bit < O2NM_MAX_NODES) | 114 | if (bit < O2NM_MAX_NODES) |
108 | return 0; | 115 | return 0; |
109 | 116 | ||
110 | /* | ||
111 | * since the bit for dlm->node_num is not set, inflight_locks better | ||
112 | * be zero | ||
113 | */ | ||
114 | BUG_ON(res->inflight_locks != 0); | ||
115 | return 1; | 117 | return 1; |
116 | } | 118 | } |
117 | 119 | ||
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
185 | /* clear our bit from the master's refmap, ignore errors */ | 187 | /* clear our bit from the master's refmap, ignore errors */ |
186 | ret = dlm_drop_lockres_ref(dlm, res); | 188 | ret = dlm_drop_lockres_ref(dlm, res); |
187 | if (ret < 0) { | 189 | if (ret < 0) { |
188 | mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name, | ||
189 | res->lockname.len, res->lockname.name, ret); | ||
190 | if (!dlm_is_host_down(ret)) | 190 | if (!dlm_is_host_down(ret)) |
191 | BUG(); | 191 | BUG(); |
192 | } | 192 | } |
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
209 | BUG(); | 209 | BUG(); |
210 | } | 210 | } |
211 | 211 | ||
212 | __dlm_unhash_lockres(res); | 212 | __dlm_unhash_lockres(dlm, res); |
213 | 213 | ||
214 | /* lockres is not in the hash now. drop the flag and wake up | 214 | /* lockres is not in the hash now. drop the flag and wake up |
215 | * any processes waiting in dlm_get_lock_resource. */ | 215 | * any processes waiting in dlm_get_lock_resource. */ |
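
The new __dlm_lockres_unused() above treats a nonzero inflight count as "in use" (returning 0 early) rather than asserting it is zero after the refmap scan. A userspace sketch of that predicate, with a plain bitmap loop standing in for find_next_bit() and a toy struct instead of struct dlm_lock_resource:

    #include <stdio.h>

    #define MAX_NODES 32

    struct toy_res {
        int has_locks;
        unsigned int inflight_locks;
        int dirty;
        int recovering;
        unsigned long refmap;        /* one bit per node */
    };

    /* Return 1 only if nothing at all references the resource. */
    static int res_unused(const struct toy_res *r)
    {
        if (r->has_locks)
            return 0;
        if (r->inflight_locks)       /* locks are being created */
            return 0;
        if (r->dirty || r->recovering)
            return 0;
        for (int bit = 0; bit < MAX_NODES; bit++)   /* refmap scan */
            if (r->refmap & (1UL << bit))
                return 0;
        return 1;
    }

    int main(void)
    {
        struct toy_res idle   = { 0, 0, 0, 0, 0 };
        struct toy_res pinned = { 0, 1, 0, 0, 0 };

        printf("idle unused:   %d\n", res_unused(&idle));    /* 1 */
        printf("pinned unused: %d\n", res_unused(&pinned));  /* 0 */
        return 0;
    }
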
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index e1ed5e502ff2..81a4cd22f80b 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode) | |||
1692 | mlog(0, "inode %llu take PRMODE open lock\n", | 1692 | mlog(0, "inode %llu take PRMODE open lock\n", |
1693 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 1693 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1694 | 1694 | ||
1695 | if (ocfs2_mount_local(osb)) | 1695 | if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) |
1696 | goto out; | 1696 | goto out; |
1697 | 1697 | ||
1698 | lockres = &OCFS2_I(inode)->ip_open_lockres; | 1698 | lockres = &OCFS2_I(inode)->ip_open_lockres; |
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write) | |||
1718 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 1718 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1719 | write ? "EXMODE" : "PRMODE"); | 1719 | write ? "EXMODE" : "PRMODE"); |
1720 | 1720 | ||
1721 | if (ocfs2_is_hard_readonly(osb)) { | ||
1722 | if (write) | ||
1723 | status = -EROFS; | ||
1724 | goto out; | ||
1725 | } | ||
1726 | |||
1721 | if (ocfs2_mount_local(osb)) | 1727 | if (ocfs2_mount_local(osb)) |
1722 | goto out; | 1728 | goto out; |
1723 | 1729 | ||
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, | |||
2298 | if (ocfs2_is_hard_readonly(osb)) { | 2304 | if (ocfs2_is_hard_readonly(osb)) { |
2299 | if (ex) | 2305 | if (ex) |
2300 | status = -EROFS; | 2306 | status = -EROFS; |
2301 | goto bail; | 2307 | goto getbh; |
2302 | } | 2308 | } |
2303 | 2309 | ||
2304 | if (ocfs2_mount_local(osb)) | 2310 | if (ocfs2_mount_local(osb)) |
@@ -2356,7 +2362,7 @@ local: | |||
2356 | mlog_errno(status); | 2362 | mlog_errno(status); |
2357 | goto bail; | 2363 | goto bail; |
2358 | } | 2364 | } |
2359 | 2365 | getbh: | |
2360 | if (ret_bh) { | 2366 | if (ret_bh) { |
2361 | status = ocfs2_assign_bh(inode, ret_bh, local_bh); | 2367 | status = ocfs2_assign_bh(inode, ret_bh, local_bh); |
2362 | if (status < 0) { | 2368 | if (status < 0) { |
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex) | |||
2628 | 2634 | ||
2629 | BUG_ON(!dl); | 2635 | BUG_ON(!dl); |
2630 | 2636 | ||
2631 | if (ocfs2_is_hard_readonly(osb)) | 2637 | if (ocfs2_is_hard_readonly(osb)) { |
2632 | return -EROFS; | 2638 | if (ex) |
2639 | return -EROFS; | ||
2640 | return 0; | ||
2641 | } | ||
2633 | 2642 | ||
2634 | if (ocfs2_mount_local(osb)) | 2643 | if (ocfs2_mount_local(osb)) |
2635 | return 0; | 2644 | return 0; |
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex) | |||
2647 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; | 2656 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; |
2648 | struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); | 2657 | struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); |
2649 | 2658 | ||
2650 | if (!ocfs2_mount_local(osb)) | 2659 | if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) |
2651 | ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); | 2660 | ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); |
2652 | } | 2661 | } |
2653 | 2662 | ||
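
The dlmglue.c hunks above make hard read-only mounts behave like local mounts for read-mode open and dentry locks, while write-mode requests fail with -EROFS; the real checks live in ocfs2_open_lock(), ocfs2_try_open_lock() and ocfs2_dentry_lock(). A sketch of that decision as a single policy function; the function and parameter names here are invented for illustration.

    #include <errno.h>
    #include <stdio.h>

    /* 0 = proceed (taking or skipping the cluster lock), -EROFS = refuse. */
    static int cluster_lock_policy(int hard_readonly, int local_mount, int write)
    {
        if (hard_readonly)
            return write ? -EROFS : 0;  /* reads proceed without cluster locking */
        if (local_mount)
            return 0;                   /* no cluster locking needed */
        return 0;                       /* normal clustered path */
    }

    int main(void)
    {
        printf("ro + read : %d\n", cluster_lock_policy(1, 0, 0)); /* 0 */
        printf("ro + write: %d\n", cluster_lock_policy(1, 0, 1)); /* -EROFS */
        return 0;
    }
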
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 23457b491e8c..2f5b92ef0e53 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -832,6 +832,102 @@ out: | |||
832 | return ret; | 832 | return ret; |
833 | } | 833 | } |
834 | 834 | ||
835 | int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin) | ||
836 | { | ||
837 | struct inode *inode = file->f_mapping->host; | ||
838 | int ret; | ||
839 | unsigned int is_last = 0, is_data = 0; | ||
840 | u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; | ||
841 | u32 cpos, cend, clen, hole_size; | ||
842 | u64 extoff, extlen; | ||
843 | struct buffer_head *di_bh = NULL; | ||
844 | struct ocfs2_extent_rec rec; | ||
845 | |||
846 | BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE); | ||
847 | |||
848 | ret = ocfs2_inode_lock(inode, &di_bh, 0); | ||
849 | if (ret) { | ||
850 | mlog_errno(ret); | ||
851 | goto out; | ||
852 | } | ||
853 | |||
854 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
855 | |||
856 | if (*offset >= inode->i_size) { | ||
857 | ret = -ENXIO; | ||
858 | goto out_unlock; | ||
859 | } | ||
860 | |||
861 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { | ||
862 | if (origin == SEEK_HOLE) | ||
863 | *offset = inode->i_size; | ||
864 | goto out_unlock; | ||
865 | } | ||
866 | |||
867 | clen = 0; | ||
868 | cpos = *offset >> cs_bits; | ||
869 | cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size); | ||
870 | |||
871 | while (cpos < cend && !is_last) { | ||
872 | ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, | ||
873 | &rec, &is_last); | ||
874 | if (ret) { | ||
875 | mlog_errno(ret); | ||
876 | goto out_unlock; | ||
877 | } | ||
878 | |||
879 | extoff = cpos; | ||
880 | extoff <<= cs_bits; | ||
881 | |||
882 | if (rec.e_blkno == 0ULL) { | ||
883 | clen = hole_size; | ||
884 | is_data = 0; | ||
885 | } else { | ||
886 | clen = le16_to_cpu(rec.e_leaf_clusters) - | ||
887 | (cpos - le32_to_cpu(rec.e_cpos)); | ||
888 | is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1; | ||
889 | } | ||
890 | |||
891 | if ((!is_data && origin == SEEK_HOLE) || | ||
892 | (is_data && origin == SEEK_DATA)) { | ||
893 | if (extoff > *offset) | ||
894 | *offset = extoff; | ||
895 | goto out_unlock; | ||
896 | } | ||
897 | |||
898 | if (!is_last) | ||
899 | cpos += clen; | ||
900 | } | ||
901 | |||
902 | if (origin == SEEK_HOLE) { | ||
903 | extoff = cpos; | ||
904 | extoff <<= cs_bits; | ||
905 | extlen = clen; | ||
906 | extlen <<= cs_bits; | ||
907 | |||
908 | if ((extoff + extlen) > inode->i_size) | ||
909 | extlen = inode->i_size - extoff; | ||
910 | extoff += extlen; | ||
911 | if (extoff > *offset) | ||
912 | *offset = extoff; | ||
913 | goto out_unlock; | ||
914 | } | ||
915 | |||
916 | ret = -ENXIO; | ||
917 | |||
918 | out_unlock: | ||
919 | |||
920 | brelse(di_bh); | ||
921 | |||
922 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
923 | |||
924 | ocfs2_inode_unlock(inode, 0); | ||
925 | out: | ||
926 | if (ret && ret != -ENXIO) | ||
927 | ret = -ENXIO; | ||
928 | return ret; | ||
929 | } | ||
930 | |||
835 | int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, | 931 | int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, |
836 | struct buffer_head *bhs[], int flags, | 932 | struct buffer_head *bhs[], int flags, |
837 | int (*validate)(struct super_block *sb, | 933 | int (*validate)(struct super_block *sb, |
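
The new ocfs2_seek_data_hole_offset() above walks the extent list from the requested cluster, treating missing or unwritten extents as holes, until it finds a region of the requested kind or runs off the end of the file. Below is a simplified userspace walk over a toy extent table that mirrors that loop; the extent layout, struct, and helper names are illustrative only.

    #include <errno.h>
    #include <stdio.h>

    #ifndef SEEK_DATA            /* provided by <unistd.h> on recent Linux */
    #define SEEK_DATA 3
    #define SEEK_HOLE 4
    #endif

    struct extent { long start, len; int is_data; };

    /* Find the first data or hole byte at or after *offset; 0 or -ENXIO. */
    static int seek_data_hole(const struct extent *map, int nr, long i_size,
                              long *offset, int whence)
    {
        if (*offset >= i_size)
            return -ENXIO;

        for (int i = 0; i < nr; i++) {
            long end = map[i].start + map[i].len;

            if (*offset >= end)
                continue;        /* region ends before the offset */
            if ((map[i].is_data && whence == SEEK_DATA) ||
                (!map[i].is_data && whence == SEEK_HOLE)) {
                if (map[i].start > *offset)
                    *offset = map[i].start;
                return 0;
            }
        }
        if (whence == SEEK_HOLE) {   /* implicit hole at end of file */
            *offset = i_size;
            return 0;
        }
        return -ENXIO;
    }

    int main(void)
    {
        /* bytes 0-99 data, 100-199 hole, 200-299 data; i_size = 300 */
        struct extent map[] = { {0, 100, 1}, {100, 100, 0}, {200, 100, 1} };
        long off = 50;

        if (!seek_data_hole(map, 3, 300, &off, SEEK_HOLE))
            printf("next hole at %ld\n", off);   /* 100 */
        off = 150;
        if (!seek_data_hole(map, 3, 300, &off, SEEK_DATA))
            printf("next data at %ld\n", off);   /* 200 */
        return 0;
    }
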
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index e79d41c2c909..67ea57d2fd59 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h | |||
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, | |||
53 | int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 53 | int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
54 | u64 map_start, u64 map_len); | 54 | u64 map_start, u64 map_len); |
55 | 55 | ||
56 | int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin); | ||
57 | |||
56 | int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, | 58 | int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, |
57 | u32 *p_cluster, u32 *num_clusters, | 59 | u32 *p_cluster, u32 *num_clusters, |
58 | struct ocfs2_extent_list *el, | 60 | struct ocfs2_extent_list *el, |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index de4ea1af041b..6e396683c3d4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, | |||
1950 | if (ret < 0) | 1950 | if (ret < 0) |
1951 | mlog_errno(ret); | 1951 | mlog_errno(ret); |
1952 | 1952 | ||
1953 | if (file->f_flags & O_SYNC) | ||
1954 | handle->h_sync = 1; | ||
1955 | |||
1953 | ocfs2_commit_trans(osb, handle); | 1956 | ocfs2_commit_trans(osb, handle); |
1954 | 1957 | ||
1955 | out_inode_unlock: | 1958 | out_inode_unlock: |
@@ -2052,6 +2055,23 @@ out: | |||
2052 | return ret; | 2055 | return ret; |
2053 | } | 2056 | } |
2054 | 2057 | ||
2058 | static void ocfs2_aiodio_wait(struct inode *inode) | ||
2059 | { | ||
2060 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
2061 | |||
2062 | wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0)); | ||
2063 | } | ||
2064 | |||
2065 | static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) | ||
2066 | { | ||
2067 | int blockmask = inode->i_sb->s_blocksize - 1; | ||
2068 | loff_t final_size = pos + count; | ||
2069 | |||
2070 | if ((pos & blockmask) || (final_size & blockmask)) | ||
2071 | return 1; | ||
2072 | return 0; | ||
2073 | } | ||
2074 | |||
2055 | static int ocfs2_prepare_inode_for_refcount(struct inode *inode, | 2075 | static int ocfs2_prepare_inode_for_refcount(struct inode *inode, |
2056 | struct file *file, | 2076 | struct file *file, |
2057 | loff_t pos, size_t count, | 2077 | loff_t pos, size_t count, |
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
2230 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2250 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
2231 | int full_coherency = !(osb->s_mount_opt & | 2251 | int full_coherency = !(osb->s_mount_opt & |
2232 | OCFS2_MOUNT_COHERENCY_BUFFERED); | 2252 | OCFS2_MOUNT_COHERENCY_BUFFERED); |
2253 | int unaligned_dio = 0; | ||
2233 | 2254 | ||
2234 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, | 2255 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, |
2235 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 2256 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
@@ -2297,6 +2318,10 @@ relock: | |||
2297 | goto out; | 2318 | goto out; |
2298 | } | 2319 | } |
2299 | 2320 | ||
2321 | if (direct_io && !is_sync_kiocb(iocb)) | ||
2322 | unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left, | ||
2323 | *ppos); | ||
2324 | |||
2300 | /* | 2325 | /* |
2301 | * We can't complete the direct I/O as requested, fall back to | 2326 | * We can't complete the direct I/O as requested, fall back to |
2302 | * buffered I/O. | 2327 | * buffered I/O. |
@@ -2311,6 +2336,18 @@ relock: | |||
2311 | goto relock; | 2336 | goto relock; |
2312 | } | 2337 | } |
2313 | 2338 | ||
2339 | if (unaligned_dio) { | ||
2340 | /* | ||
2341 | * Wait on previous unaligned aio to complete before | ||
2342 | * proceeding. | ||
2343 | */ | ||
2344 | ocfs2_aiodio_wait(inode); | ||
2345 | |||
2346 | /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */ | ||
2347 | atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio); | ||
2348 | ocfs2_iocb_set_unaligned_aio(iocb); | ||
2349 | } | ||
2350 | |||
2314 | /* | 2351 | /* |
2315 | * To later detect whether a journal commit for sync writes is | 2352 | * To later detect whether a journal commit for sync writes is |
2316 | * necessary, we sample i_size, and cluster count here. | 2353 | * necessary, we sample i_size, and cluster count here. |
@@ -2382,8 +2419,12 @@ out_dio: | |||
2382 | if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { | 2419 | if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { |
2383 | rw_level = -1; | 2420 | rw_level = -1; |
2384 | have_alloc_sem = 0; | 2421 | have_alloc_sem = 0; |
2422 | unaligned_dio = 0; | ||
2385 | } | 2423 | } |
2386 | 2424 | ||
2425 | if (unaligned_dio) | ||
2426 | atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio); | ||
2427 | |||
2387 | out: | 2428 | out: |
2388 | if (rw_level != -1) | 2429 | if (rw_level != -1) |
2389 | ocfs2_rw_unlock(inode, rw_level); | 2430 | ocfs2_rw_unlock(inode, rw_level); |
@@ -2591,6 +2632,57 @@ bail: | |||
2591 | return ret; | 2632 | return ret; |
2592 | } | 2633 | } |
2593 | 2634 | ||
2635 | /* Refer generic_file_llseek_unlocked() */ | ||
2636 | static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin) | ||
2637 | { | ||
2638 | struct inode *inode = file->f_mapping->host; | ||
2639 | int ret = 0; | ||
2640 | |||
2641 | mutex_lock(&inode->i_mutex); | ||
2642 | |||
2643 | switch (origin) { | ||
2644 | case SEEK_SET: | ||
2645 | break; | ||
2646 | case SEEK_END: | ||
2647 | offset += inode->i_size; | ||
2648 | break; | ||
2649 | case SEEK_CUR: | ||
2650 | if (offset == 0) { | ||
2651 | offset = file->f_pos; | ||
2652 | goto out; | ||
2653 | } | ||
2654 | offset += file->f_pos; | ||
2655 | break; | ||
2656 | case SEEK_DATA: | ||
2657 | case SEEK_HOLE: | ||
2658 | ret = ocfs2_seek_data_hole_offset(file, &offset, origin); | ||
2659 | if (ret) | ||
2660 | goto out; | ||
2661 | break; | ||
2662 | default: | ||
2663 | ret = -EINVAL; | ||
2664 | goto out; | ||
2665 | } | ||
2666 | |||
2667 | if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) | ||
2668 | ret = -EINVAL; | ||
2669 | if (!ret && offset > inode->i_sb->s_maxbytes) | ||
2670 | ret = -EINVAL; | ||
2671 | if (ret) | ||
2672 | goto out; | ||
2673 | |||
2674 | if (offset != file->f_pos) { | ||
2675 | file->f_pos = offset; | ||
2676 | file->f_version = 0; | ||
2677 | } | ||
2678 | |||
2679 | out: | ||
2680 | mutex_unlock(&inode->i_mutex); | ||
2681 | if (ret) | ||
2682 | return ret; | ||
2683 | return offset; | ||
2684 | } | ||
2685 | |||
2594 | const struct inode_operations ocfs2_file_iops = { | 2686 | const struct inode_operations ocfs2_file_iops = { |
2595 | .setattr = ocfs2_setattr, | 2687 | .setattr = ocfs2_setattr, |
2596 | .getattr = ocfs2_getattr, | 2688 | .getattr = ocfs2_getattr, |
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = { | |||
2615 | * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! | 2707 | * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! |
2616 | */ | 2708 | */ |
2617 | const struct file_operations ocfs2_fops = { | 2709 | const struct file_operations ocfs2_fops = { |
2618 | .llseek = generic_file_llseek, | 2710 | .llseek = ocfs2_file_llseek, |
2619 | .read = do_sync_read, | 2711 | .read = do_sync_read, |
2620 | .write = do_sync_write, | 2712 | .write = do_sync_write, |
2621 | .mmap = ocfs2_mmap, | 2713 | .mmap = ocfs2_mmap, |
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = { | |||
2663 | * the cluster. | 2755 | * the cluster. |
2664 | */ | 2756 | */ |
2665 | const struct file_operations ocfs2_fops_no_plocks = { | 2757 | const struct file_operations ocfs2_fops_no_plocks = { |
2666 | .llseek = generic_file_llseek, | 2758 | .llseek = ocfs2_file_llseek, |
2667 | .read = do_sync_read, | 2759 | .read = do_sync_read, |
2668 | .write = do_sync_write, | 2760 | .write = do_sync_write, |
2669 | .mmap = ocfs2_mmap, | 2761 | .mmap = ocfs2_mmap, |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index a22d2c098890..17454a904d7b 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode, | |||
951 | trace_ocfs2_cleanup_delete_inode( | 951 | trace_ocfs2_cleanup_delete_inode( |
952 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); | 952 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); |
953 | if (sync_data) | 953 | if (sync_data) |
954 | write_inode_now(inode, 1); | 954 | filemap_write_and_wait(inode->i_mapping); |
955 | truncate_inode_pages(&inode->i_data, 0); | 955 | truncate_inode_pages(&inode->i_data, 0); |
956 | } | 956 | } |
957 | 957 | ||
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 1c508b149b3a..88924a3133fa 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h | |||
@@ -43,6 +43,9 @@ struct ocfs2_inode_info | |||
43 | /* protects extended attribute changes on this inode */ | 43 | /* protects extended attribute changes on this inode */ |
44 | struct rw_semaphore ip_xattr_sem; | 44 | struct rw_semaphore ip_xattr_sem; |
45 | 45 | ||
46 | /* Number of outstanding AIO's which are not page aligned */ | ||
47 | atomic_t ip_unaligned_aio; | ||
48 | |||
46 | /* These fields are protected by ip_lock */ | 49 | /* These fields are protected by ip_lock */ |
47 | spinlock_t ip_lock; | 50 | spinlock_t ip_lock; |
48 | u32 ip_open_count; | 51 | u32 ip_open_count; |
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index bc91072b7219..726ff265b296 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, | |||
122 | if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & | 122 | if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & |
123 | (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { | 123 | (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { |
124 | if (!capable(CAP_LINUX_IMMUTABLE)) | 124 | if (!capable(CAP_LINUX_IMMUTABLE)) |
125 | goto bail_unlock; | 125 | goto bail_commit; |
126 | } | 126 | } |
127 | 127 | ||
128 | ocfs2_inode->ip_attr = flags; | 128 | ocfs2_inode->ip_attr = flags; |
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, | |||
132 | if (status < 0) | 132 | if (status < 0) |
133 | mlog_errno(status); | 133 | mlog_errno(status); |
134 | 134 | ||
135 | bail_commit: | ||
135 | ocfs2_commit_trans(osb, handle); | 136 | ocfs2_commit_trans(osb, handle); |
136 | bail_unlock: | 137 | bail_unlock: |
137 | ocfs2_inode_unlock(inode, 1); | 138 | ocfs2_inode_unlock(inode, 1); |
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode, | |||
381 | if (!oifi) { | 382 | if (!oifi) { |
382 | status = -ENOMEM; | 383 | status = -ENOMEM; |
383 | mlog_errno(status); | 384 | mlog_errno(status); |
384 | goto bail; | 385 | goto out_err; |
385 | } | 386 | } |
386 | 387 | ||
387 | if (o2info_from_user(*oifi, req)) | 388 | if (o2info_from_user(*oifi, req)) |
@@ -431,7 +432,7 @@ bail: | |||
431 | o2info_set_request_error(&oifi->ifi_req, req); | 432 | o2info_set_request_error(&oifi->ifi_req, req); |
432 | 433 | ||
433 | kfree(oifi); | 434 | kfree(oifi); |
434 | 435 | out_err: | |
435 | return status; | 436 | return status; |
436 | } | 437 | } |
437 | 438 | ||
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode, | |||
666 | if (!oiff) { | 667 | if (!oiff) { |
667 | status = -ENOMEM; | 668 | status = -ENOMEM; |
668 | mlog_errno(status); | 669 | mlog_errno(status); |
669 | goto bail; | 670 | goto out_err; |
670 | } | 671 | } |
671 | 672 | ||
672 | if (o2info_from_user(*oiff, req)) | 673 | if (o2info_from_user(*oiff, req)) |
@@ -716,7 +717,7 @@ bail: | |||
716 | o2info_set_request_error(&oiff->iff_req, req); | 717 | o2info_set_request_error(&oiff->iff_req, req); |
717 | 718 | ||
718 | kfree(oiff); | 719 | kfree(oiff); |
719 | 720 | out_err: | |
720 | return status; | 721 | return status; |
721 | } | 722 | } |
722 | 723 | ||
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 295d56454e8b..0a42ae96dca7 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1544 | /* we need to run complete recovery for offline orphan slots */ | 1544 | /* we need to run complete recovery for offline orphan slots */ |
1545 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); | 1545 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); |
1546 | 1546 | ||
1547 | mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", | 1547 | printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\ |
1548 | node_num, slot_num, | 1548 | "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), |
1549 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); | 1549 | MINOR(osb->sb->s_dev)); |
1550 | 1550 | ||
1551 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); | 1551 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); |
1552 | 1552 | ||
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1601 | 1601 | ||
1602 | jbd2_journal_destroy(journal); | 1602 | jbd2_journal_destroy(journal); |
1603 | 1603 | ||
1604 | printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\ | ||
1605 | "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), | ||
1606 | MINOR(osb->sb->s_dev)); | ||
1604 | done: | 1607 | done: |
1605 | /* drop the lock on this nodes journal */ | 1608 | /* drop the lock on this nodes journal */ |
1606 | if (got_lock) | 1609 | if (got_lock) |
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void) | |||
1808 | * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This | 1811 | * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This |
1809 | * is done to catch any orphans that are left over in orphan directories. | 1812 | * is done to catch any orphans that are left over in orphan directories. |
1810 | * | 1813 | * |
1814 | * It scans all slots, even ones that are in use. It does so to handle the | ||
1815 | * case described below: | ||
1816 | * | ||
1817 | * Node 1 has an inode it was using. The dentry went away due to memory | ||
1818 | * pressure. Node 1 closes the inode, but it's on the free list. The node | ||
1819 | * has the open lock. | ||
1820 | * Node 2 unlinks the inode. It grabs the dentry lock to notify others, | ||
1821 | * but node 1 has no dentry and doesn't get the message. It trylocks the | ||
1822 | * open lock, sees that another node has a PR, and does nothing. | ||
1823 | * Later node 2 runs its orphan dir. It igets the inode, trylocks the | ||
1824 | * open lock, sees the PR still, and does nothing. | ||
1825 | * Basically, we have to trigger an orphan iput on node 1. The only way | ||
1826 | * for this to happen is if node 1 runs node 2's orphan dir. | ||
1827 | * | ||
1811 | * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT | 1828 | * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT |
1812 | * seconds. It gets an EX lock on os_lockres and checks sequence number | 1829 | * seconds. It gets an EX lock on os_lockres and checks sequence number |
1813 | * stored in LVB. If the sequence number has changed, it means some other | 1830 | * stored in LVB. If the sequence number has changed, it means some other |
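
The comment added to fs/ocfs2/journal.c above describes a periodic scan that deliberately walks every slot, including slots owned by other nodes, so that a stale open-lock PR on another node eventually triggers the orphan iput. The fragment below is only a minimal sketch of that "queue work, walk every slot, re-arm" shape; all names (sketch_*, SKETCH_SCAN_TIMEOUT) are invented for illustration and this is not the ocfs2_queue_orphan_scan() implementation.

/* Minimal sketch of the periodic all-slot scan described above; every name
 * here is invented purely for illustration. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define SKETCH_SCAN_TIMEOUT	(120 * HZ)	/* assumed re-arm interval */

static struct delayed_work sketch_scan_work;
static unsigned int sketch_num_slots = 4;	/* assumed slot count */

static void sketch_scan_slot(unsigned int slot)
{
	/* Stand-in for scanning one slot's orphan directory. */
	pr_info("orphan-scan sketch: slot %u\n", slot);
}

static void sketch_scan_worker(struct work_struct *work)
{
	unsigned int slot;

	/* Walk every slot, in use or not, as the comment above explains. */
	for (slot = 0; slot < sketch_num_slots; slot++)
		sketch_scan_slot(slot);

	/* Re-arm for the next interval. */
	schedule_delayed_work(&sketch_scan_work, SKETCH_SCAN_TIMEOUT);
}

static void sketch_scan_start(void)
{
	INIT_DELAYED_WORK(&sketch_scan_work, sketch_scan_worker);
	schedule_delayed_work(&sketch_scan_work, SKETCH_SCAN_TIMEOUT);
}
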
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 68cf2f6d3c6a..a3385b63ff5e 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir, | |||
441 | #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) | 441 | #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) |
442 | 442 | ||
443 | /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota | 443 | /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota |
444 | * update on dir + index leaf + dx root update for free list */ | 444 | * update on dir + index leaf + dx root update for free list + |
445 | * previous dirblock update in the free list */ | ||
445 | static inline int ocfs2_link_credits(struct super_block *sb) | 446 | static inline int ocfs2_link_credits(struct super_block *sb) |
446 | { | 447 | { |
447 | return 2*OCFS2_INODE_UPDATE_CREDITS + 3 + | 448 | return 2*OCFS2_INODE_UPDATE_CREDITS + 4 + |
448 | ocfs2_quota_trans_credits(sb); | 449 | ocfs2_quota_trans_credits(sb); |
449 | } | 450 | } |
450 | 451 | ||
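
Spelling out the arithmetic behind the bumped constant in the journal.h hunk above: the comment enumerates the single-block updates plus the quota credits, and the extra +1 (3 becoming 4) is the previous dirblock on the free list. The helper below is purely illustrative; link_credits_breakdown() is not part of ocfs2, it just names each term of the same sum and assumes it sits in the same header so OCFS2_INODE_UPDATE_CREDITS and ocfs2_quota_trans_credits() are in scope.

/* Illustrative restatement of ocfs2_link_credits(); this helper does not
 * exist in ocfs2, it only names the terms of the sum above. */
static inline int link_credits_breakdown(struct super_block *sb)
{
	int inode_updates = 2 * OCFS2_INODE_UPDATE_CREDITS; /* file + dir inode */
	int dir_entry_blk = 1;	/* dir entry block */
	int index_leaf    = 1;	/* index leaf */
	int dx_root       = 1;	/* dx root update for the free list */
	int prev_dirblock = 1;	/* previous dirblock in the free list (the new +1) */

	return inode_updates + dir_entry_blk + index_leaf + dx_root +
	       prev_dirblock + ocfs2_quota_trans_credits(sb);
}
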
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 3e9393ca39eb..9cd41083e991 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) | |||
61 | static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | 61 | static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, |
62 | struct page *page) | 62 | struct page *page) |
63 | { | 63 | { |
64 | int ret; | 64 | int ret = VM_FAULT_NOPAGE; |
65 | struct inode *inode = file->f_path.dentry->d_inode; | 65 | struct inode *inode = file->f_path.dentry->d_inode; |
66 | struct address_space *mapping = inode->i_mapping; | 66 | struct address_space *mapping = inode->i_mapping; |
67 | loff_t pos = page_offset(page); | 67 | loff_t pos = page_offset(page); |
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | |||
71 | void *fsdata; | 71 | void *fsdata; |
72 | loff_t size = i_size_read(inode); | 72 | loff_t size = i_size_read(inode); |
73 | 73 | ||
74 | /* | ||
75 | * Another node might have truncated while we were waiting on | ||
76 | * cluster locks. | ||
77 | * We don't check size == 0 before the shift. This is borrowed | ||
78 | * from do_generic_file_read. | ||
79 | */ | ||
80 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; | 74 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; |
81 | if (unlikely(!size || page->index > last_index)) { | ||
82 | ret = -EINVAL; | ||
83 | goto out; | ||
84 | } | ||
85 | 75 | ||
86 | /* | 76 | /* |
87 | * The i_size check above doesn't catch the case where nodes | 77 | * There are cases that lead to the page no longer belonging to the |
88 | * truncated and then re-extended the file. We'll re-check the | 78 | * mapping. |
89 | * page mapping after taking the page lock inside of | 79 | * 1) pagecache truncates locally due to memory pressure. |
90 | * ocfs2_write_begin_nolock(). | 80 | * 2) pagecache truncates when another node is taking an EX lock against |
81 | * the inode lock; see ocfs2_data_convert_worker. | ||
82 | * | ||
83 | * The i_size check doesn't catch the case where nodes truncated and | ||
84 | * then re-extended the file. We'll re-check the page mapping after | ||
85 | * taking the page lock inside of ocfs2_write_begin_nolock(). | ||
86 | * | ||
87 | * Let the VM retry in these cases. | ||
91 | */ | 88 | */ |
92 | if (!PageUptodate(page) || page->mapping != inode->i_mapping) { | 89 | if ((page->mapping != inode->i_mapping) || |
93 | /* | 90 | (!PageUptodate(page)) || |
94 | * the page has been umapped in ocfs2_data_downconvert_worker. | 91 | (page_offset(page) >= size)) |
95 | * So return 0 here and let VFS retry. | ||
96 | */ | ||
97 | ret = 0; | ||
98 | goto out; | 92 | goto out; |
99 | } | ||
100 | 93 | ||
101 | /* | 94 | /* |
102 | * Call ocfs2_write_begin() and ocfs2_write_end() to take | 95 | * Call ocfs2_write_begin() and ocfs2_write_end() to take |
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | |||
116 | if (ret) { | 109 | if (ret) { |
117 | if (ret != -ENOSPC) | 110 | if (ret != -ENOSPC) |
118 | mlog_errno(ret); | 111 | mlog_errno(ret); |
112 | if (ret == -ENOMEM) | ||
113 | ret = VM_FAULT_OOM; | ||
114 | else | ||
115 | ret = VM_FAULT_SIGBUS; | ||
119 | goto out; | 116 | goto out; |
120 | } | 117 | } |
121 | 118 | ||
122 | ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, | 119 | if (!locked_page) { |
123 | fsdata); | 120 | ret = VM_FAULT_NOPAGE; |
124 | if (ret < 0) { | ||
125 | mlog_errno(ret); | ||
126 | goto out; | 121 | goto out; |
127 | } | 122 | } |
123 | ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, | ||
124 | fsdata); | ||
128 | BUG_ON(ret != len); | 125 | BUG_ON(ret != len); |
129 | ret = 0; | 126 | ret = VM_FAULT_LOCKED; |
130 | out: | 127 | out: |
131 | return ret; | 128 | return ret; |
132 | } | 129 | } |
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
168 | 165 | ||
169 | out: | 166 | out: |
170 | ocfs2_unblock_signals(&oldset); | 167 | ocfs2_unblock_signals(&oldset); |
171 | if (ret) | ||
172 | ret = VM_FAULT_SIGBUS; | ||
173 | return ret; | 168 | return ret; |
174 | } | 169 | } |
175 | 170 | ||
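
The mmap.c hunks above switch __ocfs2_page_mkwrite() from the old 0/-errno convention to returning VM_FAULT_* codes directly, and drop the blanket VM_FAULT_SIGBUS translation in the caller. Below is a condensed sketch of that convention for a generic .page_mkwrite handler; sketch_prepare_write()/sketch_commit_write() are hypothetical stand-ins for a filesystem's write_begin/write_end path, not ocfs2 functions.

/* Sketch of the VM_FAULT_* return convention adopted above; the two
 * sketch_* helpers are hypothetical placeholders. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int sketch_prepare_write(struct inode *inode, struct page *page)
{
	return 0;	/* stand-in: a real fs would allocate/prepare blocks here */
}

static void sketch_commit_write(struct inode *inode, struct page *page)
{
	/* stand-in: a real fs would mark the page/buffers dirty here */
}

static int sketch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int err;

	/* Page fell out of the mapping (truncate or cluster downconvert),
	 * or lies beyond i_size: let the VM retry the fault. */
	if (page->mapping != inode->i_mapping || !PageUptodate(page) ||
	    page_offset(page) >= i_size_read(inode))
		return VM_FAULT_NOPAGE;

	err = sketch_prepare_write(inode, page);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err)
		return VM_FAULT_SIGBUS;

	sketch_commit_write(inode, page);

	/* Keep the page locked; the VM drops the lock after wiring the PTE. */
	return VM_FAULT_LOCKED;
}
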
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index d53cb706f14c..184c76b8c293 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, | |||
745 | */ | 745 | */ |
746 | ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, | 746 | ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, |
747 | new_phys_cpos); | 747 | new_phys_cpos); |
748 | if (!new_phys_cpos) { | 748 | if (!*new_phys_cpos) { |
749 | ret = -ENOSPC; | 749 | ret = -ENOSPC; |
750 | goto out_commit; | 750 | goto out_commit; |
751 | } | 751 | } |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 409285854f64..d355e6e36b36 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb, | |||
836 | 836 | ||
837 | static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) | 837 | static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) |
838 | { | 838 | { |
839 | __test_and_set_bit_le(bit, bitmap); | 839 | __set_bit_le(bit, bitmap); |
840 | } | 840 | } |
841 | #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) | 841 | #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) |
842 | 842 | ||
843 | static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) | 843 | static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) |
844 | { | 844 | { |
845 | __test_and_clear_bit_le(bit, bitmap); | 845 | __clear_bit_le(bit, bitmap); |
846 | } | 846 | } |
847 | #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) | 847 | #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) |
848 | 848 | ||
849 | #define ocfs2_test_bit test_bit_le | 849 | #define ocfs2_test_bit test_bit_le |
850 | #define ocfs2_find_next_zero_bit find_next_zero_bit_le | 850 | #define ocfs2_find_next_zero_bit find_next_zero_bit_le |
851 | #define ocfs2_find_next_bit find_next_bit_le | 851 | #define ocfs2_find_next_bit find_next_bit_le |
852 | |||
853 | static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr) | ||
854 | { | ||
855 | #if BITS_PER_LONG == 64 | ||
856 | *bit += ((unsigned long) addr & 7UL) << 3; | ||
857 | addr = (void *) ((unsigned long) addr & ~7UL); | ||
858 | #elif BITS_PER_LONG == 32 | ||
859 | *bit += ((unsigned long) addr & 3UL) << 3; | ||
860 | addr = (void *) ((unsigned long) addr & ~3UL); | ||
861 | #else | ||
862 | #error "how many bits you are?!" | ||
863 | #endif | ||
864 | return addr; | ||
865 | } | ||
866 | |||
867 | static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap) | ||
868 | { | ||
869 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
870 | ocfs2_set_bit(bit, bitmap); | ||
871 | } | ||
872 | |||
873 | static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap) | ||
874 | { | ||
875 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
876 | ocfs2_clear_bit(bit, bitmap); | ||
877 | } | ||
878 | |||
879 | static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap) | ||
880 | { | ||
881 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
882 | return ocfs2_test_bit(bit, bitmap); | ||
883 | } | ||
884 | |||
885 | static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max, | ||
886 | int start) | ||
887 | { | ||
888 | int fix = 0, ret, tmpmax; | ||
889 | bitmap = correct_addr_and_bit_unaligned(&fix, bitmap); | ||
890 | tmpmax = max + fix; | ||
891 | start += fix; | ||
892 | |||
893 | ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix; | ||
894 | if (ret > max) | ||
895 | return max; | ||
896 | return ret; | ||
897 | } | ||
898 | |||
852 | #endif /* OCFS2_H */ | 899 | #endif /* OCFS2_H */ |
853 | 900 | ||
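
The *_unaligned helpers added to ocfs2.h above exist because the long-sized little-endian bitops must not be handed a pointer that is not long-aligned; correct_addr_and_bit_unaligned() rounds the address down to the previous long boundary and folds the skipped bytes into the bit index. A small worked example follows; demo_unaligned_bit() and the 6-byte offset are invented purely for illustration, while the real user is the dqc_bitmap of the local quota chunks, as the quota_local.c hunks below show.

/* Worked example of the address/bit folding above; the helper name and
 * the 6-byte offset are chosen only for illustration. */
static inline int demo_unaligned_bit(void)
{
	unsigned long storage[2] = { 0, 0 };	/* naturally long-aligned buffer */
	void *bitmap = (char *)storage + 6;	/* bitmap starts 6 bytes in */
	int bit = 5;				/* bit 5 of that bitmap */

	/* On a 64-bit build: 6 & 7 == 6, so bit becomes 6 * 8 + 5 = 53 and
	 * bitmap is rounded back down to &storage[0]. */
	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);

	ocfs2_set_bit(bit, bitmap);		/* sets what was bit 5 at +6 */
	return ocfs2_test_bit(bit, bitmap);	/* reads back 1 */
}
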
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index dc8007fc9247..f100bf70a906 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( | |||
404 | int status = 0; | 404 | int status = 0; |
405 | struct ocfs2_quota_recovery *rec; | 405 | struct ocfs2_quota_recovery *rec; |
406 | 406 | ||
407 | mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); | 407 | printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for " |
408 | "slot %u\n", osb->dev_str, slot_num); | ||
409 | |||
408 | rec = ocfs2_alloc_quota_recovery(); | 410 | rec = ocfs2_alloc_quota_recovery(); |
409 | if (!rec) | 411 | if (!rec) |
410 | return ERR_PTR(-ENOMEM); | 412 | return ERR_PTR(-ENOMEM); |
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, | |||
549 | goto out_commit; | 551 | goto out_commit; |
550 | } | 552 | } |
551 | lock_buffer(qbh); | 553 | lock_buffer(qbh); |
552 | WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap)); | 554 | WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap)); |
553 | ocfs2_clear_bit(bit, dchunk->dqc_bitmap); | 555 | ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap); |
554 | le32_add_cpu(&dchunk->dqc_free, 1); | 556 | le32_add_cpu(&dchunk->dqc_free, 1); |
555 | unlock_buffer(qbh); | 557 | unlock_buffer(qbh); |
556 | ocfs2_journal_dirty(handle, qbh); | 558 | ocfs2_journal_dirty(handle, qbh); |
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
596 | struct inode *lqinode; | 598 | struct inode *lqinode; |
597 | unsigned int flags; | 599 | unsigned int flags; |
598 | 600 | ||
599 | mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); | 601 | printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " |
602 | "slot %u\n", osb->dev_str, slot_num); | ||
603 | |||
600 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | 604 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
601 | for (type = 0; type < MAXQUOTAS; type++) { | 605 | for (type = 0; type < MAXQUOTAS; type++) { |
602 | if (list_empty(&(rec->r_list[type]))) | 606 | if (list_empty(&(rec->r_list[type]))) |
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
612 | /* Someone else is holding the lock? Then he must be | 616 | /* Someone else is holding the lock? Then he must be |
613 | * doing the recovery. Just skip the file... */ | 617 | * doing the recovery. Just skip the file... */ |
614 | if (status == -EAGAIN) { | 618 | if (status == -EAGAIN) { |
615 | mlog(ML_NOTICE, "skipping quota recovery for slot %d " | 619 | printk(KERN_NOTICE "ocfs2: Skipping quota recovery on " |
616 | "because quota file is locked.\n", slot_num); | 620 | "device (%s) for slot %d because quota file is " |
621 | "locked.\n", osb->dev_str, slot_num); | ||
617 | status = 0; | 622 | status = 0; |
618 | goto out_put; | 623 | goto out_put; |
619 | } else if (status < 0) { | 624 | } else if (status < 0) { |
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb, | |||
944 | * ol_quota_entries_per_block(sb); | 949 | * ol_quota_entries_per_block(sb); |
945 | } | 950 | } |
946 | 951 | ||
947 | found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0); | 952 | found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0); |
948 | /* We failed? */ | 953 | /* We failed? */ |
949 | if (found == len) { | 954 | if (found == len) { |
950 | mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" | 955 | mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" |
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private) | |||
1208 | struct ocfs2_local_disk_chunk *dchunk; | 1213 | struct ocfs2_local_disk_chunk *dchunk; |
1209 | 1214 | ||
1210 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; | 1215 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; |
1211 | ocfs2_set_bit(*offset, dchunk->dqc_bitmap); | 1216 | ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap); |
1212 | le32_add_cpu(&dchunk->dqc_free, -1); | 1217 | le32_add_cpu(&dchunk->dqc_free, -1); |
1213 | } | 1218 | } |
1214 | 1219 | ||
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) | |||
1289 | (od->dq_chunk->qc_headerbh->b_data); | 1294 | (od->dq_chunk->qc_headerbh->b_data); |
1290 | /* Mark structure as freed */ | 1295 | /* Mark structure as freed */ |
1291 | lock_buffer(od->dq_chunk->qc_headerbh); | 1296 | lock_buffer(od->dq_chunk->qc_headerbh); |
1292 | ocfs2_clear_bit(offset, dchunk->dqc_bitmap); | 1297 | ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap); |
1293 | le32_add_cpu(&dchunk->dqc_free, 1); | 1298 | le32_add_cpu(&dchunk->dqc_free, 1); |
1294 | unlock_buffer(od->dq_chunk->qc_headerbh); | 1299 | unlock_buffer(od->dq_chunk->qc_headerbh); |
1295 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); | 1300 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); |
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 26fc0014d509..1424c151cccc 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c | |||
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb) | |||
493 | goto bail; | 493 | goto bail; |
494 | } | 494 | } |
495 | } else | 495 | } else |
496 | mlog(ML_NOTICE, "slot %d is already allocated to this node!\n", | 496 | printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " |
497 | slot); | 497 | "allocated to this node!\n", slot, osb->dev_str); |
498 | 498 | ||
499 | ocfs2_set_slot(si, slot, osb->node_num); | 499 | ocfs2_set_slot(si, slot, osb->node_num); |
500 | osb->slot_num = slot; | 500 | osb->slot_num = slot; |
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 19965b00c43c..94368017edb3 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "cluster/masklog.h" | 28 | #include "cluster/masklog.h" |
29 | #include "cluster/nodemanager.h" | 29 | #include "cluster/nodemanager.h" |
30 | #include "cluster/heartbeat.h" | 30 | #include "cluster/heartbeat.h" |
31 | #include "cluster/tcp.h" | ||
31 | 32 | ||
32 | #include "stackglue.h" | 33 | #include "stackglue.h" |
33 | 34 | ||
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb) | |||
256 | } | 257 | } |
257 | 258 | ||
258 | /* | 259 | /* |
260 | * Check if this node is heartbeating and is connected to all other | ||
261 | * heartbeating nodes. | ||
262 | */ | ||
263 | static int o2cb_cluster_check(void) | ||
264 | { | ||
265 | u8 node_num; | ||
266 | int i; | ||
267 | unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
268 | unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
269 | |||
270 | node_num = o2nm_this_node(); | ||
271 | if (node_num == O2NM_MAX_NODES) { | ||
272 | printk(KERN_ERR "o2cb: This node has not been configured.\n"); | ||
273 | return -EINVAL; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * o2dlm expects o2net sockets to be created. If not, then | ||
278 | * dlm_join_domain() fails with a stack of errors which are both cryptic | ||
279 | * and incomplete. The idea here is to detect upfront whether we have | ||
280 | * managed to connect to all nodes or not. If not, then list the nodes | ||
281 | * to allow the user to check the configuration (incorrect IP, firewall, | ||
282 | * etc.). Yes, this is racy. But it's not the end of the world. | ||
283 | */ | ||
284 | #define O2CB_MAP_STABILIZE_COUNT 60 | ||
285 | for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) { | ||
286 | o2hb_fill_node_map(hbmap, sizeof(hbmap)); | ||
287 | if (!test_bit(node_num, hbmap)) { | ||
288 | printk(KERN_ERR "o2cb: %s heartbeat has not been " | ||
289 | "started.\n", (o2hb_global_heartbeat_active() ? | ||
290 | "Global" : "Local")); | ||
291 | return -EINVAL; | ||
292 | } | ||
293 | o2net_fill_node_map(netmap, sizeof(netmap)); | ||
294 | /* Force set the current node to allow easy compare */ | ||
295 | set_bit(node_num, netmap); | ||
296 | if (!memcmp(hbmap, netmap, sizeof(hbmap))) | ||
297 | return 0; | ||
298 | if (i < O2CB_MAP_STABILIZE_COUNT) | ||
299 | msleep(1000); | ||
300 | } | ||
301 | |||
302 | printk(KERN_ERR "o2cb: This node could not connect to nodes:"); | ||
303 | i = -1; | ||
304 | while ((i = find_next_bit(hbmap, O2NM_MAX_NODES, | ||
305 | i + 1)) < O2NM_MAX_NODES) { | ||
306 | if (!test_bit(i, netmap)) | ||
307 | printk(" %u", i); | ||
308 | } | ||
309 | printk(".\n"); | ||
310 | |||
311 | return -ENOTCONN; | ||
312 | } | ||
313 | |||
314 | /* | ||
259 | * Called from the dlm when it's about to evict a node. This is how the | 315 | * Called from the dlm when it's about to evict a node. This is how the |
260 | * classic stack signals node death. | 316 | * classic stack signals node death. |
261 | */ | 317 | */ |
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data) | |||
263 | { | 319 | { |
264 | struct ocfs2_cluster_connection *conn = data; | 320 | struct ocfs2_cluster_connection *conn = data; |
265 | 321 | ||
266 | mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", | 322 | printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n", |
267 | node_num, conn->cc_namelen, conn->cc_name); | 323 | node_num, conn->cc_namelen, conn->cc_name); |
268 | 324 | ||
269 | conn->cc_recovery_handler(node_num, conn->cc_recovery_data); | 325 | conn->cc_recovery_handler(node_num, conn->cc_recovery_data); |
270 | } | 326 | } |
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) | |||
280 | BUG_ON(conn == NULL); | 336 | BUG_ON(conn == NULL); |
281 | BUG_ON(conn->cc_proto == NULL); | 337 | BUG_ON(conn->cc_proto == NULL); |
282 | 338 | ||
283 | /* for now we only have one cluster/node, make sure we see it | 339 | /* Ensure cluster stack is up and all nodes are connected */ |
284 | * in the heartbeat universe */ | 340 | rc = o2cb_cluster_check(); |
285 | if (!o2hb_check_local_node_heartbeating()) { | 341 | if (rc) { |
286 | if (o2hb_global_heartbeat_active()) | 342 | printk(KERN_ERR "o2cb: Cluster check failed. Fix errors " |
287 | mlog(ML_ERROR, "Global heartbeat not started\n"); | 343 | "before retrying.\n"); |
288 | rc = -EINVAL; | ||
289 | goto out; | 344 | goto out; |
290 | } | 345 | } |
291 | 346 | ||
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 56f61027236b..4994f8b0e604 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include "ocfs1_fs_compat.h" | 54 | #include "ocfs1_fs_compat.h" |
55 | 55 | ||
56 | #include "alloc.h" | 56 | #include "alloc.h" |
57 | #include "aops.h" | ||
57 | #include "blockcheck.h" | 58 | #include "blockcheck.h" |
58 | #include "dlmglue.h" | 59 | #include "dlmglue.h" |
59 | #include "export.h" | 60 | #include "export.h" |
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1107 | 1108 | ||
1108 | ocfs2_set_ro_flag(osb, 1); | 1109 | ocfs2_set_ro_flag(osb, 1); |
1109 | 1110 | ||
1110 | printk(KERN_NOTICE "Readonly device detected. No cluster " | 1111 | printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. " |
1111 | "services will be utilized for this mount. Recovery " | 1112 | "Cluster services will not be used for this mount. " |
1112 | "will be skipped.\n"); | 1113 | "Recovery will be skipped.\n", osb->dev_str); |
1113 | } | 1114 | } |
1114 | 1115 | ||
1115 | if (!ocfs2_is_hard_readonly(osb)) { | 1116 | if (!ocfs2_is_hard_readonly(osb)) { |
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1616 | return 0; | 1617 | return 0; |
1617 | } | 1618 | } |
1618 | 1619 | ||
1620 | wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
1621 | |||
1619 | static int __init ocfs2_init(void) | 1622 | static int __init ocfs2_init(void) |
1620 | { | 1623 | { |
1621 | int status; | 1624 | int status, i; |
1622 | 1625 | ||
1623 | ocfs2_print_version(); | 1626 | ocfs2_print_version(); |
1624 | 1627 | ||
1628 | for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++) | ||
1629 | init_waitqueue_head(&ocfs2__ioend_wq[i]); | ||
1630 | |||
1625 | status = init_ocfs2_uptodate_cache(); | 1631 | status = init_ocfs2_uptodate_cache(); |
1626 | if (status < 0) { | 1632 | if (status < 0) { |
1627 | mlog_errno(status); | 1633 | mlog_errno(status); |
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data) | |||
1760 | ocfs2_extent_map_init(&oi->vfs_inode); | 1766 | ocfs2_extent_map_init(&oi->vfs_inode); |
1761 | INIT_LIST_HEAD(&oi->ip_io_markers); | 1767 | INIT_LIST_HEAD(&oi->ip_io_markers); |
1762 | oi->ip_dir_start_lookup = 0; | 1768 | oi->ip_dir_start_lookup = 0; |
1763 | 1769 | atomic_set(&oi->ip_unaligned_aio, 0); | |
1764 | init_rwsem(&oi->ip_alloc_sem); | 1770 | init_rwsem(&oi->ip_alloc_sem); |
1765 | init_rwsem(&oi->ip_xattr_sem); | 1771 | init_rwsem(&oi->ip_xattr_sem); |
1766 | mutex_init(&oi->ip_io_mutex); | 1772 | mutex_init(&oi->ip_io_mutex); |
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) | |||
1974 | * If we failed before we got a uuid_str yet, we can't stop | 1980 | * If we failed before we got a uuid_str yet, we can't stop |
1975 | * heartbeat. Otherwise, do it. | 1981 | * heartbeat. Otherwise, do it. |
1976 | */ | 1982 | */ |
1977 | if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str) | 1983 | if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str && |
1984 | !ocfs2_is_hard_readonly(osb)) | ||
1978 | hangup_needed = 1; | 1985 | hangup_needed = 1; |
1979 | 1986 | ||
1980 | if (osb->cconn) | 1987 | if (osb->cconn) |
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2353 | mlog_errno(status); | 2360 | mlog_errno(status); |
2354 | goto bail; | 2361 | goto bail; |
2355 | } | 2362 | } |
2356 | cleancache_init_shared_fs((char *)&uuid_net_key, sb); | 2363 | cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb); |
2357 | 2364 | ||
2358 | bail: | 2365 | bail: |
2359 | return status; | 2366 | return status; |
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb) | |||
2462 | goto finally; | 2469 | goto finally; |
2463 | } | 2470 | } |
2464 | } else { | 2471 | } else { |
2465 | mlog(ML_NOTICE, "File system was not unmounted cleanly, " | 2472 | printk(KERN_NOTICE "ocfs2: File system on device (%s) was not " |
2466 | "recovering volume.\n"); | 2473 | "unmounted cleanly, recovering it.\n", osb->dev_str); |
2467 | } | 2474 | } |
2468 | 2475 | ||
2469 | local = ocfs2_mount_local(osb); | 2476 | local = ocfs2_mount_local(osb); |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 194fb22ef79d..aa9e8777b09a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode, | |||
2376 | } | 2376 | } |
2377 | 2377 | ||
2378 | ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); | 2378 | ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); |
2379 | if (ret < 0) { | ||
2380 | mlog_errno(ret); | ||
2381 | break; | ||
2382 | } | ||
2383 | 2379 | ||
2384 | ocfs2_commit_trans(osb, ctxt.handle); | 2380 | ocfs2_commit_trans(osb, ctxt.handle); |
2385 | if (ctxt.meta_ac) { | 2381 | if (ctxt.meta_ac) { |
2386 | ocfs2_free_alloc_context(ctxt.meta_ac); | 2382 | ocfs2_free_alloc_context(ctxt.meta_ac); |
2387 | ctxt.meta_ac = NULL; | 2383 | ctxt.meta_ac = NULL; |
2388 | } | 2384 | } |
2385 | |||
2386 | if (ret < 0) { | ||
2387 | mlog_errno(ret); | ||
2388 | break; | ||
2389 | } | ||
2390 | |||
2389 | } | 2391 | } |
2390 | 2392 | ||
2391 | if (ctxt.meta_ac) | 2393 | if (ctxt.meta_ac) |
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 2bd620f0d796..57bbf9078ac8 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi) | |||
167 | } | 167 | } |
168 | 168 | ||
169 | psinfo = psi; | 169 | psinfo = psi; |
170 | mutex_init(&psinfo->read_mutex); | ||
170 | spin_unlock(&pstore_lock); | 171 | spin_unlock(&pstore_lock); |
171 | 172 | ||
172 | if (owner && !try_module_get(owner)) { | 173 | if (owner && !try_module_get(owner)) { |
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register); | |||
195 | void pstore_get_records(int quiet) | 196 | void pstore_get_records(int quiet) |
196 | { | 197 | { |
197 | struct pstore_info *psi = psinfo; | 198 | struct pstore_info *psi = psinfo; |
199 | char *buf = NULL; | ||
198 | ssize_t size; | 200 | ssize_t size; |
199 | u64 id; | 201 | u64 id; |
200 | enum pstore_type_id type; | 202 | enum pstore_type_id type; |
201 | struct timespec time; | 203 | struct timespec time; |
202 | int failed = 0, rc; | 204 | int failed = 0, rc; |
203 | unsigned long flags; | ||
204 | 205 | ||
205 | if (!psi) | 206 | if (!psi) |
206 | return; | 207 | return; |
207 | 208 | ||
208 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 209 | mutex_lock(&psi->read_mutex); |
209 | rc = psi->open(psi); | 210 | rc = psi->open(psi); |
210 | if (rc) | 211 | if (rc) |
211 | goto out; | 212 | goto out; |
212 | 213 | ||
213 | while ((size = psi->read(&id, &type, &time, psi)) > 0) { | 214 | while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) { |
214 | rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size, | 215 | rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size, |
215 | time, psi); | 216 | time, psi); |
217 | kfree(buf); | ||
218 | buf = NULL; | ||
216 | if (rc && (rc != -EEXIST || !quiet)) | 219 | if (rc && (rc != -EEXIST || !quiet)) |
217 | failed++; | 220 | failed++; |
218 | } | 221 | } |
219 | psi->close(psi); | 222 | psi->close(psi); |
220 | out: | 223 | out: |
221 | spin_unlock_irqrestore(&psinfo->buf_lock, flags); | 224 | mutex_unlock(&psi->read_mutex); |
222 | 225 | ||
223 | if (failed) | 226 | if (failed) |
224 | printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", | 227 | printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", |
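
The pstore hunk above changes two things: record enumeration is now serialized by a per-backend read_mutex instead of buf_lock, and ->read() hands back a freshly allocated buffer through char **buf that pstore_get_records() kfree()s after pstore_mkfile(). Below is a sketch of a backend read method following that contract; struct dummy_record and the dummy_* names are invented for the example, and only the calling convention comes from the hunk.

/* Sketch of a backend ->read() under the allocate-and-return-buf contract
 * shown above; the dummy_* record store is invented for illustration. */
#include <linux/kernel.h>
#include <linux/pstore.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>

struct dummy_record {
	u64 id;
	enum pstore_type_id type;
	struct timespec time;
	size_t len;
	const char *data;
};

static struct dummy_record dummy_records[2];	/* filled in elsewhere */
static unsigned int dummy_pos;			/* reset by ->open() */

static ssize_t dummy_pstore_read(u64 *id, enum pstore_type_id *type,
				 struct timespec *time, char **buf,
				 struct pstore_info *psi)
{
	struct dummy_record *rec;

	if (dummy_pos >= ARRAY_SIZE(dummy_records))
		return 0;			/* no more records */

	rec = &dummy_records[dummy_pos++];

	/* The pstore core kfree()s this buffer after pstore_mkfile(). */
	*buf = kmemdup(rec->data, rec->len, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	*id = rec->id;
	*type = rec->type;
	*time = rec->time;
	return rec->len;			/* record size */
}
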
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index d30bedfeb7ef..ddd46db65b57 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h | |||
@@ -235,6 +235,8 @@ struct drm_mode_fb_cmd { | |||
235 | #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 | 235 | #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 |
236 | #define DRM_MODE_FB_DIRTY_FLAGS 0x03 | 236 | #define DRM_MODE_FB_DIRTY_FLAGS 0x03 |
237 | 237 | ||
238 | #define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 | ||
239 | |||
238 | /* | 240 | /* |
239 | * Mark a region of a framebuffer as dirty. | 241 | * Mark a region of a framebuffer as dirty. |
240 | * | 242 | * |
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 1d161cb3aca5..12050434d57a 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h | |||
@@ -32,17 +32,16 @@ | |||
32 | /** | 32 | /** |
33 | * User-desired buffer creation information structure. | 33 | * User-desired buffer creation information structure. |
34 | * | 34 | * |
35 | * @size: requested size for the object. | 35 | * @size: user-desired memory allocation size. |
36 | * - this size value would be page-aligned internally. | 36 | * - this size value would be page-aligned internally. |
37 | * @flags: user request for setting memory type or cache attributes. | 37 | * @flags: user request for setting memory type or cache attributes. |
38 | * @handle: returned handle for the object. | 38 | * @handle: returned handle to the created gem object. |
39 | * @pad: just padding to be 64-bit aligned. | 39 | * - this handle will be set by the gem module on the kernel side. |
40 | */ | 40 | */ |
41 | struct drm_exynos_gem_create { | 41 | struct drm_exynos_gem_create { |
42 | unsigned int size; | 42 | uint64_t size; |
43 | unsigned int flags; | 43 | unsigned int flags; |
44 | unsigned int handle; | 44 | unsigned int handle; |
45 | unsigned int pad; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | /** | 47 | /** |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index b65be6054a18..be94be6d6f17 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
@@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite { | |||
874 | 874 | ||
875 | #define RADEON_CHUNK_ID_RELOCS 0x01 | 875 | #define RADEON_CHUNK_ID_RELOCS 0x01 |
876 | #define RADEON_CHUNK_ID_IB 0x02 | 876 | #define RADEON_CHUNK_ID_IB 0x02 |
877 | #define RADEON_CHUNK_ID_FLAGS 0x03 | ||
878 | |||
879 | /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ | ||
880 | #define RADEON_CS_KEEP_TILING_FLAGS 0x01 | ||
877 | 881 | ||
878 | struct drm_radeon_cs_chunk { | 882 | struct drm_radeon_cs_chunk { |
879 | uint32_t chunk_id; | 883 | uint32_t chunk_id; |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index f88eacb111d4..7c05ac202d90 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -10,6 +10,12 @@ | |||
10 | #include "osdmap.h" | 10 | #include "osdmap.h" |
11 | #include "messenger.h" | 11 | #include "messenger.h" |
12 | 12 | ||
13 | /* | ||
14 | * Maximum object name size | ||
15 | * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100) | ||
16 | */ | ||
17 | #define MAX_OBJ_NAME_SIZE 100 | ||
18 | |||
13 | struct ceph_msg; | 19 | struct ceph_msg; |
14 | struct ceph_snap_context; | 20 | struct ceph_snap_context; |
15 | struct ceph_osd_request; | 21 | struct ceph_osd_request; |
@@ -75,7 +81,7 @@ struct ceph_osd_request { | |||
75 | struct inode *r_inode; /* for use by callbacks */ | 81 | struct inode *r_inode; /* for use by callbacks */ |
76 | void *r_priv; /* ditto */ | 82 | void *r_priv; /* ditto */ |
77 | 83 | ||
78 | char r_oid[40]; /* object name */ | 84 | char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */ |
79 | int r_oid_len; | 85 | int r_oid_len; |
80 | unsigned long r_stamp; /* send OR check time */ | 86 | unsigned long r_stamp; /* send OR check time */ |
81 | 87 | ||
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 139c4db55f17..c86c940d1de3 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | |||
156 | * @mult: cycle to nanosecond multiplier | 156 | * @mult: cycle to nanosecond multiplier |
157 | * @shift: cycle to nanosecond divisor (power of two) | 157 | * @shift: cycle to nanosecond divisor (power of two) |
158 | * @max_idle_ns: max idle time permitted by the clocksource (nsecs) | 158 | * @max_idle_ns: max idle time permitted by the clocksource (nsecs) |
159 | * @maxadj: maximum adjustment value to mult (~11%) | ||
159 | * @flags: flags describing special properties | 160 | * @flags: flags describing special properties |
160 | * @archdata: arch-specific data | 161 | * @archdata: arch-specific data |
161 | * @suspend: suspend function for the clocksource, if necessary | 162 | * @suspend: suspend function for the clocksource, if necessary |
@@ -172,7 +173,7 @@ struct clocksource { | |||
172 | u32 mult; | 173 | u32 mult; |
173 | u32 shift; | 174 | u32 shift; |
174 | u64 max_idle_ns; | 175 | u64 max_idle_ns; |
175 | 176 | u32 maxadj; | |
176 | #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA | 177 | #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA |
177 | struct arch_clocksource_data archdata; | 178 | struct arch_clocksource_data archdata; |
178 | #endif | 179 | #endif |
diff --git a/include/linux/device.h b/include/linux/device.h index 52b3a4111df9..3136ede5a1e1 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -69,7 +69,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | |||
69 | * @resume: Called to bring a device on this bus out of sleep mode. | 69 | * @resume: Called to bring a device on this bus out of sleep mode. |
70 | * @pm: Power management operations of this bus, callback the specific | 70 | * @pm: Power management operations of this bus, callback the specific |
71 | * device driver's pm-ops. | 71 | * device driver's pm-ops. |
72 | * @iommu_ops IOMMU specific operations for this bus, used to attach IOMMU | 72 | * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU |
73 | * driver implementations to a bus and allow the driver to do | 73 | * driver implementations to a bus and allow the driver to do |
74 | * bus-specific setup | 74 | * bus-specific setup |
75 | * @p: The private data of the driver core, only the driver core can | 75 | * @p: The private data of the driver core, only the driver core can |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a81bf6d23b3e..07d103a06d64 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *); | |||
432 | /* Internal numbers to terminate lists */ | 432 | /* Internal numbers to terminate lists */ |
433 | #define I2C_CLIENT_END 0xfffeU | 433 | #define I2C_CLIENT_END 0xfffeU |
434 | 434 | ||
435 | /* The numbers to use to set I2C bus address */ | ||
436 | #define ANY_I2C_BUS 0xffff | ||
437 | |||
438 | /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ | 435 | /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ |
439 | #define I2C_ADDRS(addr, addrs...) \ | 436 | #define I2C_ADDRS(addr, addrs...) \ |
440 | ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) | 437 | ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 08ffab01e76c..94b1e356c02a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -184,7 +184,6 @@ extern struct cred init_cred; | |||
184 | [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ | 184 | [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ |
185 | }, \ | 185 | }, \ |
186 | .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ | 186 | .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ |
187 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ | ||
188 | INIT_IDS \ | 187 | INIT_IDS \ |
189 | INIT_PERF_EVENTS(tsk) \ | 188 | INIT_PERF_EVENTS(tsk) \ |
190 | INIT_TRACE_IRQFLAGS \ | 189 | INIT_TRACE_IRQFLAGS \ |
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 82b4c8801a4f..8bf2cb9502dd 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h | |||
@@ -243,7 +243,8 @@ | |||
243 | 243 | ||
244 | 244 | ||
245 | /*Registers VDD1, VDD2 voltage values definitions */ | 245 | /*Registers VDD1, VDD2 voltage values definitions */ |
246 | #define VDD1_2_NUM_VOLTS 73 | 246 | #define VDD1_2_NUM_VOLT_FINE 73 |
247 | #define VDD1_2_NUM_VOLT_COARSE 3 | ||
247 | #define VDD1_2_MIN_VOLT 6000 | 248 | #define VDD1_2_MIN_VOLT 6000 |
248 | #define VDD1_2_OFFSET 125 | 249 | #define VDD1_2_OFFSET 125 |
249 | 250 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 21440e31fdab..eef257c76a40 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2552,6 +2552,8 @@ extern void net_disable_timestamp(void); | |||
2552 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | 2552 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); |
2553 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | 2553 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); |
2554 | extern void dev_seq_stop(struct seq_file *seq, void *v); | 2554 | extern void dev_seq_stop(struct seq_file *seq, void *v); |
2555 | extern int dev_seq_open_ops(struct inode *inode, struct file *file, | ||
2556 | const struct seq_operations *ops); | ||
2555 | #endif | 2557 | #endif |
2556 | 2558 | ||
2557 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 2559 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ab2c6343361a..92ecf5585fac 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations; | |||
410 | extern const struct inode_operations nfs3_file_inode_operations; | 410 | extern const struct inode_operations nfs3_file_inode_operations; |
411 | #endif /* CONFIG_NFS_V3 */ | 411 | #endif /* CONFIG_NFS_V3 */ |
412 | extern const struct file_operations nfs_file_operations; | 412 | extern const struct file_operations nfs_file_operations; |
413 | #ifdef CONFIG_NFS_V4 | ||
414 | extern const struct file_operations nfs4_file_operations; | ||
415 | #endif /* CONFIG_NFS_V4 */ | ||
413 | extern const struct address_space_operations nfs_file_aops; | 416 | extern const struct address_space_operations nfs_file_aops; |
414 | extern const struct address_space_operations nfs_dir_aops; | 417 | extern const struct address_space_operations nfs_dir_aops; |
415 | 418 | ||
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c74595ba7094..2a7c533be5dd 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -1192,6 +1192,7 @@ struct nfs_rpc_ops { | |||
1192 | const struct dentry_operations *dentry_ops; | 1192 | const struct dentry_operations *dentry_ops; |
1193 | const struct inode_operations *dir_inode_ops; | 1193 | const struct inode_operations *dir_inode_ops; |
1194 | const struct inode_operations *file_inode_ops; | 1194 | const struct inode_operations *file_inode_ops; |
1195 | const struct file_operations *file_ops; | ||
1195 | 1196 | ||
1196 | int (*getroot) (struct nfs_server *, struct nfs_fh *, | 1197 | int (*getroot) (struct nfs_server *, struct nfs_fh *, |
1197 | struct nfs_fsinfo *); | 1198 | struct nfs_fsinfo *); |
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index e3d0b3890249..7ef68724f0f0 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h | |||
@@ -12,7 +12,7 @@ struct pci_ats { | |||
12 | unsigned int is_enabled:1; /* Enable bit is set */ | 12 | unsigned int is_enabled:1; /* Enable bit is set */ |
13 | }; | 13 | }; |
14 | 14 | ||
15 | #ifdef CONFIG_PCI_IOV | 15 | #ifdef CONFIG_PCI_ATS |
16 | 16 | ||
17 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | 17 | extern int pci_enable_ats(struct pci_dev *dev, int ps); |
18 | extern void pci_disable_ats(struct pci_dev *dev); | 18 | extern void pci_disable_ats(struct pci_dev *dev); |
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
29 | return dev->ats && dev->ats->is_enabled; | 29 | return dev->ats && dev->ats->is_enabled; |
30 | } | 30 | } |
31 | 31 | ||
32 | #else /* CONFIG_PCI_IOV */ | 32 | #else /* CONFIG_PCI_ATS */ |
33 | 33 | ||
34 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | 34 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) |
35 | { | 35 | { |
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | 52 | ||
53 | #endif /* CONFIG_PCI_IOV */ | 53 | #endif /* CONFIG_PCI_ATS */ |
54 | 54 | ||
55 | #ifdef CONFIG_PCI_PRI | 55 | #ifdef CONFIG_PCI_PRI |
56 | 56 | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index 337df0d5d5f7..7cda65b5f798 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -338,7 +338,7 @@ struct pci_dev { | |||
338 | struct list_head msi_list; | 338 | struct list_head msi_list; |
339 | #endif | 339 | #endif |
340 | struct pci_vpd *vpd; | 340 | struct pci_vpd *vpd; |
341 | #ifdef CONFIG_PCI_IOV | 341 | #ifdef CONFIG_PCI_ATS |
342 | union { | 342 | union { |
343 | struct pci_sriov *sriov; /* SR-IOV capability related */ | 343 | struct pci_sriov *sriov; /* SR-IOV capability related */ |
344 | struct pci_dev *physfn; /* the PF this VF is associated with */ | 344 | struct pci_dev *physfn; /* the PF this VF is associated with */ |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 5c4c8b18c8b7..3f3ed83a9aa5 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -54,118 +54,145 @@ typedef struct pm_message { | |||
54 | /** | 54 | /** |
55 | * struct dev_pm_ops - device PM callbacks | 55 | * struct dev_pm_ops - device PM callbacks |
56 | * | 56 | * |
57 | * Several driver power state transitions are externally visible, affecting | 57 | * Several device power state transitions are externally visible, affecting |
58 | * the state of pending I/O queues and (for drivers that touch hardware) | 58 | * the state of pending I/O queues and (for drivers that touch hardware) |
59 | * interrupts, wakeups, DMA, and other hardware state. There may also be | 59 | * interrupts, wakeups, DMA, and other hardware state. There may also be |
60 | * internal transitions to various low power modes, which are transparent | 60 | * internal transitions to various low-power modes which are transparent |
61 | * to the rest of the driver stack (such as a driver that's ON gating off | 61 | * to the rest of the driver stack (such as a driver that's ON gating off |
62 | * clocks which are not in active use). | 62 | * clocks which are not in active use). |
63 | * | 63 | * |
64 | * The externally visible transitions are handled with the help of the following | 64 | * The externally visible transitions are handled with the help of callbacks |
65 | * callbacks included in this structure: | 65 | * included in this structure in such a way that two levels of callbacks are |
66 | * | 66 | * involved. First, the PM core executes callbacks provided by PM domains, |
67 | * @prepare: Prepare the device for the upcoming transition, but do NOT change | 67 | * device types, classes and bus types. They are the subsystem-level callbacks |
68 | * its hardware state. Prevent new children of the device from being | 68 | * supposed to execute callbacks provided by device drivers, although they may |
69 | * registered after @prepare() returns (the driver's subsystem and | 69 | * choose not to do that. If the driver callbacks are executed, they have to |
70 | * generally the rest of the kernel is supposed to prevent new calls to the | 70 | * collaborate with the subsystem-level callbacks to achieve the goals |
71 | * probe method from being made too once @prepare() has succeeded). If | 71 | * appropriate for the given system transition, given transition phase and the |
72 | * @prepare() detects a situation it cannot handle (e.g. registration of a | 72 | * subsystem the device belongs to. |
73 | * child already in progress), it may return -EAGAIN, so that the PM core | 73 | * |
74 | * can execute it once again (e.g. after the new child has been registered) | 74 | * @prepare: The principal role of this callback is to prevent new children of |
75 | * to recover from the race condition. This method is executed for all | 75 | * the device from being registered after it has returned (the driver's |
76 | * kinds of suspend transitions and is followed by one of the suspend | 76 | * subsystem and generally the rest of the kernel is supposed to prevent |
77 | * callbacks: @suspend(), @freeze(), or @poweroff(). | 77 | * new calls to the probe method from being made too once @prepare() has |
78 | * The PM core executes @prepare() for all devices before starting to | 78 | * succeeded). If @prepare() detects a situation it cannot handle (e.g. |
79 | * execute suspend callbacks for any of them, so drivers may assume all of | 79 | * registration of a child already in progress), it may return -EAGAIN, so |
80 | * the other devices to be present and functional while @prepare() is being | 80 | * that the PM core can execute it once again (e.g. after a new child has |
81 | * executed. In particular, it is safe to make GFP_KERNEL memory | 81 | * been registered) to recover from the race condition. |
82 | * allocations from within @prepare(). However, drivers may NOT assume | 82 | * This method is executed for all kinds of suspend transitions and is |
83 | * anything about the availability of the user space at that time and it | 83 | * followed by one of the suspend callbacks: @suspend(), @freeze(), or |
84 | * is not correct to request firmware from within @prepare() (it's too | 84 | * @poweroff(). The PM core executes subsystem-level @prepare() for all |
85 | * late to do that). [To work around this limitation, drivers may | 85 | * devices before starting to invoke suspend callbacks for any of them, so |
86 | * register suspend and hibernation notifiers that are executed before the | 86 | * generally devices may be assumed to be functional or to respond to |
87 | * freezing of tasks.] | 87 | * runtime resume requests while @prepare() is being executed. However, |
88 | * device drivers may NOT assume anything about the availability of user | ||
89 | * space at that time and it is NOT valid to request firmware from within | ||
90 | * @prepare() (it's too late to do that). It also is NOT valid to allocate | ||
91 | * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. | ||
92 | * [To work around these limitations, drivers may register suspend and | ||
93 | * hibernation notifiers to be executed before the freezing of tasks.] | ||
88 | * | 94 | * |
89 | * @complete: Undo the changes made by @prepare(). This method is executed for | 95 | * @complete: Undo the changes made by @prepare(). This method is executed for |
90 | * all kinds of resume transitions, following one of the resume callbacks: | 96 | * all kinds of resume transitions, following one of the resume callbacks: |
91 | * @resume(), @thaw(), @restore(). Also called if the state transition | 97 | * @resume(), @thaw(), @restore(). Also called if the state transition |
92 | * fails before the driver's suspend callback (@suspend(), @freeze(), | 98 | * fails before the driver's suspend callback: @suspend(), @freeze() or |
93 | * @poweroff()) can be executed (e.g. if the suspend callback fails for one | 99 | * @poweroff(), can be executed (e.g. if the suspend callback fails for one |
94 | * of the other devices that the PM core has unsuccessfully attempted to | 100 | * of the other devices that the PM core has unsuccessfully attempted to |
95 | * suspend earlier). | 101 | * suspend earlier). |
96 | * The PM core executes @complete() after it has executed the appropriate | 102 | * The PM core executes subsystem-level @complete() after it has executed |
97 | * resume callback for all devices. | 103 | * the appropriate resume callbacks for all devices. |
98 | * | 104 | * |
99 | * @suspend: Executed before putting the system into a sleep state in which the | 105 | * @suspend: Executed before putting the system into a sleep state in which the |
100 | * contents of main memory are preserved. Quiesce the device, put it into | 106 | * contents of main memory are preserved. The exact action to perform |
101 | * a low power state appropriate for the upcoming system state (such as | 107 | * depends on the device's subsystem (PM domain, device type, class or bus |
102 | * PCI_D3hot), and enable wakeup events as appropriate. | 108 | * type), but generally the device must be quiescent after subsystem-level |
109 | * @suspend() has returned, so that it doesn't do any I/O or DMA. | ||
110 | * Subsystem-level @suspend() is executed for all devices after invoking | ||
111 | * subsystem-level @prepare() for all of them. | ||
103 | * | 112 | * |
104 | * @resume: Executed after waking the system up from a sleep state in which the | 113 | * @resume: Executed after waking the system up from a sleep state in which the |
105 | * contents of main memory were preserved. Put the device into the | 114 | * contents of main memory were preserved. The exact action to perform |
106 | * appropriate state, according to the information saved in memory by the | 115 | * depends on the device's subsystem, but generally the driver is expected |
107 | * preceding @suspend(). The driver starts working again, responding to | 116 | * to start working again, responding to hardware events and software |
108 | * hardware events and software requests. The hardware may have gone | 117 | * requests (the device itself may be left in a low-power state, waiting |
109 | * through a power-off reset, or it may have maintained state from the | 118 | * for a runtime resume to occur). The state of the device at the time its |
110 | * previous suspend() which the driver may rely on while resuming. On most | 119 | * driver's @resume() callback is run depends on the platform and subsystem |
111 | * platforms, there are no restrictions on availability of resources like | 120 | * the device belongs to. On most platforms, there are no restrictions on |
112 | * clocks during @resume(). | 121 | * availability of resources like clocks during @resume(). |
122 | * Subsystem-level @resume() is executed for all devices after invoking | ||
123 | * subsystem-level @resume_noirq() for all of them. | ||
113 | * | 124 | * |
114 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | 125 | * @freeze: Hibernation-specific, executed before creating a hibernation image. |
115 | * Quiesce operations so that a consistent image can be created, but do NOT | 126 | * Analogous to @suspend(), but it should not enable the device to signal |
116 | * otherwise put the device into a low power device state and do NOT emit | 127 | * wakeup events or change its power state. The majority of subsystems |
117 | * system wakeup events. Save in main memory the device settings to be | 128 | * (with the notable exception of the PCI bus type) expect the driver-level |
118 | * used by @restore() during the subsequent resume from hibernation or by | 129 | * @freeze() to save the device settings in memory to be used by @restore() |
119 | * the subsequent @thaw(), if the creation of the image or the restoration | 130 | * during the subsequent resume from hibernation. |
120 | * of main memory contents from it fails. | 131 | * Subsystem-level @freeze() is executed for all devices after invoking |
132 | * subsystem-level @prepare() for all of them. | ||
121 | * | 133 | * |
122 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | 134 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR |
123 | * if the creation of the image fails. Also executed after a failing | 135 | * if the creation of an image has failed. Also executed after a failing |
124 | * attempt to restore the contents of main memory from such an image. | 136 | * attempt to restore the contents of main memory from such an image. |
125 | * Undo the changes made by the preceding @freeze(), so the device can be | 137 | * Undo the changes made by the preceding @freeze(), so the device can be |
126 | * operated in the same way as immediately before the call to @freeze(). | 138 | * operated in the same way as immediately before the call to @freeze(). |
139 | * Subsystem-level @thaw() is executed for all devices after invoking | ||
140 | * subsystem-level @thaw_noirq() for all of them. It also may be executed | ||
141 | * directly after @freeze() in case of a transition error. | ||
127 | * | 142 | * |
128 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | 143 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. |
129 | * Quiesce the device, put it into a low power state appropriate for the | 144 | * Analogous to @suspend(), but it need not save the device's settings in |
130 | * upcoming system state (such as PCI_D3hot), and enable wakeup events as | 145 | * memory. |
131 | * appropriate. | 146 | * Subsystem-level @poweroff() is executed for all devices after invoking |
147 | * subsystem-level @prepare() for all of them. | ||
132 | * | 148 | * |
133 | * @restore: Hibernation-specific, executed after restoring the contents of main | 149 | * @restore: Hibernation-specific, executed after restoring the contents of main |
134 | * memory from a hibernation image. Driver starts working again, | 150 | * memory from a hibernation image, analogous to @resume(). |
135 | * responding to hardware events and software requests. Drivers may NOT | 151 | * |
136 | * make ANY assumptions about the hardware state right prior to @restore(). | 152 | * @suspend_noirq: Complete the actions started by @suspend(). Carry out any |
137 | * On most platforms, there are no restrictions on availability of | 153 | * additional operations required for suspending the device that might be |
138 | * resources like clocks during @restore(). | 154 | * racing with its driver's interrupt handler, which is guaranteed not to |
139 | * | 155 | * run while @suspend_noirq() is being executed. |
140 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any | 156 | * It generally is expected that the device will be in a low-power state |
141 | * actions required for suspending the device that need interrupts to be | 157 | * (appropriate for the target system sleep state) after subsystem-level |
142 | * disabled | 158 | * @suspend_noirq() has returned successfully. If the device can generate |
143 | * | 159 | * system wakeup signals and is enabled to wake up the system, it should be |
144 | * @resume_noirq: Prepare for the execution of ->resume() by carrying out any | 160 | * configured to do so at that time. However, depending on the platform |
145 | * actions required for resuming the device that need interrupts to be | 161 | * and device's subsystem, @suspend() may be allowed to put the device into |
146 | * disabled | 162 | * the low-power state and configure it to generate wakeup signals, in |
147 | * | 163 | * which case it generally is not necessary to define @suspend_noirq(). |
148 | * @freeze_noirq: Complete the operations of ->freeze() by carrying out any | 164 | * |
149 | * actions required for freezing the device that need interrupts to be | 165 | * @resume_noirq: Prepare for the execution of @resume() by carrying out any |
150 | * disabled | 166 | * operations required for resuming the device that might be racing with |
151 | * | 167 | * its driver's interrupt handler, which is guaranteed not to run while |
152 | * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any | 168 | * @resume_noirq() is being executed. |
153 | * actions required for thawing the device that need interrupts to be | 169 | * |
154 | * disabled | 170 | * @freeze_noirq: Complete the actions started by @freeze(). Carry out any |
155 | * | 171 | * additional operations required for freezing the device that might be |
156 | * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any | 172 | * racing with its driver's interrupt handler, which is guaranteed not to |
157 | * actions required for handling the device that need interrupts to be | 173 | * run while @freeze_noirq() is being executed. |
158 | * disabled | 174 | * The power state of the device should not be changed by either @freeze() |
159 | * | 175 | * or @freeze_noirq() and it should not be configured to signal system |
160 | * @restore_noirq: Prepare for the execution of ->restore() by carrying out any | 176 | * wakeup by any of these callbacks. |
161 | * actions required for restoring the operations of the device that need | 177 | * |
162 | * interrupts to be disabled | 178 | * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any |
179 | * operations required for thawing the device that might be racing with its | ||
180 | * driver's interrupt handler, which is guaranteed not to run while | ||
181 | * @thaw_noirq() is being executed. | ||
182 | * | ||
183 | * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to | ||
184 | * @suspend_noirq(), but it need not save the device's settings in memory. | ||
185 | * | ||
186 | * @restore_noirq: Prepare for the execution of @restore() by carrying out any | ||
187 | * operations required for thawing the device that might be racing with its | ||
188 | * driver's interrupt handler, which is guaranteed not to run while | ||
189 | * @restore_noirq() is being executed. Analogous to @resume_noirq(). | ||
163 | * | 190 | * |
164 | * All of the above callbacks, except for @complete(), return error codes. | 191 | * All of the above callbacks, except for @complete(), return error codes. |
165 | * However, the error codes returned by the resume operations, @resume(), | 192 | * However, the error codes returned by the resume operations, @resume(), |
166 | * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do | 193 | * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do |
167 | * not cause the PM core to abort the resume transition during which they are | 194 | * not cause the PM core to abort the resume transition during which they are |
168 | * returned. The error codes returned in that cases are only printed by the PM | 195 | * returned. The error codes returned in those cases are only printed by the PM |
169 | * core to the system logs for debugging purposes. Still, it is recommended | 196 | * core to the system logs for debugging purposes. Still, it is recommended |
170 | * that drivers only return error codes from their resume methods in case of an | 197 | * that drivers only return error codes from their resume methods in case of an |
171 | * unrecoverable failure (i.e. when the device being handled refuses to resume | 198 | * unrecoverable failure (i.e. when the device being handled refuses to resume |
@@ -174,31 +201,43 @@ typedef struct pm_message { | |||
174 | * their children. | 201 | * their children. |
175 | * | 202 | * |
176 | * It is allowed to unregister devices while the above callbacks are being | 203 | * It is allowed to unregister devices while the above callbacks are being |
177 | * executed. However, it is not allowed to unregister a device from within any | 204 | * executed. However, a callback routine must NOT try to unregister the device |
178 | * of its own callbacks. | 205 | * it was called for, although it may unregister children of that device (for |
206 | * example, if it detects that a child was unplugged while the system was | ||
207 | * asleep). | ||
208 | * | ||
209 | * Refer to Documentation/power/devices.txt for more information about the role | ||
210 | * of the above callbacks in the system suspend process. | ||
179 | * | 211 | * |
180 | * There also are the following callbacks related to run-time power management | 212 | * There also are callbacks related to runtime power management of devices. |
181 | * of devices: | 213 | * Again, these callbacks are executed by the PM core only for subsystems |
214 | * (PM domains, device types, classes and bus types) and the subsystem-level | ||
215 | * callbacks are supposed to invoke the driver callbacks. Moreover, the exact | ||
216 | * actions to be performed by a device driver's callbacks generally depend on | ||
217 | * the platform and subsystem the device belongs to. | ||
182 | * | 218 | * |
183 | * @runtime_suspend: Prepare the device for a condition in which it won't be | 219 | * @runtime_suspend: Prepare the device for a condition in which it won't be |
184 | * able to communicate with the CPU(s) and RAM due to power management. | 220 | * able to communicate with the CPU(s) and RAM due to power management. |
185 | * This need not mean that the device should be put into a low power state. | 221 | * This need not mean that the device should be put into a low-power state. |
186 | * For example, if the device is behind a link which is about to be turned | 222 | * For example, if the device is behind a link which is about to be turned |
187 | * off, the device may remain at full power. If the device does go to low | 223 | * off, the device may remain at full power. If the device does go to low |
188 | * power and is capable of generating run-time wake-up events, remote | 224 | * power and is capable of generating runtime wakeup events, remote wakeup |
189 | * wake-up (i.e., a hardware mechanism allowing the device to request a | 225 | * (i.e., a hardware mechanism allowing the device to request a change of |
190 | * change of its power state via a wake-up event, such as PCI PME) should | 226 | * its power state via an interrupt) should be enabled for it. |
191 | * be enabled for it. | ||
192 | * | 227 | * |
193 | * @runtime_resume: Put the device into the fully active state in response to a | 228 | * @runtime_resume: Put the device into the fully active state in response to a |
194 | * wake-up event generated by hardware or at the request of software. If | 229 | * wakeup event generated by hardware or at the request of software. If |
195 | * necessary, put the device into the full power state and restore its | 230 | * necessary, put the device into the full-power state and restore its |
196 | * registers, so that it is fully operational. | 231 | * registers, so that it is fully operational. |
197 | * | 232 | * |
198 | * @runtime_idle: Device appears to be inactive and it might be put into a low | 233 | * @runtime_idle: Device appears to be inactive and it might be put into a |
199 | * power state if all of the necessary conditions are satisfied. Check | 234 | * low-power state if all of the necessary conditions are satisfied. Check |
200 | * these conditions and handle the device as appropriate, possibly queueing | 235 | * these conditions and handle the device as appropriate, possibly queueing |
201 | * a suspend request for it. The return value is ignored by the PM core. | 236 | * a suspend request for it. The return value is ignored by the PM core. |
237 | * | ||
238 | * Refer to Documentation/power/runtime_pm.txt for more information about the | ||
239 | * role of the above callbacks in device runtime power management. | ||
240 | * | ||
202 | */ | 241 | */ |
203 | 242 | ||
204 | struct dev_pm_ops { | 243 | struct dev_pm_ops { |
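For orientation, here is a minimal sketch of a driver wiring up the callbacks documented above. The foo_* names and the empty callback bodies are hypothetical; only struct dev_pm_ops, the callback signatures and the SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() helpers come from <linux/pm.h>, and what each callback actually has to do depends on the subsystem, as the comments say.

#include <linux/device.h>
#include <linux/pm.h>

/* System sleep: quiesce the device; the subsystem picks the final power state. */
static int foo_suspend(struct device *dev)
{
	return 0;
}

/* Undo foo_suspend(); reprogram registers if the device lost power. */
static int foo_resume(struct device *dev)
{
	return 0;
}

/* Runtime PM: the device looks idle, so gate clocks and arm remote wakeup. */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;
}

/* Bring the device back to the fully active state. */
static int foo_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

SET_SYSTEM_SLEEP_PM_OPS() reuses the same pair for @suspend/@resume, @freeze/@thaw and @poweroff/@restore, which matches the "analogous to" wording above.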
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index ea567321ae3c..2ca8cde5459d 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
@@ -35,10 +35,12 @@ struct pstore_info { | |||
35 | spinlock_t buf_lock; /* serialize access to 'buf' */ | 35 | spinlock_t buf_lock; /* serialize access to 'buf' */ |
36 | char *buf; | 36 | char *buf; |
37 | size_t bufsize; | 37 | size_t bufsize; |
38 | struct mutex read_mutex; /* serialize open/read/close */ | ||
38 | int (*open)(struct pstore_info *psi); | 39 | int (*open)(struct pstore_info *psi); |
39 | int (*close)(struct pstore_info *psi); | 40 | int (*close)(struct pstore_info *psi); |
40 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, | 41 | ssize_t (*read)(u64 *id, enum pstore_type_id *type, |
41 | struct timespec *time, struct pstore_info *psi); | 42 | struct timespec *time, char **buf, |
43 | struct pstore_info *psi); | ||
42 | int (*write)(enum pstore_type_id type, u64 *id, | 44 | int (*write)(enum pstore_type_id type, u64 *id, |
43 | unsigned int part, size_t size, struct pstore_info *psi); | 45 | unsigned int part, size_t size, struct pstore_info *psi); |
44 | int (*erase)(enum pstore_type_id type, u64 id, | 46 | int (*erase)(enum pstore_type_id type, u64 id, |
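To illustrate the new ->read() prototype, a hedged sketch of a backend reader. my_next_record() is an invented helper; the buffer contract assumed here (the backend kmallocs *buf, returns its length, returns 0 when no records remain, and the pstore core frees the buffer) is how the extra char **buf argument appears to be used, so treat it as an assumption rather than a guarantee.

static ssize_t my_pstore_read(u64 *id, enum pstore_type_id *type,
			      struct timespec *time, char **buf,
			      struct pstore_info *psi)
{
	size_t len;
	void *rec = my_next_record(psi, id, type, time, &len);	/* invented helper */

	if (!rec)
		return 0;			/* no more records */

	*buf = kmalloc(len, GFP_KERNEL);	/* assumed to be freed by the core */
	if (!*buf)
		return -ENOMEM;
	memcpy(*buf, rec, len);
	return (ssize_t)len;
}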
diff --git a/include/linux/sched.h b/include/linux/sched.h index 68daf4f27e2c..1c4f3e9b9bc5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1521,7 +1521,6 @@ struct task_struct { | |||
1521 | #ifdef CONFIG_FAULT_INJECTION | 1521 | #ifdef CONFIG_FAULT_INJECTION |
1522 | int make_it_fail; | 1522 | int make_it_fail; |
1523 | #endif | 1523 | #endif |
1524 | struct prop_local_single dirties; | ||
1525 | /* | 1524 | /* |
1526 | * when (nr_dirtied >= nr_dirtied_pause), it's time to call | 1525 | * when (nr_dirtied >= nr_dirtied_pause), it's time to call |
1527 | * balance_dirty_pages() for some dirty throttling pause | 1526 | * balance_dirty_pages() for some dirty throttling pause |
diff --git a/include/linux/serial.h b/include/linux/serial.h index 97ff8e27a6cc..3d86517fe7d5 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -207,13 +207,15 @@ struct serial_icounter_struct { | |||
207 | 207 | ||
208 | struct serial_rs485 { | 208 | struct serial_rs485 { |
209 | __u32 flags; /* RS485 feature flags */ | 209 | __u32 flags; /* RS485 feature flags */ |
210 | #define SER_RS485_ENABLED (1 << 0) | 210 | #define SER_RS485_ENABLED (1 << 0) /* If enabled */ |
211 | #define SER_RS485_RTS_ON_SEND (1 << 1) | 211 | #define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for |
212 | #define SER_RS485_RTS_AFTER_SEND (1 << 2) | 212 | RTS pin when |
213 | #define SER_RS485_RTS_BEFORE_SEND (1 << 3) | 213 | sending */ |
214 | #define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for | ||
215 | RTS pin after send */ | ||
214 | #define SER_RS485_RX_DURING_TX (1 << 4) | 216 | #define SER_RS485_RX_DURING_TX (1 << 4) |
215 | __u32 delay_rts_before_send; /* Milliseconds */ | 217 | __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ |
216 | __u32 delay_rts_after_send; /* Milliseconds */ | 218 | __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ |
217 | __u32 padding[5]; /* Memory is cheap, new structs | 219 | __u32 padding[5]; /* Memory is cheap, new structs |
218 | are a royal PITA .. */ | 220 | are a royal PITA .. */ |
219 | }; | 221 | }; |
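As a usage note for the flags and delay fields above, a short userspace sketch; the device path is illustrative and TIOCSRS485 is the ioctl that takes a struct serial_rs485.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int enable_rs485(const char *path)		/* e.g. "/dev/ttyS1" (illustrative) */
{
	struct serial_rs485 rs485;
	int fd = open(path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_before_send = 1;	/* milliseconds, per the fields above */
	rs485.delay_rts_after_send = 1;

	if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

Whether a given serial driver honours these settings is driver-specific; drivers without RS485 support typically fail the ioctl.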
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 63f98d0a8efa..5206d6541da5 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -85,6 +85,8 @@ | |||
85 | * @reset: reset the device | 85 | * @reset: reset the device |
86 | * vdev: the virtio device | 86 | * vdev: the virtio device |
87 | * After this, status and feature negotiation must be done again | 87 | * After this, status and feature negotiation must be done again |
88 | * Device must not be reset from its vq/config callbacks, or in | ||
89 | * parallel with being added/removed. | ||
88 | * @find_vqs: find virtqueues and instantiate them. | 90 | * @find_vqs: find virtqueues and instantiate them. |
89 | * vdev: the virtio_device | 91 | * vdev: the virtio_device |
90 | * nvqs: the number of virtqueues to find | 92 | * nvqs: the number of virtqueues to find |
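A small hedged sketch of the constraint just added: a driver issues the reset from its own remove path (ordinary process context), never from a virtqueue or config-change callback and not concurrently with probe/remove. The my_virtio_remove() name is hypothetical; reset and del_vqs are existing virtio_config_ops members.

static void my_virtio_remove(struct virtio_device *vdev)
{
	/* Stop the device and drop negotiated status/features first... */
	vdev->config->reset(vdev);
	/* ...then it is safe to tear the virtqueues down. */
	vdev->config->del_vqs(vdev);
}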
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h index 27c7edefbc86..5c7b6f0daef8 100644 --- a/include/linux/virtio_mmio.h +++ b/include/linux/virtio_mmio.h | |||
@@ -63,7 +63,7 @@ | |||
63 | #define VIRTIO_MMIO_GUEST_FEATURES 0x020 | 63 | #define VIRTIO_MMIO_GUEST_FEATURES 0x020 |
64 | 64 | ||
65 | /* Activated features set selector - Write Only */ | 65 | /* Activated features set selector - Write Only */ |
66 | #define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024 | 66 | #define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024 |
67 | 67 | ||
68 | /* Guest's memory page size in bytes - Write Only */ | 68 | /* Guest's memory page size in bytes - Write Only */ |
69 | #define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 | 69 | #define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 73a5c26c01ea..06b795dd5906 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -35,6 +35,7 @@ struct inet_peer { | |||
35 | 35 | ||
36 | u32 metrics[RTAX_MAX]; | 36 | u32 metrics[RTAX_MAX]; |
37 | u32 rate_tokens; /* rate limiting for ICMP */ | 37 | u32 rate_tokens; /* rate limiting for ICMP */ |
38 | int redirect_genid; | ||
38 | unsigned long rate_last; | 39 | unsigned long rate_last; |
39 | unsigned long pmtu_expires; | 40 | unsigned long pmtu_expires; |
40 | u32 pmtu_orig; | 41 | u32 pmtu_orig; |
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 4283508b3e18..a88fb6939387 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h | |||
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier { | |||
67 | int (*fcn)(unsigned int events, struct nf_ct_event *item); | 67 | int (*fcn)(unsigned int events, struct nf_ct_event *item); |
68 | }; | 68 | }; |
69 | 69 | ||
70 | extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; | 70 | extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb); |
71 | extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb); | 71 | extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb); |
72 | extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb); | ||
73 | 72 | ||
74 | extern void nf_ct_deliver_cached_events(struct nf_conn *ct); | 73 | extern void nf_ct_deliver_cached_events(struct nf_conn *ct); |
75 | 74 | ||
76 | static inline void | 75 | static inline void |
77 | nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) | 76 | nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) |
78 | { | 77 | { |
78 | struct net *net = nf_ct_net(ct); | ||
79 | struct nf_conntrack_ecache *e; | 79 | struct nf_conntrack_ecache *e; |
80 | 80 | ||
81 | if (nf_conntrack_event_cb == NULL) | 81 | if (net->ct.nf_conntrack_event_cb == NULL) |
82 | return; | 82 | return; |
83 | 83 | ||
84 | e = nf_ct_ecache_find(ct); | 84 | e = nf_ct_ecache_find(ct); |
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask, | |||
95 | int report) | 95 | int report) |
96 | { | 96 | { |
97 | int ret = 0; | 97 | int ret = 0; |
98 | struct net *net = nf_ct_net(ct); | ||
98 | struct nf_ct_event_notifier *notify; | 99 | struct nf_ct_event_notifier *notify; |
99 | struct nf_conntrack_ecache *e; | 100 | struct nf_conntrack_ecache *e; |
100 | 101 | ||
101 | rcu_read_lock(); | 102 | rcu_read_lock(); |
102 | notify = rcu_dereference(nf_conntrack_event_cb); | 103 | notify = rcu_dereference(net->ct.nf_conntrack_event_cb); |
103 | if (notify == NULL) | 104 | if (notify == NULL) |
104 | goto out_unlock; | 105 | goto out_unlock; |
105 | 106 | ||
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier { | |||
164 | int (*fcn)(unsigned int events, struct nf_exp_event *item); | 165 | int (*fcn)(unsigned int events, struct nf_exp_event *item); |
165 | }; | 166 | }; |
166 | 167 | ||
167 | extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb; | 168 | extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb); |
168 | extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb); | 169 | extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb); |
169 | extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb); | ||
170 | 170 | ||
171 | static inline void | 171 | static inline void |
172 | nf_ct_expect_event_report(enum ip_conntrack_expect_events event, | 172 | nf_ct_expect_event_report(enum ip_conntrack_expect_events event, |
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event, | |||
174 | u32 pid, | 174 | u32 pid, |
175 | int report) | 175 | int report) |
176 | { | 176 | { |
177 | struct net *net = nf_ct_exp_net(exp); | ||
177 | struct nf_exp_event_notifier *notify; | 178 | struct nf_exp_event_notifier *notify; |
178 | struct nf_conntrack_ecache *e; | 179 | struct nf_conntrack_ecache *e; |
179 | 180 | ||
180 | rcu_read_lock(); | 181 | rcu_read_lock(); |
181 | notify = rcu_dereference(nf_expect_event_cb); | 182 | notify = rcu_dereference(net->ct.nf_expect_event_cb); |
182 | if (notify == NULL) | 183 | if (notify == NULL) |
183 | goto out_unlock; | 184 | goto out_unlock; |
184 | 185 | ||
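To make the per-netns change concrete, a sketch of registering a conntrack event notifier with the new prototype shown above. The callback body and the use of init_net are illustrative only; ctnetlink is the real in-tree user.

static int my_ct_event(unsigned int events, struct nf_ct_event *item)
{
	if (events & (1 << IPCT_NEW))
		pr_debug("new conntrack %p\n", item->ct);
	return 0;
}

static struct nf_ct_event_notifier my_ct_notifier = {
	.fcn	= my_ct_event,
};

static int __init my_ct_init(void)
{
	/* notifiers are now registered per network namespace */
	return nf_conntrack_register_notifier(&init_net, &my_ct_notifier);
}

static void __exit my_ct_exit(void)
{
	nf_conntrack_unregister_notifier(&init_net, &my_ct_notifier);
}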
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 0249399e51a7..7a911eca0f18 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h | |||
@@ -18,6 +18,8 @@ struct netns_ct { | |||
18 | struct hlist_nulls_head unconfirmed; | 18 | struct hlist_nulls_head unconfirmed; |
19 | struct hlist_nulls_head dying; | 19 | struct hlist_nulls_head dying; |
20 | struct ip_conntrack_stat __percpu *stat; | 20 | struct ip_conntrack_stat __percpu *stat; |
21 | struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; | ||
22 | struct nf_exp_event_notifier __rcu *nf_expect_event_cb; | ||
21 | int sysctl_events; | 23 | int sysctl_events; |
22 | unsigned int sysctl_events_retry_timeout; | 24 | unsigned int sysctl_events_retry_timeout; |
23 | int sysctl_acct; | 25 | int sysctl_acct; |
diff --git a/include/net/red.h b/include/net/red.h index 3319f16b3beb..b72a3b833936 100644 --- a/include/net/red.h +++ b/include/net/red.h | |||
@@ -116,7 +116,7 @@ struct red_parms { | |||
116 | u32 qR; /* Cached random number */ | 116 | u32 qR; /* Cached random number */ |
117 | 117 | ||
118 | unsigned long qavg; /* Average queue length: A scaled */ | 118 | unsigned long qavg; /* Average queue length: A scaled */ |
119 | psched_time_t qidlestart; /* Start of current idle period */ | 119 | ktime_t qidlestart; /* Start of current idle period */ |
120 | }; | 120 | }; |
121 | 121 | ||
122 | static inline u32 red_rmask(u8 Plog) | 122 | static inline u32 red_rmask(u8 Plog) |
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p, | |||
148 | 148 | ||
149 | static inline int red_is_idling(struct red_parms *p) | 149 | static inline int red_is_idling(struct red_parms *p) |
150 | { | 150 | { |
151 | return p->qidlestart != PSCHED_PASTPERFECT; | 151 | return p->qidlestart.tv64 != 0; |
152 | } | 152 | } |
153 | 153 | ||
154 | static inline void red_start_of_idle_period(struct red_parms *p) | 154 | static inline void red_start_of_idle_period(struct red_parms *p) |
155 | { | 155 | { |
156 | p->qidlestart = psched_get_time(); | 156 | p->qidlestart = ktime_get(); |
157 | } | 157 | } |
158 | 158 | ||
159 | static inline void red_end_of_idle_period(struct red_parms *p) | 159 | static inline void red_end_of_idle_period(struct red_parms *p) |
160 | { | 160 | { |
161 | p->qidlestart = PSCHED_PASTPERFECT; | 161 | p->qidlestart.tv64 = 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | static inline void red_restart(struct red_parms *p) | 164 | static inline void red_restart(struct red_parms *p) |
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p) | |||
170 | 170 | ||
171 | static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) | 171 | static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) |
172 | { | 172 | { |
173 | psched_time_t now; | 173 | s64 delta = ktime_us_delta(ktime_get(), p->qidlestart); |
174 | long us_idle; | 174 | long us_idle = min_t(s64, delta, p->Scell_max); |
175 | int shift; | 175 | int shift; |
176 | 176 | ||
177 | now = psched_get_time(); | ||
178 | us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); | ||
179 | |||
180 | /* | 177 | /* |
181 | * The problem: ideally, average queue length recalculation should | 178 | * The problem: ideally, average queue length recalculation should |
182 | * be done over constant clock intervals. This is too expensive, so | 179 | * be done over constant clock intervals. This is too expensive, so |
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index b66ebb2032c6..378c7ed6760b 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -307,15 +307,8 @@ struct omap_dss_board_info { | |||
307 | void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); | 307 | void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); |
308 | }; | 308 | }; |
309 | 309 | ||
310 | #if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS) | ||
311 | /* Init with the board info */ | 310 | /* Init with the board info */ |
312 | extern int omap_display_init(struct omap_dss_board_info *board_data); | 311 | extern int omap_display_init(struct omap_dss_board_info *board_data); |
313 | #else | ||
314 | static inline int omap_display_init(struct omap_dss_board_info *board_data) | ||
315 | { | ||
316 | return 0; | ||
317 | } | ||
318 | #endif | ||
319 | 312 | ||
320 | struct omap_display_platform_data { | 313 | struct omap_display_platform_data { |
321 | struct omap_dss_board_info *board_data; | 314 | struct omap_dss_board_info *board_data; |
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 5e828a2ca8e6..213c0351dad8 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss, | |||
153 | kfree(cgroup_freezer(cgroup)); | 153 | kfree(cgroup_freezer(cgroup)); |
154 | } | 154 | } |
155 | 155 | ||
156 | /* task is frozen or will freeze immediately when next it gets woken */ | ||
157 | static bool is_task_frozen_enough(struct task_struct *task) | ||
158 | { | ||
159 | return frozen(task) || | ||
160 | (task_is_stopped_or_traced(task) && freezing(task)); | ||
161 | } | ||
162 | |||
156 | /* | 163 | /* |
157 | * The call to cgroup_lock() in the freezer.state write method prevents | 164 | * The call to cgroup_lock() in the freezer.state write method prevents |
158 | * a write to that file racing against an attach, and hence the | 165 | * a write to that file racing against an attach, and hence the |
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup, | |||
231 | cgroup_iter_start(cgroup, &it); | 238 | cgroup_iter_start(cgroup, &it); |
232 | while ((task = cgroup_iter_next(cgroup, &it))) { | 239 | while ((task = cgroup_iter_next(cgroup, &it))) { |
233 | ntotal++; | 240 | ntotal++; |
234 | if (frozen(task)) | 241 | if (is_task_frozen_enough(task)) |
235 | nfrozen++; | 242 | nfrozen++; |
236 | } | 243 | } |
237 | 244 | ||
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer) | |||
284 | while ((task = cgroup_iter_next(cgroup, &it))) { | 291 | while ((task = cgroup_iter_next(cgroup, &it))) { |
285 | if (!freeze_task(task, true)) | 292 | if (!freeze_task(task, true)) |
286 | continue; | 293 | continue; |
287 | if (frozen(task)) | 294 | if (is_task_frozen_enough(task)) |
288 | continue; | 295 | continue; |
289 | if (!freezing(task) && !freezer_should_skip(task)) | 296 | if (!freezing(task) && !freezer_should_skip(task)) |
290 | num_cant_freeze_now++; | 297 | num_cant_freeze_now++; |
diff --git a/kernel/fork.c b/kernel/fork.c index ba0d17261329..da4a6a10d088 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account) | |||
162 | 162 | ||
163 | void free_task(struct task_struct *tsk) | 163 | void free_task(struct task_struct *tsk) |
164 | { | 164 | { |
165 | prop_local_destroy_single(&tsk->dirties); | ||
166 | account_kernel_stack(tsk->stack, -1); | 165 | account_kernel_stack(tsk->stack, -1); |
167 | free_thread_info(tsk->stack); | 166 | free_thread_info(tsk->stack); |
168 | rt_mutex_debug_task_free(tsk); | 167 | rt_mutex_debug_task_free(tsk); |
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
274 | 273 | ||
275 | tsk->stack = ti; | 274 | tsk->stack = ti; |
276 | 275 | ||
277 | err = prop_local_init_single(&tsk->dirties); | ||
278 | if (err) | ||
279 | goto out; | ||
280 | |||
281 | setup_thread_stack(tsk, orig); | 276 | setup_thread_stack(tsk, orig); |
282 | clear_user_return_notifier(tsk); | 277 | clear_user_return_notifier(tsk); |
283 | clear_tsk_need_resched(tsk); | 278 | clear_tsk_need_resched(tsk); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 422e567eecf6..ae34bf51682b 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
885 | struct hrtimer_clock_base *base, | 885 | struct hrtimer_clock_base *base, |
886 | unsigned long newstate, int reprogram) | 886 | unsigned long newstate, int reprogram) |
887 | { | 887 | { |
888 | struct timerqueue_node *next_timer; | ||
888 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) | 889 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) |
889 | goto out; | 890 | goto out; |
890 | 891 | ||
891 | if (&timer->node == timerqueue_getnext(&base->active)) { | 892 | next_timer = timerqueue_getnext(&base->active); |
893 | timerqueue_del(&base->active, &timer->node); | ||
894 | if (&timer->node == next_timer) { | ||
892 | #ifdef CONFIG_HIGH_RES_TIMERS | 895 | #ifdef CONFIG_HIGH_RES_TIMERS |
893 | /* Reprogram the clock event device. if enabled */ | 896 | /* Reprogram the clock event device. if enabled */ |
894 | if (reprogram && hrtimer_hres_active()) { | 897 | if (reprogram && hrtimer_hres_active()) { |
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
901 | } | 904 | } |
902 | #endif | 905 | #endif |
903 | } | 906 | } |
904 | timerqueue_del(&base->active, &timer->node); | ||
905 | if (!timerqueue_getnext(&base->active)) | 907 | if (!timerqueue_getnext(&base->active)) |
906 | base->cpu_base->active_bases &= ~(1 << base->index); | 908 | base->cpu_base->active_bases &= ~(1 << base->index); |
907 | out: | 909 | out: |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 67ce837ae52c..0e2b179bc7b3 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1596,7 +1596,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
1596 | return -ENOMEM; | 1596 | return -ENOMEM; |
1597 | 1597 | ||
1598 | action->handler = handler; | 1598 | action->handler = handler; |
1599 | action->flags = IRQF_PERCPU; | 1599 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; |
1600 | action->name = devname; | 1600 | action->name = devname; |
1601 | action->percpu_dev_id = dev_id; | 1601 | action->percpu_dev_id = dev_id; |
1602 | 1602 | ||
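For reference, a sketch of the request_percpu_irq() API this hunk adjusts; the IRQ number, the my_* names and the per-CPU state are hypothetical. With IRQF_NO_SUSPEND now set internally, such interrupts are left enabled across system suspend, and each CPU still has to enable the IRQ locally.

struct my_evt { unsigned long count; };		/* hypothetical per-CPU state */
static DEFINE_PER_CPU(struct my_evt, my_evts);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	struct my_evt *evt = dev_id;		/* this CPU's instance */

	evt->count++;
	return IRQ_HANDLED;
}

static int my_percpu_setup(unsigned int irq)	/* irq number is illustrative */
{
	int err = request_percpu_irq(irq, my_percpu_handler,
				     "my-evt", &my_evts);
	if (err)
		return err;
	enable_percpu_irq(irq, 0);		/* run on each CPU that uses the IRQ */
	return 0;
}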
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index b5f4742693c0..dc813a948be2 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) | |||
84 | */ | 84 | */ |
85 | action = desc->action; | 85 | action = desc->action; |
86 | if (!action || !(action->flags & IRQF_SHARED) || | 86 | if (!action || !(action->flags & IRQF_SHARED) || |
87 | (action->flags & __IRQF_TIMER) || !action->next) | 87 | (action->flags & __IRQF_TIMER) || |
88 | (action->handler(irq, action->dev_id) == IRQ_HANDLED) || | ||
89 | !action->next) | ||
88 | goto out; | 90 | goto out; |
89 | 91 | ||
90 | /* Already running on another processor */ | 92 | /* Already running on another processor */ |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 196c01268ebd..a6b0503574ee 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -347,7 +347,7 @@ int hibernation_snapshot(int platform_mode) | |||
347 | 347 | ||
348 | error = freeze_kernel_threads(); | 348 | error = freeze_kernel_threads(); |
349 | if (error) | 349 | if (error) |
350 | goto Close; | 350 | goto Cleanup; |
351 | 351 | ||
352 | if (hibernation_test(TEST_FREEZER) || | 352 | if (hibernation_test(TEST_FREEZER) || |
353 | hibernation_testmode(HIBERNATION_TESTPROC)) { | 353 | hibernation_testmode(HIBERNATION_TESTPROC)) { |
@@ -357,12 +357,14 @@ int hibernation_snapshot(int platform_mode) | |||
357 | * successful freezer test. | 357 | * successful freezer test. |
358 | */ | 358 | */ |
359 | freezer_test_done = true; | 359 | freezer_test_done = true; |
360 | goto Close; | 360 | goto Cleanup; |
361 | } | 361 | } |
362 | 362 | ||
363 | error = dpm_prepare(PMSG_FREEZE); | 363 | error = dpm_prepare(PMSG_FREEZE); |
364 | if (error) | 364 | if (error) { |
365 | goto Complete_devices; | 365 | dpm_complete(msg); |
366 | goto Cleanup; | ||
367 | } | ||
366 | 368 | ||
367 | suspend_console(); | 369 | suspend_console(); |
368 | pm_restrict_gfp_mask(); | 370 | pm_restrict_gfp_mask(); |
@@ -391,8 +393,6 @@ int hibernation_snapshot(int platform_mode) | |||
391 | pm_restore_gfp_mask(); | 393 | pm_restore_gfp_mask(); |
392 | 394 | ||
393 | resume_console(); | 395 | resume_console(); |
394 | |||
395 | Complete_devices: | ||
396 | dpm_complete(msg); | 396 | dpm_complete(msg); |
397 | 397 | ||
398 | Close: | 398 | Close: |
@@ -402,6 +402,10 @@ int hibernation_snapshot(int platform_mode) | |||
402 | Recover_platform: | 402 | Recover_platform: |
403 | platform_recover(platform_mode); | 403 | platform_recover(platform_mode); |
404 | goto Resume_devices; | 404 | goto Resume_devices; |
405 | |||
406 | Cleanup: | ||
407 | swsusp_free(); | ||
408 | goto Close; | ||
405 | } | 409 | } |
406 | 410 | ||
407 | /** | 411 | /** |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index cf52fda2e096..cfc65e1eb9fb 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void) | |||
492 | } | 492 | } |
493 | 493 | ||
494 | /** | 494 | /** |
495 | * clocksource_max_adjustment - Returns max adjustment amount | ||
496 | * @cs: Pointer to clocksource | ||
497 | * | ||
498 | */ | ||
499 | static u32 clocksource_max_adjustment(struct clocksource *cs) | ||
500 | { | ||
501 | u64 ret; | ||
502 | /* | ||
503 | * We won't try to correct for more then 11% adjustments (110,000 ppm), | ||
504 | */ | ||
505 | ret = (u64)cs->mult * 11; | ||
506 | do_div(ret,100); | ||
507 | return (u32)ret; | ||
508 | } | ||
509 | |||
510 | /** | ||
495 | * clocksource_max_deferment - Returns max time the clocksource can be deferred | 511 | * clocksource_max_deferment - Returns max time the clocksource can be deferred |
496 | * @cs: Pointer to clocksource | 512 | * @cs: Pointer to clocksource |
497 | * | 513 | * |
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs) | |||
503 | /* | 519 | /* |
504 | * Calculate the maximum number of cycles that we can pass to the | 520 | * Calculate the maximum number of cycles that we can pass to the |
505 | * cyc2ns function without overflowing a 64-bit signed result. The | 521 | * cyc2ns function without overflowing a 64-bit signed result. The |
506 | * maximum number of cycles is equal to ULLONG_MAX/cs->mult which | 522 | * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj) |
507 | * is equivalent to the below. | 523 | * which is equivalent to the below. |
508 | * max_cycles < (2^63)/cs->mult | 524 | * max_cycles < (2^63)/(cs->mult + cs->maxadj) |
509 | * max_cycles < 2^(log2((2^63)/cs->mult)) | 525 | * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj))) |
510 | * max_cycles < 2^(log2(2^63) - log2(cs->mult)) | 526 | * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj)) |
511 | * max_cycles < 2^(63 - log2(cs->mult)) | 527 | * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj)) |
512 | * max_cycles < 1 << (63 - log2(cs->mult)) | 528 | * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj)) |
513 | * Please note that we add 1 to the result of the log2 to account for | 529 | * Please note that we add 1 to the result of the log2 to account for |
514 | * any rounding errors, ensure the above inequality is satisfied and | 530 | * any rounding errors, ensure the above inequality is satisfied and |
515 | * no overflow will occur. | 531 | * no overflow will occur. |
516 | */ | 532 | */ |
517 | max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); | 533 | max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1)); |
518 | 534 | ||
519 | /* | 535 | /* |
520 | * The actual maximum number of cycles we can defer the clocksource is | 536 | * The actual maximum number of cycles we can defer the clocksource is |
521 | * determined by the minimum of max_cycles and cs->mask. | 537 | * determined by the minimum of max_cycles and cs->mask. |
538 | * Note: Here we subtract the maxadj to make sure we don't sleep for | ||
539 | * too long if there's a large negative adjustment. | ||
522 | */ | 540 | */ |
523 | max_cycles = min_t(u64, max_cycles, (u64) cs->mask); | 541 | max_cycles = min_t(u64, max_cycles, (u64) cs->mask); |
524 | max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); | 542 | max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj, |
543 | cs->shift); | ||
525 | 544 | ||
526 | /* | 545 | /* |
527 | * To ensure that the clocksource does not wrap whilst we are idle, | 546 | * To ensure that the clocksource does not wrap whilst we are idle, |
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs) | |||
640 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | 659 | void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) |
641 | { | 660 | { |
642 | u64 sec; | 661 | u64 sec; |
643 | |||
644 | /* | 662 | /* |
645 | * Calc the maximum number of seconds which we can run before | 663 | * Calc the maximum number of seconds which we can run before |
646 | * wrapping around. For clocksources which have a mask > 32bit | 664 | * wrapping around. For clocksources which have a mask > 32bit |
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
661 | 679 | ||
662 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, | 680 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, |
663 | NSEC_PER_SEC / scale, sec * scale); | 681 | NSEC_PER_SEC / scale, sec * scale); |
682 | |||
683 | /* | ||
684 | * For clocksources that have large mults, avoid overflow: since mult | ||
685 | * may be adjusted by NTP, add a safety margin. | ||
686 | * | ||
687 | */ | ||
688 | cs->maxadj = clocksource_max_adjustment(cs); | ||
689 | while ((cs->mult + cs->maxadj < cs->mult) | ||
690 | || (cs->mult - cs->maxadj > cs->mult)) { | ||
691 | cs->mult >>= 1; | ||
692 | cs->shift--; | ||
693 | cs->maxadj = clocksource_max_adjustment(cs); | ||
694 | } | ||
695 | |||
664 | cs->max_idle_ns = clocksource_max_deferment(cs); | 696 | cs->max_idle_ns = clocksource_max_deferment(cs); |
665 | } | 697 | } |
666 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); | 698 | EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); |
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale); | |||
701 | */ | 733 | */ |
702 | int clocksource_register(struct clocksource *cs) | 734 | int clocksource_register(struct clocksource *cs) |
703 | { | 735 | { |
736 | /* calculate max adjustment for given mult/shift */ | ||
737 | cs->maxadj = clocksource_max_adjustment(cs); | ||
738 | WARN_ONCE(cs->mult + cs->maxadj < cs->mult, | ||
739 | "Clocksource %s might overflow on 11%% adjustment\n", | ||
740 | cs->name); | ||
741 | |||
704 | /* calculate max idle time permitted for this clocksource */ | 742 | /* calculate max idle time permitted for this clocksource */ |
705 | cs->max_idle_ns = clocksource_max_deferment(cs); | 743 | cs->max_idle_ns = clocksource_max_deferment(cs); |
706 | 744 | ||
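For context, a sketch of a driver-side registration; the MMIO counter, its 24 MHz rate and the my_* names are hypothetical. The point of the hunks above is that maxadj is derived from mult inside clocksource_register()/__clocksource_updatefreq_scale(), so drivers never set it themselves. As a worked example of clocksource_max_adjustment(): for mult = 100,000,000 the cap is 100,000,000 * 11 / 100 = 11,000,000, i.e. 11%.

static void __iomem *my_counter_reg;		/* hypothetical free-running counter */

static cycle_t my_cs_read(struct clocksource *cs)
{
	return (cycle_t)readl(my_counter_reg);
}

static struct clocksource my_cs = {
	.name	= "my-counter",
	.rating	= 200,
	.read	= my_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_cs_init(void)
{
	/* computes mult/shift for 24 MHz, then maxadj and max_idle_ns as above */
	return clocksource_register_hz(&my_cs, 24000000);
}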
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 2b021b0e8507..237841378c03 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -249,6 +249,8 @@ ktime_t ktime_get(void) | |||
249 | secs = xtime.tv_sec + wall_to_monotonic.tv_sec; | 249 | secs = xtime.tv_sec + wall_to_monotonic.tv_sec; |
250 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; | 250 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; |
251 | nsecs += timekeeping_get_ns(); | 251 | nsecs += timekeeping_get_ns(); |
252 | /* If arch requires, add in gettimeoffset() */ | ||
253 | nsecs += arch_gettimeoffset(); | ||
252 | 254 | ||
253 | } while (read_seqretry(&xtime_lock, seq)); | 255 | } while (read_seqretry(&xtime_lock, seq)); |
254 | /* | 256 | /* |
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts) | |||
280 | *ts = xtime; | 282 | *ts = xtime; |
281 | tomono = wall_to_monotonic; | 283 | tomono = wall_to_monotonic; |
282 | nsecs = timekeeping_get_ns(); | 284 | nsecs = timekeeping_get_ns(); |
285 | /* If arch requires, add in gettimeoffset() */ | ||
286 | nsecs += arch_gettimeoffset(); | ||
283 | 287 | ||
284 | } while (read_seqretry(&xtime_lock, seq)); | 288 | } while (read_seqretry(&xtime_lock, seq)); |
285 | 289 | ||
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset) | |||
802 | s64 error, interval = timekeeper.cycle_interval; | 806 | s64 error, interval = timekeeper.cycle_interval; |
803 | int adj; | 807 | int adj; |
804 | 808 | ||
809 | /* | ||
810 | * The point of this is to check if the error is greater than half | ||
811 | * an interval. | ||
812 | * | ||
813 | * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. | ||
814 | * | ||
815 | * Note we subtract one in the shift, so that error is really error*2. | ||
816 | * This "saves" dividing (shifting) interval twice, but keeps the | ||
817 | * (error > interval) comparison as still measuring if error is | ||
818 | * larger than half an interval. | ||
819 | * | ||
820 | * Note: It does not "save" on aggravation when reading the code. | ||
821 | */ | ||
805 | error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); | 822 | error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); |
806 | if (error > interval) { | 823 | if (error > interval) { |
824 | /* | ||
825 | * We now divide error by 4 (via shift), which checks if | ||
826 | * the error is greater than twice the interval. | ||
827 | * If it is greater, we need a bigadjust; if it's smaller, | ||
828 | * we can adjust by 1. | ||
829 | */ | ||
807 | error >>= 2; | 830 | error >>= 2; |
831 | /* | ||
832 | * XXX - In update_wall_time, we round up to the next | ||
833 | * nanosecond, and store the amount rounded up into | ||
834 | * the error. This causes the likely below to be unlikely. | ||
835 | * | ||
836 | * The proper fix is to avoid rounding up by using | ||
837 | * the high precision timekeeper.xtime_nsec instead of | ||
838 | * xtime.tv_nsec everywhere. Fixing this will take some | ||
839 | * time. | ||
840 | */ | ||
808 | if (likely(error <= interval)) | 841 | if (likely(error <= interval)) |
809 | adj = 1; | 842 | adj = 1; |
810 | else | 843 | else |
811 | adj = timekeeping_bigadjust(error, &interval, &offset); | 844 | adj = timekeeping_bigadjust(error, &interval, &offset); |
812 | } else if (error < -interval) { | 845 | } else if (error < -interval) { |
846 | /* See comment above, this is just switched for the negative */ | ||
813 | error >>= 2; | 847 | error >>= 2; |
814 | if (likely(error >= -interval)) { | 848 | if (likely(error >= -interval)) { |
815 | adj = -1; | 849 | adj = -1; |
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset) | |||
817 | offset = -offset; | 851 | offset = -offset; |
818 | } else | 852 | } else |
819 | adj = timekeeping_bigadjust(error, &interval, &offset); | 853 | adj = timekeeping_bigadjust(error, &interval, &offset); |
820 | } else | 854 | } else /* No adjustment needed */ |
821 | return; | 855 | return; |
822 | 856 | ||
857 | WARN_ONCE(timekeeper.clock->maxadj && | ||
858 | (timekeeper.mult + adj > timekeeper.clock->mult + | ||
859 | timekeeper.clock->maxadj), | ||
860 | "Adjusting %s more then 11%% (%ld vs %ld)\n", | ||
861 | timekeeper.clock->name, (long)timekeeper.mult + adj, | ||
862 | (long)timekeeper.clock->mult + | ||
863 | timekeeper.clock->maxadj); | ||
864 | /* | ||
865 | * So the following can be confusing. | ||
866 | * | ||
867 | * To keep things simple, lets assume adj == 1 for now. | ||
868 | * | ||
869 | * When adj != 1, remember that the interval and offset values | ||
870 | * have been appropriately scaled so the math is the same. | ||
871 | * | ||
872 | * The basic idea here is that we're increasing the multiplier | ||
873 | * by one, this causes the xtime_interval to be incremented by | ||
874 | * one cycle_interval. This is because: | ||
875 | * xtime_interval = cycle_interval * mult | ||
876 | * So if mult is being incremented by one: | ||
877 | * xtime_interval = cycle_interval * (mult + 1) | ||
878 | * It's the same as: | ||
879 | * xtime_interval = (cycle_interval * mult) + cycle_interval | ||
880 | * Which can be shortened to: | ||
881 | * xtime_interval += cycle_interval | ||
882 | * | ||
883 | * So offset stores the non-accumulated cycles. Thus the current | ||
884 | * time (in shifted nanoseconds) is: | ||
885 | * now = (offset * adj) + xtime_nsec | ||
886 | * Now, even though we're adjusting the clock frequency, we have | ||
887 | * to keep time consistent. In other words, we can't jump back | ||
888 | * in time, and we also want to avoid jumping forward in time. | ||
889 | * | ||
890 | * So given the same offset value, we need the time to be the same | ||
891 | * both before and after the freq adjustment. | ||
892 | * now = (offset * adj_1) + xtime_nsec_1 | ||
893 | * now = (offset * adj_2) + xtime_nsec_2 | ||
894 | * So: | ||
895 | * (offset * adj_1) + xtime_nsec_1 = | ||
896 | * (offset * adj_2) + xtime_nsec_2 | ||
897 | * And we know: | ||
898 | * adj_2 = adj_1 + 1 | ||
899 | * So: | ||
900 | * (offset * adj_1) + xtime_nsec_1 = | ||
901 | * (offset * (adj_1+1)) + xtime_nsec_2 | ||
902 | * (offset * adj_1) + xtime_nsec_1 = | ||
903 | * (offset * adj_1) + offset + xtime_nsec_2 | ||
904 | * Canceling the sides: | ||
905 | * xtime_nsec_1 = offset + xtime_nsec_2 | ||
906 | * Which gives us: | ||
907 | * xtime_nsec_2 = xtime_nsec_1 - offset | ||
908 | * Which simplifies to: | ||
909 | * xtime_nsec -= offset | ||
910 | * | ||
911 | * XXX - TODO: Doc ntp_error calculation. | ||
912 | */ | ||
823 | timekeeper.mult += adj; | 913 | timekeeper.mult += adj; |
824 | timekeeper.xtime_interval += interval; | 914 | timekeeper.xtime_interval += interval; |
825 | timekeeper.xtime_nsec -= offset; | 915 | timekeeper.xtime_nsec -= offset; |
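The long comment above derives xtime_nsec -= offset from the requirement that the current time must not jump when mult changes. A stand-alone arithmetic check of that derivation, with arbitrary values and nothing kernel-specific:

#include <assert.h>

int main(void)
{
	unsigned long long offset = 1000, mult = 5, xtime_nsec = 123456;
	unsigned long long before = offset * mult + xtime_nsec;

	mult += 1;			/* adj == 1 */
	xtime_nsec -= offset;		/* the compensation applied above */

	assert(before == offset * mult + xtime_nsec);	/* time did not jump */
	return 0;
}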
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index a3278f005230..71252486bc6f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit; | |||
128 | * | 128 | * |
129 | */ | 129 | */ |
130 | static struct prop_descriptor vm_completions; | 130 | static struct prop_descriptor vm_completions; |
131 | static struct prop_descriptor vm_dirties; | ||
132 | 131 | ||
133 | /* | 132 | /* |
134 | * couple the period to the dirty_ratio: | 133 | * couple the period to the dirty_ratio: |
@@ -154,7 +153,6 @@ static void update_completion_period(void) | |||
154 | { | 153 | { |
155 | int shift = calc_period_shift(); | 154 | int shift = calc_period_shift(); |
156 | prop_change_shift(&vm_completions, shift); | 155 | prop_change_shift(&vm_completions, shift); |
157 | prop_change_shift(&vm_dirties, shift); | ||
158 | 156 | ||
159 | writeback_set_ratelimit(); | 157 | writeback_set_ratelimit(); |
160 | } | 158 | } |
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi) | |||
235 | } | 233 | } |
236 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); | 234 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); |
237 | 235 | ||
238 | void task_dirty_inc(struct task_struct *tsk) | ||
239 | { | ||
240 | prop_inc_single(&vm_dirties, &tsk->dirties); | ||
241 | } | ||
242 | |||
243 | /* | 236 | /* |
244 | * Obtain an accurate fraction of the BDI's portion. | 237 | * Obtain an accurate fraction of the BDI's portion. |
245 | */ | 238 | */ |
@@ -1133,17 +1126,17 @@ pause: | |||
1133 | pages_dirtied, | 1126 | pages_dirtied, |
1134 | pause, | 1127 | pause, |
1135 | start_time); | 1128 | start_time); |
1136 | __set_current_state(TASK_UNINTERRUPTIBLE); | 1129 | __set_current_state(TASK_KILLABLE); |
1137 | io_schedule_timeout(pause); | 1130 | io_schedule_timeout(pause); |
1138 | 1131 | ||
1139 | dirty_thresh = hard_dirty_limit(dirty_thresh); | ||
1140 | /* | 1132 | /* |
1141 | * max-pause area. If dirty exceeded but still within this | 1133 | * This is typically equal to (nr_dirty < dirty_thresh) and can |
1142 | * area, no need to sleep for more than 200ms: (a) 8 pages per | 1134 | * also keep "1000+ dd on a slow USB stick" under control. |
1143 | * 200ms is typically more than enough to curb heavy dirtiers; | ||
1144 | * (b) the pause time limit makes the dirtiers more responsive. | ||
1145 | */ | 1135 | */ |
1146 | if (nr_dirty < dirty_thresh) | 1136 | if (task_ratelimit) |
1137 | break; | ||
1138 | |||
1139 | if (fatal_signal_pending(current)) | ||
1147 | break; | 1140 | break; |
1148 | } | 1141 | } |
1149 | 1142 | ||
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void) | |||
1395 | 1388 | ||
1396 | shift = calc_period_shift(); | 1389 | shift = calc_period_shift(); |
1397 | prop_descriptor_init(&vm_completions, shift); | 1390 | prop_descriptor_init(&vm_completions, shift); |
1398 | prop_descriptor_init(&vm_dirties, shift); | ||
1399 | } | 1391 | } |
1400 | 1392 | ||
1401 | /** | 1393 | /** |
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) | |||
1724 | __inc_zone_page_state(page, NR_DIRTIED); | 1716 | __inc_zone_page_state(page, NR_DIRTIED); |
1725 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 1717 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
1726 | __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); | 1718 | __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); |
1727 | task_dirty_inc(current); | ||
1728 | task_io_account_write(PAGE_CACHE_SIZE); | 1719 | task_io_account_write(PAGE_CACHE_SIZE); |
1729 | } | 1720 | } |
1730 | } | 1721 | } |
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index ea534960a04b..12a48a88c0d8 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c | |||
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, | |||
50 | 50 | ||
51 | if (!pages || !bitmap) { | 51 | if (!pages || !bitmap) { |
52 | if (may_alloc && !pages) | 52 | if (may_alloc && !pages) |
53 | pages = pcpu_mem_alloc(pages_size); | 53 | pages = pcpu_mem_zalloc(pages_size); |
54 | if (may_alloc && !bitmap) | 54 | if (may_alloc && !bitmap) |
55 | bitmap = pcpu_mem_alloc(bitmap_size); | 55 | bitmap = pcpu_mem_zalloc(bitmap_size); |
56 | if (!pages || !bitmap) | 56 | if (!pages || !bitmap) |
57 | return NULL; | 57 | return NULL; |
58 | } | 58 | } |
59 | 59 | ||
60 | memset(pages, 0, pages_size); | ||
61 | bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); | 60 | bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); |
62 | 61 | ||
63 | *bitmapp = bitmap; | 62 | *bitmapp = bitmap; |
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, | |||
143 | int page_start, int page_end) | 142 | int page_start, int page_end) |
144 | { | 143 | { |
145 | flush_cache_vunmap( | 144 | flush_cache_vunmap( |
146 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | 145 | pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), |
147 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | 146 | pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); |
148 | } | 147 | } |
149 | 148 | ||
150 | static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) | 149 | static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) |
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, | |||
206 | int page_start, int page_end) | 205 | int page_start, int page_end) |
207 | { | 206 | { |
208 | flush_tlb_kernel_range( | 207 | flush_tlb_kernel_range( |
209 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | 208 | pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), |
210 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | 209 | pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); |
211 | } | 210 | } |
212 | 211 | ||
213 | static int __pcpu_map_pages(unsigned long addr, struct page **pages, | 212 | static int __pcpu_map_pages(unsigned long addr, struct page **pages, |
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk, | |||
284 | int page_start, int page_end) | 283 | int page_start, int page_end) |
285 | { | 284 | { |
286 | flush_cache_vmap( | 285 | flush_cache_vmap( |
287 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | 286 | pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), |
288 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | 287 | pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); |
289 | } | 288 | } |
290 | 289 | ||
291 | /** | 290 | /** |
diff --git a/mm/percpu.c b/mm/percpu.c index bf80e55dbed7..3bb810a72006 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly; | |||
116 | static int pcpu_nr_slots __read_mostly; | 116 | static int pcpu_nr_slots __read_mostly; |
117 | static size_t pcpu_chunk_struct_size __read_mostly; | 117 | static size_t pcpu_chunk_struct_size __read_mostly; |
118 | 118 | ||
119 | /* cpus with the lowest and highest unit numbers */ | 119 | /* cpus with the lowest and highest unit addresses */ |
120 | static unsigned int pcpu_first_unit_cpu __read_mostly; | 120 | static unsigned int pcpu_low_unit_cpu __read_mostly; |
121 | static unsigned int pcpu_last_unit_cpu __read_mostly; | 121 | static unsigned int pcpu_high_unit_cpu __read_mostly; |
122 | 122 | ||
123 | /* the address of the first chunk which starts with the kernel static area */ | 123 | /* the address of the first chunk which starts with the kernel static area */ |
124 | void *pcpu_base_addr __read_mostly; | 124 | void *pcpu_base_addr __read_mostly; |
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, | |||
273 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) | 273 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) |
274 | 274 | ||
275 | /** | 275 | /** |
276 | * pcpu_mem_alloc - allocate memory | 276 | * pcpu_mem_zalloc - allocate memory |
277 | * @size: bytes to allocate | 277 | * @size: bytes to allocate |
278 | * | 278 | * |
279 | * Allocate @size bytes. If @size is smaller than PAGE_SIZE, | 279 | * Allocate @size bytes. If @size is smaller than PAGE_SIZE, |
280 | * kzalloc() is used; otherwise, vmalloc() is used. The returned | 280 | * kzalloc() is used; otherwise, vzalloc() is used. The returned |
281 | * memory is always zeroed. | 281 | * memory is always zeroed. |
282 | * | 282 | * |
283 | * CONTEXT: | 283 | * CONTEXT: |
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, | |||
286 | * RETURNS: | 286 | * RETURNS: |
287 | * Pointer to the allocated area on success, NULL on failure. | 287 | * Pointer to the allocated area on success, NULL on failure. |
288 | */ | 288 | */ |
289 | static void *pcpu_mem_alloc(size_t size) | 289 | static void *pcpu_mem_zalloc(size_t size) |
290 | { | 290 | { |
291 | if (WARN_ON_ONCE(!slab_is_available())) | 291 | if (WARN_ON_ONCE(!slab_is_available())) |
292 | return NULL; | 292 | return NULL; |
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size) | |||
302 | * @ptr: memory to free | 302 | * @ptr: memory to free |
303 | * @size: size of the area | 303 | * @size: size of the area |
304 | * | 304 | * |
305 | * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). | 305 | * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). |
306 | */ | 306 | */ |
307 | static void pcpu_mem_free(void *ptr, size_t size) | 307 | static void pcpu_mem_free(void *ptr, size_t size) |
308 | { | 308 | { |
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) | |||
384 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); | 384 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); |
385 | unsigned long flags; | 385 | unsigned long flags; |
386 | 386 | ||
387 | new = pcpu_mem_alloc(new_size); | 387 | new = pcpu_mem_zalloc(new_size); |
388 | if (!new) | 388 | if (!new) |
389 | return -ENOMEM; | 389 | return -ENOMEM; |
390 | 390 | ||
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) | |||
604 | { | 604 | { |
605 | struct pcpu_chunk *chunk; | 605 | struct pcpu_chunk *chunk; |
606 | 606 | ||
607 | chunk = pcpu_mem_alloc(pcpu_chunk_struct_size); | 607 | chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); |
608 | if (!chunk) | 608 | if (!chunk) |
609 | return NULL; | 609 | return NULL; |
610 | 610 | ||
611 | chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); | 611 | chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * |
612 | sizeof(chunk->map[0])); | ||
612 | if (!chunk->map) { | 613 | if (!chunk->map) { |
613 | kfree(chunk); | 614 | kfree(chunk); |
614 | return NULL; | 615 | return NULL; |
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr) | |||
977 | * address. The caller is responsible for ensuring @addr stays valid | 978 | * address. The caller is responsible for ensuring @addr stays valid |
978 | * until this function finishes. | 979 | * until this function finishes. |
979 | * | 980 | * |
981 | * percpu allocator has special setup for the first chunk, which currently | ||
982 | * supports either embedding in linear address space or vmalloc mapping, | ||
983 | * and, from the second one, the backing allocator (currently either vm or | ||
984 | * km) provides translation. | ||
985 | * | ||
986 | * The addr can be translated simply without checking if it falls into the | ||
987 | * first chunk. But the current code better reflects how the percpu allocator | ||
988 | * actually works, and the verification can discover bugs both in the percpu | ||
989 | * allocator itself and in per_cpu_ptr_to_phys() callers. So we keep the current | ||
990 | * code. | ||
991 | * | ||
980 | * RETURNS: | 992 | * RETURNS: |
981 | * The physical address for @addr. | 993 | * The physical address for @addr. |
982 | */ | 994 | */ |
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr) | |||
984 | { | 996 | { |
985 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | 997 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
986 | bool in_first_chunk = false; | 998 | bool in_first_chunk = false; |
987 | unsigned long first_start, first_end; | 999 | unsigned long first_low, first_high; |
988 | unsigned int cpu; | 1000 | unsigned int cpu; |
989 | 1001 | ||
990 | /* | 1002 | /* |
991 | * The following test on first_start/end isn't strictly | 1003 | * The following test on unit_low/high isn't strictly |
992 | * necessary but will speed up lookups of addresses which | 1004 | * necessary but will speed up lookups of addresses which |
993 | * aren't in the first chunk. | 1005 | * aren't in the first chunk. |
994 | */ | 1006 | */ |
995 | first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0); | 1007 | first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); |
996 | first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu, | 1008 | first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, |
997 | pcpu_unit_pages); | 1009 | pcpu_unit_pages); |
998 | if ((unsigned long)addr >= first_start && | 1010 | if ((unsigned long)addr >= first_low && |
999 | (unsigned long)addr < first_end) { | 1011 | (unsigned long)addr < first_high) { |
1000 | for_each_possible_cpu(cpu) { | 1012 | for_each_possible_cpu(cpu) { |
1001 | void *start = per_cpu_ptr(base, cpu); | 1013 | void *start = per_cpu_ptr(base, cpu); |
1002 | 1014 | ||
@@ -1233,7 +1245,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1233 | 1245 | ||
1234 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | 1246 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
1235 | unit_map[cpu] = UINT_MAX; | 1247 | unit_map[cpu] = UINT_MAX; |
1236 | pcpu_first_unit_cpu = NR_CPUS; | 1248 | |
1249 | pcpu_low_unit_cpu = NR_CPUS; | ||
1250 | pcpu_high_unit_cpu = NR_CPUS; | ||
1237 | 1251 | ||
1238 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { | 1252 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
1239 | const struct pcpu_group_info *gi = &ai->groups[group]; | 1253 | const struct pcpu_group_info *gi = &ai->groups[group]; |
@@ -1253,9 +1267,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1253 | unit_map[cpu] = unit + i; | 1267 | unit_map[cpu] = unit + i; |
1254 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; | 1268 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
1255 | 1269 | ||
1256 | if (pcpu_first_unit_cpu == NR_CPUS) | 1270 | /* determine low/high unit_cpu */ |
1257 | pcpu_first_unit_cpu = cpu; | 1271 | if (pcpu_low_unit_cpu == NR_CPUS || |
1258 | pcpu_last_unit_cpu = cpu; | 1272 | unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) |
1273 | pcpu_low_unit_cpu = cpu; | ||
1274 | if (pcpu_high_unit_cpu == NR_CPUS || | ||
1275 | unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) | ||
1276 | pcpu_high_unit_cpu = cpu; | ||
1259 | } | 1277 | } |
1260 | } | 1278 | } |
1261 | pcpu_nr_units = unit; | 1279 | pcpu_nr_units = unit; |
@@ -1889,7 +1907,7 @@ void __init percpu_init_late(void) | |||
1889 | 1907 | ||
1890 | BUILD_BUG_ON(size > PAGE_SIZE); | 1908 | BUILD_BUG_ON(size > PAGE_SIZE); |
1891 | 1909 | ||
1892 | map = pcpu_mem_alloc(size); | 1910 | map = pcpu_mem_zalloc(size); |
1893 | BUG_ON(!map); | 1911 | BUG_ON(!map); |
1894 | 1912 | ||
1895 | spin_lock_irqsave(&pcpu_lock, flags); | 1913 | spin_lock_irqsave(&pcpu_lock, flags); |
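The percpu hunks above rename pcpu_mem_alloc() to pcpu_mem_zalloc() and, more importantly, replace the first/last unit bookkeeping with the CPUs whose units sit at the lowest and highest offsets, so the fast-path range test in per_cpu_ptr_to_phys() really brackets the first chunk's addresses even when unit offsets are not monotonic in CPU order. A minimal standalone sketch of that min/max tracking and bounds pre-filter, with made-up offsets and names, not the kernel code itself:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* unit offsets need not be monotonic in cpu order */
static unsigned long unit_off[NR_CPUS] = { 0x3000, 0x0000, 0x2000, 0x1000 };
static int low_cpu = -1, high_cpu = -1;

static void pick_low_high(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (low_cpu < 0 || unit_off[cpu] < unit_off[low_cpu])
			low_cpu = cpu;
		if (high_cpu < 0 || unit_off[cpu] > unit_off[high_cpu])
			high_cpu = cpu;
	}
}

/* cheap pre-filter: only addresses inside [low, high + unit_size) can
 * belong to the first chunk; the exact per-cpu scan runs afterwards */
static bool maybe_in_first_chunk(unsigned long base, unsigned long addr,
				 unsigned long unit_size)
{
	return addr >= base + unit_off[low_cpu] &&
	       addr <  base + unit_off[high_cpu] + unit_size;
}

int main(void)
{
	pick_low_high();
	printf("low=%d high=%d hit=%d\n", low_cpu, high_cpu,
	       maybe_in_first_chunk(0x100000, 0x100800, 0x1000));
	return 0;
}

The exact per-CPU scan still runs after the pre-filter; the low/high pair only exists to reject obvious misses cheaply.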
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s) | |||
1862 | { | 1862 | { |
1863 | struct kmem_cache_node *n = NULL; | 1863 | struct kmem_cache_node *n = NULL; |
1864 | struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); | 1864 | struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); |
1865 | struct page *page; | 1865 | struct page *page, *discard_page = NULL; |
1866 | 1866 | ||
1867 | while ((page = c->partial)) { | 1867 | while ((page = c->partial)) { |
1868 | enum slab_modes { M_PARTIAL, M_FREE }; | 1868 | enum slab_modes { M_PARTIAL, M_FREE }; |
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s) | |||
1904 | if (l == M_PARTIAL) | 1904 | if (l == M_PARTIAL) |
1905 | remove_partial(n, page); | 1905 | remove_partial(n, page); |
1906 | else | 1906 | else |
1907 | add_partial(n, page, 1); | 1907 | add_partial(n, page, |
1908 | DEACTIVATE_TO_TAIL); | ||
1908 | 1909 | ||
1909 | l = m; | 1910 | l = m; |
1910 | } | 1911 | } |
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s) | |||
1915 | "unfreezing slab")); | 1916 | "unfreezing slab")); |
1916 | 1917 | ||
1917 | if (m == M_FREE) { | 1918 | if (m == M_FREE) { |
1918 | stat(s, DEACTIVATE_EMPTY); | 1919 | page->next = discard_page; |
1919 | discard_slab(s, page); | 1920 | discard_page = page; |
1920 | stat(s, FREE_SLAB); | ||
1921 | } | 1921 | } |
1922 | } | 1922 | } |
1923 | 1923 | ||
1924 | if (n) | 1924 | if (n) |
1925 | spin_unlock(&n->list_lock); | 1925 | spin_unlock(&n->list_lock); |
1926 | |||
1927 | while (discard_page) { | ||
1928 | page = discard_page; | ||
1929 | discard_page = discard_page->next; | ||
1930 | |||
1931 | stat(s, DEACTIVATE_EMPTY); | ||
1932 | discard_slab(s, page); | ||
1933 | stat(s, FREE_SLAB); | ||
1934 | } | ||
1926 | } | 1935 | } |
1927 | 1936 | ||
1928 | /* | 1937 | /* |
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
1969 | page->pobjects = pobjects; | 1978 | page->pobjects = pobjects; |
1970 | page->next = oldpage; | 1979 | page->next = oldpage; |
1971 | 1980 | ||
1972 | } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); | 1981 | } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); |
1973 | stat(s, CPU_PARTIAL_FREE); | 1982 | stat(s, CPU_PARTIAL_FREE); |
1974 | return pobjects; | 1983 | return pobjects; |
1975 | } | 1984 | } |
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
4435 | 4444 | ||
4436 | for_each_possible_cpu(cpu) { | 4445 | for_each_possible_cpu(cpu) { |
4437 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); | 4446 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
4447 | int node = ACCESS_ONCE(c->node); | ||
4438 | struct page *page; | 4448 | struct page *page; |
4439 | 4449 | ||
4440 | if (!c || c->node < 0) | 4450 | if (node < 0) |
4441 | continue; | 4451 | continue; |
4442 | 4452 | page = ACCESS_ONCE(c->page); | |
4443 | if (c->page) { | 4453 | if (page) { |
4444 | if (flags & SO_TOTAL) | 4454 | if (flags & SO_TOTAL) |
4445 | x = c->page->objects; | 4455 | x = page->objects; |
4446 | else if (flags & SO_OBJECTS) | 4456 | else if (flags & SO_OBJECTS) |
4447 | x = c->page->inuse; | 4457 | x = page->inuse; |
4448 | else | 4458 | else |
4449 | x = 1; | 4459 | x = 1; |
4450 | 4460 | ||
4451 | total += x; | 4461 | total += x; |
4452 | nodes[c->node] += x; | 4462 | nodes[node] += x; |
4453 | } | 4463 | } |
4454 | page = c->partial; | 4464 | page = c->partial; |
4455 | 4465 | ||
4456 | if (page) { | 4466 | if (page) { |
4457 | x = page->pobjects; | 4467 | x = page->pobjects; |
4458 | total += x; | 4468 | total += x; |
4459 | nodes[c->node] += x; | 4469 | nodes[node] += x; |
4460 | } | 4470 | } |
4461 | per_cpu[c->node]++; | 4471 | per_cpu[node]++; |
4462 | } | 4472 | } |
4463 | } | 4473 | } |
4464 | 4474 | ||
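The unfreeze_partials() hunk above stops calling discard_slab() while n->list_lock is held: empty slabs are chained onto a local discard_page list and torn down only after the lock is released. A hedged userspace sketch of that defer-then-free pattern, using a pthread mutex and an illustrative struct page, not the slub internals:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct page { int id; struct page *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *partial_list;          /* protected by list_lock */

static void drain_partials(void)
{
	struct page *page, *discard = NULL;

	pthread_mutex_lock(&list_lock);
	while ((page = partial_list)) {
		partial_list = page->next;
		/* don't free under the lock; just queue the page locally */
		page->next = discard;
		discard = page;
	}
	pthread_mutex_unlock(&list_lock);

	/* the expensive teardown happens outside the critical section */
	while (discard) {
		page = discard;
		discard = discard->next;
		printf("freeing page %d\n", page->id);
		free(page);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct page *p = malloc(sizeof(*p));
		p->id = i;
		p->next = partial_list;
		partial_list = p;
	}
	drain_partials();
	return 0;
}

Keeping the free work outside the critical section shortens lock hold times without changing which pages end up freed.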
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e5f9ece3c9a0..a1daf8227ed1 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <net/sock.h> | 18 | #include <net/sock.h> |
19 | 19 | ||
20 | #include "br_private.h" | 20 | #include "br_private.h" |
21 | #include "br_private_stp.h" | ||
21 | 22 | ||
22 | static inline size_t br_nlmsg_size(void) | 23 | static inline size_t br_nlmsg_size(void) |
23 | { | 24 | { |
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
188 | 189 | ||
189 | p->state = new_state; | 190 | p->state = new_state; |
190 | br_log_state(p); | 191 | br_log_state(p); |
192 | |||
193 | spin_lock_bh(&p->br->lock); | ||
194 | br_port_state_selection(p->br); | ||
195 | spin_unlock_bh(&p->br->lock); | ||
196 | |||
191 | br_ifinfo_notify(RTM_NEWLINK, p); | 197 | br_ifinfo_notify(RTM_NEWLINK, p); |
192 | 198 | ||
193 | return 0; | 199 | return 0; |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index ad0a3f7cf6cc..dd147d78a588 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br) | |||
399 | struct net_bridge_port *p; | 399 | struct net_bridge_port *p; |
400 | unsigned int liveports = 0; | 400 | unsigned int liveports = 0; |
401 | 401 | ||
402 | /* Don't change port states if userspace is handling STP */ | ||
403 | if (br->stp_enabled == BR_USER_STP) | ||
404 | return; | ||
405 | |||
406 | list_for_each_entry(p, &br->port_list, list) { | 402 | list_for_each_entry(p, &br->port_list, list) { |
407 | if (p->state == BR_STATE_DISABLED) | 403 | if (p->state == BR_STATE_DISABLED) |
408 | continue; | 404 | continue; |
409 | 405 | ||
410 | if (p->port_no == br->root_port) { | 406 | /* Don't change port states if userspace is handling STP */ |
411 | p->config_pending = 0; | 407 | if (br->stp_enabled != BR_USER_STP) { |
412 | p->topology_change_ack = 0; | 408 | if (p->port_no == br->root_port) { |
413 | br_make_forwarding(p); | 409 | p->config_pending = 0; |
414 | } else if (br_is_designated_port(p)) { | 410 | p->topology_change_ack = 0; |
415 | del_timer(&p->message_age_timer); | 411 | br_make_forwarding(p); |
416 | br_make_forwarding(p); | 412 | } else if (br_is_designated_port(p)) { |
417 | } else { | 413 | del_timer(&p->message_age_timer); |
418 | p->config_pending = 0; | 414 | br_make_forwarding(p); |
419 | p->topology_change_ack = 0; | 415 | } else { |
420 | br_make_blocking(p); | 416 | p->config_pending = 0; |
417 | p->topology_change_ack = 0; | ||
418 | br_make_blocking(p); | ||
419 | } | ||
421 | } | 420 | } |
422 | 421 | ||
423 | if (p->state == BR_STATE_FORWARDING) | 422 | if (p->state == BR_STATE_FORWARDING) |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 733e46008b89..f4f3f58f5234 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
244 | ceph_pagelist_init(req->r_trail); | 244 | ceph_pagelist_init(req->r_trail); |
245 | } | 245 | } |
246 | /* create request message; allow space for oid */ | 246 | /* create request message; allow space for oid */ |
247 | msg_size += 40; | 247 | msg_size += MAX_OBJ_NAME_SIZE; |
248 | if (snapc) | 248 | if (snapc) |
249 | msg_size += sizeof(u64) * snapc->num_snaps; | 249 | msg_size += sizeof(u64) * snapc->num_snaps; |
250 | if (use_mempool) | 250 | if (use_mempool) |
diff --git a/net/core/dev.c b/net/core/dev.c index e0c3deec59b0..f494675471a9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1387,7 +1387,7 @@ rollback: | |||
1387 | for_each_net(net) { | 1387 | for_each_net(net) { |
1388 | for_each_netdev(net, dev) { | 1388 | for_each_netdev(net, dev) { |
1389 | if (dev == last) | 1389 | if (dev == last) |
1390 | break; | 1390 | goto outroll; |
1391 | 1391 | ||
1392 | if (dev->flags & IFF_UP) { | 1392 | if (dev->flags & IFF_UP) { |
1393 | nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); | 1393 | nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); |
@@ -1398,6 +1398,7 @@ rollback: | |||
1398 | } | 1398 | } |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | outroll: | ||
1401 | raw_notifier_chain_unregister(&netdev_chain, nb); | 1402 | raw_notifier_chain_unregister(&netdev_chain, nb); |
1402 | goto unlock; | 1403 | goto unlock; |
1403 | } | 1404 | } |
@@ -4209,6 +4210,12 @@ static int dev_seq_open(struct inode *inode, struct file *file) | |||
4209 | sizeof(struct dev_iter_state)); | 4210 | sizeof(struct dev_iter_state)); |
4210 | } | 4211 | } |
4211 | 4212 | ||
4213 | int dev_seq_open_ops(struct inode *inode, struct file *file, | ||
4214 | const struct seq_operations *ops) | ||
4215 | { | ||
4216 | return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state)); | ||
4217 | } | ||
4218 | |||
4212 | static const struct file_operations dev_seq_fops = { | 4219 | static const struct file_operations dev_seq_fops = { |
4213 | .owner = THIS_MODULE, | 4220 | .owner = THIS_MODULE, |
4214 | .open = dev_seq_open, | 4221 | .open = dev_seq_open, |
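The first net/core/dev.c hunk fixes the notifier rollback path: a plain break only left the inner for_each_netdev() loop, so the outer for_each_net() loop kept sending GOING_DOWN/DOWN events for namespaces past the failure point, while goto outroll leaves both loops at once. A tiny standalone illustration, with hypothetical arrays standing in for the net and device lists:

#include <stdio.h>

int main(void)
{
	const char *nets[] = { "init_net", "netns_a" };
	const char *devs[] = { "lo", "eth0", "eth1" };
	int fail_net = 0, fail_dev = 1;     /* registration failed at init_net/eth0 */

	for (int n = 0; n < 2; n++) {
		for (int d = 0; d < 3; d++) {
			if (n == fail_net && d == fail_dev)
				goto outroll;   /* a plain break would only end the
						 * inner loop and wrongly keep rolling
						 * back devices in netns_a */
			printf("rollback %s/%s\n", nets[n], devs[d]);
		}
	}
outroll:
	return 0;
}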
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 277faef9148d..febba516db62 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = { | |||
696 | 696 | ||
697 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | 697 | static int dev_mc_seq_open(struct inode *inode, struct file *file) |
698 | { | 698 | { |
699 | return seq_open_net(inode, file, &dev_mc_seq_ops, | 699 | return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); |
700 | sizeof(struct seq_net_private)); | ||
701 | } | 700 | } |
702 | 701 | ||
703 | static const struct file_operations dev_mc_seq_fops = { | 702 | static const struct file_operations dev_mc_seq_fops = { |
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c index 67f691bd4acf..d9c150cc59a9 100644 --- a/net/decnet/dn_timer.c +++ b/net/decnet/dn_timer.c | |||
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg); | |||
36 | 36 | ||
37 | void dn_start_slow_timer(struct sock *sk) | 37 | void dn_start_slow_timer(struct sock *sk) |
38 | { | 38 | { |
39 | sk->sk_timer.expires = jiffies + SLOW_INTERVAL; | 39 | setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk); |
40 | sk->sk_timer.function = dn_slow_timer; | 40 | sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); |
41 | sk->sk_timer.data = (unsigned long)sk; | ||
42 | |||
43 | add_timer(&sk->sk_timer); | ||
44 | } | 41 | } |
45 | 42 | ||
46 | void dn_stop_slow_timer(struct sock *sk) | 43 | void dn_stop_slow_timer(struct sock *sk) |
47 | { | 44 | { |
48 | del_timer(&sk->sk_timer); | 45 | sk_stop_timer(sk, &sk->sk_timer); |
49 | } | 46 | } |
50 | 47 | ||
51 | static void dn_slow_timer(unsigned long arg) | 48 | static void dn_slow_timer(unsigned long arg) |
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg) | |||
53 | struct sock *sk = (struct sock *)arg; | 50 | struct sock *sk = (struct sock *)arg; |
54 | struct dn_scp *scp = DN_SK(sk); | 51 | struct dn_scp *scp = DN_SK(sk); |
55 | 52 | ||
56 | sock_hold(sk); | ||
57 | bh_lock_sock(sk); | 53 | bh_lock_sock(sk); |
58 | 54 | ||
59 | if (sock_owned_by_user(sk)) { | 55 | if (sock_owned_by_user(sk)) { |
60 | sk->sk_timer.expires = jiffies + HZ / 10; | 56 | sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10); |
61 | add_timer(&sk->sk_timer); | ||
62 | goto out; | 57 | goto out; |
63 | } | 58 | } |
64 | 59 | ||
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg) | |||
100 | scp->keepalive_fxn(sk); | 95 | scp->keepalive_fxn(sk); |
101 | } | 96 | } |
102 | 97 | ||
103 | sk->sk_timer.expires = jiffies + SLOW_INTERVAL; | 98 | sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); |
104 | |||
105 | add_timer(&sk->sk_timer); | ||
106 | out: | 99 | out: |
107 | bh_unlock_sock(sk); | 100 | bh_unlock_sock(sk); |
108 | sock_put(sk); | 101 | sock_put(sk); |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index c6b5092f29a1..65f01dc47565 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, | |||
1490 | void __user *buffer, | 1490 | void __user *buffer, |
1491 | size_t *lenp, loff_t *ppos) | 1491 | size_t *lenp, loff_t *ppos) |
1492 | { | 1492 | { |
1493 | int old_value = *(int *)ctl->data; | ||
1493 | int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); | 1494 | int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); |
1495 | int new_value = *(int *)ctl->data; | ||
1494 | 1496 | ||
1495 | if (write) { | 1497 | if (write) { |
1496 | struct ipv4_devconf *cnf = ctl->extra1; | 1498 | struct ipv4_devconf *cnf = ctl->extra1; |
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, | |||
1501 | 1503 | ||
1502 | if (cnf == net->ipv4.devconf_dflt) | 1504 | if (cnf == net->ipv4.devconf_dflt) |
1503 | devinet_copy_dflt_conf(net, i); | 1505 | devinet_copy_dflt_conf(net, i); |
1506 | if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1) | ||
1507 | if ((new_value == 0) && (old_value != 0)) | ||
1508 | rt_cache_flush(net, 0); | ||
1504 | } | 1509 | } |
1505 | 1510 | ||
1506 | return ret; | 1511 | return ret; |
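The devinet hunk snapshots the sysctl value before and after proc_dointvec() so the handler can flush the route cache only when ACCEPT_LOCAL actually drops from non-zero to zero. A minimal sketch of that capture-old/compare-new handler shape; the names are illustrative and a plain assignment stands in for proc_dointvec():

#include <stdio.h>

static int accept_local = 1;                /* backing storage for the knob */

static void flush_route_cache(void)
{
	printf("route cache flushed\n");
}

/* write a new value and react only to the non-zero -> zero transition */
static int conf_proc_write(int *data, int value)
{
	int old_value = *data;

	*data = value;                      /* stands in for proc_dointvec() */

	if (*data == 0 && old_value != 0)
		flush_route_cache();
	return 0;
}

int main(void)
{
	conf_proc_write(&accept_local, 0);  /* flushes */
	conf_proc_write(&accept_local, 0);  /* no transition, no flush */
	return 0;
}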
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 9899619ab9b8..4f47e064e262 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
64 | /* Change in oif may mean change in hh_len. */ | 64 | /* Change in oif may mean change in hh_len. */ |
65 | hh_len = skb_dst(skb)->dev->hard_header_len; | 65 | hh_len = skb_dst(skb)->dev->hard_header_len; |
66 | if (skb_headroom(skb) < hh_len && | 66 | if (skb_headroom(skb) < hh_len && |
67 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) | 67 | pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), |
68 | 0, GFP_ATOMIC)) | ||
68 | return -1; | 69 | return -1; |
69 | 70 | ||
70 | return 0; | 71 | return 0; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 9a20663d5969..7047069cf967 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -130,6 +130,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; | |||
130 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; | 130 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; |
131 | static int ip_rt_min_advmss __read_mostly = 256; | 131 | static int ip_rt_min_advmss __read_mostly = 256; |
132 | static int rt_chain_length_max __read_mostly = 20; | 132 | static int rt_chain_length_max __read_mostly = 20; |
133 | static int redirect_genid; | ||
133 | 134 | ||
134 | /* | 135 | /* |
135 | * Interface to generic destination cache. | 136 | * Interface to generic destination cache. |
@@ -415,9 +416,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
415 | else { | 416 | else { |
416 | struct rtable *r = v; | 417 | struct rtable *r = v; |
417 | struct neighbour *n; | 418 | struct neighbour *n; |
418 | int len; | 419 | int len, HHUptod; |
419 | 420 | ||
421 | rcu_read_lock(); | ||
420 | n = dst_get_neighbour(&r->dst); | 422 | n = dst_get_neighbour(&r->dst); |
423 | HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0; | ||
424 | rcu_read_unlock(); | ||
425 | |||
421 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" | 426 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" |
422 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", | 427 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", |
423 | r->dst.dev ? r->dst.dev->name : "*", | 428 | r->dst.dev ? r->dst.dev->name : "*", |
@@ -431,7 +436,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
431 | dst_metric(&r->dst, RTAX_RTTVAR)), | 436 | dst_metric(&r->dst, RTAX_RTTVAR)), |
432 | r->rt_key_tos, | 437 | r->rt_key_tos, |
433 | -1, | 438 | -1, |
434 | (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0, | 439 | HHUptod, |
435 | r->rt_spec_dst, &len); | 440 | r->rt_spec_dst, &len); |
436 | 441 | ||
437 | seq_printf(seq, "%*s\n", 127 - len, ""); | 442 | seq_printf(seq, "%*s\n", 127 - len, ""); |
@@ -836,6 +841,7 @@ static void rt_cache_invalidate(struct net *net) | |||
836 | 841 | ||
837 | get_random_bytes(&shuffle, sizeof(shuffle)); | 842 | get_random_bytes(&shuffle, sizeof(shuffle)); |
838 | atomic_add(shuffle + 1U, &net->ipv4.rt_genid); | 843 | atomic_add(shuffle + 1U, &net->ipv4.rt_genid); |
844 | redirect_genid++; | ||
839 | } | 845 | } |
840 | 846 | ||
841 | /* | 847 | /* |
@@ -1385,8 +1391,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1385 | 1391 | ||
1386 | peer = rt->peer; | 1392 | peer = rt->peer; |
1387 | if (peer) { | 1393 | if (peer) { |
1388 | if (peer->redirect_learned.a4 != new_gw) { | 1394 | if (peer->redirect_learned.a4 != new_gw || |
1395 | peer->redirect_genid != redirect_genid) { | ||
1389 | peer->redirect_learned.a4 = new_gw; | 1396 | peer->redirect_learned.a4 = new_gw; |
1397 | peer->redirect_genid = redirect_genid; | ||
1390 | atomic_inc(&__rt_peer_genid); | 1398 | atomic_inc(&__rt_peer_genid); |
1391 | } | 1399 | } |
1392 | check_peer_redir(&rt->dst, peer); | 1400 | check_peer_redir(&rt->dst, peer); |
@@ -1679,12 +1687,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
1679 | } | 1687 | } |
1680 | 1688 | ||
1681 | 1689 | ||
1682 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | 1690 | static struct rtable *ipv4_validate_peer(struct rtable *rt) |
1683 | { | 1691 | { |
1684 | struct rtable *rt = (struct rtable *) dst; | ||
1685 | |||
1686 | if (rt_is_expired(rt)) | ||
1687 | return NULL; | ||
1688 | if (rt->rt_peer_genid != rt_peer_genid()) { | 1692 | if (rt->rt_peer_genid != rt_peer_genid()) { |
1689 | struct inet_peer *peer; | 1693 | struct inet_peer *peer; |
1690 | 1694 | ||
@@ -1693,17 +1697,29 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | |||
1693 | 1697 | ||
1694 | peer = rt->peer; | 1698 | peer = rt->peer; |
1695 | if (peer) { | 1699 | if (peer) { |
1696 | check_peer_pmtu(dst, peer); | 1700 | check_peer_pmtu(&rt->dst, peer); |
1697 | 1701 | ||
1702 | if (peer->redirect_genid != redirect_genid) | ||
1703 | peer->redirect_learned.a4 = 0; | ||
1698 | if (peer->redirect_learned.a4 && | 1704 | if (peer->redirect_learned.a4 && |
1699 | peer->redirect_learned.a4 != rt->rt_gateway) { | 1705 | peer->redirect_learned.a4 != rt->rt_gateway) { |
1700 | if (check_peer_redir(dst, peer)) | 1706 | if (check_peer_redir(&rt->dst, peer)) |
1701 | return NULL; | 1707 | return NULL; |
1702 | } | 1708 | } |
1703 | } | 1709 | } |
1704 | 1710 | ||
1705 | rt->rt_peer_genid = rt_peer_genid(); | 1711 | rt->rt_peer_genid = rt_peer_genid(); |
1706 | } | 1712 | } |
1713 | return rt; | ||
1714 | } | ||
1715 | |||
1716 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | ||
1717 | { | ||
1718 | struct rtable *rt = (struct rtable *) dst; | ||
1719 | |||
1720 | if (rt_is_expired(rt)) | ||
1721 | return NULL; | ||
1722 | dst = (struct dst_entry *) ipv4_validate_peer(rt); | ||
1707 | return dst; | 1723 | return dst; |
1708 | } | 1724 | } |
1709 | 1725 | ||
@@ -1851,6 +1867,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4, | |||
1851 | dst_init_metrics(&rt->dst, peer->metrics, false); | 1867 | dst_init_metrics(&rt->dst, peer->metrics, false); |
1852 | 1868 | ||
1853 | check_peer_pmtu(&rt->dst, peer); | 1869 | check_peer_pmtu(&rt->dst, peer); |
1870 | if (peer->redirect_genid != redirect_genid) | ||
1871 | peer->redirect_learned.a4 = 0; | ||
1854 | if (peer->redirect_learned.a4 && | 1872 | if (peer->redirect_learned.a4 && |
1855 | peer->redirect_learned.a4 != rt->rt_gateway) { | 1873 | peer->redirect_learned.a4 != rt->rt_gateway) { |
1856 | rt->rt_gateway = peer->redirect_learned.a4; | 1874 | rt->rt_gateway = peer->redirect_learned.a4; |
@@ -2356,6 +2374,9 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2356 | rth->rt_mark == skb->mark && | 2374 | rth->rt_mark == skb->mark && |
2357 | net_eq(dev_net(rth->dst.dev), net) && | 2375 | net_eq(dev_net(rth->dst.dev), net) && |
2358 | !rt_is_expired(rth)) { | 2376 | !rt_is_expired(rth)) { |
2377 | rth = ipv4_validate_peer(rth); | ||
2378 | if (!rth) | ||
2379 | continue; | ||
2359 | if (noref) { | 2380 | if (noref) { |
2360 | dst_use_noref(&rth->dst, jiffies); | 2381 | dst_use_noref(&rth->dst, jiffies); |
2361 | skb_dst_set_noref(skb, &rth->dst); | 2382 | skb_dst_set_noref(skb, &rth->dst); |
@@ -2731,6 +2752,9 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4) | |||
2731 | (IPTOS_RT_MASK | RTO_ONLINK)) && | 2752 | (IPTOS_RT_MASK | RTO_ONLINK)) && |
2732 | net_eq(dev_net(rth->dst.dev), net) && | 2753 | net_eq(dev_net(rth->dst.dev), net) && |
2733 | !rt_is_expired(rth)) { | 2754 | !rt_is_expired(rth)) { |
2755 | rth = ipv4_validate_peer(rth); | ||
2756 | if (!rth) | ||
2757 | continue; | ||
2734 | dst_use(&rth->dst, jiffies); | 2758 | dst_use(&rth->dst, jiffies); |
2735 | RT_CACHE_STAT_INC(out_hit); | 2759 | RT_CACHE_STAT_INC(out_hit); |
2736 | rcu_read_unlock_bh(); | 2760 | rcu_read_unlock_bh(); |
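The route.c changes add redirect_genid, a global counter bumped in rt_cache_invalidate(); each inet_peer records the generation in which it learned a redirect, and a stale learned gateway is discarded lazily the next time the peer is validated. A standalone sketch of that lazy generation-check pattern, with illustrative structs rather than the kernel types:

#include <stdio.h>

static unsigned int redirect_genid;         /* bumped on cache invalidation */

struct peer {
	unsigned int redirect_genid;        /* generation the redirect was learned in */
	unsigned int learned_gw;            /* 0 means "nothing learned" */
};

static void learn_redirect(struct peer *p, unsigned int gw)
{
	if (p->learned_gw != gw || p->redirect_genid != redirect_genid) {
		p->learned_gw = gw;
		p->redirect_genid = redirect_genid;
	}
}

static unsigned int current_gw(struct peer *p, unsigned int configured_gw)
{
	/* stale generation: forget the learned gateway lazily */
	if (p->redirect_genid != redirect_genid)
		p->learned_gw = 0;
	return p->learned_gw ? p->learned_gw : configured_gw;
}

int main(void)
{
	struct peer p = { 0, 0 };

	learn_redirect(&p, 0x0a000001);
	printf("gw=%#x\n", current_gw(&p, 0x0a0000fe));   /* learned gw wins */

	redirect_genid++;                   /* the rt_cache_invalidate() equivalent */
	printf("gw=%#x\n", current_gw(&p, 0x0a0000fe));   /* back to configured gw */
	return 0;
}

Bumping a single counter invalidates every learned redirect at once, without walking all peers.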
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b867ea23ece9..ad481b32f1e3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1164 | struct inet_sock *inet = inet_sk(sk); | 1164 | struct inet_sock *inet = inet_sk(sk); |
1165 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | 1165 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; |
1166 | struct sk_buff *skb; | 1166 | struct sk_buff *skb; |
1167 | unsigned int ulen; | 1167 | unsigned int ulen, copied; |
1168 | int peeked; | 1168 | int peeked; |
1169 | int err; | 1169 | int err; |
1170 | int is_udplite = IS_UDPLITE(sk); | 1170 | int is_udplite = IS_UDPLITE(sk); |
@@ -1186,9 +1186,10 @@ try_again: | |||
1186 | goto out; | 1186 | goto out; |
1187 | 1187 | ||
1188 | ulen = skb->len - sizeof(struct udphdr); | 1188 | ulen = skb->len - sizeof(struct udphdr); |
1189 | if (len > ulen) | 1189 | copied = len; |
1190 | len = ulen; | 1190 | if (copied > ulen) |
1191 | else if (len < ulen) | 1191 | copied = ulen; |
1192 | else if (copied < ulen) | ||
1192 | msg->msg_flags |= MSG_TRUNC; | 1193 | msg->msg_flags |= MSG_TRUNC; |
1193 | 1194 | ||
1194 | /* | 1195 | /* |
@@ -1197,14 +1198,14 @@ try_again: | |||
1197 | * coverage checksum (UDP-Lite), do it before the copy. | 1198 | * coverage checksum (UDP-Lite), do it before the copy. |
1198 | */ | 1199 | */ |
1199 | 1200 | ||
1200 | if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { | 1201 | if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { |
1201 | if (udp_lib_checksum_complete(skb)) | 1202 | if (udp_lib_checksum_complete(skb)) |
1202 | goto csum_copy_err; | 1203 | goto csum_copy_err; |
1203 | } | 1204 | } |
1204 | 1205 | ||
1205 | if (skb_csum_unnecessary(skb)) | 1206 | if (skb_csum_unnecessary(skb)) |
1206 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 1207 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
1207 | msg->msg_iov, len); | 1208 | msg->msg_iov, copied); |
1208 | else { | 1209 | else { |
1209 | err = skb_copy_and_csum_datagram_iovec(skb, | 1210 | err = skb_copy_and_csum_datagram_iovec(skb, |
1210 | sizeof(struct udphdr), | 1211 | sizeof(struct udphdr), |
@@ -1233,7 +1234,7 @@ try_again: | |||
1233 | if (inet->cmsg_flags) | 1234 | if (inet->cmsg_flags) |
1234 | ip_cmsg_recv(msg, skb); | 1235 | ip_cmsg_recv(msg, skb); |
1235 | 1236 | ||
1236 | err = len; | 1237 | err = copied; |
1237 | if (flags & MSG_TRUNC) | 1238 | if (flags & MSG_TRUNC) |
1238 | err = ulen; | 1239 | err = ulen; |
1239 | 1240 | ||
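Both UDP recvmsg hunks stop clamping the caller's len in place: a separate copied variable tracks what is actually copied, MSG_TRUNC is derived from it, and the full datagram length ulen stays available for the MSG_TRUNC return case. A minimal userspace sketch of that bookkeeping, with memcpy standing in for the iovec copy helpers:

#include <stdio.h>
#include <string.h>

#define MSG_TRUNC_FLAG 0x1

/* returns bytes copied; *flags gets MSG_TRUNC_FLAG if the datagram was cut */
static size_t recv_datagram(const char *skb, size_t ulen,
			    char *buf, size_t len, int *flags)
{
	size_t copied = len;

	if (copied > ulen)
		copied = ulen;              /* buffer bigger than datagram */
	else if (copied < ulen)
		*flags |= MSG_TRUNC_FLAG;   /* datagram bigger than buffer */

	memcpy(buf, skb, copied);           /* len itself is left untouched */
	return copied;
}

int main(void)
{
	char datagram[] = "0123456789";     /* ulen = 10 */
	char small[4];
	int flags = 0;

	size_t copied = recv_datagram(datagram, 10, small, sizeof(small), &flags);
	printf("copied=%zu truncated=%d\n", copied, !!(flags & MSG_TRUNC_FLAG));
	return 0;
}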
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 29993b7079a5..18a2719003c3 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -503,7 +503,7 @@ done: | |||
503 | goto e_inval; | 503 | goto e_inval; |
504 | if (val > 255 || val < -1) | 504 | if (val > 255 || val < -1) |
505 | goto e_inval; | 505 | goto e_inval; |
506 | np->mcast_hops = val; | 506 | np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val); |
507 | retv = 0; | 507 | retv = 0; |
508 | break; | 508 | break; |
509 | 509 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 84ec9db86ee0..adfe26a7fc63 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
340 | struct ipv6_pinfo *np = inet6_sk(sk); | 340 | struct ipv6_pinfo *np = inet6_sk(sk); |
341 | struct inet_sock *inet = inet_sk(sk); | 341 | struct inet_sock *inet = inet_sk(sk); |
342 | struct sk_buff *skb; | 342 | struct sk_buff *skb; |
343 | unsigned int ulen; | 343 | unsigned int ulen, copied; |
344 | int peeked; | 344 | int peeked; |
345 | int err; | 345 | int err; |
346 | int is_udplite = IS_UDPLITE(sk); | 346 | int is_udplite = IS_UDPLITE(sk); |
@@ -363,9 +363,10 @@ try_again: | |||
363 | goto out; | 363 | goto out; |
364 | 364 | ||
365 | ulen = skb->len - sizeof(struct udphdr); | 365 | ulen = skb->len - sizeof(struct udphdr); |
366 | if (len > ulen) | 366 | copied = len; |
367 | len = ulen; | 367 | if (copied > ulen) |
368 | else if (len < ulen) | 368 | copied = ulen; |
369 | else if (copied < ulen) | ||
369 | msg->msg_flags |= MSG_TRUNC; | 370 | msg->msg_flags |= MSG_TRUNC; |
370 | 371 | ||
371 | is_udp4 = (skb->protocol == htons(ETH_P_IP)); | 372 | is_udp4 = (skb->protocol == htons(ETH_P_IP)); |
@@ -376,14 +377,14 @@ try_again: | |||
376 | * coverage checksum (UDP-Lite), do it before the copy. | 377 | * coverage checksum (UDP-Lite), do it before the copy. |
377 | */ | 378 | */ |
378 | 379 | ||
379 | if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { | 380 | if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { |
380 | if (udp_lib_checksum_complete(skb)) | 381 | if (udp_lib_checksum_complete(skb)) |
381 | goto csum_copy_err; | 382 | goto csum_copy_err; |
382 | } | 383 | } |
383 | 384 | ||
384 | if (skb_csum_unnecessary(skb)) | 385 | if (skb_csum_unnecessary(skb)) |
385 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 386 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
386 | msg->msg_iov,len); | 387 | msg->msg_iov, copied); |
387 | else { | 388 | else { |
388 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 389 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
389 | if (err == -EINVAL) | 390 | if (err == -EINVAL) |
@@ -431,7 +432,7 @@ try_again: | |||
431 | datagram_recv_ctl(sk, msg, skb); | 432 | datagram_recv_ctl(sk, msg, skb); |
432 | } | 433 | } |
433 | 434 | ||
434 | err = len; | 435 | err = copied; |
435 | if (flags & MSG_TRUNC) | 436 | if (flags & MSG_TRUNC) |
436 | err = ulen; | 437 | err = ulen; |
437 | 438 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index cf0f308abf5e..89ff8c67943e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
1072 | 1072 | ||
1073 | /* Get routing info from the tunnel socket */ | 1073 | /* Get routing info from the tunnel socket */ |
1074 | skb_dst_drop(skb); | 1074 | skb_dst_drop(skb); |
1075 | skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); | 1075 | skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); |
1076 | 1076 | ||
1077 | inet = inet_sk(sk); | 1077 | inet = inet_sk(sk); |
1078 | fl = &inet->cork.fl; | 1078 | fl = &inet->cork.fl; |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 39d72ccaffb3..556765749b9c 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
162 | return -ENOENT; | 162 | return -ENOENT; |
163 | } | 163 | } |
164 | 164 | ||
165 | /* if we're already stopping ignore any new requests to stop */ | ||
166 | if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
167 | spin_unlock_bh(&sta->lock); | ||
168 | return -EALREADY; | ||
169 | } | ||
170 | |||
165 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | 171 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { |
166 | /* not even started yet! */ | 172 | /* not even started yet! */ |
167 | ieee80211_assign_tid_tx(sta, tid, NULL); | 173 | ieee80211_assign_tid_tx(sta, tid, NULL); |
@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
170 | return 0; | 176 | return 0; |
171 | } | 177 | } |
172 | 178 | ||
179 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | ||
180 | |||
173 | spin_unlock_bh(&sta->lock); | 181 | spin_unlock_bh(&sta->lock); |
174 | 182 | ||
175 | #ifdef CONFIG_MAC80211_HT_DEBUG | 183 | #ifdef CONFIG_MAC80211_HT_DEBUG |
@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
177 | sta->sta.addr, tid); | 185 | sta->sta.addr, tid); |
178 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 186 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
179 | 187 | ||
180 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | ||
181 | |||
182 | del_timer_sync(&tid_tx->addba_resp_timer); | 188 | del_timer_sync(&tid_tx->addba_resp_timer); |
183 | 189 | ||
184 | /* | 190 | /* |
@@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
188 | */ | 194 | */ |
189 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); | 195 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); |
190 | 196 | ||
197 | /* | ||
198 | * There might be a few packets being processed right now (on | ||
199 | * another CPU) that have already gotten past the aggregation | ||
200 | * check when it was still OPERATIONAL and consequently have | ||
201 | * IEEE80211_TX_CTL_AMPDU set. In that case, this code might | ||
202 | * call into the driver at the same time or even before the | ||
203 | * TX paths calls into it, which could confuse the driver. | ||
204 | * | ||
205 | * Wait for all currently running TX paths to finish before | ||
206 | * telling the driver. New packets will not go through since | ||
207 | * the aggregation session is no longer OPERATIONAL. | ||
208 | */ | ||
209 | synchronize_net(); | ||
210 | |||
191 | tid_tx->stop_initiator = initiator; | 211 | tid_tx->stop_initiator = initiator; |
192 | tid_tx->tx_stop = tx; | 212 | tid_tx->tx_stop = tx; |
193 | 213 | ||
@@ -753,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
753 | goto out; | 773 | goto out; |
754 | } | 774 | } |
755 | 775 | ||
756 | del_timer(&tid_tx->addba_resp_timer); | 776 | del_timer_sync(&tid_tx->addba_resp_timer); |
757 | 777 | ||
758 | #ifdef CONFIG_MAC80211_HT_DEBUG | 778 | #ifdef CONFIG_MAC80211_HT_DEBUG |
759 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); | 779 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); |
760 | #endif | 780 | #endif |
781 | |||
782 | /* | ||
783 | * addba_resp_timer may have fired before we got here, and | ||
784 | * caused WANT_STOP to be set. If the stop then was already | ||
785 | * processed further, STOPPING might be set. | ||
786 | */ | ||
787 | if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || | ||
788 | test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
789 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
790 | printk(KERN_DEBUG | ||
791 | "got addBA resp for tid %d but we already gave up\n", | ||
792 | tid); | ||
793 | #endif | ||
794 | goto out; | ||
795 | } | ||
796 | |||
761 | /* | 797 | /* |
762 | * IEEE 802.11-2007 7.3.1.14: | 798 | * IEEE 802.11-2007 7.3.1.14: |
763 | * In an ADDBA Response frame, when the Status Code field | 799 | * In an ADDBA Response frame, when the Status Code field |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index e8f379692294..d5597b759ba3 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST | |||
201 | 201 | ||
202 | config NF_CONNTRACK_NETBIOS_NS | 202 | config NF_CONNTRACK_NETBIOS_NS |
203 | tristate "NetBIOS name service protocol support" | 203 | tristate "NetBIOS name service protocol support" |
204 | depends on NETFILTER_ADVANCED | ||
205 | select NF_CONNTRACK_BROADCAST | 204 | select NF_CONNTRACK_BROADCAST |
206 | help | 205 | help |
207 | NetBIOS name service requests are sent as broadcast messages from an | 206 | NetBIOS name service requests are sent as broadcast messages from an |
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 6ee10f5d59bd..37d667e3f6f8 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c | |||
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
158 | const struct ip_set_hash *h = set->data; | 158 | const struct ip_set_hash *h = set->data; |
159 | ipset_adtfn adtfn = set->variant->adt[adt]; | 159 | ipset_adtfn adtfn = set->variant->adt[adt]; |
160 | struct hash_ipport4_elem data = { }; | 160 | struct hash_ipport4_elem data = { }; |
161 | u32 ip, ip_to, p = 0, port, port_to; | 161 | u32 ip, ip_to = 0, p = 0, port, port_to; |
162 | u32 timeout = h->timeout; | 162 | u32 timeout = h->timeout; |
163 | bool with_ports = false; | 163 | bool with_ports = false; |
164 | int ret; | 164 | int ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index fb90e344e907..e69e2718fbe1 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c | |||
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
162 | const struct ip_set_hash *h = set->data; | 162 | const struct ip_set_hash *h = set->data; |
163 | ipset_adtfn adtfn = set->variant->adt[adt]; | 163 | ipset_adtfn adtfn = set->variant->adt[adt]; |
164 | struct hash_ipportip4_elem data = { }; | 164 | struct hash_ipportip4_elem data = { }; |
165 | u32 ip, ip_to, p = 0, port, port_to; | 165 | u32 ip, ip_to = 0, p = 0, port, port_to; |
166 | u32 timeout = h->timeout; | 166 | u32 timeout = h->timeout; |
167 | bool with_ports = false; | 167 | bool with_ports = false; |
168 | int ret; | 168 | int ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index deb3e3dfa5fc..64199b4e93c9 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
184 | const struct ip_set_hash *h = set->data; | 184 | const struct ip_set_hash *h = set->data; |
185 | ipset_adtfn adtfn = set->variant->adt[adt]; | 185 | ipset_adtfn adtfn = set->variant->adt[adt]; |
186 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; | 186 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; |
187 | u32 ip, ip_to, p = 0, port, port_to; | 187 | u32 ip, ip_to = 0, p = 0, port, port_to; |
188 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; | 188 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; |
189 | u32 timeout = h->timeout; | 189 | u32 timeout = h->timeout; |
190 | bool with_ports = false; | 190 | bool with_ports = false; |
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 6b368be937c6..b62c4148b921 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -27,22 +27,17 @@ | |||
27 | 27 | ||
28 | static DEFINE_MUTEX(nf_ct_ecache_mutex); | 28 | static DEFINE_MUTEX(nf_ct_ecache_mutex); |
29 | 29 | ||
30 | struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly; | ||
31 | EXPORT_SYMBOL_GPL(nf_conntrack_event_cb); | ||
32 | |||
33 | struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly; | ||
34 | EXPORT_SYMBOL_GPL(nf_expect_event_cb); | ||
35 | |||
36 | /* deliver cached events and clear cache entry - must be called with locally | 30 | /* deliver cached events and clear cache entry - must be called with locally |
37 | * disabled softirqs */ | 31 | * disabled softirqs */ |
38 | void nf_ct_deliver_cached_events(struct nf_conn *ct) | 32 | void nf_ct_deliver_cached_events(struct nf_conn *ct) |
39 | { | 33 | { |
34 | struct net *net = nf_ct_net(ct); | ||
40 | unsigned long events; | 35 | unsigned long events; |
41 | struct nf_ct_event_notifier *notify; | 36 | struct nf_ct_event_notifier *notify; |
42 | struct nf_conntrack_ecache *e; | 37 | struct nf_conntrack_ecache *e; |
43 | 38 | ||
44 | rcu_read_lock(); | 39 | rcu_read_lock(); |
45 | notify = rcu_dereference(nf_conntrack_event_cb); | 40 | notify = rcu_dereference(net->ct.nf_conntrack_event_cb); |
46 | if (notify == NULL) | 41 | if (notify == NULL) |
47 | goto out_unlock; | 42 | goto out_unlock; |
48 | 43 | ||
@@ -83,19 +78,20 @@ out_unlock: | |||
83 | } | 78 | } |
84 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); | 79 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); |
85 | 80 | ||
86 | int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) | 81 | int nf_conntrack_register_notifier(struct net *net, |
82 | struct nf_ct_event_notifier *new) | ||
87 | { | 83 | { |
88 | int ret = 0; | 84 | int ret = 0; |
89 | struct nf_ct_event_notifier *notify; | 85 | struct nf_ct_event_notifier *notify; |
90 | 86 | ||
91 | mutex_lock(&nf_ct_ecache_mutex); | 87 | mutex_lock(&nf_ct_ecache_mutex); |
92 | notify = rcu_dereference_protected(nf_conntrack_event_cb, | 88 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
93 | lockdep_is_held(&nf_ct_ecache_mutex)); | 89 | lockdep_is_held(&nf_ct_ecache_mutex)); |
94 | if (notify != NULL) { | 90 | if (notify != NULL) { |
95 | ret = -EBUSY; | 91 | ret = -EBUSY; |
96 | goto out_unlock; | 92 | goto out_unlock; |
97 | } | 93 | } |
98 | RCU_INIT_POINTER(nf_conntrack_event_cb, new); | 94 | RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new); |
99 | mutex_unlock(&nf_ct_ecache_mutex); | 95 | mutex_unlock(&nf_ct_ecache_mutex); |
100 | return ret; | 96 | return ret; |
101 | 97 | ||
@@ -105,32 +101,34 @@ out_unlock: | |||
105 | } | 101 | } |
106 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); | 102 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); |
107 | 103 | ||
108 | void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) | 104 | void nf_conntrack_unregister_notifier(struct net *net, |
105 | struct nf_ct_event_notifier *new) | ||
109 | { | 106 | { |
110 | struct nf_ct_event_notifier *notify; | 107 | struct nf_ct_event_notifier *notify; |
111 | 108 | ||
112 | mutex_lock(&nf_ct_ecache_mutex); | 109 | mutex_lock(&nf_ct_ecache_mutex); |
113 | notify = rcu_dereference_protected(nf_conntrack_event_cb, | 110 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
114 | lockdep_is_held(&nf_ct_ecache_mutex)); | 111 | lockdep_is_held(&nf_ct_ecache_mutex)); |
115 | BUG_ON(notify != new); | 112 | BUG_ON(notify != new); |
116 | RCU_INIT_POINTER(nf_conntrack_event_cb, NULL); | 113 | RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); |
117 | mutex_unlock(&nf_ct_ecache_mutex); | 114 | mutex_unlock(&nf_ct_ecache_mutex); |
118 | } | 115 | } |
119 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); | 116 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); |
120 | 117 | ||
121 | int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) | 118 | int nf_ct_expect_register_notifier(struct net *net, |
119 | struct nf_exp_event_notifier *new) | ||
122 | { | 120 | { |
123 | int ret = 0; | 121 | int ret = 0; |
124 | struct nf_exp_event_notifier *notify; | 122 | struct nf_exp_event_notifier *notify; |
125 | 123 | ||
126 | mutex_lock(&nf_ct_ecache_mutex); | 124 | mutex_lock(&nf_ct_ecache_mutex); |
127 | notify = rcu_dereference_protected(nf_expect_event_cb, | 125 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
128 | lockdep_is_held(&nf_ct_ecache_mutex)); | 126 | lockdep_is_held(&nf_ct_ecache_mutex)); |
129 | if (notify != NULL) { | 127 | if (notify != NULL) { |
130 | ret = -EBUSY; | 128 | ret = -EBUSY; |
131 | goto out_unlock; | 129 | goto out_unlock; |
132 | } | 130 | } |
133 | RCU_INIT_POINTER(nf_expect_event_cb, new); | 131 | RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new); |
134 | mutex_unlock(&nf_ct_ecache_mutex); | 132 | mutex_unlock(&nf_ct_ecache_mutex); |
135 | return ret; | 133 | return ret; |
136 | 134 | ||
@@ -140,15 +138,16 @@ out_unlock: | |||
140 | } | 138 | } |
141 | EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); | 139 | EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); |
142 | 140 | ||
143 | void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) | 141 | void nf_ct_expect_unregister_notifier(struct net *net, |
142 | struct nf_exp_event_notifier *new) | ||
144 | { | 143 | { |
145 | struct nf_exp_event_notifier *notify; | 144 | struct nf_exp_event_notifier *notify; |
146 | 145 | ||
147 | mutex_lock(&nf_ct_ecache_mutex); | 146 | mutex_lock(&nf_ct_ecache_mutex); |
148 | notify = rcu_dereference_protected(nf_expect_event_cb, | 147 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
149 | lockdep_is_held(&nf_ct_ecache_mutex)); | 148 | lockdep_is_held(&nf_ct_ecache_mutex)); |
150 | BUG_ON(notify != new); | 149 | BUG_ON(notify != new); |
151 | RCU_INIT_POINTER(nf_expect_event_cb, NULL); | 150 | RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); |
152 | mutex_unlock(&nf_ct_ecache_mutex); | 151 | mutex_unlock(&nf_ct_ecache_mutex); |
153 | } | 152 | } |
154 | EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); | 153 | EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e58aa9b1fe8a..ef21b221f036 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> |
5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> | 7 | * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org> |
8 | * | 8 | * |
9 | * Initial connection tracking via netlink development funded and | 9 | * Initial connection tracking via netlink development funded and |
10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink"); | |||
2163 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 2163 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
2164 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | 2164 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); |
2165 | 2165 | ||
2166 | static int __net_init ctnetlink_net_init(struct net *net) | ||
2167 | { | ||
2168 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2169 | int ret; | ||
2170 | |||
2171 | ret = nf_conntrack_register_notifier(net, &ctnl_notifier); | ||
2172 | if (ret < 0) { | ||
2173 | pr_err("ctnetlink_init: cannot register notifier.\n"); | ||
2174 | goto err_out; | ||
2175 | } | ||
2176 | |||
2177 | ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp); | ||
2178 | if (ret < 0) { | ||
2179 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); | ||
2180 | goto err_unreg_notifier; | ||
2181 | } | ||
2182 | #endif | ||
2183 | return 0; | ||
2184 | |||
2185 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2186 | err_unreg_notifier: | ||
2187 | nf_conntrack_unregister_notifier(net, &ctnl_notifier); | ||
2188 | err_out: | ||
2189 | return ret; | ||
2190 | #endif | ||
2191 | } | ||
2192 | |||
2193 | static void ctnetlink_net_exit(struct net *net) | ||
2194 | { | ||
2195 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2196 | nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp); | ||
2197 | nf_conntrack_unregister_notifier(net, &ctnl_notifier); | ||
2198 | #endif | ||
2199 | } | ||
2200 | |||
2201 | static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) | ||
2202 | { | ||
2203 | struct net *net; | ||
2204 | |||
2205 | list_for_each_entry(net, net_exit_list, exit_list) | ||
2206 | ctnetlink_net_exit(net); | ||
2207 | } | ||
2208 | |||
2209 | static struct pernet_operations ctnetlink_net_ops = { | ||
2210 | .init = ctnetlink_net_init, | ||
2211 | .exit_batch = ctnetlink_net_exit_batch, | ||
2212 | }; | ||
2213 | |||
2166 | static int __init ctnetlink_init(void) | 2214 | static int __init ctnetlink_init(void) |
2167 | { | 2215 | { |
2168 | int ret; | 2216 | int ret; |
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void) | |||
2180 | goto err_unreg_subsys; | 2228 | goto err_unreg_subsys; |
2181 | } | 2229 | } |
2182 | 2230 | ||
2183 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2231 | if (register_pernet_subsys(&ctnetlink_net_ops)) { |
2184 | ret = nf_conntrack_register_notifier(&ctnl_notifier); | 2232 | pr_err("ctnetlink_init: cannot register pernet operations\n"); |
2185 | if (ret < 0) { | ||
2186 | pr_err("ctnetlink_init: cannot register notifier.\n"); | ||
2187 | goto err_unreg_exp_subsys; | 2233 | goto err_unreg_exp_subsys; |
2188 | } | 2234 | } |
2189 | 2235 | ||
2190 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); | ||
2191 | if (ret < 0) { | ||
2192 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); | ||
2193 | goto err_unreg_notifier; | ||
2194 | } | ||
2195 | #endif | ||
2196 | |||
2197 | return 0; | 2236 | return 0; |
2198 | 2237 | ||
2199 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2200 | err_unreg_notifier: | ||
2201 | nf_conntrack_unregister_notifier(&ctnl_notifier); | ||
2202 | err_unreg_exp_subsys: | 2238 | err_unreg_exp_subsys: |
2203 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2239 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2204 | #endif | ||
2205 | err_unreg_subsys: | 2240 | err_unreg_subsys: |
2206 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2241 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2207 | err_out: | 2242 | err_out: |
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void) | |||
2213 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); | 2248 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); |
2214 | 2249 | ||
2215 | nf_ct_remove_userspace_expectations(); | 2250 | nf_ct_remove_userspace_expectations(); |
2216 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2251 | unregister_pernet_subsys(&ctnetlink_net_ops); |
2217 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); | ||
2218 | nf_conntrack_unregister_notifier(&ctnl_notifier); | ||
2219 | #endif | ||
2220 | |||
2221 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2252 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2222 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2253 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2223 | } | 2254 | } |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 3735297c524d..5952237c0c86 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
111 | struct netlbl_domaddr_map *addrmap = NULL; | 111 | struct netlbl_domaddr_map *addrmap = NULL; |
112 | struct netlbl_domaddr4_map *map4 = NULL; | 112 | struct netlbl_domaddr4_map *map4 = NULL; |
113 | struct netlbl_domaddr6_map *map6 = NULL; | 113 | struct netlbl_domaddr6_map *map6 = NULL; |
114 | const struct in_addr *addr4, *mask4; | ||
115 | const struct in6_addr *addr6, *mask6; | ||
116 | 114 | ||
117 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 115 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
118 | if (entry == NULL) | 116 | if (entry == NULL) |
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
133 | INIT_LIST_HEAD(&addrmap->list6); | 131 | INIT_LIST_HEAD(&addrmap->list6); |
134 | 132 | ||
135 | switch (family) { | 133 | switch (family) { |
136 | case AF_INET: | 134 | case AF_INET: { |
137 | addr4 = addr; | 135 | const struct in_addr *addr4 = addr; |
138 | mask4 = mask; | 136 | const struct in_addr *mask4 = mask; |
139 | map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); | 137 | map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); |
140 | if (map4 == NULL) | 138 | if (map4 == NULL) |
141 | goto cfg_unlbl_map_add_failure; | 139 | goto cfg_unlbl_map_add_failure; |
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
148 | if (ret_val != 0) | 146 | if (ret_val != 0) |
149 | goto cfg_unlbl_map_add_failure; | 147 | goto cfg_unlbl_map_add_failure; |
150 | break; | 148 | break; |
151 | case AF_INET6: | 149 | } |
152 | addr6 = addr; | 150 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
153 | mask6 = mask; | 151 | case AF_INET6: { |
152 | const struct in6_addr *addr6 = addr; | ||
153 | const struct in6_addr *mask6 = mask; | ||
154 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); | 154 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); |
155 | if (map6 == NULL) | 155 | if (map6 == NULL) |
156 | goto cfg_unlbl_map_add_failure; | 156 | goto cfg_unlbl_map_add_failure; |
@@ -167,6 +167,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
167 | if (ret_val != 0) | 167 | if (ret_val != 0) |
168 | goto cfg_unlbl_map_add_failure; | 168 | goto cfg_unlbl_map_add_failure; |
169 | break; | 169 | break; |
170 | } | ||
171 | #endif /* IPv6 */ | ||
170 | default: | 172 | default: |
171 | goto cfg_unlbl_map_add_failure; | 173 | goto cfg_unlbl_map_add_failure; |
172 | break; | 174 | break; |
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net, | |||
225 | case AF_INET: | 227 | case AF_INET: |
226 | addr_len = sizeof(struct in_addr); | 228 | addr_len = sizeof(struct in_addr); |
227 | break; | 229 | break; |
230 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
228 | case AF_INET6: | 231 | case AF_INET6: |
229 | addr_len = sizeof(struct in6_addr); | 232 | addr_len = sizeof(struct in6_addr); |
230 | break; | 233 | break; |
234 | #endif /* IPv6 */ | ||
231 | default: | 235 | default: |
232 | return -EPFNOSUPPORT; | 236 | return -EPFNOSUPPORT; |
233 | } | 237 | } |
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net, | |||
266 | case AF_INET: | 270 | case AF_INET: |
267 | addr_len = sizeof(struct in_addr); | 271 | addr_len = sizeof(struct in_addr); |
268 | break; | 272 | break; |
273 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
269 | case AF_INET6: | 274 | case AF_INET6: |
270 | addr_len = sizeof(struct in6_addr); | 275 | addr_len = sizeof(struct in6_addr); |
271 | break; | 276 | break; |
277 | #endif /* IPv6 */ | ||
272 | default: | 278 | default: |
273 | return -EPFNOSUPPORT; | 279 | return -EPFNOSUPPORT; |
274 | } | 280 | } |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 6649463da1b6..d617161f8dd3 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) | |||
209 | ctl->Plog, ctl->Scell_log, | 209 | ctl->Plog, ctl->Scell_log, |
210 | nla_data(tb[TCA_RED_STAB])); | 210 | nla_data(tb[TCA_RED_STAB])); |
211 | 211 | ||
212 | if (skb_queue_empty(&sch->q)) | 212 | if (!q->qdisc->q.qlen) |
213 | red_end_of_idle_period(&q->parms); | 213 | red_start_of_idle_period(&q->parms); |
214 | 214 | ||
215 | sch_tree_unlock(sch); | 215 | sch_tree_unlock(sch); |
216 | return 0; | 216 | return 0; |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 283bfe3de59d..ed1336e15920 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | |||
225 | 225 | ||
226 | 226 | ||
227 | static int | 227 | static int |
228 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) | 228 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, |
229 | struct net_device *dev, struct netdev_queue *txq, | ||
230 | struct neighbour *mn) | ||
229 | { | 231 | { |
230 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); | 232 | struct teql_sched_data *q = qdisc_priv(txq->qdisc); |
231 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | ||
232 | struct neighbour *mn = dst_get_neighbour(skb_dst(skb)); | ||
233 | struct neighbour *n = q->ncache; | 233 | struct neighbour *n = q->ncache; |
234 | 234 | ||
235 | if (mn->tbl == NULL) | 235 | if (mn->tbl == NULL) |
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
262 | } | 262 | } |
263 | 263 | ||
264 | static inline int teql_resolve(struct sk_buff *skb, | 264 | static inline int teql_resolve(struct sk_buff *skb, |
265 | struct sk_buff *skb_res, struct net_device *dev) | 265 | struct sk_buff *skb_res, |
266 | struct net_device *dev, | ||
267 | struct netdev_queue *txq) | ||
266 | { | 268 | { |
267 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | 269 | struct dst_entry *dst = skb_dst(skb); |
270 | struct neighbour *mn; | ||
271 | int res; | ||
272 | |||
268 | if (txq->qdisc == &noop_qdisc) | 273 | if (txq->qdisc == &noop_qdisc) |
269 | return -ENODEV; | 274 | return -ENODEV; |
270 | 275 | ||
271 | if (dev->header_ops == NULL || | 276 | if (!dev->header_ops || !dst) |
272 | skb_dst(skb) == NULL || | ||
273 | dst_get_neighbour(skb_dst(skb)) == NULL) | ||
274 | return 0; | 277 | return 0; |
275 | return __teql_resolve(skb, skb_res, dev); | 278 | |
279 | rcu_read_lock(); | ||
280 | mn = dst_get_neighbour(dst); | ||
281 | res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0; | ||
282 | rcu_read_unlock(); | ||
283 | |||
284 | return res; | ||
276 | } | 285 | } |
277 | 286 | ||
278 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 287 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -307,7 +316,7 @@ restart: | |||
307 | continue; | 316 | continue; |
308 | } | 317 | } |
309 | 318 | ||
310 | switch (teql_resolve(skb, skb_res, slave)) { | 319 | switch (teql_resolve(skb, skb_res, slave, slave_txq)) { |
311 | case 0: | 320 | case 0: |
312 | if (__netif_tx_trylock(slave_txq)) { | 321 | if (__netif_tx_trylock(slave_txq)) { |
313 | unsigned int length = qdisc_pkt_len(skb); | 322 | unsigned int length = qdisc_pkt_len(skb); |
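
In the sch_teql hunks above, the neighbour lookup moves into teql_resolve(), which now takes the reference under rcu_read_lock() and passes both the neighbour and the already-selected txq down to __teql_resolve() instead of re-deriving queue 0. A userspace stand-in for that take-the-lock-once-in-the-caller structure, with a pthread rwlock playing the role of the RCU read side and all names illustrative:

#include <pthread.h>
#include <stdio.h>

struct neigh_stub { const char *lladdr; };

static pthread_rwlock_t neigh_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct neigh_stub cached = { "02:00:00:00:00:01" };
static struct neigh_stub *current_neigh = &cached;

static int helper_resolve(struct neigh_stub *mn, int txq)
{
	printf("resolving txq %d via %s\n", txq, mn->lladdr);
	return 1;
}

static int resolve(int txq)
{
	struct neigh_stub *mn;
	int res;

	pthread_rwlock_rdlock(&neigh_lock);	/* rcu_read_lock() in the driver */
	mn = current_neigh;			/* fetch the pointer once */
	res = mn ? helper_resolve(mn, txq) : 0;
	pthread_rwlock_unlock(&neigh_lock);
	return res;
}

int main(void)
{
	return resolve(0) ? 0 : 1;
}
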
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 865e68fef21c..bf812048cf6f 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) | |||
82 | struct sctp_auth_bytes *key; | 82 | struct sctp_auth_bytes *key; |
83 | 83 | ||
84 | /* Verify that we are not going to overflow INT_MAX */ | 84 | /* Verify that we are not going to overflow INT_MAX */ |
85 | if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) | 85 | if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes))) |
86 | return NULL; | 86 | return NULL; |
87 | 87 | ||
88 | /* Allocate the shared key */ | 88 | /* Allocate the shared key */ |
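
The sctp_auth_create_key() hunk above rewrites the overflow guard: with key_len a __u32, the old expression INT_MAX - key_len is evaluated in unsigned arithmetic and wraps to a huge value exactly when key_len is oversized, so the check never fires; comparing key_len against the remaining headroom cannot wrap. A standalone demonstration, with HDR_SIZE standing in for sizeof(struct sctp_auth_bytes):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 24u	/* stand-in for sizeof(struct sctp_auth_bytes) */

static int key_len_ok(uint32_t key_len)
{
	return key_len <= INT_MAX - HDR_SIZE;
}

int main(void)
{
	uint32_t bad = (uint32_t)INT_MAX + 5u;	/* oversized key length */

	/* old form wraps, so the guard does not trigger */
	printf("old check lets it through: %d\n", !((INT_MAX - bad) < HDR_SIZE));
	/* new form compares against the headroom and rejects it */
	printf("new check rejects it:      %d\n", !key_len_ok(bad));
	return 0;
}
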
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d7f97ef26590..55472c48825e 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task) | |||
496 | struct rpc_rqst *req = task->tk_rqstp; | 496 | struct rpc_rqst *req = task->tk_rqstp; |
497 | struct rpc_xprt *xprt = req->rq_xprt; | 497 | struct rpc_xprt *xprt = req->rq_xprt; |
498 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 498 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
499 | int ret = 0; | 499 | int ret = -EAGAIN; |
500 | 500 | ||
501 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", | 501 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
502 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 502 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task) | |||
508 | /* Don't race with disconnect */ | 508 | /* Don't race with disconnect */ |
509 | if (xprt_connected(xprt)) { | 509 | if (xprt_connected(xprt)) { |
510 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { | 510 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { |
511 | ret = -EAGAIN; | ||
512 | /* | 511 | /* |
513 | * Notify TCP that we're limited by the application | 512 | * Notify TCP that we're limited by the application |
514 | * window size | 513 | * window size |
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | |||
2530 | int err; | 2529 | int err; |
2531 | err = xs_init_anyaddr(args->dstaddr->sa_family, | 2530 | err = xs_init_anyaddr(args->dstaddr->sa_family, |
2532 | (struct sockaddr *)&new->srcaddr); | 2531 | (struct sockaddr *)&new->srcaddr); |
2533 | if (err != 0) | 2532 | if (err != 0) { |
2533 | xprt_free(xprt); | ||
2534 | return ERR_PTR(err); | 2534 | return ERR_PTR(err); |
2535 | } | ||
2535 | } | 2536 | } |
2536 | 2537 | ||
2537 | return xprt; | 2538 | return xprt; |
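
Two fixes in the xprtsock hunks above: xs_nospace() now defaults ret to -EAGAIN, so a transmit that ran out of socket buffer space is retried rather than silently treated as success, and xs_setup_xprt() frees the freshly allocated transport when source-address initialisation fails instead of leaking it. A minimal sketch of the second, free-on-error-path pattern; the stub names are not the sunrpc API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct xprt_stub { int dummy; };

static int init_srcaddr_stub(struct xprt_stub *x) { (void)x; return -EINVAL; }

static struct xprt_stub *setup_xprt_stub(void)
{
	struct xprt_stub *x = calloc(1, sizeof(*x));

	if (!x)
		return NULL;
	if (init_srcaddr_stub(x) != 0) {
		free(x);		/* the added xprt_free() equivalent */
		return NULL;
	}
	return x;
}

int main(void)
{
	struct xprt_stub *x = setup_xprt_stub();

	printf("setup %s\n", x ? "succeeded" : "failed, nothing leaked");
	free(x);
	return 0;
}
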
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 466fbcc5cf77..b595a3d8679f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1957 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1957 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1958 | (UNIXCB(skb).cred != siocb->scm->cred)) { | 1958 | (UNIXCB(skb).cred != siocb->scm->cred)) { |
1959 | skb_queue_head(&sk->sk_receive_queue, skb); | 1959 | skb_queue_head(&sk->sk_receive_queue, skb); |
1960 | sk->sk_data_ready(sk, skb->len); | ||
1960 | break; | 1961 | break; |
1961 | } | 1962 | } |
1962 | } else { | 1963 | } else { |
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1974 | chunk = min_t(unsigned int, skb->len, size); | 1975 | chunk = min_t(unsigned int, skb->len, size); |
1975 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { | 1976 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { |
1976 | skb_queue_head(&sk->sk_receive_queue, skb); | 1977 | skb_queue_head(&sk->sk_receive_queue, skb); |
1978 | sk->sk_data_ready(sk, skb->len); | ||
1977 | if (copied == 0) | 1979 | if (copied == 0) |
1978 | copied = -EFAULT; | 1980 | copied = -EFAULT; |
1979 | break; | 1981 | break; |
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1991 | /* put the skb back if we didn't use it up.. */ | 1993 | /* put the skb back if we didn't use it up.. */ |
1992 | if (skb->len) { | 1994 | if (skb->len) { |
1993 | skb_queue_head(&sk->sk_receive_queue, skb); | 1995 | skb_queue_head(&sk->sk_receive_queue, skb); |
1996 | sk->sk_data_ready(sk, skb->len); | ||
1994 | break; | 1997 | break; |
1995 | } | 1998 | } |
1996 | 1999 | ||
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
2006 | 2009 | ||
2007 | /* put message back and return */ | 2010 | /* put message back and return */ |
2008 | skb_queue_head(&sk->sk_receive_queue, skb); | 2011 | skb_queue_head(&sk->sk_receive_queue, skb); |
2012 | sk->sk_data_ready(sk, skb->len); | ||
2009 | break; | 2013 | break; |
2010 | } | 2014 | } |
2011 | } while (size); | 2015 | } while (size); |
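
Every place in unix_stream_recvmsg() that puts an skb back at the head of sk_receive_queue (credential change, failed copy to user space, leftover data) now also calls sk->sk_data_ready(), so other readers blocked on the socket are woken for data that is already queued. A userspace analogue of the rule 'requeue, then wake the waiters again', with a condition variable standing in for sk_data_ready():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t data_ready = PTHREAD_COND_INITIALIZER;
static int queued;

static void requeue_head(void)
{
	pthread_mutex_lock(&lock);
	queued++;				/* put the item back */
	pthread_cond_signal(&data_ready);	/* the added wakeup */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	requeue_head();
	printf("queued=%d\n", queued);
	return 0;
}
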
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a1cabde7cb5f..eee9ccc7adaf 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
89 | [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, | 89 | [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, |
90 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, | 90 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, |
91 | 91 | ||
92 | [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, | 92 | [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, |
93 | [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, | 93 | [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, |
94 | 94 | ||
95 | [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, | 95 | [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, |
96 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, | 96 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, |
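
Dropping .type = NLA_BINARY from the MAC-address attributes above changes what .len means in the netlink policy: for NLA_BINARY it is a maximum, so truncated addresses passed validation, whereas for the default (unspecified) type it is a required minimum payload length of ETH_ALEN bytes. A toy version of the two behaviours:

#include <stdio.h>

#define ETH_ALEN 6

/* NLA_BINARY: .len is an upper bound, so short payloads slip through. */
static int valid_as_binary(int payload_len) { return payload_len <= ETH_ALEN; }
/* Unspecified type: .len is a required minimum, which the hunk switches to. */
static int valid_as_minlen(int payload_len) { return payload_len >= ETH_ALEN; }

int main(void)
{
	int truncated = 3;	/* a 3-byte "MAC address" */

	printf("NLA_BINARY accepts it:        %d\n", valid_as_binary(truncated));
	printf("min-length policy accepts it: %d\n", valid_as_minlen(truncated));
	return 0;
}
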
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c index e083122ca55a..dbf94b189e75 100644 --- a/sound/pci/cs5535audio/cs5535audio_pcm.c +++ b/sound/pci/cs5535audio/cs5535audio_pcm.c | |||
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au, | |||
148 | struct cs5535audio_dma_desc *desc = | 148 | struct cs5535audio_dma_desc *desc = |
149 | &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i]; | 149 | &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i]; |
150 | desc->addr = cpu_to_le32(addr); | 150 | desc->addr = cpu_to_le32(addr); |
151 | desc->size = cpu_to_le32(period_bytes); | 151 | desc->size = cpu_to_le16(period_bytes); |
152 | desc->ctlreserved = cpu_to_le16(PRD_EOP); | 152 | desc->ctlreserved = cpu_to_le16(PRD_EOP); |
153 | desc_addr += sizeof(struct cs5535audio_dma_desc); | 153 | desc_addr += sizeof(struct cs5535audio_dma_desc); |
154 | addr += period_bytes; | 154 | addr += period_bytes; |
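
desc->size in the cs5535audio PRD descriptor is a 16-bit little-endian field, so it has to be converted with cpu_to_le16(): byte-swapping the value as 32 bits and then truncating keeps the wrong half on a big-endian CPU. A plain-C illustration using the GCC byte-swap builtins (the kernel helpers are no-ops on little-endian machines):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period_bytes = 0x1234;			/* fits in 16 bits */
	uint16_t wrong = (uint16_t)__builtin_bswap32(period_bytes);
	uint16_t right = __builtin_bswap16((uint16_t)period_bytes);

	printf("32-bit swap then truncate: 0x%04x\n", (unsigned)wrong);	/* 0x0000 */
	printf("16-bit swap:               0x%04x\n", (unsigned)right);	/* 0x3412 */
	return 0;
}
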
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index e44b107fdc75..4562e9de6a1a 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec, | |||
4046 | 4046 | ||
4047 | /* Search for codec ID */ | 4047 | /* Search for codec ID */ |
4048 | for (q = tbl; q->subvendor; q++) { | 4048 | for (q = tbl; q->subvendor; q++) { |
4049 | unsigned long vendorid = (q->subdevice) | (q->subvendor << 16); | 4049 | unsigned int mask = 0xffff0000 | q->subdevice_mask; |
4050 | 4050 | unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask; | |
4051 | if (vendorid == codec->subsystem_id) | 4051 | if ((codec->subsystem_id & mask) == id) |
4052 | break; | 4052 | break; |
4053 | } | 4053 | } |
4054 | 4054 | ||
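
snd_hda_check_board_codec_sid_config() above now honours q->subdevice_mask when matching the codec subsystem ID: the vendor half is always compared, the device half only where the quirk entry asks for it, which is what lets vendor-wide entries (mask 0, as created by SND_PCI_QUIRK_VENDOR()) match any subdevice. The same masked comparison as a small self-contained sketch:

#include <stdio.h>

struct quirk { unsigned short subvendor, subdevice, subdevice_mask; };

static int quirk_matches(const struct quirk *q, unsigned int subsystem_id)
{
	unsigned int mask = 0xffff0000 | q->subdevice_mask;
	unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;

	return (subsystem_id & mask) == id;
}

int main(void)
{
	struct quirk apple_any = { 0x106b, 0x0000, 0x0000 };	/* vendor-wide entry */

	printf("%d\n", quirk_matches(&apple_any, 0x106b1234));	/* prints 1 */
	return 0;
}
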
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 7ae7578bdcc0..c1da422e085a 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
@@ -347,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld, | |||
347 | 347 | ||
348 | for (i = 0; i < size; i++) { | 348 | for (i = 0; i < size; i++) { |
349 | unsigned int val = hdmi_get_eld_data(codec, nid, i); | 349 | unsigned int val = hdmi_get_eld_data(codec, nid, i); |
350 | /* | ||
351 | * Graphics driver might be writing to ELD buffer right now. | ||
352 | * Just abort. The caller will repoll after a while. | ||
353 | */ | ||
350 | if (!(val & AC_ELDD_ELD_VALID)) { | 354 | if (!(val & AC_ELDD_ELD_VALID)) { |
351 | if (!i) { | ||
352 | snd_printd(KERN_INFO | ||
353 | "HDMI: invalid ELD data\n"); | ||
354 | ret = -EINVAL; | ||
355 | goto error; | ||
356 | } | ||
357 | snd_printd(KERN_INFO | 355 | snd_printd(KERN_INFO |
358 | "HDMI: invalid ELD data byte %d\n", i); | 356 | "HDMI: invalid ELD data byte %d\n", i); |
359 | val = 0; | 357 | ret = -EINVAL; |
360 | } else | 358 | goto error; |
361 | val &= AC_ELDD_ELD_DATA; | 359 | } |
360 | val &= AC_ELDD_ELD_DATA; | ||
361 | /* | ||
362 | * The first byte cannot be zero. This can happen on some DVI | ||
363 | * connections. Some Intel chips may also need some 250ms delay | ||
364 | * to return non-zero ELD data, even when the graphics driver | ||
365 | * correctly writes ELD content before setting ELD_valid bit. | ||
366 | */ | ||
367 | if (!val && !i) { | ||
368 | snd_printdd(KERN_INFO "HDMI: 0 ELD data\n"); | ||
369 | ret = -EINVAL; | ||
370 | goto error; | ||
371 | } | ||
362 | buf[i] = val; | 372 | buf[i] = val; |
363 | } | 373 | } |
364 | 374 | ||
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 2fbab8e29576..70a7abda7e22 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -58,6 +58,8 @@ struct cs_spec { | |||
58 | unsigned int gpio_mask; | 58 | unsigned int gpio_mask; |
59 | unsigned int gpio_dir; | 59 | unsigned int gpio_dir; |
60 | unsigned int gpio_data; | 60 | unsigned int gpio_data; |
61 | unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */ | ||
62 | unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */ | ||
61 | 63 | ||
62 | struct hda_pcm pcm_rec[2]; /* PCM information */ | 64 | struct hda_pcm pcm_rec[2]; /* PCM information */ |
63 | 65 | ||
@@ -76,6 +78,7 @@ enum { | |||
76 | CS420X_MBP53, | 78 | CS420X_MBP53, |
77 | CS420X_MBP55, | 79 | CS420X_MBP55, |
78 | CS420X_IMAC27, | 80 | CS420X_IMAC27, |
81 | CS420X_APPLE, | ||
79 | CS420X_AUTO, | 82 | CS420X_AUTO, |
80 | CS420X_MODELS | 83 | CS420X_MODELS |
81 | }; | 84 | }; |
@@ -928,10 +931,9 @@ static void cs_automute(struct hda_codec *codec) | |||
928 | spdif_present ? 0 : PIN_OUT); | 931 | spdif_present ? 0 : PIN_OUT); |
929 | } | 932 | } |
930 | } | 933 | } |
931 | if (spec->board_config == CS420X_MBP53 || | 934 | if (spec->gpio_eapd_hp) { |
932 | spec->board_config == CS420X_MBP55 || | 935 | unsigned int gpio = hp_present ? |
933 | spec->board_config == CS420X_IMAC27) { | 936 | spec->gpio_eapd_hp : spec->gpio_eapd_speaker; |
934 | unsigned int gpio = hp_present ? 0x02 : 0x08; | ||
935 | snd_hda_codec_write(codec, 0x01, 0, | 937 | snd_hda_codec_write(codec, 0x01, 0, |
936 | AC_VERB_SET_GPIO_DATA, gpio); | 938 | AC_VERB_SET_GPIO_DATA, gpio); |
937 | } | 939 | } |
@@ -1276,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = { | |||
1276 | [CS420X_MBP53] = "mbp53", | 1278 | [CS420X_MBP53] = "mbp53", |
1277 | [CS420X_MBP55] = "mbp55", | 1279 | [CS420X_MBP55] = "mbp55", |
1278 | [CS420X_IMAC27] = "imac27", | 1280 | [CS420X_IMAC27] = "imac27", |
1281 | [CS420X_APPLE] = "apple", | ||
1279 | [CS420X_AUTO] = "auto", | 1282 | [CS420X_AUTO] = "auto", |
1280 | }; | 1283 | }; |
1281 | 1284 | ||
@@ -1285,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = { | |||
1285 | SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55), | 1288 | SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55), |
1286 | SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), | 1289 | SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), |
1287 | SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), | 1290 | SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), |
1288 | SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27), | 1291 | /* this conflicts with too many other models */ |
1292 | /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ | ||
1293 | {} /* terminator */ | ||
1294 | }; | ||
1295 | |||
1296 | static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = { | ||
1297 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), | ||
1289 | {} /* terminator */ | 1298 | {} /* terminator */ |
1290 | }; | 1299 | }; |
1291 | 1300 | ||
@@ -1367,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec) | |||
1367 | spec->board_config = | 1376 | spec->board_config = |
1368 | snd_hda_check_board_config(codec, CS420X_MODELS, | 1377 | snd_hda_check_board_config(codec, CS420X_MODELS, |
1369 | cs420x_models, cs420x_cfg_tbl); | 1378 | cs420x_models, cs420x_cfg_tbl); |
1379 | if (spec->board_config < 0) | ||
1380 | spec->board_config = | ||
1381 | snd_hda_check_board_codec_sid_config(codec, | ||
1382 | CS420X_MODELS, NULL, cs420x_codec_cfg_tbl); | ||
1370 | if (spec->board_config >= 0) | 1383 | if (spec->board_config >= 0) |
1371 | fix_pincfg(codec, spec->board_config, cs_pincfgs); | 1384 | fix_pincfg(codec, spec->board_config, cs_pincfgs); |
1372 | 1385 | ||
@@ -1374,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec) | |||
1374 | case CS420X_IMAC27: | 1387 | case CS420X_IMAC27: |
1375 | case CS420X_MBP53: | 1388 | case CS420X_MBP53: |
1376 | case CS420X_MBP55: | 1389 | case CS420X_MBP55: |
1377 | /* GPIO1 = headphones */ | 1390 | case CS420X_APPLE: |
1378 | /* GPIO3 = speakers */ | 1391 | spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */ |
1379 | spec->gpio_mask = 0x0a; | 1392 | spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */ |
1380 | spec->gpio_dir = 0x0a; | 1393 | spec->gpio_mask = spec->gpio_dir = |
1394 | spec->gpio_eapd_hp | spec->gpio_eapd_speaker; | ||
1381 | break; | 1395 | break; |
1382 | } | 1396 | } |
1383 | 1397 | ||
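
The patch_cirrus changes above replace the hard-coded per-board GPIO values in cs_automute() with gpio_eapd_hp / gpio_eapd_speaker fields in the spec, and add a codec-SSID based CS420X_APPLE fallback in place of the 27-inch iMac PCI quirk that clashed with other models. A sketch of the resulting automute selection, with a stand-in struct and the field values taken from the hunk:

#include <stdio.h>

struct cs_spec_stub {
	unsigned int gpio_eapd_hp;	/* e.g. 0x02 = GPIO1 */
	unsigned int gpio_eapd_speaker;	/* e.g. 0x08 = GPIO3 */
};

static unsigned int eapd_gpio(const struct cs_spec_stub *spec, int hp_present)
{
	return hp_present ? spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
}

int main(void)
{
	struct cs_spec_stub apple = { 0x02, 0x08 };

	printf("hp plugged:   0x%02x\n", eapd_gpio(&apple, 1));
	printf("hp unplugged: 0x%02x\n", eapd_gpio(&apple, 0));
	return 0;
}
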
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 9850c5b481ea..c505fd5d338c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -69,6 +69,7 @@ struct hdmi_spec_per_pin { | |||
69 | struct hda_codec *codec; | 69 | struct hda_codec *codec; |
70 | struct hdmi_eld sink_eld; | 70 | struct hdmi_eld sink_eld; |
71 | struct delayed_work work; | 71 | struct delayed_work work; |
72 | int repoll_count; | ||
72 | }; | 73 | }; |
73 | 74 | ||
74 | struct hdmi_spec { | 75 | struct hdmi_spec { |
@@ -748,7 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx, | |||
748 | * Unsolicited events | 749 | * Unsolicited events |
749 | */ | 750 | */ |
750 | 751 | ||
751 | static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry); | 752 | static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll); |
752 | 753 | ||
753 | static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) | 754 | static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) |
754 | { | 755 | { |
@@ -766,7 +767,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) | |||
766 | if (pin_idx < 0) | 767 | if (pin_idx < 0) |
767 | return; | 768 | return; |
768 | 769 | ||
769 | hdmi_present_sense(&spec->pins[pin_idx], true); | 770 | hdmi_present_sense(&spec->pins[pin_idx], 1); |
770 | } | 771 | } |
771 | 772 | ||
772 | static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) | 773 | static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) |
@@ -960,7 +961,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx) | |||
960 | return 0; | 961 | return 0; |
961 | } | 962 | } |
962 | 963 | ||
963 | static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry) | 964 | static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) |
964 | { | 965 | { |
965 | struct hda_codec *codec = per_pin->codec; | 966 | struct hda_codec *codec = per_pin->codec; |
966 | struct hdmi_eld *eld = &per_pin->sink_eld; | 967 | struct hdmi_eld *eld = &per_pin->sink_eld; |
@@ -989,7 +990,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry) | |||
989 | if (eld_valid) { | 990 | if (eld_valid) { |
990 | if (!snd_hdmi_get_eld(eld, codec, pin_nid)) | 991 | if (!snd_hdmi_get_eld(eld, codec, pin_nid)) |
991 | snd_hdmi_show_eld(eld); | 992 | snd_hdmi_show_eld(eld); |
992 | else if (retry) { | 993 | else if (repoll) { |
993 | queue_delayed_work(codec->bus->workq, | 994 | queue_delayed_work(codec->bus->workq, |
994 | &per_pin->work, | 995 | &per_pin->work, |
995 | msecs_to_jiffies(300)); | 996 | msecs_to_jiffies(300)); |
@@ -1004,7 +1005,10 @@ static void hdmi_repoll_eld(struct work_struct *work) | |||
1004 | struct hdmi_spec_per_pin *per_pin = | 1005 | struct hdmi_spec_per_pin *per_pin = |
1005 | container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); | 1006 | container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); |
1006 | 1007 | ||
1007 | hdmi_present_sense(per_pin, false); | 1008 | if (per_pin->repoll_count++ > 6) |
1009 | per_pin->repoll_count = 0; | ||
1010 | |||
1011 | hdmi_present_sense(per_pin, per_pin->repoll_count); | ||
1008 | } | 1012 | } |
1009 | 1013 | ||
1010 | static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) | 1014 | static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) |
@@ -1235,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx) | |||
1235 | if (err < 0) | 1239 | if (err < 0) |
1236 | return err; | 1240 | return err; |
1237 | 1241 | ||
1238 | hdmi_present_sense(per_pin, false); | 1242 | hdmi_present_sense(per_pin, 0); |
1239 | return 0; | 1243 | return 0; |
1240 | } | 1244 | } |
1241 | 1245 | ||
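
hdmi_present_sense() now takes a repoll count instead of a bool: each deferred ELD re-read bumps per_pin->repoll_count, and once it passes 6 the counter is reset and the work is not requeued, so a sink that never produces valid ELD data cannot keep the delayed work rescheduling forever. A userspace sketch of that bounded-retry loop (the stub always fails, so it only shows the give-up path):

#include <stdio.h>

static int read_eld_stub(void) { return -1; }	/* always "not ready" here */

int main(void)
{
	int repoll_count = 0;

	for (;;) {
		if (read_eld_stub() == 0)
			break;			/* got valid ELD data */
		if (repoll_count++ > 6) {	/* same bound as the driver */
			printf("giving up after %d retries\n", repoll_count - 1);
			break;
		}
		/* the driver would queue_delayed_work() for ~300 ms here */
	}
	return 0;
}
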
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 336d14eb72af..cbde019d3d52 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur) | |||
277 | return false; | 277 | return false; |
278 | } | 278 | } |
279 | 279 | ||
280 | static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx) | ||
281 | { | ||
282 | return spec->capsrc_nids ? | ||
283 | spec->capsrc_nids[idx] : spec->adc_nids[idx]; | ||
284 | } | ||
285 | |||
280 | /* select the given imux item; either unmute exclusively or select the route */ | 286 | /* select the given imux item; either unmute exclusively or select the route */ |
281 | static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, | 287 | static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, |
282 | unsigned int idx, bool force) | 288 | unsigned int idx, bool force) |
@@ -303,8 +309,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, | |||
303 | adc_idx = spec->dyn_adc_idx[idx]; | 309 | adc_idx = spec->dyn_adc_idx[idx]; |
304 | } | 310 | } |
305 | 311 | ||
306 | nid = spec->capsrc_nids ? | 312 | nid = get_capsrc(spec, adc_idx); |
307 | spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx]; | ||
308 | 313 | ||
309 | /* no selection? */ | 314 | /* no selection? */ |
310 | num_conns = snd_hda_get_conn_list(codec, nid, NULL); | 315 | num_conns = snd_hda_get_conn_list(codec, nid, NULL); |
@@ -1054,8 +1059,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec) | |||
1054 | spec->imux_pins[2] = spec->dock_mic_pin; | 1059 | spec->imux_pins[2] = spec->dock_mic_pin; |
1055 | for (i = 0; i < 3; i++) { | 1060 | for (i = 0; i < 3; i++) { |
1056 | strcpy(imux->items[i].label, texts[i]); | 1061 | strcpy(imux->items[i].label, texts[i]); |
1057 | if (spec->imux_pins[i]) | 1062 | if (spec->imux_pins[i]) { |
1063 | hda_nid_t pin = spec->imux_pins[i]; | ||
1064 | int c; | ||
1065 | for (c = 0; c < spec->num_adc_nids; c++) { | ||
1066 | hda_nid_t cap = get_capsrc(spec, c); | ||
1067 | int idx = get_connection_index(codec, cap, pin); | ||
1068 | if (idx >= 0) { | ||
1069 | imux->items[i].index = idx; | ||
1070 | break; | ||
1071 | } | ||
1072 | } | ||
1058 | imux->num_items = i + 1; | 1073 | imux->num_items = i + 1; |
1074 | } | ||
1059 | } | 1075 | } |
1060 | spec->num_mux_defs = 1; | 1076 | spec->num_mux_defs = 1; |
1061 | spec->input_mux = imux; | 1077 | spec->input_mux = imux; |
@@ -1957,10 +1973,8 @@ static int alc_build_controls(struct hda_codec *codec) | |||
1957 | if (!kctl) | 1973 | if (!kctl) |
1958 | kctl = snd_hda_find_mixer_ctl(codec, "Input Source"); | 1974 | kctl = snd_hda_find_mixer_ctl(codec, "Input Source"); |
1959 | for (i = 0; kctl && i < kctl->count; i++) { | 1975 | for (i = 0; kctl && i < kctl->count; i++) { |
1960 | const hda_nid_t *nids = spec->capsrc_nids; | 1976 | err = snd_hda_add_nid(codec, kctl, i, |
1961 | if (!nids) | 1977 | get_capsrc(spec, i)); |
1962 | nids = spec->adc_nids; | ||
1963 | err = snd_hda_add_nid(codec, kctl, i, nids[i]); | ||
1964 | if (err < 0) | 1978 | if (err < 0) |
1965 | return err; | 1979 | return err; |
1966 | } | 1980 | } |
@@ -2747,8 +2761,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec) | |||
2747 | } | 2761 | } |
2748 | 2762 | ||
2749 | for (c = 0; c < num_adcs; c++) { | 2763 | for (c = 0; c < num_adcs; c++) { |
2750 | hda_nid_t cap = spec->capsrc_nids ? | 2764 | hda_nid_t cap = get_capsrc(spec, c); |
2751 | spec->capsrc_nids[c] : spec->adc_nids[c]; | ||
2752 | idx = get_connection_index(codec, cap, pin); | 2765 | idx = get_connection_index(codec, cap, pin); |
2753 | if (idx >= 0) { | 2766 | if (idx >= 0) { |
2754 | spec->imux_pins[imux->num_items] = pin; | 2767 | spec->imux_pins[imux->num_items] = pin; |
@@ -3694,8 +3707,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin) | |||
3694 | if (!pin) | 3707 | if (!pin) |
3695 | return 0; | 3708 | return 0; |
3696 | for (i = 0; i < spec->num_adc_nids; i++) { | 3709 | for (i = 0; i < spec->num_adc_nids; i++) { |
3697 | hda_nid_t cap = spec->capsrc_nids ? | 3710 | hda_nid_t cap = get_capsrc(spec, i); |
3698 | spec->capsrc_nids[i] : spec->adc_nids[i]; | ||
3699 | int idx; | 3711 | int idx; |
3700 | 3712 | ||
3701 | idx = get_connection_index(codec, cap, pin); | 3713 | idx = get_connection_index(codec, cap, pin); |
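
The patch_realtek hunks above factor the repeated "capsrc_nids[i] if present, else adc_nids[i]" choice into get_capsrc() and use it when rebuilding the auto-mic input mux, which now also records the real connection index of each pin instead of leaving it at zero. A minimal sketch of the helper and its fallback:

#include <stdio.h>

struct alc_spec_stub {
	const unsigned short *capsrc_nids;	/* may be NULL */
	const unsigned short *adc_nids;
};

static unsigned short get_capsrc(const struct alc_spec_stub *spec, int idx)
{
	return spec->capsrc_nids ? spec->capsrc_nids[idx] : spec->adc_nids[idx];
}

int main(void)
{
	unsigned short adcs[] = { 0x08, 0x09 };
	struct alc_spec_stub spec = { NULL, adcs };	/* no dedicated capsrc NIDs */

	printf("capsrc for adc 1: 0x%02x\n", (unsigned)get_capsrc(&spec, 1));
	return 0;
}
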
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 470f6f286e81..f3658658548e 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -1641,6 +1641,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = { | |||
1641 | "Alienware M17x", STAC_ALIENWARE_M17X), | 1641 | "Alienware M17x", STAC_ALIENWARE_M17X), |
1642 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a, | 1642 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a, |
1643 | "Alienware M17x", STAC_ALIENWARE_M17X), | 1643 | "Alienware M17x", STAC_ALIENWARE_M17X), |
1644 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, | ||
1645 | "Alienware M17x", STAC_ALIENWARE_M17X), | ||
1644 | {} /* terminator */ | 1646 | {} /* terminator */ |
1645 | }; | 1647 | }; |
1646 | 1648 | ||
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 431c0d417eeb..b5137629f8e9 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -208,6 +208,7 @@ struct via_spec { | |||
208 | /* work to check hp jack state */ | 208 | /* work to check hp jack state */ |
209 | struct hda_codec *codec; | 209 | struct hda_codec *codec; |
210 | struct delayed_work vt1708_hp_work; | 210 | struct delayed_work vt1708_hp_work; |
211 | int hp_work_active; | ||
211 | int vt1708_jack_detect; | 212 | int vt1708_jack_detect; |
212 | int vt1708_hp_present; | 213 | int vt1708_hp_present; |
213 | 214 | ||
@@ -305,27 +306,35 @@ enum { | |||
305 | static void analog_low_current_mode(struct hda_codec *codec); | 306 | static void analog_low_current_mode(struct hda_codec *codec); |
306 | static bool is_aa_path_mute(struct hda_codec *codec); | 307 | static bool is_aa_path_mute(struct hda_codec *codec); |
307 | 308 | ||
308 | static void vt1708_start_hp_work(struct via_spec *spec) | 309 | #define hp_detect_with_aa(codec) \ |
310 | (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \ | ||
311 | !is_aa_path_mute(codec)) | ||
312 | |||
313 | static void vt1708_stop_hp_work(struct via_spec *spec) | ||
309 | { | 314 | { |
310 | if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) | 315 | if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) |
311 | return; | 316 | return; |
312 | snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, | 317 | if (spec->hp_work_active) { |
313 | !spec->vt1708_jack_detect); | 318 | snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1); |
314 | if (!delayed_work_pending(&spec->vt1708_hp_work)) | 319 | cancel_delayed_work_sync(&spec->vt1708_hp_work); |
315 | schedule_delayed_work(&spec->vt1708_hp_work, | 320 | spec->hp_work_active = 0; |
316 | msecs_to_jiffies(100)); | 321 | } |
317 | } | 322 | } |
318 | 323 | ||
319 | static void vt1708_stop_hp_work(struct via_spec *spec) | 324 | static void vt1708_update_hp_work(struct via_spec *spec) |
320 | { | 325 | { |
321 | if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) | 326 | if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) |
322 | return; | 327 | return; |
323 | if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1 | 328 | if (spec->vt1708_jack_detect && |
324 | && !is_aa_path_mute(spec->codec)) | 329 | (spec->active_streams || hp_detect_with_aa(spec->codec))) { |
325 | return; | 330 | if (!spec->hp_work_active) { |
326 | snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, | 331 | snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0); |
327 | !spec->vt1708_jack_detect); | 332 | schedule_delayed_work(&spec->vt1708_hp_work, |
328 | cancel_delayed_work_sync(&spec->vt1708_hp_work); | 333 | msecs_to_jiffies(100)); |
334 | spec->hp_work_active = 1; | ||
335 | } | ||
336 | } else if (!hp_detect_with_aa(spec->codec)) | ||
337 | vt1708_stop_hp_work(spec); | ||
329 | } | 338 | } |
330 | 339 | ||
331 | static void set_widgets_power_state(struct hda_codec *codec) | 340 | static void set_widgets_power_state(struct hda_codec *codec) |
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol, | |||
343 | 352 | ||
344 | set_widgets_power_state(codec); | 353 | set_widgets_power_state(codec); |
345 | analog_low_current_mode(snd_kcontrol_chip(kcontrol)); | 354 | analog_low_current_mode(snd_kcontrol_chip(kcontrol)); |
346 | if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) { | 355 | vt1708_update_hp_work(codec->spec); |
347 | if (is_aa_path_mute(codec)) | ||
348 | vt1708_start_hp_work(codec->spec); | ||
349 | else | ||
350 | vt1708_stop_hp_work(codec->spec); | ||
351 | } | ||
352 | return change; | 356 | return change; |
353 | } | 357 | } |
354 | 358 | ||
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
1154 | spec->cur_dac_stream_tag = stream_tag; | 1158 | spec->cur_dac_stream_tag = stream_tag; |
1155 | spec->cur_dac_format = format; | 1159 | spec->cur_dac_format = format; |
1156 | mutex_unlock(&spec->config_mutex); | 1160 | mutex_unlock(&spec->config_mutex); |
1157 | vt1708_start_hp_work(spec); | 1161 | vt1708_update_hp_work(spec); |
1158 | return 0; | 1162 | return 0; |
1159 | } | 1163 | } |
1160 | 1164 | ||
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
1174 | spec->cur_hp_stream_tag = stream_tag; | 1178 | spec->cur_hp_stream_tag = stream_tag; |
1175 | spec->cur_hp_format = format; | 1179 | spec->cur_hp_format = format; |
1176 | mutex_unlock(&spec->config_mutex); | 1180 | mutex_unlock(&spec->config_mutex); |
1177 | vt1708_start_hp_work(spec); | 1181 | vt1708_update_hp_work(spec); |
1178 | return 0; | 1182 | return 0; |
1179 | } | 1183 | } |
1180 | 1184 | ||
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo, | |||
1188 | snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); | 1192 | snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); |
1189 | spec->active_streams &= ~STREAM_MULTI_OUT; | 1193 | spec->active_streams &= ~STREAM_MULTI_OUT; |
1190 | mutex_unlock(&spec->config_mutex); | 1194 | mutex_unlock(&spec->config_mutex); |
1191 | vt1708_stop_hp_work(spec); | 1195 | vt1708_update_hp_work(spec); |
1192 | return 0; | 1196 | return 0; |
1193 | } | 1197 | } |
1194 | 1198 | ||
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo, | |||
1203 | snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0); | 1207 | snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0); |
1204 | spec->active_streams &= ~STREAM_INDEP_HP; | 1208 | spec->active_streams &= ~STREAM_INDEP_HP; |
1205 | mutex_unlock(&spec->config_mutex); | 1209 | mutex_unlock(&spec->config_mutex); |
1206 | vt1708_stop_hp_work(spec); | 1210 | vt1708_update_hp_work(spec); |
1207 | return 0; | 1211 | return 0; |
1208 | } | 1212 | } |
1209 | 1213 | ||
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec) | |||
1645 | int nums; | 1649 | int nums; |
1646 | struct via_spec *spec = codec->spec; | 1650 | struct via_spec *spec = codec->spec; |
1647 | 1651 | ||
1648 | if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0]) | 1652 | if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] && |
1653 | (spec->codec_type != VT1708 || spec->vt1708_jack_detect)) | ||
1649 | present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]); | 1654 | present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]); |
1650 | 1655 | ||
1651 | if (spec->smart51_enabled) | 1656 | if (spec->smart51_enabled) |
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol, | |||
2612 | 2617 | ||
2613 | if (spec->codec_type != VT1708) | 2618 | if (spec->codec_type != VT1708) |
2614 | return 0; | 2619 | return 0; |
2615 | spec->vt1708_jack_detect = | ||
2616 | !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1); | ||
2617 | ucontrol->value.integer.value[0] = spec->vt1708_jack_detect; | 2620 | ucontrol->value.integer.value[0] = spec->vt1708_jack_detect; |
2618 | return 0; | 2621 | return 0; |
2619 | } | 2622 | } |
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol, | |||
2623 | { | 2626 | { |
2624 | struct hda_codec *codec = snd_kcontrol_chip(kcontrol); | 2627 | struct hda_codec *codec = snd_kcontrol_chip(kcontrol); |
2625 | struct via_spec *spec = codec->spec; | 2628 | struct via_spec *spec = codec->spec; |
2626 | int change; | 2629 | int val; |
2627 | 2630 | ||
2628 | if (spec->codec_type != VT1708) | 2631 | if (spec->codec_type != VT1708) |
2629 | return 0; | 2632 | return 0; |
2630 | spec->vt1708_jack_detect = ucontrol->value.integer.value[0]; | 2633 | val = !!ucontrol->value.integer.value[0]; |
2631 | change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8)) | 2634 | if (spec->vt1708_jack_detect == val) |
2632 | == !spec->vt1708_jack_detect; | 2635 | return 0; |
2633 | if (spec->vt1708_jack_detect) { | 2636 | spec->vt1708_jack_detect = val; |
2637 | if (spec->vt1708_jack_detect && | ||
2638 | snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) { | ||
2634 | mute_aa_path(codec, 1); | 2639 | mute_aa_path(codec, 1); |
2635 | notify_aa_path_ctls(codec); | 2640 | notify_aa_path_ctls(codec); |
2636 | } | 2641 | } |
2637 | return change; | 2642 | via_hp_automute(codec); |
2643 | vt1708_update_hp_work(spec); | ||
2644 | return 1; | ||
2638 | } | 2645 | } |
2639 | 2646 | ||
2640 | static const struct snd_kcontrol_new vt1708_jack_detect_ctl = { | 2647 | static const struct snd_kcontrol_new vt1708_jack_detect_ctl = { |
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec) | |||
2771 | via_auto_init_unsol_event(codec); | 2778 | via_auto_init_unsol_event(codec); |
2772 | 2779 | ||
2773 | via_hp_automute(codec); | 2780 | via_hp_automute(codec); |
2781 | vt1708_update_hp_work(spec); | ||
2774 | 2782 | ||
2775 | return 0; | 2783 | return 0; |
2776 | } | 2784 | } |
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work) | |||
2787 | spec->vt1708_hp_present ^= 1; | 2795 | spec->vt1708_hp_present ^= 1; |
2788 | via_hp_automute(spec->codec); | 2796 | via_hp_automute(spec->codec); |
2789 | } | 2797 | } |
2790 | vt1708_start_hp_work(spec); | 2798 | if (spec->vt1708_jack_detect) |
2799 | schedule_delayed_work(&spec->vt1708_hp_work, | ||
2800 | msecs_to_jiffies(100)); | ||
2791 | } | 2801 | } |
2792 | 2802 | ||
2793 | static int get_mux_nids(struct hda_codec *codec) | 2803 | static int get_mux_nids(struct hda_codec *codec) |
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c index 5c8717e29eeb..8c3e7fcefd99 100644 --- a/sound/pci/lx6464es/lx_core.c +++ b/sound/pci/lx6464es/lx_core.c | |||
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port) | |||
78 | return ioread32(address); | 78 | return ioread32(address); |
79 | } | 79 | } |
80 | 80 | ||
81 | void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len) | 81 | static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, |
82 | u32 len) | ||
82 | { | 83 | { |
83 | void __iomem *address = lx_dsp_register(chip, port); | 84 | u32 __iomem *address = lx_dsp_register(chip, port); |
84 | memcpy_fromio(data, address, len*sizeof(u32)); | 85 | int i; |
86 | |||
87 | /* we cannot use memcpy_fromio */ | ||
88 | for (i = 0; i != len; ++i) | ||
89 | data[i] = ioread32(address + i); | ||
85 | } | 90 | } |
86 | 91 | ||
87 | 92 | ||
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data) | |||
91 | iowrite32(data, address); | 96 | iowrite32(data, address); |
92 | } | 97 | } |
93 | 98 | ||
94 | void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data, | 99 | static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, |
95 | u32 len) | 100 | const u32 *data, u32 len) |
96 | { | 101 | { |
97 | void __iomem *address = lx_dsp_register(chip, port); | 102 | u32 __iomem *address = lx_dsp_register(chip, port); |
98 | memcpy_toio(address, data, len*sizeof(u32)); | 103 | int i; |
104 | |||
105 | /* we cannot use memcpy_to */ | ||
106 | for (i = 0; i != len; ++i) | ||
107 | iowrite32(data[i], address + i); | ||
99 | } | 108 | } |
100 | 109 | ||
101 | 110 | ||
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h index 1dd562980b6c..4d7ff797a646 100644 --- a/sound/pci/lx6464es/lx_core.h +++ b/sound/pci/lx6464es/lx_core.h | |||
@@ -72,10 +72,7 @@ enum { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port); | 74 | unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port); |
75 | void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len); | ||
76 | void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data); | 75 | void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data); |
77 | void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data, | ||
78 | u32 len); | ||
79 | 76 | ||
80 | /* plx register access */ | 77 | /* plx register access */ |
81 | enum { | 78 | enum { |
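
The lx6464es hunks above stop using memcpy_fromio()/memcpy_toio() for the command buffers, as the comments note the hardware cannot be accessed that way, and copy one 32-bit word at a time with ioread32()/iowrite32(); the helpers also become static and leave lx_core.h. A plain-C analogue of the word-at-a-time copy, with volatile standing in for the __iomem accessors:

#include <stdint.h>
#include <stdio.h>

static void reg_readbuf(const volatile uint32_t *regs, uint32_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i != len; ++i)
		data[i] = regs[i];	/* ioread32(address + i) in the driver */
}

int main(void)
{
	uint32_t fake_regs[4] = { 1, 2, 3, 4 }, buf[4];

	reg_readbuf(fake_regs, buf, 4);
	printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
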
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index e760adad9523..19ee2203cbb5 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c | |||
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card, | |||
6518 | hdspm->io_type = AES32; | 6518 | hdspm->io_type = AES32; |
6519 | hdspm->card_name = "RME AES32"; | 6519 | hdspm->card_name = "RME AES32"; |
6520 | hdspm->midiPorts = 2; | 6520 | hdspm->midiPorts = 2; |
6521 | } else if ((hdspm->firmware_rev == 0xd5) || | 6521 | } else if ((hdspm->firmware_rev == 0xd2) || |
6522 | ((hdspm->firmware_rev >= 0xc8) && | 6522 | ((hdspm->firmware_rev >= 0xc8) && |
6523 | (hdspm->firmware_rev <= 0xcf))) { | 6523 | (hdspm->firmware_rev <= 0xcf))) { |
6524 | hdspm->io_type = MADI; | 6524 | hdspm->io_type = MADI; |
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c index 1ccf8dd47576..45c63028b40d 100644 --- a/sound/soc/codecs/adau1373.c +++ b/sound/soc/codecs/adau1373.c | |||
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = { | |||
245 | }; | 245 | }; |
246 | 246 | ||
247 | static const unsigned int adau1373_bass_tlv[] = { | 247 | static const unsigned int adau1373_bass_tlv[] = { |
248 | TLV_DB_RANGE_HEAD(4), | 248 | TLV_DB_RANGE_HEAD(3), |
249 | 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), | 249 | 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), |
250 | 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), | 250 | 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), |
251 | 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), | 251 | 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), |
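
The adau1373 hunk above, and the matching one-liners further down for rt5631, sgtl5000, wm8962, wm8993, wm9090 and wm_hubs, correct the argument of TLV_DB_RANGE_HEAD(): it must equal the number of (min, max, item) ranges that follow, otherwise ALSA userspace mis-parses the dB metadata. A small sketch of deriving the count from the table itself instead of typing it by hand; this is only an illustration, not the kernel macro:

#include <stdio.h>

struct db_range { int min; int max; int db_lo; };

#define DB_RANGES(name, ...)						\
	static const struct db_range name[] = { __VA_ARGS__ };		\
	enum { name##_count = sizeof(name) / sizeof((name)[0]) }

DB_RANGES(bass_tlv,
	{ 0, 2,  -600 },
	{ 3, 4,   950 },
	{ 5, 7,  1400 });

int main(void)
{
	/* the header count is computed (3 here), so it cannot drift */
	printf("%d ranges, first min %d\n", bass_tlv_count, bass_tlv[0].min);
	return 0;
}
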
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 23d1bd5dadda..69fde1506fe1 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c | |||
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg) | |||
434 | { | 434 | { |
435 | int ret; | 435 | int ret; |
436 | /* Set power-down bit */ | 436 | /* Set power-down bit */ |
437 | ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN); | 437 | ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, |
438 | CS4271_MODE2_PDN); | ||
438 | if (ret < 0) | 439 | if (ret < 0) |
439 | return ret; | 440 | return ret; |
440 | return 0; | 441 | return 0; |
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec) | |||
501 | return ret; | 502 | return ret; |
502 | } | 503 | } |
503 | 504 | ||
504 | ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, | 505 | ret = snd_soc_update_bits(codec, CS4271_MODE2, |
505 | CS4271_MODE2_PDN | CS4271_MODE2_CPEN); | 506 | CS4271_MODE2_PDN | CS4271_MODE2_CPEN, |
507 | CS4271_MODE2_PDN | CS4271_MODE2_CPEN); | ||
506 | if (ret < 0) | 508 | if (ret < 0) |
507 | return ret; | 509 | return ret; |
508 | ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0); | 510 | ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0); |
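
The cs4271 calls above passed the snd_soc_update_bits() arguments in the wrong roles: the third parameter is the mask of bits allowed to change and the fourth their new value, so a mask of 0 meant the power-down and charge-pump bits were never actually written. A two-line model of the read-modify-write semantics:

#include <stdio.h>

static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int value)
{
	return (reg & ~mask) | (value & mask);	/* only masked bits may change */
}

int main(void)
{
	unsigned int mode2 = 0x00, pdn = 0x01;

	printf("mask=0:   0x%02x\n", update_bits(mode2, 0, pdn));	/* unchanged */
	printf("mask=PDN: 0x%02x\n", update_bits(mode2, pdn, pdn));	/* bit set */
	return 0;
}
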
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c index 27a078cbb6eb..4646e808b90a 100644 --- a/sound/soc/codecs/rt5631.c +++ b/sound/soc/codecs/rt5631.c | |||
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0); | |||
177 | static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); | 177 | static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); |
178 | /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */ | 178 | /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */ |
179 | static unsigned int mic_bst_tlv[] = { | 179 | static unsigned int mic_bst_tlv[] = { |
180 | TLV_DB_RANGE_HEAD(6), | 180 | TLV_DB_RANGE_HEAD(7), |
181 | 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), | 181 | 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), |
182 | 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), | 182 | 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), |
183 | 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), | 183 | 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index d15695d1c273..bbcf921166f7 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0); | |||
365 | 365 | ||
366 | /* tlv for mic gain, 0db 20db 30db 40db */ | 366 | /* tlv for mic gain, 0db 20db 30db 40db */ |
367 | static const unsigned int mic_gain_tlv[] = { | 367 | static const unsigned int mic_gain_tlv[] = { |
368 | TLV_DB_RANGE_HEAD(4), | 368 | TLV_DB_RANGE_HEAD(2), |
369 | 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), | 369 | 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), |
370 | 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0), | 370 | 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0), |
371 | }; | 371 | }; |
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c index bb82408ab8e1..d2f37152f940 100644 --- a/sound/soc/codecs/sta32x.c +++ b/sound/soc/codecs/sta32x.c | |||
@@ -76,6 +76,8 @@ struct sta32x_priv { | |||
76 | 76 | ||
77 | unsigned int mclk; | 77 | unsigned int mclk; |
78 | unsigned int format; | 78 | unsigned int format; |
79 | |||
80 | u32 coef_shadow[STA32X_COEF_COUNT]; | ||
79 | }; | 81 | }; |
80 | 82 | ||
81 | static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1); | 83 | static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1); |
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol, | |||
227 | struct snd_ctl_elem_value *ucontrol) | 229 | struct snd_ctl_elem_value *ucontrol) |
228 | { | 230 | { |
229 | struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); | 231 | struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); |
232 | struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec); | ||
230 | int numcoef = kcontrol->private_value >> 16; | 233 | int numcoef = kcontrol->private_value >> 16; |
231 | int index = kcontrol->private_value & 0xffff; | 234 | int index = kcontrol->private_value & 0xffff; |
232 | unsigned int cfud; | 235 | unsigned int cfud; |
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol, | |||
239 | snd_soc_write(codec, STA32X_CFUD, cfud); | 242 | snd_soc_write(codec, STA32X_CFUD, cfud); |
240 | 243 | ||
241 | snd_soc_write(codec, STA32X_CFADDR2, index); | 244 | snd_soc_write(codec, STA32X_CFADDR2, index); |
245 | for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++) | ||
246 | sta32x->coef_shadow[index + i] = | ||
247 | (ucontrol->value.bytes.data[3 * i] << 16) | ||
248 | | (ucontrol->value.bytes.data[3 * i + 1] << 8) | ||
249 | | (ucontrol->value.bytes.data[3 * i + 2]); | ||
242 | for (i = 0; i < 3 * numcoef; i++) | 250 | for (i = 0; i < 3 * numcoef; i++) |
243 | snd_soc_write(codec, STA32X_B1CF1 + i, | 251 | snd_soc_write(codec, STA32X_B1CF1 + i, |
244 | ucontrol->value.bytes.data[i]); | 252 | ucontrol->value.bytes.data[i]); |
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol, | |||
252 | return 0; | 260 | return 0; |
253 | } | 261 | } |
254 | 262 | ||
263 | int sta32x_sync_coef_shadow(struct snd_soc_codec *codec) | ||
264 | { | ||
265 | struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec); | ||
266 | unsigned int cfud; | ||
267 | int i; | ||
268 | |||
269 | /* preserve reserved bits in STA32X_CFUD */ | ||
270 | cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0; | ||
271 | |||
272 | for (i = 0; i < STA32X_COEF_COUNT; i++) { | ||
273 | snd_soc_write(codec, STA32X_CFADDR2, i); | ||
274 | snd_soc_write(codec, STA32X_B1CF1, | ||
275 | (sta32x->coef_shadow[i] >> 16) & 0xff); | ||
276 | snd_soc_write(codec, STA32X_B1CF2, | ||
277 | (sta32x->coef_shadow[i] >> 8) & 0xff); | ||
278 | snd_soc_write(codec, STA32X_B1CF3, | ||
279 | (sta32x->coef_shadow[i]) & 0xff); | ||
280 | /* chip documentation does not say if the bits are | ||
281 | * self-clearing, so do it explicitly */ | ||
282 | snd_soc_write(codec, STA32X_CFUD, cfud); | ||
283 | snd_soc_write(codec, STA32X_CFUD, cfud | 0x01); | ||
284 | } | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | int sta32x_cache_sync(struct snd_soc_codec *codec) | ||
289 | { | ||
290 | unsigned int mute; | ||
291 | int rc; | ||
292 | |||
293 | if (!codec->cache_sync) | ||
294 | return 0; | ||
295 | |||
296 | /* mute during register sync */ | ||
297 | mute = snd_soc_read(codec, STA32X_MMUTE); | ||
298 | snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE); | ||
299 | sta32x_sync_coef_shadow(codec); | ||
300 | rc = snd_soc_cache_sync(codec); | ||
301 | snd_soc_write(codec, STA32X_MMUTE, mute); | ||
302 | return rc; | ||
303 | } | ||
304 | |||
255 | #define SINGLE_COEF(xname, index) \ | 305 | #define SINGLE_COEF(xname, index) \ |
256 | { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ | 306 | { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ |
257 | .info = sta32x_coefficient_info, \ | 307 | .info = sta32x_coefficient_info, \ |
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec, | |||
661 | return ret; | 711 | return ret; |
662 | } | 712 | } |
663 | 713 | ||
664 | snd_soc_cache_sync(codec); | 714 | sta32x_cache_sync(codec); |
665 | } | 715 | } |
666 | 716 | ||
667 | /* Power up to mute */ | 717 | /* Power up to mute */ |
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec) | |||
790 | STA32X_CxCFG_OM_MASK, | 840 | STA32X_CxCFG_OM_MASK, |
791 | 2 << STA32X_CxCFG_OM_SHIFT); | 841 | 2 << STA32X_CxCFG_OM_SHIFT); |
792 | 842 | ||
843 | /* initialize coefficient shadow RAM with reset values */ | ||
844 | for (i = 4; i <= 49; i += 5) | ||
845 | sta32x->coef_shadow[i] = 0x400000; | ||
846 | for (i = 50; i <= 54; i++) | ||
847 | sta32x->coef_shadow[i] = 0x7fffff; | ||
848 | sta32x->coef_shadow[55] = 0x5a9df7; | ||
849 | sta32x->coef_shadow[56] = 0x7fffff; | ||
850 | sta32x->coef_shadow[59] = 0x7fffff; | ||
851 | sta32x->coef_shadow[60] = 0x400000; | ||
852 | sta32x->coef_shadow[61] = 0x400000; | ||
853 | |||
793 | sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); | 854 | sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); |
794 | /* Bias level configuration will have done an extra enable */ | 855 | /* Bias level configuration will have done an extra enable */ |
795 | regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies); | 856 | regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies); |
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h index b97ee5a75667..d8e32a6262ee 100644 --- a/sound/soc/codecs/sta32x.h +++ b/sound/soc/codecs/sta32x.h | |||
@@ -19,6 +19,7 @@ | |||
19 | /* STA326 register addresses */ | 19 | /* STA326 register addresses */ |
20 | 20 | ||
21 | #define STA32X_REGISTER_COUNT 0x2d | 21 | #define STA32X_REGISTER_COUNT 0x2d |
22 | #define STA32X_COEF_COUNT 62 | ||
22 | 23 | ||
23 | #define STA32X_CONFA 0x00 | 24 | #define STA32X_CONFA 0x00 |
24 | #define STA32X_CONFB 0x01 | 25 | #define STA32X_CONFB 0x01 |
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 7e5ec03f6f8d..a7c9ae17fc7e 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c | |||
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec, | |||
453 | snd_soc_write(codec, WM8731_PWR, 0xffff); | 453 | snd_soc_write(codec, WM8731_PWR, 0xffff); |
454 | regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), | 454 | regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), |
455 | wm8731->supplies); | 455 | wm8731->supplies); |
456 | codec->cache_sync = 1; | ||
456 | break; | 457 | break; |
457 | } | 458 | } |
458 | codec->dapm.bias_level = level; | 459 | codec->dapm.bias_level = level; |
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index a9504710bb69..3a629d0d690e 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c | |||
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol, | |||
190 | struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); | 190 | struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); |
191 | u16 ioctl; | 191 | u16 ioctl; |
192 | 192 | ||
193 | if (wm8753->dai_func == ucontrol->value.integer.value[0]) | ||
194 | return 0; | ||
195 | |||
193 | if (codec->active) | 196 | if (codec->active) |
194 | return -EBUSY; | 197 | return -EBUSY; |
195 | 198 | ||
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 91d3c6dbeba3..53edd9a8c758 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec) | |||
1973 | static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0); | 1973 | static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0); |
1974 | static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0); | 1974 | static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0); |
1975 | static const unsigned int mixinpga_tlv[] = { | 1975 | static const unsigned int mixinpga_tlv[] = { |
1976 | TLV_DB_RANGE_HEAD(7), | 1976 | TLV_DB_RANGE_HEAD(5), |
1977 | 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), | 1977 | 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), |
1978 | 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0), | 1978 | 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0), |
1979 | 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0), | 1979 | 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0), |
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); | |||
1988 | static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); | 1988 | static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); |
1989 | static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0); | 1989 | static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0); |
1990 | static const unsigned int classd_tlv[] = { | 1990 | static const unsigned int classd_tlv[] = { |
1991 | TLV_DB_RANGE_HEAD(7), | 1991 | TLV_DB_RANGE_HEAD(2), |
1992 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), | 1992 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), |
1993 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), | 1993 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), |
1994 | }; | 1994 | }; |
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c index eec8e1435116..d1a142f48b09 100644 --- a/sound/soc/codecs/wm8993.c +++ b/sound/soc/codecs/wm8993.c | |||
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0); | |||
512 | static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0); | 512 | static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0); |
513 | static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0); | 513 | static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0); |
514 | static const unsigned int drc_max_tlv[] = { | 514 | static const unsigned int drc_max_tlv[] = { |
515 | TLV_DB_RANGE_HEAD(4), | 515 | TLV_DB_RANGE_HEAD(2), |
516 | 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0), | 516 | 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0), |
517 | 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0), | 517 | 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0), |
518 | }; | 518 | }; |
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index 3cd35a02c28c..4a398c3bfe84 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c | |||
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec, | |||
807 | mdelay(100); | 807 | mdelay(100); |
808 | 808 | ||
809 | /* Normal bias enable & soft start off */ | 809 | /* Normal bias enable & soft start off */ |
810 | reg |= WM9081_BIAS_ENA; | ||
811 | reg &= ~WM9081_VMID_RAMP; | 810 | reg &= ~WM9081_VMID_RAMP; |
812 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); | 811 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); |
813 | 812 | ||
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec, | |||
818 | } | 817 | } |
819 | 818 | ||
820 | /* VMID 2*240k */ | 819 | /* VMID 2*240k */ |
821 | reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); | 820 | reg = snd_soc_read(codec, WM9081_VMID_CONTROL); |
822 | reg &= ~WM9081_VMID_SEL_MASK; | 821 | reg &= ~WM9081_VMID_SEL_MASK; |
823 | reg |= 0x04; | 822 | reg |= 0x04; |
824 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); | 823 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); |
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec, | |||
830 | break; | 829 | break; |
831 | 830 | ||
832 | case SND_SOC_BIAS_OFF: | 831 | case SND_SOC_BIAS_OFF: |
833 | /* Startup bias source */ | 832 | /* Startup bias source and disable bias */ |
834 | reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); | 833 | reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); |
835 | reg |= WM9081_BIAS_SRC; | 834 | reg |= WM9081_BIAS_SRC; |
835 | reg &= ~WM9081_BIAS_ENA; | ||
836 | snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg); | 836 | snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg); |
837 | 837 | ||
838 | /* Disable VMID and biases with soft ramping */ | 838 | /* Disable VMID with soft ramping */ |
839 | reg = snd_soc_read(codec, WM9081_VMID_CONTROL); | 839 | reg = snd_soc_read(codec, WM9081_VMID_CONTROL); |
840 | reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA); | 840 | reg &= ~WM9081_VMID_SEL_MASK; |
841 | reg |= WM9081_VMID_RAMP; | 841 | reg |= WM9081_VMID_RAMP; |
842 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); | 842 | snd_soc_write(codec, WM9081_VMID_CONTROL, reg); |
843 | 843 | ||
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c index 2b5252c9e377..f94c06057c64 100644 --- a/sound/soc/codecs/wm9090.c +++ b/sound/soc/codecs/wm9090.c | |||
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec) | |||
177 | } | 177 | } |
178 | 178 | ||
179 | static const unsigned int in_tlv[] = { | 179 | static const unsigned int in_tlv[] = { |
180 | TLV_DB_RANGE_HEAD(6), | 180 | TLV_DB_RANGE_HEAD(3), |
181 | 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0), | 181 | 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0), |
182 | 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0), | 182 | 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0), |
183 | 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0), | 183 | 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0), |
184 | }; | 184 | }; |
185 | static const unsigned int mix_tlv[] = { | 185 | static const unsigned int mix_tlv[] = { |
186 | TLV_DB_RANGE_HEAD(4), | 186 | TLV_DB_RANGE_HEAD(2), |
187 | 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0), | 187 | 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0), |
188 | 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0), | 188 | 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0), |
189 | }; | 189 | }; |
190 | static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); | 190 | static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); |
191 | static const unsigned int spkboost_tlv[] = { | 191 | static const unsigned int spkboost_tlv[] = { |
192 | TLV_DB_RANGE_HEAD(7), | 192 | TLV_DB_RANGE_HEAD(2), |
193 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), | 193 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), |
194 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), | 194 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), |
195 | }; | 195 | }; |
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 84f33d4ea2cd..48e61e912400 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c | |||
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0); | |||
40 | static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1); | 40 | static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1); |
41 | static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0); | 41 | static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0); |
42 | static const unsigned int spkboost_tlv[] = { | 42 | static const unsigned int spkboost_tlv[] = { |
43 | TLV_DB_RANGE_HEAD(7), | 43 | TLV_DB_RANGE_HEAD(2), |
44 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), | 44 | 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), |
45 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), | 45 | 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), |
46 | }; | 46 | }; |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 0268cf989736..83c4bd5b2dd7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev) | |||
694 | 694 | ||
695 | /* Initialize the the device_attribute structure */ | 695 | /* Initialize the the device_attribute structure */ |
696 | dev_attr = &ssi_private->dev_attr; | 696 | dev_attr = &ssi_private->dev_attr; |
697 | sysfs_attr_init(&dev_attr->attr); | ||
697 | dev_attr->attr.name = "statistics"; | 698 | dev_attr->attr.name = "statistics"; |
698 | dev_attr->attr.mode = S_IRUGO; | 699 | dev_attr->attr.mode = S_IRUGO; |
699 | dev_attr->show = fsl_sysfs_ssi_show; | 700 | dev_attr->show = fsl_sysfs_ssi_show; |
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c index 9c0edad90d8b..a4e3237956e2 100644 --- a/sound/soc/nuc900/nuc900-ac97.c +++ b/sound/soc/nuc900/nuc900-ac97.c | |||
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev) | |||
365 | if (ret) | 365 | if (ret) |
366 | goto out3; | 366 | goto out3; |
367 | 367 | ||
368 | mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/ | 368 | /* enbale ac97 multifunction pin */ |
369 | mfp_set_groupg(nuc900_audio->dev, "nuc900-audio"); | ||
369 | 370 | ||
370 | return 0; | 371 | return 0; |
371 | 372 | ||
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 30e2befd6f2a..8b4c2535b266 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl | |||
@@ -747,6 +747,18 @@ sub __eval_option { | |||
747 | # Add space to evaluate the character before $ | 747 | # Add space to evaluate the character before $ |
748 | $option = " $option"; | 748 | $option = " $option"; |
749 | my $retval = ""; | 749 | my $retval = ""; |
750 | my $repeated = 0; | ||
751 | my $parent = 0; | ||
752 | |||
753 | foreach my $test (keys %repeat_tests) { | ||
754 | if ($i >= $test && | ||
755 | $i < $test + $repeat_tests{$test}) { | ||
756 | |||
757 | $repeated = 1; | ||
758 | $parent = $test; | ||
759 | last; | ||
760 | } | ||
761 | } | ||
750 | 762 | ||
751 | while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { | 763 | while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { |
752 | my $start = $1; | 764 | my $start = $1; |
@@ -760,10 +772,14 @@ sub __eval_option { | |||
760 | # otherwise see if the default OPT (without [$i]) exists. | 772 | # otherwise see if the default OPT (without [$i]) exists. |
761 | 773 | ||
762 | my $o = "$var\[$i\]"; | 774 | my $o = "$var\[$i\]"; |
775 | my $parento = "$var\[$parent\]"; | ||
763 | 776 | ||
764 | if (defined($opt{$o})) { | 777 | if (defined($opt{$o})) { |
765 | $o = $opt{$o}; | 778 | $o = $opt{$o}; |
766 | $retval = "$retval$o"; | 779 | $retval = "$retval$o"; |
780 | } elsif ($repeated && defined($opt{$parento})) { | ||
781 | $o = $opt{$parento}; | ||
782 | $retval = "$retval$o"; | ||
767 | } elsif (defined($opt{$var})) { | 783 | } elsif (defined($opt{$var})) { |
768 | $o = $opt{$var}; | 784 | $o = $opt{$var}; |
769 | $retval = "$retval$o"; | 785 | $retval = "$retval$o"; |