513 files changed, 11638 insertions, 5075 deletions
| @@ -3554,12 +3554,12 @@ E: cvance@nai.com | |||
| 3554 | D: portions of the Linux Security Module (LSM) framework and security modules | 3554 | D: portions of the Linux Security Module (LSM) framework and security modules |
| 3555 | 3555 | ||
| 3556 | N: Petr Vandrovec | 3556 | N: Petr Vandrovec |
| 3557 | E: vandrove@vc.cvut.cz | 3557 | E: petr@vandrovec.name |
| 3558 | D: Small contributions to ncpfs | 3558 | D: Small contributions to ncpfs |
| 3559 | D: Matrox framebuffer driver | 3559 | D: Matrox framebuffer driver |
| 3560 | S: Chudenicka 8 | 3560 | S: 21513 Conradia Ct |
| 3561 | S: 10200 Prague 10, Hostivar | 3561 | S: Cupertino, CA 95014 |
| 3562 | S: Czech Republic | 3562 | S: USA |
| 3563 | 3563 | ||
| 3564 | N: Thibaut Varene | 3564 | N: Thibaut Varene |
| 3565 | E: T-Bone@parisc-linux.org | 3565 | E: T-Bone@parisc-linux.org |
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 2df71861e578..d9271e74e488 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
| @@ -1,82 +1,35 @@ | |||
| 1 | Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters | 1 | Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters |
| 2 | =============================================================== | 2 | =============================================================== |
| 3 | 3 | ||
| 4 | September 26, 2006 | 4 | Intel Gigabit Linux driver. |
| 5 | 5 | Copyright(c) 1999 - 2010 Intel Corporation. | |
| 6 | 6 | ||
| 7 | Contents | 7 | Contents |
| 8 | ======== | 8 | ======== |
| 9 | 9 | ||
| 10 | - In This Release | ||
| 11 | - Identifying Your Adapter | 10 | - Identifying Your Adapter |
| 12 | - Building and Installation | ||
| 13 | - Command Line Parameters | 11 | - Command Line Parameters |
| 14 | - Speed and Duplex Configuration | 12 | - Speed and Duplex Configuration |
| 15 | - Additional Configurations | 13 | - Additional Configurations |
| 16 | - Known Issues | ||
| 17 | - Support | 14 | - Support |
| 18 | 15 | ||
| 19 | |||
| 20 | In This Release | ||
| 21 | =============== | ||
| 22 | |||
| 23 | This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family | ||
| 24 | of Adapters. This driver includes support for Itanium(R)2-based systems. | ||
| 25 | |||
| 26 | For questions related to hardware requirements, refer to the documentation | ||
| 27 | supplied with your Intel PRO/1000 adapter. All hardware requirements listed | ||
| 28 | apply to use with Linux. | ||
| 29 | |||
| 30 | The following features are now available in supported kernels: | ||
| 31 | - Native VLANs | ||
| 32 | - Channel Bonding (teaming) | ||
| 33 | - SNMP | ||
| 34 | |||
| 35 | Channel Bonding documentation can be found in the Linux kernel source: | ||
| 36 | /Documentation/networking/bonding.txt | ||
| 37 | |||
| 38 | The driver information previously displayed in the /proc filesystem is not | ||
| 39 | supported in this release. Alternatively, you can use ethtool (version 1.6 | ||
| 40 | or later), lspci, and ifconfig to obtain the same information. | ||
| 41 | |||
| 42 | Instructions on updating ethtool can be found in the section "Additional | ||
| 43 | Configurations" later in this document. | ||
| 44 | |||
| 45 | NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100 | ||
| 46 | support. | ||
| 47 | |||
| 48 | |||
| 49 | Identifying Your Adapter | 16 | Identifying Your Adapter |
| 50 | ======================== | 17 | ======================== |
| 51 | 18 | ||
| 52 | For more information on how to identify your adapter, go to the Adapter & | 19 | For more information on how to identify your adapter, go to the Adapter & |
| 53 | Driver ID Guide at: | 20 | Driver ID Guide at: |
| 54 | 21 | ||
| 55 | http://support.intel.com/support/network/adapter/pro100/21397.htm | 22 | http://support.intel.com/support/go/network/adapter/idguide.htm |
| 56 | 23 | ||
| 57 | For the latest Intel network drivers for Linux, refer to the following | 24 | For the latest Intel network drivers for Linux, refer to the following |
| 58 | website. In the search field, enter your adapter name or type, or use the | 25 | website. In the search field, enter your adapter name or type, or use the |
| 59 | networking link on the left to search for your adapter: | 26 | networking link on the left to search for your adapter: |
| 60 | 27 | ||
| 61 | http://downloadfinder.intel.com/scripts-df/support_intel.asp | 28 | http://support.intel.com/support/go/network/adapter/home.htm |
| 62 | |||
| 63 | 29 | ||
| 64 | Command Line Parameters | 30 | Command Line Parameters |
| 65 | ======================= | 31 | ======================= |
| 66 | 32 | ||
| 67 | If the driver is built as a module, the following optional parameters | ||
| 68 | are used by entering them on the command line with the modprobe command | ||
| 69 | using this syntax: | ||
| 70 | |||
| 71 | modprobe e1000 [<option>=<VAL1>,<VAL2>,...] | ||
| 72 | |||
| 73 | For example, with two PRO/1000 PCI adapters, entering: | ||
| 74 | |||
| 75 | modprobe e1000 TxDescriptors=80,128 | ||
| 76 | |||
| 77 | loads the e1000 driver with 80 TX descriptors for the first adapter and | ||
| 78 | 128 TX descriptors for the second adapter. | ||
| 79 | |||
| 80 | The default value for each parameter is generally the recommended setting, | 33 | The default value for each parameter is generally the recommended setting, |
| 81 | unless otherwise noted. | 34 | unless otherwise noted. |
| 82 | 35 | ||
| @@ -89,10 +42,6 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed | |||
| 89 | parameters, see the application note at: | 42 | parameters, see the application note at: |
| 90 | http://www.intel.com/design/network/applnots/ap450.htm | 43 | http://www.intel.com/design/network/applnots/ap450.htm |
| 91 | 44 | ||
| 92 | A descriptor describes a data buffer and attributes related to | ||
| 93 | the data buffer. This information is accessed by the hardware. | ||
| 94 | |||
| 95 | |||
| 96 | AutoNeg | 45 | AutoNeg |
| 97 | ------- | 46 | ------- |
| 98 | (Supported only on adapters with copper connections) | 47 | (Supported only on adapters with copper connections) |
| @@ -106,7 +55,6 @@ Duplex parameters must not be specified. | |||
| 106 | NOTE: Refer to the Speed and Duplex section of this readme for more | 55 | NOTE: Refer to the Speed and Duplex section of this readme for more |
| 107 | information on the AutoNeg parameter. | 56 | information on the AutoNeg parameter. |
| 108 | 57 | ||
| 109 | |||
| 110 | Duplex | 58 | Duplex |
| 111 | ------ | 59 | ------ |
| 112 | (Supported only on adapters with copper connections) | 60 | (Supported only on adapters with copper connections) |
| @@ -119,7 +67,6 @@ set to auto-negotiate, the board auto-detects the correct duplex. If the | |||
| 119 | link partner is forced (either full or half), Duplex defaults to half- | 67 | link partner is forced (either full or half), Duplex defaults to half- |
| 120 | duplex. | 68 | duplex. |
| 121 | 69 | ||
| 122 | |||
| 123 | FlowControl | 70 | FlowControl |
| 124 | ----------- | 71 | ----------- |
| 125 | Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) | 72 | Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) |
| @@ -128,16 +75,16 @@ Default Value: Reads flow control settings from the EEPROM | |||
| 128 | This parameter controls the automatic generation (Tx) and response (Rx) | 75 | This parameter controls the automatic generation (Tx) and response (Rx) |
| 129 | to Ethernet PAUSE frames. | 76 | to Ethernet PAUSE frames. |
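As a hedged illustration only (the values follow the range above, and the per-adapter comma syntax matches the other module examples in this file), flow control could be forced to Rx&Tx on a first port and disabled on a second at load time:

   modprobe e1000 FlowControl=3,0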
| 130 | 77 | ||
| 131 | |||
| 132 | InterruptThrottleRate | 78 | InterruptThrottleRate |
| 133 | --------------------- | 79 | --------------------- |
| 134 | (not supported on Intel(R) 82542, 82543 or 82544-based adapters) | 80 | (not supported on Intel(R) 82542, 82543 or 82544-based adapters) |
| 135 | Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative) | 81 | Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, |
| 82 | 4=simplified balancing) | ||
| 136 | Default Value: 3 | 83 | Default Value: 3 |
| 137 | 84 | ||
| 138 | The driver can limit the number of interrupts per second that the adapter | 85 | The driver can limit the number of interrupts per second that the adapter |
| 139 | will generate for incoming packets. It does this by writing a value to the | 86 | will generate for incoming packets. It does this by writing a value to the |
| 140 | adapter that is based on the maximum number of interrupts that the adapter | 87 | adapter that is based on the maximum number of interrupts that the adapter |
| 141 | will generate per second. | 88 | will generate per second. |
| 142 | 89 | ||
| 143 | Setting InterruptThrottleRate to a value greater or equal to 100 | 90 | Setting InterruptThrottleRate to a value greater or equal to 100 |
| @@ -146,37 +93,43 @@ per second, even if more packets have come in. This reduces interrupt | |||
| 146 | load on the system and can lower CPU utilization under heavy load, | 93 | load on the system and can lower CPU utilization under heavy load, |
| 147 | but will increase latency as packets are not processed as quickly. | 94 | but will increase latency as packets are not processed as quickly. |
| 148 | 95 | ||
| 149 | The default behaviour of the driver previously assumed a static | 96 | The default behaviour of the driver previously assumed a static |
| 150 | InterruptThrottleRate value of 8000, providing a good fallback value for | 97 | InterruptThrottleRate value of 8000, providing a good fallback value for |
| 151 | all traffic types, but lacking in small packet performance and latency. | 98 | all traffic types, but lacking in small packet performance and latency. |
| 152 | The hardware can handle many more small packets per second however, and | 99 | The hardware can handle many more small packets per second however, and |
| 153 | for this reason an adaptive interrupt moderation algorithm was implemented. | 100 | for this reason an adaptive interrupt moderation algorithm was implemented. |
| 154 | 101 | ||
| 155 | Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which | 102 | Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which |
| 156 | it dynamically adjusts the InterruptThrottleRate value based on the traffic | 103 | it dynamically adjusts the InterruptThrottleRate value based on the traffic |
| 157 | that it receives. After determining the type of incoming traffic in the last | 104 | that it receives. After determining the type of incoming traffic in the last |
| 158 | timeframe, it will adjust the InterruptThrottleRate to an appropriate value | 105 | timeframe, it will adjust the InterruptThrottleRate to an appropriate value |
| 159 | for that traffic. | 106 | for that traffic. |
| 160 | 107 | ||
| 161 | The algorithm classifies the incoming traffic every interval into | 108 | The algorithm classifies the incoming traffic every interval into |
| 162 | classes. Once the class is determined, the InterruptThrottleRate value is | 109 | classes. Once the class is determined, the InterruptThrottleRate value is |
| 163 | adjusted to suit that traffic type the best. There are three classes defined: | 110 | adjusted to suit that traffic type the best. There are three classes defined: |
| 164 | "Bulk traffic", for large amounts of packets of normal size; "Low latency", | 111 | "Bulk traffic", for large amounts of packets of normal size; "Low latency", |
| 165 | for small amounts of traffic and/or a significant percentage of small | 112 | for small amounts of traffic and/or a significant percentage of small |
| 166 | packets; and "Lowest latency", for almost completely small packets or | 113 | packets; and "Lowest latency", for almost completely small packets or |
| 167 | minimal traffic. | 114 | minimal traffic. |
| 168 | 115 | ||
| 169 | In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 | 116 | In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 |
| 170 | for traffic that falls in class "Bulk traffic". If traffic falls in the "Low | 117 | for traffic that falls in class "Bulk traffic". If traffic falls in the "Low |
| 171 | latency" or "Lowest latency" class, the InterruptThrottleRate is increased | 118 | latency" or "Lowest latency" class, the InterruptThrottleRate is increased |
| 172 | stepwise to 20000. This default mode is suitable for most applications. | 119 | stepwise to 20000. This default mode is suitable for most applications. |
| 173 | 120 | ||
| 174 | For situations where low latency is vital such as cluster or | 121 | For situations where low latency is vital such as cluster or |
| 175 | grid computing, the algorithm can reduce latency even more when | 122 | grid computing, the algorithm can reduce latency even more when |
| 176 | InterruptThrottleRate is set to mode 1. In this mode, which operates | 123 | InterruptThrottleRate is set to mode 1. In this mode, which operates |
| 177 | the same as mode 3, the InterruptThrottleRate will be increased stepwise to | 124 | the same as mode 3, the InterruptThrottleRate will be increased stepwise to |
| 178 | 70000 for traffic in class "Lowest latency". | 125 | 70000 for traffic in class "Lowest latency". |
| 179 | 126 | ||
| 127 | In simplified mode the interrupt rate is based on the ratio of Tx and | ||
| 128 | Rx traffic. If the bytes per second rate is approximately equal, the | ||
| 129 | interrupt rate will drop as low as 2000 interrupts per second. If the | ||
| 130 | traffic is mostly transmit or mostly receive, the interrupt rate could | ||
| 131 | be as high as 8000. | ||
| 132 | |||
| 180 | Setting InterruptThrottleRate to 0 turns off any interrupt moderation | 133 | Setting InterruptThrottleRate to 0 turns off any interrupt moderation |
| 181 | and may improve small packet latency, but is generally not suitable | 134 | and may improve small packet latency, but is generally not suitable |
| 182 | for bulk throughput traffic. | 135 | for bulk throughput traffic. |
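As an illustrative sketch rather than a tuning recommendation, the throttle mode can be selected per adapter when the module is loaded, for example dynamic mode on the first port and a fixed cap of 8000 interrupts/sec on the second:

   modprobe e1000 InterruptThrottleRate=1,8000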
| @@ -212,8 +165,6 @@ NOTE: When e1000 is loaded with default settings and multiple adapters | |||
| 212 | be platform-specific. If CPU utilization is not a concern, use | 165 | be platform-specific. If CPU utilization is not a concern, use |
| 213 | RX_POLLING (NAPI) and default driver settings. | 166 | RX_POLLING (NAPI) and default driver settings. |
| 214 | 167 | ||
| 215 | |||
| 216 | |||
| 217 | RxDescriptors | 168 | RxDescriptors |
| 218 | ------------- | 169 | ------------- |
| 219 | Valid Range: 80-256 for 82542 and 82543-based adapters | 170 | Valid Range: 80-256 for 82542 and 82543-based adapters |
| @@ -225,15 +176,14 @@ by the driver. Increasing this value allows the driver to buffer more | |||
| 225 | incoming packets, at the expense of increased system memory utilization. | 176 | incoming packets, at the expense of increased system memory utilization. |
| 226 | 177 | ||
| 227 | Each descriptor is 16 bytes. A receive buffer is also allocated for each | 178 | Each descriptor is 16 bytes. A receive buffer is also allocated for each |
| 228 | descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending | 179 | descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending |
| 229 | on the MTU setting. The maximum MTU size is 16110. | 180 | on the MTU setting. The maximum MTU size is 16110. |
| 230 | 181 | ||
| 231 | NOTE: MTU designates the frame size. It only needs to be set for Jumbo | 182 | NOTE: MTU designates the frame size. It only needs to be set for Jumbo |
| 232 | Frames. Depending on the available system resources, the request | 183 | Frames. Depending on the available system resources, the request |
| 233 | for a higher number of receive descriptors may be denied. In this | 184 | for a higher number of receive descriptors may be denied. In this |
| 234 | case, use a lower number. | 185 | case, use a lower number. |
| 235 | 186 | ||
| 236 | |||
| 237 | RxIntDelay | 187 | RxIntDelay |
| 238 | ---------- | 188 | ---------- |
| 239 | Valid Range: 0-65535 (0=off) | 189 | Valid Range: 0-65535 (0=off) |
| @@ -254,7 +204,6 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may | |||
| 254 | restoring the network connection. To eliminate the potential | 204 | restoring the network connection. To eliminate the potential |
| 255 | for the hang ensure that RxIntDelay is set to 0. | 205 | for the hang ensure that RxIntDelay is set to 0. |
| 256 | 206 | ||
| 257 | |||
| 258 | RxAbsIntDelay | 207 | RxAbsIntDelay |
| 259 | ------------- | 208 | ------------- |
| 260 | (This parameter is supported only on 82540, 82545 and later adapters.) | 209 | (This parameter is supported only on 82540, 82545 and later adapters.) |
| @@ -268,7 +217,6 @@ packet is received within the set amount of time. Proper tuning, | |||
| 268 | along with RxIntDelay, may improve traffic throughput in specific network | 217 | along with RxIntDelay, may improve traffic throughput in specific network |
| 269 | conditions. | 218 | conditions. |
| 270 | 219 | ||
| 271 | |||
| 272 | Speed | 220 | Speed |
| 273 | ----- | 221 | ----- |
| 274 | (This parameter is supported only on adapters with copper connections.) | 222 | (This parameter is supported only on adapters with copper connections.) |
| @@ -280,7 +228,6 @@ Speed forces the line speed to the specified value in megabits per second | |||
| 280 | partner is set to auto-negotiate, the board will auto-detect the correct | 228 | partner is set to auto-negotiate, the board will auto-detect the correct |
| 281 | speed. Duplex should also be set when Speed is set to either 10 or 100. | 229 | speed. Duplex should also be set when Speed is set to either 10 or 100. |
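A hedged example, assuming the conventional e1000 encoding of Duplex=2 for full duplex (the same encoding used by the distribution-configuration example removed further down), forcing 100 Mbps full duplex at load time:

   modprobe e1000 Speed=100 Duplex=2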
| 282 | 230 | ||
| 283 | |||
| 284 | TxDescriptors | 231 | TxDescriptors |
| 285 | ------------- | 232 | ------------- |
| 286 | Valid Range: 80-256 for 82542 and 82543-based adapters | 233 | Valid Range: 80-256 for 82542 and 82543-based adapters |
| @@ -295,6 +242,36 @@ NOTE: Depending on the available system resources, the request for a | |||
| 295 | higher number of transmit descriptors may be denied. In this case, | 242 | higher number of transmit descriptors may be denied. In this case, |
| 296 | use a lower number. | 243 | use a lower number. |
| 297 | 244 | ||
| 245 | TxDescriptorStep | ||
| 246 | ---------------- | ||
| 247 | Valid Range: 1 (use every Tx Descriptor) | ||
| 248 | 4 (use every 4th Tx Descriptor) | ||
| 249 | |||
| 250 | Default Value: 1 (use every Tx Descriptor) | ||
| 251 | |||
| 252 | On certain non-Intel architectures, it has been observed that intense TX | ||
| 253 | traffic bursts of short packets may result in an improper descriptor | ||
| 254 | writeback. If this occurs, the driver will report a "TX Timeout" and reset | ||
| 255 | the adapter, after which the transmit flow will restart, though data may | ||
| 256 | have stalled for as much as 10 seconds before it resumes. | ||
| 257 | |||
| 258 | The improper writeback does not occur on the first descriptor in a system | ||
| 259 | memory cache-line, which is typically 32 bytes, or 4 descriptors long. | ||
| 260 | |||
| 261 | Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors | ||
| 262 | are aligned to the start of a system memory cache line, and so this problem | ||
| 263 | will not occur. | ||
| 264 | |||
| 265 | NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of | ||
| 266 | TxDescriptors available for transmits to 1/4 of the normal allocation. | ||
| 267 | This has a possible negative performance impact, which may be | ||
| 268 | compensated for by allocating more descriptors using the TxDescriptors | ||
| 269 | module parameter. | ||
| 270 | |||
| 271 | There are other conditions which may result in "TX Timeout", which will | ||
| 272 | not be resolved by the use of the TxDescriptorStep parameter. As the | ||
| 273 | issue addressed by this parameter has never been observed on Intel | ||
| 274 | Architecture platforms, it should not be used on Intel platforms. | ||
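A sketch of the workaround described above, pairing the descriptor step with a larger ring to offset the 1/4 reduction (the count shown is illustrative only and must stay within the adapter's valid range):

   modprobe e1000 TxDescriptorStep=4 TxDescriptors=256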
| 298 | 275 | ||
| 299 | TxIntDelay | 276 | TxIntDelay |
| 300 | ---------- | 277 | ---------- |
| @@ -307,7 +284,6 @@ efficiency if properly tuned for specific network traffic. If the | |||
| 307 | system is reporting dropped transmits, this value may be set too high | 284 | system is reporting dropped transmits, this value may be set too high |
| 308 | causing the driver to run out of available transmit descriptors. | 285 | causing the driver to run out of available transmit descriptors. |
| 309 | 286 | ||
| 310 | |||
| 311 | TxAbsIntDelay | 287 | TxAbsIntDelay |
| 312 | ------------- | 288 | ------------- |
| 313 | (This parameter is supported only on 82540, 82545 and later adapters.) | 289 | (This parameter is supported only on 82540, 82545 and later adapters.) |
| @@ -330,6 +306,35 @@ Default Value: 1 | |||
| 330 | A value of '1' indicates that the driver should enable IP checksum | 306 | A value of '1' indicates that the driver should enable IP checksum |
| 331 | offload for received packets (both UDP and TCP) to the adapter hardware. | 307 | offload for received packets (both UDP and TCP) to the adapter hardware. |
| 332 | 308 | ||
| 309 | Copybreak | ||
| 310 | --------- | ||
| 311 | Valid Range: 0-xxxxxxx (0=off) | ||
| 312 | Default Value: 256 | ||
| 313 | Usage: insmod e1000.ko copybreak=128 | ||
| 314 | |||
| 315 | The driver copies all packets of this size or smaller to a fresh Rx | ||
| 316 | buffer before handing them up the stack. | ||
| 317 | |||
| 318 | This parameter is different from other parameters in that it is a | ||
| 319 | single (not 1,1,1 etc.) parameter applied to all driver instances, and | ||
| 320 | it is also available at runtime at | ||
| 321 | /sys/module/e1000/parameters/copybreak | ||
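For example (a sketch only), copybreak can be inspected or changed at runtime through the sysfs path above, without reloading the driver:

   cat /sys/module/e1000/parameters/copybreak
   echo 128 > /sys/module/e1000/parameters/copybreak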
| 322 | |||
| 323 | SmartPowerDownEnable | ||
| 324 | -------------------- | ||
| 325 | Valid Range: 0-1 | ||
| 326 | Default Value: 0 (disabled) | ||
| 327 | |||
| 328 | Allows the PHY to turn off in lower power states. The user can set this | ||
| 329 | parameter in supported chipsets. | ||
| 330 | |||
| 331 | KumeranLockLoss | ||
| 332 | --------------- | ||
| 333 | Valid Range: 0-1 | ||
| 334 | Default Value: 1 (enabled) | ||
| 335 | |||
| 336 | This workaround skips resetting the PHY at shutdown for the initial | ||
| 337 | silicon releases of ICH8 systems. | ||
| 333 | 338 | ||
| 334 | Speed and Duplex Configuration | 339 | Speed and Duplex Configuration |
| 335 | ============================== | 340 | ============================== |
| @@ -385,40 +390,9 @@ If the link partner is forced to a specific speed and duplex, then this | |||
| 385 | parameter should not be used. Instead, use the Speed and Duplex parameters | 390 | parameter should not be used. Instead, use the Speed and Duplex parameters |
| 386 | previously mentioned to force the adapter to the same speed and duplex. | 391 | previously mentioned to force the adapter to the same speed and duplex. |
| 387 | 392 | ||
| 388 | |||
| 389 | Additional Configurations | 393 | Additional Configurations |
| 390 | ========================= | 394 | ========================= |
| 391 | 395 | ||
| 392 | Configuring the Driver on Different Distributions | ||
| 393 | ------------------------------------------------- | ||
| 394 | Configuring a network driver to load properly when the system is started | ||
| 395 | is distribution dependent. Typically, the configuration process involves | ||
| 396 | adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well | ||
| 397 | as editing other system startup scripts and/or configuration files. Many | ||
| 398 | popular Linux distributions ship with tools to make these changes for you. | ||
| 399 | To learn the proper way to configure a network device for your system, | ||
| 400 | refer to your distribution documentation. If during this process you are | ||
| 401 | asked for the driver or module name, the name for the Linux Base Driver | ||
| 402 | for the Intel(R) PRO/1000 Family of Adapters is e1000. | ||
| 403 | |||
| 404 | As an example, if you install the e1000 driver for two PRO/1000 adapters | ||
| 405 | (eth0 and eth1) and set the speed and duplex to 10full and 100half, add | ||
| 406 | the following to modules.conf or modprobe.conf: | ||
| 407 | |||
| 408 | alias eth0 e1000 | ||
| 409 | alias eth1 e1000 | ||
| 410 | options e1000 Speed=10,100 Duplex=2,1 | ||
| 411 | |||
| 412 | Viewing Link Messages | ||
| 413 | --------------------- | ||
| 414 | Link messages will not be displayed to the console if the distribution is | ||
| 415 | restricting system messages. In order to see network driver link messages | ||
| 416 | on your console, set dmesg to eight by entering the following: | ||
| 417 | |||
| 418 | dmesg -n 8 | ||
| 419 | |||
| 420 | NOTE: This setting is not saved across reboots. | ||
| 421 | |||
| 422 | Jumbo Frames | 396 | Jumbo Frames |
| 423 | ------------ | 397 | ------------ |
| 424 | Jumbo Frames support is enabled by changing the MTU to a value larger than | 398 | Jumbo Frames support is enabled by changing the MTU to a value larger than |
| @@ -437,9 +411,11 @@ Additional Configurations | |||
| 437 | setting in a different location. | 411 | setting in a different location. |
| 438 | 412 | ||
| 439 | Notes: | 413 | Notes: |
| 440 | 414 | Degradation in throughput performance may be observed in some Jumbo frames | |
| 441 | - To enable Jumbo Frames, increase the MTU size on the interface beyond | 415 | environments. If this is observed, increasing the application's socket buffer |
| 442 | 1500. | 416 | size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. |
| 417 | See the specific application manual and /usr/src/linux*/Documentation/ | ||
| 418 | networking/ip-sysctl.txt for more details. | ||
| 443 | 419 | ||
| 444 | - The maximum MTU setting for Jumbo Frames is 16110. This value coincides | 420 | - The maximum MTU setting for Jumbo Frames is 16110. This value coincides |
| 445 | with the maximum Jumbo Frames size of 16128. | 421 | with the maximum Jumbo Frames size of 16128. |
| @@ -447,40 +423,11 @@ Additional Configurations | |||
| 447 | - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or | 423 | - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or |
| 448 | loss of link. | 424 | loss of link. |
| 449 | 425 | ||
| 450 | - Some Intel gigabit adapters that support Jumbo Frames have a frame size | ||
| 451 | limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes. | ||
| 452 | The adapters with this limitation are based on the Intel(R) 82571EB, | ||
| 453 | 82572EI, 82573L and 80003ES2LAN controller. These correspond to the | ||
| 454 | following product names: | ||
| 455 | Intel(R) PRO/1000 PT Server Adapter | ||
| 456 | Intel(R) PRO/1000 PT Desktop Adapter | ||
| 457 | Intel(R) PRO/1000 PT Network Connection | ||
| 458 | Intel(R) PRO/1000 PT Dual Port Server Adapter | ||
| 459 | Intel(R) PRO/1000 PT Dual Port Network Connection | ||
| 460 | Intel(R) PRO/1000 PF Server Adapter | ||
| 461 | Intel(R) PRO/1000 PF Network Connection | ||
| 462 | Intel(R) PRO/1000 PF Dual Port Server Adapter | ||
| 463 | Intel(R) PRO/1000 PB Server Connection | ||
| 464 | Intel(R) PRO/1000 PL Network Connection | ||
| 465 | Intel(R) PRO/1000 EB Network Connection with I/O Acceleration | ||
| 466 | Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration | ||
| 467 | Intel(R) PRO/1000 PT Quad Port Server Adapter | ||
| 468 | |||
| 469 | - Adapters based on the Intel(R) 82542 and 82573V/E controller do not | 426 | - Adapters based on the Intel(R) 82542 and 82573V/E controller do not |
| 470 | support Jumbo Frames. These correspond to the following product names: | 427 | support Jumbo Frames. These correspond to the following product names: |
| 471 | Intel(R) PRO/1000 Gigabit Server Adapter | 428 | Intel(R) PRO/1000 Gigabit Server Adapter |
| 472 | Intel(R) PRO/1000 PM Network Connection | 429 | Intel(R) PRO/1000 PM Network Connection |
| 473 | 430 | ||
| 474 | - The following adapters do not support Jumbo Frames: | ||
| 475 | Intel(R) 82562V 10/100 Network Connection | ||
| 476 | Intel(R) 82566DM Gigabit Network Connection | ||
| 477 | Intel(R) 82566DC Gigabit Network Connection | ||
| 478 | Intel(R) 82566MM Gigabit Network Connection | ||
| 479 | Intel(R) 82566MC Gigabit Network Connection | ||
| 480 | Intel(R) 82562GT 10/100 Network Connection | ||
| 481 | Intel(R) 82562G 10/100 Network Connection | ||
| 482 | |||
| 483 | |||
| 484 | Ethtool | 431 | Ethtool |
| 485 | ------- | 432 | ------- |
| 486 | The driver utilizes the ethtool interface for driver configuration and | 433 | The driver utilizes the ethtool interface for driver configuration and |
| @@ -490,142 +437,14 @@ Additional Configurations | |||
| 490 | The latest release of ethtool can be found from | 437 | The latest release of ethtool can be found from |
| 491 | http://sourceforge.net/projects/gkernel. | 438 | http://sourceforge.net/projects/gkernel. |
| 492 | 439 | ||
| 493 | NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support | ||
| 494 | for a more complete ethtool feature set can be enabled by upgrading | ||
| 495 | ethtool to ethtool-1.8.1. | ||
| 496 | |||
| 497 | Enabling Wake on LAN* (WoL) | 440 | Enabling Wake on LAN* (WoL) |
| 498 | --------------------------- | 441 | --------------------------- |
| 499 | WoL is configured through the Ethtool* utility. Ethtool is included with | 442 | WoL is configured through the Ethtool* utility. |
| 500 | all versions of Red Hat after Red Hat 7.2. For other Linux distributions, | ||
| 501 | download and install Ethtool from the following website: | ||
| 502 | http://sourceforge.net/projects/gkernel. | ||
| 503 | |||
| 504 | For instructions on enabling WoL with Ethtool, refer to the website listed | ||
| 505 | above. | ||
| 506 | 443 | ||
| 507 | WoL will be enabled on the system during the next shut down or reboot. | 444 | WoL will be enabled on the system during the next shut down or reboot. |
| 508 | For this driver version, in order to enable WoL, the e1000 driver must be | 445 | For this driver version, in order to enable WoL, the e1000 driver must be |
| 509 | loaded when shutting down or rebooting the system. | 446 | loaded when shutting down or rebooting the system. |
| 510 | 447 | ||
| 511 | Wake On LAN is only supported on port A for the following devices: | ||
| 512 | Intel(R) PRO/1000 PT Dual Port Network Connection | ||
| 513 | Intel(R) PRO/1000 PT Dual Port Server Connection | ||
| 514 | Intel(R) PRO/1000 PT Dual Port Server Adapter | ||
| 515 | Intel(R) PRO/1000 PF Dual Port Server Adapter | ||
| 516 | Intel(R) PRO/1000 PT Quad Port Server Adapter | ||
| 517 | |||
| 518 | NAPI | ||
| 519 | ---- | ||
| 520 | NAPI (Rx polling mode) is enabled in the e1000 driver. | ||
| 521 | |||
| 522 | See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI. | ||
| 523 | |||
| 524 | |||
| 525 | Known Issues | ||
| 526 | ============ | ||
| 527 | |||
| 528 | Dropped Receive Packets on Half-duplex 10/100 Networks | ||
| 529 | ------------------------------------------------------ | ||
| 530 | If you have an Intel PCI Express adapter running at 10 Mbps or 100 Mbps, half- | ||
| 531 | duplex, you may observe occasional dropped receive packets. There are no | ||
| 532 | workarounds for this problem in this network configuration. The network must | ||
| 533 | be updated to operate in full-duplex, and/or 1000 Mbps only. | ||
| 534 | |||
| 535 | Jumbo Frames System Requirement | ||
| 536 | ------------------------------- | ||
| 537 | Memory allocation failures have been observed on Linux systems with 64 MB | ||
| 538 | of RAM or less that are running Jumbo Frames. If you are using Jumbo | ||
| 539 | Frames, your system may require more than the advertised minimum | ||
| 540 | requirement of 64 MB of system memory. | ||
| 541 | |||
| 542 | Performance Degradation with Jumbo Frames | ||
| 543 | ----------------------------------------- | ||
| 544 | Degradation in throughput performance may be observed in some Jumbo frames | ||
| 545 | environments. If this is observed, increasing the application's socket | ||
| 546 | buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values | ||
| 547 | may help. See the specific application manual and | ||
| 548 | /usr/src/linux*/Documentation/ | ||
| 549 | networking/ip-sysctl.txt for more details. | ||
| 550 | |||
| 551 | Jumbo Frames on Foundry BigIron 8000 switch | ||
| 552 | ------------------------------------------- | ||
| 553 | There is a known issue using Jumbo frames when connected to a Foundry | ||
| 554 | BigIron 8000 switch. This is a 3rd party limitation. If you experience | ||
| 555 | loss of packets, lower the MTU size. | ||
| 556 | |||
| 557 | Allocating Rx Buffers when Using Jumbo Frames | ||
| 558 | --------------------------------------------- | ||
| 559 | Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if | ||
| 560 | the available memory is heavily fragmented. This issue may be seen with PCI-X | ||
| 561 | adapters or with packet split disabled. This can be reduced or eliminated | ||
| 562 | by changing the amount of available memory for receive buffer allocation, by | ||
| 563 | increasing /proc/sys/vm/min_free_kbytes. | ||
| 564 | |||
| 565 | Multiple Interfaces on Same Ethernet Broadcast Network | ||
| 566 | ------------------------------------------------------ | ||
| 567 | Due to the default ARP behavior on Linux, it is not possible to have | ||
| 568 | one system on two IP networks in the same Ethernet broadcast domain | ||
| 569 | (non-partitioned switch) behave as expected. All Ethernet interfaces | ||
| 570 | will respond to IP traffic for any IP address assigned to the system. | ||
| 571 | This results in unbalanced receive traffic. | ||
| 572 | |||
| 573 | If you have multiple interfaces in a server, either turn on ARP | ||
| 574 | filtering by entering: | ||
| 575 | |||
| 576 | echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter | ||
| 577 | (this only works if your kernel's version is higher than 2.4.5), | ||
| 578 | |||
| 579 | NOTE: This setting is not saved across reboots. The configuration | ||
| 580 | change can be made permanent by adding the line: | ||
| 581 | net.ipv4.conf.all.arp_filter = 1 | ||
| 582 | to the file /etc/sysctl.conf | ||
| 583 | |||
| 584 | or, | ||
| 585 | |||
| 586 | install the interfaces in separate broadcast domains (either in | ||
| 587 | different switches or in a switch partitioned to VLANs). | ||
| 588 | |||
| 589 | 82541/82547 can't link or are slow to link with some link partners | ||
| 590 | ----------------------------------------------------------------- | ||
| 591 | There is a known compatibility issue with 82541/82547 and some | ||
| 592 | low-end switches where the link will not be established, or will | ||
| 593 | be slow to establish. In particular, these switches are known to | ||
| 594 | be incompatible with 82541/82547: | ||
| 595 | |||
| 596 | Planex FXG-08TE | ||
| 597 | I-O Data ETG-SH8 | ||
| 598 | |||
| 599 | To workaround this issue, the driver can be compiled with an override | ||
| 600 | of the PHY's master/slave setting. Forcing master or forcing slave | ||
| 601 | mode will improve time-to-link. | ||
| 602 | |||
| 603 | # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n> | ||
| 604 | |||
| 605 | Where <n> is: | ||
| 606 | |||
| 607 | 0 = Hardware default | ||
| 608 | 1 = Master mode | ||
| 609 | 2 = Slave mode | ||
| 610 | 3 = Auto master/slave | ||
| 611 | |||
| 612 | Disable rx flow control with ethtool | ||
| 613 | ------------------------------------ | ||
| 614 | In order to disable receive flow control using ethtool, you must turn | ||
| 615 | off auto-negotiation on the same command line. | ||
| 616 | |||
| 617 | For example: | ||
| 618 | |||
| 619 | ethtool -A eth? autoneg off rx off | ||
| 620 | |||
| 621 | Unplugging network cable while ethtool -p is running | ||
| 622 | ---------------------------------------------------- | ||
| 623 | In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging | ||
| 624 | the network cable while ethtool -p is running will cause the system to | ||
| 625 | become unresponsive to keyboard commands, except for control-alt-delete. | ||
| 626 | Restarting the system appears to be the only remedy. | ||
| 627 | |||
| 628 | |||
| 629 | Support | 448 | Support |
| 630 | ======= | 449 | ======= |
| 631 | 450 | ||
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
new file mode 100644
index 000000000000..6aa048badf32
--- /dev/null
+++ b/Documentation/networking/e1000e.txt
| @@ -0,0 +1,302 @@ | |||
| 1 | Linux* Driver for Intel(R) Network Connection | ||
| 2 | =============================================================== | ||
| 3 | |||
| 4 | Intel Gigabit Linux driver. | ||
| 5 | Copyright(c) 1999 - 2010 Intel Corporation. | ||
| 6 | |||
| 7 | Contents | ||
| 8 | ======== | ||
| 9 | |||
| 10 | - Identifying Your Adapter | ||
| 11 | - Command Line Parameters | ||
| 12 | - Additional Configurations | ||
| 13 | - Support | ||
| 14 | |||
| 15 | Identifying Your Adapter | ||
| 16 | ======================== | ||
| 17 | |||
| 18 | The e1000e driver supports all PCI Express Intel(R) Gigabit Network | ||
| 19 | Connections, except those that are 82575, 82576 and 82580-based*. | ||
| 20 | |||
| 21 | * NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by | ||
| 22 | the e1000 driver, not the e1000e driver due to the 82546 part being used | ||
| 23 | behind a PCI Express bridge. | ||
| 24 | |||
| 25 | For more information on how to identify your adapter, go to the Adapter & | ||
| 26 | Driver ID Guide at: | ||
| 27 | |||
| 28 | http://support.intel.com/support/go/network/adapter/idguide.htm | ||
| 29 | |||
| 30 | For the latest Intel network drivers for Linux, refer to the following | ||
| 31 | website. In the search field, enter your adapter name or type, or use the | ||
| 32 | networking link on the left to search for your adapter: | ||
| 33 | |||
| 34 | http://support.intel.com/support/go/network/adapter/home.htm | ||
| 35 | |||
| 36 | Command Line Parameters | ||
| 37 | ======================= | ||
| 38 | |||
| 39 | The default value for each parameter is generally the recommended setting, | ||
| 40 | unless otherwise noted. | ||
| 41 | |||
| 42 | NOTES: For more information about the InterruptThrottleRate, | ||
| 43 | RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay | ||
| 44 | parameters, see the application note at: | ||
| 45 | http://www.intel.com/design/network/applnots/ap450.htm | ||
| 46 | |||
| 47 | InterruptThrottleRate | ||
| 48 | --------------------- | ||
| 49 | Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, | ||
| 50 | 4=simplified balancing) | ||
| 51 | Default Value: 3 | ||
| 52 | |||
| 53 | The driver can limit the number of interrupts per second that the adapter | ||
| 54 | will generate for incoming packets. It does this by writing a value to the | ||
| 55 | adapter that is based on the maximum number of interrupts that the adapter | ||
| 56 | will generate per second. | ||
| 57 | |||
| 58 | Setting InterruptThrottleRate to a value greater or equal to 100 | ||
| 59 | will program the adapter to send out a maximum of that many interrupts | ||
| 60 | per second, even if more packets have come in. This reduces interrupt | ||
| 61 | load on the system and can lower CPU utilization under heavy load, | ||
| 62 | but will increase latency as packets are not processed as quickly. | ||
| 63 | |||
| 64 | The driver has two adaptive modes (setting 1 or 3) in which | ||
| 65 | it dynamically adjusts the InterruptThrottleRate value based on the traffic | ||
| 66 | that it receives. After determining the type of incoming traffic in the last | ||
| 67 | timeframe, it will adjust the InterruptThrottleRate to an appropriate value | ||
| 68 | for that traffic. | ||
| 69 | |||
| 70 | The algorithm classifies the incoming traffic every interval into | ||
| 71 | classes. Once the class is determined, the InterruptThrottleRate value is | ||
| 72 | adjusted to suit that traffic type the best. There are three classes defined: | ||
| 73 | "Bulk traffic", for large amounts of packets of normal size; "Low latency", | ||
| 74 | for small amounts of traffic and/or a significant percentage of small | ||
| 75 | packets; and "Lowest latency", for almost completely small packets or | ||
| 76 | minimal traffic. | ||
| 77 | |||
| 78 | In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 | ||
| 79 | for traffic that falls in class "Bulk traffic". If traffic falls in the "Low | ||
| 80 | latency" or "Lowest latency" class, the InterruptThrottleRate is increased | ||
| 81 | stepwise to 20000. This default mode is suitable for most applications. | ||
| 82 | |||
| 83 | For situations where low latency is vital such as cluster or | ||
| 84 | grid computing, the algorithm can reduce latency even more when | ||
| 85 | InterruptThrottleRate is set to mode 1. In this mode, which operates | ||
| 86 | the same as mode 3, the InterruptThrottleRate will be increased stepwise to | ||
| 87 | 70000 for traffic in class "Lowest latency". | ||
| 88 | |||
| 89 | In simplified mode the interrupt rate is based on the ratio of Tx and | ||
| 90 | Rx traffic. If the bytes per second rate is approximately equal, the | ||
| 91 | interrupt rate will drop as low as 2000 interrupts per second. If the | ||
| 92 | traffic is mostly transmit or mostly receive, the interrupt rate could | ||
| 93 | be as high as 8000. | ||
| 94 | |||
| 95 | Setting InterruptThrottleRate to 0 turns off any interrupt moderation | ||
| 96 | and may improve small packet latency, but is generally not suitable | ||
| 97 | for bulk throughput traffic. | ||
| 98 | |||
| 99 | NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and | ||
| 100 | RxAbsIntDelay parameters. In other words, minimizing the receive | ||
| 101 | and/or transmit absolute delays does not force the controller to | ||
| 102 | generate more interrupts than what the Interrupt Throttle Rate | ||
| 103 | allows. | ||
| 104 | |||
| 105 | NOTE: When e1000e is loaded with default settings and multiple adapters | ||
| 106 | are in use simultaneously, the CPU utilization may increase non- | ||
| 107 | linearly. In order to limit the CPU utilization without impacting | ||
| 108 | the overall throughput, we recommend that you load the driver as | ||
| 109 | follows: | ||
| 110 | |||
| 111 | modprobe e1000e InterruptThrottleRate=3000,3000,3000 | ||
| 112 | |||
| 113 | This sets the InterruptThrottleRate to 3000 interrupts/sec for | ||
| 114 | the first, second, and third instances of the driver. The range | ||
| 115 | of 2000 to 3000 interrupts per second works on a majority of | ||
| 116 | systems and is a good starting point, but the optimal value will | ||
| 117 | be platform-specific. If CPU utilization is not a concern, use | ||
| 118 | RX_POLLING (NAPI) and default driver settings. | ||
| 119 | |||
| 120 | RxIntDelay | ||
| 121 | ---------- | ||
| 122 | Valid Range: 0-65535 (0=off) | ||
| 123 | Default Value: 0 | ||
| 124 | |||
| 125 | This value delays the generation of receive interrupts in units of 1.024 | ||
| 126 | microseconds. Receive interrupt reduction can improve CPU efficiency if | ||
| 127 | properly tuned for specific network traffic. Increasing this value adds | ||
| 128 | extra latency to frame reception and can end up decreasing the throughput | ||
| 129 | of TCP traffic. If the system is reporting dropped receives, this value | ||
| 130 | may be set too high, causing the driver to run out of available receive | ||
| 131 | descriptors. | ||
| 132 | |||
| 133 | CAUTION: When setting RxIntDelay to a value other than 0, adapters may | ||
| 134 | hang (stop transmitting) under certain network conditions. If | ||
| 135 | this occurs a NETDEV WATCHDOG message is logged in the system | ||
| 136 | event log. In addition, the controller is automatically reset, | ||
| 137 | restoring the network connection. To eliminate the potential | ||
| 138 | for the hang ensure that RxIntDelay is set to 0. | ||
| 139 | |||
| 140 | RxAbsIntDelay | ||
| 141 | ------------- | ||
| 142 | Valid Range: 0-65535 (0=off) | ||
| 143 | Default Value: 8 | ||
| 144 | |||
| 145 | This value, in units of 1.024 microseconds, limits the delay in which a | ||
| 146 | receive interrupt is generated. Useful only if RxIntDelay is non-zero, | ||
| 147 | this value ensures that an interrupt is generated after the initial | ||
| 148 | packet is received within the set amount of time. Proper tuning, | ||
| 149 | along with RxIntDelay, may improve traffic throughput in specific network | ||
| 150 | conditions. | ||
| 151 | |||
| 152 | TxIntDelay | ||
| 153 | ---------- | ||
| 154 | Valid Range: 0-65535 (0=off) | ||
| 155 | Default Value: 8 | ||
| 156 | |||
| 157 | This value delays the generation of transmit interrupts in units of | ||
| 158 | 1.024 microseconds. Transmit interrupt reduction can improve CPU | ||
| 159 | efficiency if properly tuned for specific network traffic. If the | ||
| 160 | system is reporting dropped transmits, this value may be set too high | ||
| 161 | causing the driver to run out of available transmit descriptors. | ||
| 162 | |||
| 163 | TxAbsIntDelay | ||
| 164 | ------------- | ||
| 165 | Valid Range: 0-65535 (0=off) | ||
| 166 | Default Value: 32 | ||
| 167 | |||
| 168 | This value, in units of 1.024 microseconds, limits the delay in which a | ||
| 169 | transmit interrupt is generated. Useful only if TxIntDelay is non-zero, | ||
| 170 | this value ensures that an interrupt is generated after the initial | ||
| 171 | packet is sent on the wire within the set amount of time. Proper tuning, | ||
| 172 | along with TxIntDelay, may improve traffic throughput in specific | ||
| 173 | network conditions. | ||
| 174 | |||
| 175 | Copybreak | ||
| 176 | --------- | ||
| 177 | Valid Range: 0-xxxxxxx (0=off) | ||
| 178 | Default Value: 256 | ||
| 179 | |||
| 180 | The driver copies all packets of this size or smaller to a fresh Rx | ||
| 181 | buffer before handing them up the stack. | ||
| 182 | |||
| 183 | This parameter is different from other parameters in that it is a | ||
| 184 | single (not 1,1,1 etc.) parameter applied to all driver instances, and | ||
| 185 | it is also available at runtime at | ||
| 186 | /sys/module/e1000e/parameters/copybreak | ||
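As with e1000, a hedged usage sketch is simply to set the value when the module is loaded:

   modprobe e1000e copybreak=128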
| 187 | |||
| 188 | SmartPowerDownEnable | ||
| 189 | -------------------- | ||
| 190 | Valid Range: 0-1 | ||
| 191 | Default Value: 0 (disabled) | ||
| 192 | |||
| 193 | Allows the PHY to turn off in lower power states. The user can set this | ||
| 194 | parameter in supported chipsets. | ||
| 195 | |||
| 196 | KumeranLockLoss | ||
| 197 | --------------- | ||
| 198 | Valid Range: 0-1 | ||
| 199 | Default Value: 1 (enabled) | ||
| 200 | |||
| 201 | This workaround skips resetting the PHY at shutdown for the initial | ||
| 202 | silicon releases of ICH8 systems. | ||
| 203 | |||
| 204 | IntMode | ||
| 205 | ------- | ||
| 206 | Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X) | ||
| 207 | Default Value: 2 | ||
| 208 | |||
| 209 | Allows changing the interrupt mode at module load time, without requiring a | ||
| 210 | recompile. If the driver load fails to enable a specific interrupt mode, the | ||
| 211 | driver will try other interrupt modes, from least to most compatible. The | ||
| 212 | interrupt order is MSI-X, MSI, Legacy. If specifying MSI (IntMode=1) | ||
| 213 | interrupts, only MSI and Legacy will be attempted. | ||
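A minimal load-time sketch, forcing MSI (per the encoding above) on a platform where MSI-X is unavailable or problematic:

   modprobe e1000e IntMode=1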
| 214 | |||
| 215 | CrcStripping | ||
| 216 | ------------ | ||
| 217 | Valid Range: 0-1 | ||
| 218 | Default Value: 1 (enabled) | ||
| 219 | |||
| 220 | Strip the CRC from received packets before sending up the network stack. If | ||
| 221 | you have a machine with a BMC enabled but cannot receive IPMI traffic after | ||
| 222 | loading or enabling the driver, try disabling this feature. | ||
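For instance, a hedged sketch of disabling CRC stripping at load time when BMC/IPMI traffic is lost:

   modprobe e1000e CrcStripping=0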
| 223 | |||
| 224 | WriteProtectNVM | ||
| 225 | --------------- | ||
| 226 | Valid Range: 0-1 | ||
| 227 | Default Value: 1 (enabled) | ||
| 228 | |||
| 229 | Set the hardware to ignore all write/erase cycles to the GbE region in the | ||
| 230 | ICHx NVM (non-volatile memory). This feature can be disabled by the | ||
| 231 | WriteProtectNVM module parameter (enabled by default) only after a hardware | ||
| 232 | reset, but the machine must be power cycled before trying to enable writes. | ||
| 233 | |||
| 234 | Note: the kernel boot option iomem=relaxed may need to be set if the kernel | ||
| 235 | was built with CONFIG_STRICT_DEVMEM=y and the root user wants to write the | ||
| 236 | NVM from user space via ethtool. | ||
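A hedged sketch of the sequence implied above: add iomem=relaxed to the kernel command line if CONFIG_STRICT_DEVMEM=y, power cycle the machine, then load the driver with write protection disabled:

   modprobe e1000e WriteProtectNVM=0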
| 237 | |||
| 238 | Additional Configurations | ||
| 239 | ========================= | ||
| 240 | |||
| 241 | Jumbo Frames | ||
| 242 | ------------ | ||
| 243 | Jumbo Frames support is enabled by changing the MTU to a value larger than | ||
| 244 | the default of 1500. Use the ifconfig command to increase the MTU size. | ||
| 245 | For example: | ||
| 246 | |||
| 247 | ifconfig eth<x> mtu 9000 up | ||
| 248 | |||
| 249 | This setting is not saved across reboots. | ||
| 250 | |||
| 251 | Notes: | ||
| 252 | |||
| 253 | - The maximum MTU setting for Jumbo Frames is 9216. This value coincides | ||
| 254 | with the maximum Jumbo Frames size of 9234 bytes. | ||
| 255 | |||
| 256 | - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in | ||
| 257 | poor performance or loss of link. | ||
| 258 | |||
| 259 | - Some adapters limit Jumbo Frames sized packets to a maximum of | ||
| 260 | 4096 bytes and some adapters do not support Jumbo Frames. | ||
| 261 | |||
| 262 | |||
| 263 | Ethtool | ||
| 264 | ------- | ||
| 265 | The driver utilizes the ethtool interface for driver configuration and | ||
| 266 | diagnostics, as well as displaying statistical information. We | ||
| 267 | strongly recommend downloading the latest version of Ethtool at: | ||
| 268 | |||
| 269 | http://sourceforge.net/projects/gkernel. | ||
| 270 | |||
| 271 | Speed and Duplex | ||
| 272 | ---------------- | ||
| 273 | Speed and Duplex are configured through the Ethtool* utility. For | ||
| 274 | instructions, refer to the Ethtool man page. | ||
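One common invocation, shown only as a sketch (eth<x> is a placeholder), forcing 100 Mbps full duplex with auto-negotiation off:

   ethtool -s eth<x> speed 100 duplex full autoneg off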
| 275 | |||
| 276 | Enabling Wake on LAN* (WoL) | ||
| 277 | --------------------------- | ||
| 278 | WoL is configured through the Ethtool* utility. For instructions on | ||
| 279 | enabling WoL with Ethtool, refer to the Ethtool man page. | ||
| 280 | |||
| 281 | WoL will be enabled on the system during the next shut down or reboot. | ||
| 282 | For this driver version, in order to enable WoL, the e1000e driver must be | ||
| 283 | loaded when shutting down or rebooting the system. | ||
| 284 | |||
| 285 | In most cases, Wake on LAN is only supported on port A of multiple-port | ||
| 286 | adapters. To verify whether a port supports Wake on LAN, run ethtool eth<X>. | ||
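A hedged example of that check and of enabling magic-packet wake (eth<x> is a placeholder):

   ethtool eth<x>
   ethtool -s eth<x> wol g

The first command reports the supported Wake-on modes; the second enables wake on a magic packet ("g").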
| 287 | |||
| 288 | |||
| 289 | Support | ||
| 290 | ======= | ||
| 291 | |||
| 292 | For general information, go to the Intel support website at: | ||
| 293 | |||
| 294 | www.intel.com/support/ | ||
| 295 | |||
| 296 | or the Intel Wired Networking project hosted by Sourceforge at: | ||
| 297 | |||
| 298 | http://sourceforge.net/projects/e1000 | ||
| 299 | |||
| 300 | If an issue is identified with the released source code on the supported | ||
| 301 | kernel with a supported adapter, email the specific information related | ||
| 302 | to the issue to e1000-devel@lists.sf.net | ||
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 19015de6725f..21dd5d15b6b4 100755..100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
| @@ -1,19 +1,16 @@ | |||
| 1 | Linux* Base Driver for Intel(R) Network Connection | 1 | Linux* Base Driver for Intel(R) Network Connection |
| 2 | ================================================== | 2 | ================================================== |
| 3 | 3 | ||
| 4 | November 24, 2009 | 4 | Intel Gigabit Linux driver. |
| 5 | Copyright(c) 1999 - 2010 Intel Corporation. | ||
| 5 | 6 | ||
| 6 | Contents | 7 | Contents |
| 7 | ======== | 8 | ======== |
| 8 | 9 | ||
| 9 | - In This Release | ||
| 10 | - Identifying Your Adapter | 10 | - Identifying Your Adapter |
| 11 | - Known Issues/Troubleshooting | 11 | - Known Issues/Troubleshooting |
| 12 | - Support | 12 | - Support |
| 13 | 13 | ||
| 14 | In This Release | ||
| 15 | =============== | ||
| 16 | |||
| 17 | This file describes the ixgbevf Linux* Base Driver for Intel Network | 14 | This file describes the ixgbevf Linux* Base Driver for Intel Network |
| 18 | Connection. | 15 | Connection. |
| 19 | 16 | ||
| @@ -33,7 +30,7 @@ Identifying Your Adapter | |||
| 33 | For more information on how to identify your adapter, go to the Adapter & | 30 | For more information on how to identify your adapter, go to the Adapter & |
| 34 | Driver ID Guide at: | 31 | Driver ID Guide at: |
| 35 | 32 | ||
| 36 | http://support.intel.com/support/network/sb/CS-008441.htm | 33 | http://support.intel.com/support/go/network/adapter/idguide.htm |
| 37 | 34 | ||
| 38 | Known Issues/Troubleshooting | 35 | Known Issues/Troubleshooting |
| 39 | ============================ | 36 | ============================ |
| @@ -57,34 +54,3 @@ or the Intel Wired Networking project hosted by Sourceforge at: | |||
| 57 | If an issue is identified with the released source code on the supported | 54 | If an issue is identified with the released source code on the supported |
| 58 | kernel with a supported adapter, email the specific information related | 55 | kernel with a supported adapter, email the specific information related |
| 59 | to the issue to e1000-devel@lists.sf.net | 56 | to the issue to e1000-devel@lists.sf.net |
| 60 | |||
| 61 | License | ||
| 62 | ======= | ||
| 63 | |||
| 64 | Intel 10 Gigabit Linux driver. | ||
| 65 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
| 66 | |||
| 67 | This program is free software; you can redistribute it and/or modify it | ||
| 68 | under the terms and conditions of the GNU General Public License, | ||
| 69 | version 2, as published by the Free Software Foundation. | ||
| 70 | |||
| 71 | This program is distributed in the hope it will be useful, but WITHOUT | ||
| 72 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 73 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 74 | more details. | ||
| 75 | |||
| 76 | You should have received a copy of the GNU General Public License along with | ||
| 77 | this program; if not, write to the Free Software Foundation, Inc., | ||
| 78 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 79 | |||
| 80 | The full GNU General Public License is included in this distribution in | ||
| 81 | the file called "COPYING". | ||
| 82 | |||
| 83 | Trademarks | ||
| 84 | ========== | ||
| 85 | |||
| 86 | Intel, Itanium, and Pentium are trademarks or registered trademarks of | ||
| 87 | Intel Corporation or its subsidiaries in the United States and other | ||
| 88 | countries. | ||
| 89 | |||
| 90 | * Other names and brands may be claimed as the property of others. | ||
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ccd951fa94ee..cc96ee2666f2 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
| @@ -478,7 +478,7 @@ static void prepare_hwpoison_fd(void) | |||
| 478 | } | 478 | } |
| 479 | 479 | ||
| 480 | if (opt_unpoison && !hwpoison_forget_fd) { | 480 | if (opt_unpoison && !hwpoison_forget_fd) { |
| 481 | sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); | 481 | sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs); |
| 482 | hwpoison_forget_fd = checked_open(buf, O_WRONLY); | 482 | hwpoison_forget_fd = checked_open(buf, O_WRONLY); |
| 483 | } | 483 | } |
| 484 | } | 484 | } |
diff --git a/MAINTAINERS b/MAINTAINERS
index 668682d1f5fa..3d4179fbc526 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -962,6 +962,23 @@ W: http://www.fluff.org/ben/linux/ | |||
| 962 | S: Maintained | 962 | S: Maintained |
| 963 | F: arch/arm/mach-s3c6410/ | 963 | F: arch/arm/mach-s3c6410/ |
| 964 | 964 | ||
| 965 | ARM/S5P ARM ARCHITECTURES | ||
| 966 | M: Kukjin Kim <kgene.kim@samsung.com> | ||
| 967 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
| 968 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | ||
| 969 | S: Maintained | ||
| 970 | F: arch/arm/mach-s5p*/ | ||
| 971 | |||
| 972 | ARM/SAMSUNG S5P SERIES FIMC SUPPORT | ||
| 973 | M: Kyungmin Park <kyungmin.park@samsung.com> | ||
| 974 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> | ||
| 975 | L: linux-arm-kernel@lists.infradead.org | ||
| 976 | L: linux-media@vger.kernel.org | ||
| 977 | S: Maintained | ||
| 978 | F: arch/arm/plat-s5p/dev-fimc* | ||
| 979 | F: arch/arm/plat-samsung/include/plat/*fimc* | ||
| 980 | F: drivers/media/video/s5p-fimc/ | ||
| 981 | |||
| 965 | ARM/SHMOBILE ARM ARCHITECTURE | 982 | ARM/SHMOBILE ARM ARCHITECTURE |
| 966 | M: Paul Mundt <lethal@linux-sh.org> | 983 | M: Paul Mundt <lethal@linux-sh.org> |
| 967 | M: Magnus Damm <magnus.damm@gmail.com> | 984 | M: Magnus Damm <magnus.damm@gmail.com> |
| @@ -1510,6 +1527,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git | |||
| 1510 | S: Supported | 1527 | S: Supported |
| 1511 | F: Documentation/filesystems/ceph.txt | 1528 | F: Documentation/filesystems/ceph.txt |
| 1512 | F: fs/ceph | 1529 | F: fs/ceph |
| 1530 | F: net/ceph | ||
| 1531 | F: include/linux/ceph | ||
| 1513 | 1532 | ||
| 1514 | CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: | 1533 | CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: |
| 1515 | M: David Vrabel <david.vrabel@csr.com> | 1534 | M: David Vrabel <david.vrabel@csr.com> |
| @@ -2528,7 +2547,7 @@ S: Supported | |||
| 2528 | F: drivers/scsi/gdt* | 2547 | F: drivers/scsi/gdt* |
| 2529 | 2548 | ||
| 2530 | GENERIC GPIO I2C DRIVER | 2549 | GENERIC GPIO I2C DRIVER |
| 2531 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> | 2550 | M: Haavard Skinnemoen <hskinnemoen@gmail.com> |
| 2532 | S: Supported | 2551 | S: Supported |
| 2533 | F: drivers/i2c/busses/i2c-gpio.c | 2552 | F: drivers/i2c/busses/i2c-gpio.c |
| 2534 | F: include/linux/i2c-gpio.h | 2553 | F: include/linux/i2c-gpio.h |
| @@ -3056,16 +3075,27 @@ L: netdev@vger.kernel.org | |||
| 3056 | S: Maintained | 3075 | S: Maintained |
| 3057 | F: drivers/net/ixp2000/ | 3076 | F: drivers/net/ixp2000/ |
| 3058 | 3077 | ||
| 3059 | INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe) | 3078 | INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) |
| 3060 | M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 3079 | M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> |
| 3061 | M: Jesse Brandeburg <jesse.brandeburg@intel.com> | 3080 | M: Jesse Brandeburg <jesse.brandeburg@intel.com> |
| 3062 | M: Bruce Allan <bruce.w.allan@intel.com> | 3081 | M: Bruce Allan <bruce.w.allan@intel.com> |
| 3063 | M: Alex Duyck <alexander.h.duyck@intel.com> | 3082 | M: Carolyn Wyborny <carolyn.wyborny@intel.com> |
| 3083 | M: Don Skidmore <donald.c.skidmore@intel.com> | ||
| 3084 | M: Greg Rose <gregory.v.rose@intel.com> | ||
| 3064 | M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> | 3085 | M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> |
| 3086 | M: Alex Duyck <alexander.h.duyck@intel.com> | ||
| 3065 | M: John Ronciak <john.ronciak@intel.com> | 3087 | M: John Ronciak <john.ronciak@intel.com> |
| 3066 | L: e1000-devel@lists.sourceforge.net | 3088 | L: e1000-devel@lists.sourceforge.net |
| 3067 | W: http://e1000.sourceforge.net/ | 3089 | W: http://e1000.sourceforge.net/ |
| 3068 | S: Supported | 3090 | S: Supported |
| 3091 | F: Documentation/networking/e100.txt | ||
| 3092 | F: Documentation/networking/e1000.txt | ||
| 3093 | F: Documentation/networking/e1000e.txt | ||
| 3094 | F: Documentation/networking/igb.txt | ||
| 3095 | F: Documentation/networking/igbvf.txt | ||
| 3096 | F: Documentation/networking/ixgb.txt | ||
| 3097 | F: Documentation/networking/ixgbe.txt | ||
| 3098 | F: Documentation/networking/ixgbevf.txt | ||
| 3069 | F: drivers/net/e100.c | 3099 | F: drivers/net/e100.c |
| 3070 | F: drivers/net/e1000/ | 3100 | F: drivers/net/e1000/ |
| 3071 | F: drivers/net/e1000e/ | 3101 | F: drivers/net/e1000e/ |
| @@ -3073,6 +3103,7 @@ F: drivers/net/igb/ | |||
| 3073 | F: drivers/net/igbvf/ | 3103 | F: drivers/net/igbvf/ |
| 3074 | F: drivers/net/ixgb/ | 3104 | F: drivers/net/ixgb/ |
| 3075 | F: drivers/net/ixgbe/ | 3105 | F: drivers/net/ixgbe/ |
| 3106 | F: drivers/net/ixgbevf/ | ||
| 3076 | 3107 | ||
| 3077 | INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT | 3108 | INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT |
| 3078 | L: linux-wireless@vger.kernel.org | 3109 | L: linux-wireless@vger.kernel.org |
| @@ -3133,7 +3164,7 @@ F: drivers/net/ioc3-eth.c | |||
| 3133 | 3164 | ||
| 3134 | IOC3 SERIAL DRIVER | 3165 | IOC3 SERIAL DRIVER |
| 3135 | M: Pat Gefre <pfg@sgi.com> | 3166 | M: Pat Gefre <pfg@sgi.com> |
| 3136 | L: linux-mips@linux-mips.org | 3167 | L: linux-serial@vger.kernel.org |
| 3137 | S: Maintained | 3168 | S: Maintained |
| 3138 | F: drivers/serial/ioc3_serial.c | 3169 | F: drivers/serial/ioc3_serial.c |
| 3139 | 3170 | ||
| @@ -3781,9 +3812,8 @@ W: http://www.syskonnect.com | |||
| 3781 | S: Supported | 3812 | S: Supported |
| 3782 | 3813 | ||
| 3783 | MATROX FRAMEBUFFER DRIVER | 3814 | MATROX FRAMEBUFFER DRIVER |
| 3784 | M: Petr Vandrovec <vandrove@vc.cvut.cz> | ||
| 3785 | L: linux-fbdev@vger.kernel.org | 3815 | L: linux-fbdev@vger.kernel.org |
| 3786 | S: Maintained | 3816 | S: Orphan |
| 3787 | F: drivers/video/matrox/matroxfb_* | 3817 | F: drivers/video/matrox/matroxfb_* |
| 3788 | F: include/linux/matroxfb.h | 3818 | F: include/linux/matroxfb.h |
| 3789 | 3819 | ||
| @@ -3970,8 +4000,8 @@ S: Maintained | |||
| 3970 | F: drivers/net/natsemi.c | 4000 | F: drivers/net/natsemi.c |
| 3971 | 4001 | ||
| 3972 | NCP FILESYSTEM | 4002 | NCP FILESYSTEM |
| 3973 | M: Petr Vandrovec <vandrove@vc.cvut.cz> | 4003 | M: Petr Vandrovec <petr@vandrovec.name> |
| 3974 | S: Maintained | 4004 | S: Odd Fixes |
| 3975 | F: fs/ncpfs/ | 4005 | F: fs/ncpfs/ |
| 3976 | 4006 | ||
| 3977 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) | 4007 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) |
| @@ -4777,6 +4807,15 @@ F: fs/qnx4/ | |||
| 4777 | F: include/linux/qnx4_fs.h | 4807 | F: include/linux/qnx4_fs.h |
| 4778 | F: include/linux/qnxtypes.h | 4808 | F: include/linux/qnxtypes.h |
| 4779 | 4809 | ||
| 4810 | RADOS BLOCK DEVICE (RBD) | ||
| 4811 | F: include/linux/qnxtypes.h | ||
| 4812 | M: Yehuda Sadeh <yehuda@hq.newdream.net> | ||
| 4813 | M: Sage Weil <sage@newdream.net> | ||
| 4814 | M: ceph-devel@vger.kernel.org | ||
| 4815 | S: Supported | ||
| 4816 | F: drivers/block/rbd.c | ||
| 4817 | F: drivers/block/rbd_types.h | ||
| 4818 | |||
| 4780 | RADEON FRAMEBUFFER DISPLAY DRIVER | 4819 | RADEON FRAMEBUFFER DISPLAY DRIVER |
| 4781 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> | 4820 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| 4782 | L: linux-fbdev@vger.kernel.org | 4821 | L: linux-fbdev@vger.kernel.org |
| @@ -5002,6 +5041,12 @@ F: drivers/media/common/saa7146* | |||
| 5002 | F: drivers/media/video/*7146* | 5041 | F: drivers/media/video/*7146* |
| 5003 | F: include/media/*7146* | 5042 | F: include/media/*7146* |
| 5004 | 5043 | ||
| 5044 | SAMSUNG AUDIO (ASoC) DRIVERS | ||
| 5045 | M: Jassi Brar <jassi.brar@samsung.com> | ||
| 5046 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
| 5047 | S: Supported | ||
| 5048 | F: sound/soc/s3c24xx | ||
| 5049 | |||
| 5005 | TLG2300 VIDEO4LINUX-2 DRIVER | 5050 | TLG2300 VIDEO4LINUX-2 DRIVER |
| 5006 | M: Huang Shijie <shijie8@gmail.com> | 5051 | M: Huang Shijie <shijie8@gmail.com> |
| 5007 | M: Kang Yong <kangyong@telegent.com> | 5052 | M: Kang Yong <kangyong@telegent.com> |
| @@ -6444,8 +6489,10 @@ F: include/linux/wm97xx.h | |||
| 6444 | WOLFSON MICROELECTRONICS DRIVERS | 6489 | WOLFSON MICROELECTRONICS DRIVERS |
| 6445 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 6490 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> |
| 6446 | M: Ian Lartey <ian@opensource.wolfsonmicro.com> | 6491 | M: Ian Lartey <ian@opensource.wolfsonmicro.com> |
| 6492 | M: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | ||
| 6493 | T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc | ||
| 6447 | T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus | 6494 | T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus |
| 6448 | W: http://opensource.wolfsonmicro.com/node/8 | 6495 | W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices |
| 6449 | S: Supported | 6496 | S: Supported |
| 6450 | F: Documentation/hwmon/wm83?? | 6497 | F: Documentation/hwmon/wm83?? |
| 6451 | F: drivers/leds/leds-wm83*.c | 6498 | F: drivers/leds/leds-wm83*.c |
| @@ -1,8 +1,8 @@ | |||
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -1,8 +1,8 @@ |||
| 1 | VERSION = 2 | 1 | VERSION = 2 |
| 2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
| 3 | SUBLEVEL = 36 | 3 | SUBLEVEL = 36 |
| 4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = |
| 5 | NAME = Sheep on Meth | 5 | NAME = Flesh-Eating Bats with Fangs |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
| 8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index d290845aef59..6f7feb5db271 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c | |||
| @@ -48,7 +48,7 @@ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) | |||
| 48 | sigset_t mask; | 48 | sigset_t mask; |
| 49 | unsigned long res; | 49 | unsigned long res; |
| 50 | 50 | ||
| 51 | siginitset(&mask, newmask & ~_BLOCKABLE); | 51 | siginitset(&mask, newmask & _BLOCKABLE); |
| 52 | res = sigprocmask(how, &mask, &oldmask); | 52 | res = sigprocmask(how, &mask, &oldmask); |
| 53 | if (!res) { | 53 | if (!res) { |
| 54 | force_successful_syscall_return(); | 54 | force_successful_syscall_return(); |
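The one-character change above is the whole fix: the mask handed to sigprocmask() must keep only the blockable bits of the caller's request, so SIGKILL and SIGSTOP can never end up blocked. A small user-space sketch of the arithmetic (not kernel code; the bit layout mirrors the kernel's sigmask() macro, and the signal numbers are whatever the build host defines):

    /* Demonstrates why "newmask & BLOCKABLE" is right and the old
     * "newmask & ~BLOCKABLE" was wrong. */
    #include <signal.h>
    #include <stdio.h>

    #define SIGMASK(sig)  (1UL << ((sig) - 1))
    #define BLOCKABLE     (~(SIGMASK(SIGKILL) | SIGMASK(SIGSTOP)))

    int main(void)
    {
            unsigned long newmask = SIGMASK(SIGINT) | SIGMASK(SIGKILL);

            printf("requested     : %#lx\n", newmask);
            printf("& BLOCKABLE   : %#lx (SIGINT kept, SIGKILL dropped)\n",
                   newmask & BLOCKABLE);
            printf("& ~BLOCKABLE  : %#lx (only SIGKILL left - the old bug)\n",
                   newmask & ~BLOCKABLE);
            return 0;
    }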
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 88c97bc7a6f5..9c26ba7244fb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -1101,6 +1101,20 @@ config ARM_ERRATA_720789 | |||
| 1101 | invalidated are not, resulting in an incoherency in the system page | 1101 | invalidated are not, resulting in an incoherency in the system page |
| 1102 | tables. The workaround changes the TLB flushing routines to invalidate | 1102 | tables. The workaround changes the TLB flushing routines to invalidate |
| 1103 | entries regardless of the ASID. | 1103 | entries regardless of the ASID. |
| 1104 | |||
| 1105 | config ARM_ERRATA_743622 | ||
| 1106 | bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption" | ||
| 1107 | depends on CPU_V7 | ||
| 1108 | help | ||
| 1109 | This option enables the workaround for the 743622 Cortex-A9 | ||
| 1110 | (r2p0..r2p2) erratum. Under very rare conditions, a faulty | ||
| 1111 | optimisation in the Cortex-A9 Store Buffer may lead to data | ||
| 1112 | corruption. This workaround sets a specific bit in the diagnostic | ||
| 1113 | register of the Cortex-A9 which disables the Store Buffer | ||
| 1114 | optimisation, preventing the defect from occurring. This has no | ||
| 1115 | visible impact on the overall performance or power consumption of the | ||
| 1116 | processor. | ||
| 1117 | |||
| 1104 | endmenu | 1118 | endmenu |
| 1105 | 1119 | ||
| 1106 | source "arch/arm/common/Kconfig" | 1120 | source "arch/arm/common/Kconfig" |
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index 8bccbfa693ff..2c1f0050c9c4 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c | |||
| @@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
| 1162 | { | 1162 | { |
| 1163 | /* | 1163 | /* |
| 1164 | * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx | 1164 | * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx |
| 1165 | * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx | 1165 | * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx |
| 1166 | * ALU op with S bit and Rd == 15 : | 1166 | * ALU op with S bit and Rd == 15 : |
| 1167 | * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx | 1167 | * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx |
| 1168 | */ | 1168 | */ |
| 1169 | if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */ | 1169 | if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ |
| 1170 | (insn & 0x0ff00000) == 0x03400000 || /* Undef */ | ||
| 1170 | (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ | 1171 | (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ |
| 1171 | return INSN_REJECTED; | 1172 | return INSN_REJECTED; |
| 1172 | 1173 | ||
| @@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
| 1177 | * *S (bit 20) updates condition codes | 1178 | * *S (bit 20) updates condition codes |
| 1178 | * ADC/SBC/RSC reads the C flag | 1179 | * ADC/SBC/RSC reads the C flag |
| 1179 | */ | 1180 | */ |
| 1180 | insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */ | 1181 | insn &= 0xffff0fff; /* Rd = r0 */ |
| 1181 | asi->insn[0] = insn; | 1182 | asi->insn[0] = insn; |
| 1182 | asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ | 1183 | asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ |
| 1183 | emulate_alu_imm_rwflags : emulate_alu_imm_rflags; | 1184 | emulate_alu_imm_rwflags : emulate_alu_imm_rflags; |
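Two things change in this hunk: the undefined-instruction space gets its own mask instead of being folded into the MSR test, and the instruction copied into the probe slot now keeps its Rn field, since 0xffff0fff preserves bits 19:16 and clears only bits 15:12 (Rd forced to r0). The mask/compare idiom itself is easy to show in isolation; the helper below is a hypothetical stand-alone version, not part of the kprobes code:

    /* (insn & MASK) == VALUE tests the fixed bits of an encoding while
     * ignoring the don't-care fields; masks mirror the hunk above. */
    #include <stdint.h>
    #include <stdio.h>

    static int insn_rejected(uint32_t insn)
    {
            if ((insn & 0x0fb00000) == 0x03200000)   /* MSR (immediate) */
                    return 1;
            if ((insn & 0x0ff00000) == 0x03400000)   /* undefined space */
                    return 1;
            if ((insn & 0x0e10f000) == 0x0210f000)   /* ALU, S bit, Rd == r15 */
                    return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", insn_rejected(0xe3200000));  /* sample MSR encoding */
            return 0;
    }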
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h index c80e090b3670..ee8db152592e 100644 --- a/arch/arm/mach-at91/include/mach/system.h +++ b/arch/arm/mach-at91/include/mach/system.h | |||
| @@ -28,17 +28,16 @@ | |||
| 28 | 28 | ||
| 29 | static inline void arch_idle(void) | 29 | static inline void arch_idle(void) |
| 30 | { | 30 | { |
| 31 | #ifndef CONFIG_DEBUG_KERNEL | ||
| 32 | /* | 31 | /* |
| 33 | * Disable the processor clock. The processor will be automatically | 32 | * Disable the processor clock. The processor will be automatically |
| 34 | * re-enabled by an interrupt or by a reset. | 33 | * re-enabled by an interrupt or by a reset. |
| 35 | */ | 34 | */ |
| 36 | at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK); | 35 | at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK); |
| 37 | #else | 36 | #ifndef CONFIG_CPU_ARM920T |
| 38 | /* | 37 | /* |
| 39 | * Set the processor (CP15) into 'Wait for Interrupt' mode. | 38 | * Set the processor (CP15) into 'Wait for Interrupt' mode. |
| 40 | * Unlike disabling the processor clock via the PMC (above) | 39 | * Post-RM9200 processors need this in conjunction with the above |
| 41 | * this allows the processor to be woken via JTAG. | 40 | * to save power when idle. |
| 42 | */ | 41 | */ |
| 43 | cpu_do_idle(); | 42 | cpu_do_idle(); |
| 44 | #endif | 43 | #endif |
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c index 8904ca4e2e24..a696d354b1f8 100644 --- a/arch/arm/mach-ep93xx/dma-m2p.c +++ b/arch/arm/mach-ep93xx/dma-m2p.c | |||
| @@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch) | |||
| 276 | v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); | 276 | v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); |
| 277 | m2p_set_control(ch, v); | 277 | m2p_set_control(ch, v); |
| 278 | 278 | ||
| 279 | while (m2p_channel_state(ch) == STATE_ON) | 279 | while (m2p_channel_state(ch) >= STATE_ON) |
| 280 | cpu_relax(); | 280 | cpu_relax(); |
| 281 | 281 | ||
| 282 | m2p_set_control(ch, 0x0); | 282 | m2p_set_control(ch, 0x0); |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index c5c0369bb481..2f7e2728970d 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
| @@ -122,6 +122,7 @@ config MACH_CPUIMX27 | |||
| 122 | select IMX_HAVE_PLATFORM_IMX_I2C | 122 | select IMX_HAVE_PLATFORM_IMX_I2C |
| 123 | select IMX_HAVE_PLATFORM_IMX_UART | 123 | select IMX_HAVE_PLATFORM_IMX_UART |
| 124 | select IMX_HAVE_PLATFORM_MXC_NAND | 124 | select IMX_HAVE_PLATFORM_MXC_NAND |
| 125 | select MXC_ULPI if USB_ULPI | ||
| 125 | help | 126 | help |
| 126 | Include support for Eukrea CPUIMX27 platform. This includes | 127 | Include support for Eukrea CPUIMX27 platform. This includes |
| 127 | specific configurations for the module and its peripherals. | 128 | specific configurations for the module and its peripherals. |
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c index 339150ab0ea5..6830afd1d2ba 100644 --- a/arch/arm/mach-imx/mach-cpuimx27.c +++ b/arch/arm/mach-imx/mach-cpuimx27.c | |||
| @@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void) | |||
| 259 | i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices, | 259 | i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices, |
| 260 | ARRAY_SIZE(eukrea_cpuimx27_i2c_devices)); | 260 | ARRAY_SIZE(eukrea_cpuimx27_i2c_devices)); |
| 261 | 261 | ||
| 262 | imx27_add_i2c_imx1(&cpuimx27_i2c1_data); | 262 | imx27_add_i2c_imx0(&cpuimx27_i2c1_data); |
| 263 | 263 | ||
| 264 | platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); | 264 | platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); |
| 265 | 265 | ||
diff --git a/arch/arm/mach-s5p6440/cpu.c b/arch/arm/mach-s5p6440/cpu.c index 526f33adb31d..ec592e866054 100644 --- a/arch/arm/mach-s5p6440/cpu.c +++ b/arch/arm/mach-s5p6440/cpu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
| 20 | #include <linux/serial_core.h> | 20 | #include <linux/serial_core.h> |
| 21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
| 22 | #include <linux/sched.h> | ||
| 22 | 23 | ||
| 23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
| 24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c index a48fb553fd01..70ac681af72b 100644 --- a/arch/arm/mach-s5p6442/cpu.c +++ b/arch/arm/mach-s5p6442/cpu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
| 20 | #include <linux/serial_core.h> | 20 | #include <linux/serial_core.h> |
| 21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
| 22 | #include <linux/sched.h> | ||
| 22 | 23 | ||
| 23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
| 24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/cpu.c index 251c92ac5b22..cd1afbce83e2 100644 --- a/arch/arm/mach-s5pc100/cpu.c +++ b/arch/arm/mach-s5pc100/cpu.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
| 22 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
| 23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
| 24 | #include <linux/sched.h> | ||
| 24 | 25 | ||
| 25 | #include <asm/mach/arch.h> | 26 | #include <asm/mach/arch.h> |
| 26 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index cfecd70657cb..d562670e1b0b 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c | |||
| @@ -173,11 +173,6 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable) | |||
| 173 | return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable); | 173 | return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable); |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable) | ||
| 177 | { | ||
| 178 | return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable); | ||
| 179 | } | ||
| 180 | |||
| 181 | static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable) | 176 | static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable) |
| 182 | { | 177 | { |
| 183 | return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable); | 178 | return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable); |
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c index 77f456c91ad3..245b82b53df4 100644 --- a/arch/arm/mach-s5pv210/cpu.c +++ b/arch/arm/mach-s5pv210/cpu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
| 20 | #include <linux/sysdev.h> | 20 | #include <linux/sysdev.h> |
| 21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
| 22 | #include <linux/sched.h> | ||
| 22 | 23 | ||
| 23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
| 24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index efb127022d42..71fb17349520 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
| @@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void) | |||
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | #if 0 | 70 | #if 0 |
| 71 | static void ct_ca9x4_timer_init(void) | 71 | static void __init ct_ca9x4_timer_init(void) |
| 72 | { | 72 | { |
| 73 | writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); | 73 | writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); |
| 74 | writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); | 74 | writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); |
| @@ -222,7 +222,7 @@ static struct platform_device pmu_device = { | |||
| 222 | .resource = pmu_resources, | 222 | .resource = pmu_resources, |
| 223 | }; | 223 | }; |
| 224 | 224 | ||
| 225 | static void ct_ca9x4_init(void) | 225 | static void __init ct_ca9x4_init(void) |
| 226 | { | 226 | { |
| 227 | int i; | 227 | int i; |
| 228 | 228 | ||
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index 817f0ad38a0b..7eaa232180a5 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c | |||
| @@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num) | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | 50 | ||
| 51 | static void v2m_timer_init(void) | 51 | static void __init v2m_timer_init(void) |
| 52 | { | 52 | { |
| 53 | writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL); | 53 | writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL); |
| 54 | writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL); | 54 | writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL); |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ab506272b2d3..17e7b0b57e49 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
| @@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
| 204 | /* | 204 | /* |
| 205 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | 205 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ |
| 206 | */ | 206 | */ |
| 207 | if (WARN_ON(pfn_valid(pfn))) | 207 | if (pfn_valid(pfn)) { |
| 208 | return NULL; | 208 | printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" |
| 209 | KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" | ||
| 210 | KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n"); | ||
| 211 | WARN_ON(1); | ||
| 212 | } | ||
| 209 | 213 | ||
| 210 | type = get_mem_type(mtype); | 214 | type = get_mem_type(mtype); |
| 211 | if (!type) | 215 | if (!type) |
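The reworded warning explains the underlying problem: RAM handed to ioremap() is already covered by the kernel's cacheable linear mapping, so a second mapping with device attributes gives an ARMv6+ core two conflicting views of the same physical page. Below is a sketch of the pattern being flagged and of the usual lowmem alternative; the helper names and the assumption that ram_phys is lowmem RAM the driver owns are illustrative only:

    #include <linux/io.h>
    #include <linux/mm.h>

    /* Sketch, not a real driver. */
    static void __iomem *bad_map(phys_addr_t ram_phys)
    {
            return ioremap(ram_phys, PAGE_SIZE);   /* the pattern the warning targets */
    }

    static void *good_map(phys_addr_t ram_phys)
    {
            return phys_to_virt(ram_phys);         /* reuse the existing linear map */
    }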
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6a3a2d0cd6db..e8ed9dc461fe 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -248,7 +248,7 @@ static struct mem_type mem_types[] = { | |||
| 248 | }, | 248 | }, |
| 249 | [MT_MEMORY] = { | 249 | [MT_MEMORY] = { |
| 250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
| 251 | L_PTE_USER | L_PTE_EXEC, | 251 | L_PTE_WRITE | L_PTE_EXEC, |
| 252 | .prot_l1 = PMD_TYPE_TABLE, | 252 | .prot_l1 = PMD_TYPE_TABLE, |
| 253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
| 254 | .domain = DOMAIN_KERNEL, | 254 | .domain = DOMAIN_KERNEL, |
| @@ -259,7 +259,7 @@ static struct mem_type mem_types[] = { | |||
| 259 | }, | 259 | }, |
| 260 | [MT_MEMORY_NONCACHED] = { | 260 | [MT_MEMORY_NONCACHED] = { |
| 261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
| 262 | L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, | 262 | L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, |
| 263 | .prot_l1 = PMD_TYPE_TABLE, | 263 | .prot_l1 = PMD_TYPE_TABLE, |
| 264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
| 265 | .domain = DOMAIN_KERNEL, | 265 | .domain = DOMAIN_KERNEL, |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 7563ff0141bd..197f21bed5e9 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -253,6 +253,14 @@ __v7_setup: | |||
| 253 | orreq r10, r10, #1 << 22 @ set bit #22 | 253 | orreq r10, r10, #1 << 22 @ set bit #22 |
| 254 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 254 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
| 255 | #endif | 255 | #endif |
| 256 | #ifdef CONFIG_ARM_ERRATA_743622 | ||
| 257 | teq r6, #0x20 @ present in r2p0 | ||
| 258 | teqne r6, #0x21 @ present in r2p1 | ||
| 259 | teqne r6, #0x22 @ present in r2p2 | ||
| 260 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
| 261 | orreq r10, r10, #1 << 6 @ set bit #6 | ||
| 262 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
| 263 | #endif | ||
| 256 | 264 | ||
| 257 | 3: mov r10, #0 | 265 | 3: mov r10, #0 |
| 258 | #ifdef HARVARD_CACHE | 266 | #ifdef HARVARD_CACHE |
| @@ -365,7 +373,7 @@ __v7_ca9mp_proc_info: | |||
| 365 | b __v7_ca9mp_setup | 373 | b __v7_ca9mp_setup |
| 366 | .long cpu_arch_name | 374 | .long cpu_arch_name |
| 367 | .long cpu_elf_name | 375 | .long cpu_elf_name |
| 368 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 376 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS |
| 369 | .long cpu_v7_name | 377 | .long cpu_v7_name |
| 370 | .long v7_processor_functions | 378 | .long v7_processor_functions |
| 371 | .long v7wbi_tlb_fns | 379 | .long v7wbi_tlb_fns |
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 0691176899ff..72e09eb642dd 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
| @@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event) | |||
| 102 | if (IS_ERR(pevent)) { | 102 | if (IS_ERR(pevent)) { |
| 103 | ret = PTR_ERR(pevent); | 103 | ret = PTR_ERR(pevent); |
| 104 | } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { | 104 | } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { |
| 105 | perf_event_release_kernel(pevent); | ||
| 105 | pr_warning("oprofile: failed to enable event %d " | 106 | pr_warning("oprofile: failed to enable event %d " |
| 106 | "on CPU %d\n", event, cpu); | 107 | "on CPU %d\n", event, cpu); |
| 107 | ret = -EBUSY; | 108 | ret = -EBUSY; |
| @@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
| 365 | ret = init_driverfs(); | 366 | ret = init_driverfs(); |
| 366 | if (ret) { | 367 | if (ret) { |
| 367 | kfree(counter_config); | 368 | kfree(counter_config); |
| 369 | counter_config = NULL; | ||
| 368 | return ret; | 370 | return ret; |
| 369 | } | 371 | } |
| 370 | 372 | ||
| @@ -402,7 +404,6 @@ void oprofile_arch_exit(void) | |||
| 402 | struct perf_event *event; | 404 | struct perf_event *event; |
| 403 | 405 | ||
| 404 | if (*perf_events) { | 406 | if (*perf_events) { |
| 405 | exit_driverfs(); | ||
| 406 | for_each_possible_cpu(cpu) { | 407 | for_each_possible_cpu(cpu) { |
| 407 | for (id = 0; id < perf_num_counters; ++id) { | 408 | for (id = 0; id < perf_num_counters; ++id) { |
| 408 | event = perf_events[cpu][id]; | 409 | event = perf_events[cpu][id]; |
| @@ -413,8 +414,10 @@ void oprofile_arch_exit(void) | |||
| 413 | } | 414 | } |
| 414 | } | 415 | } |
| 415 | 416 | ||
| 416 | if (counter_config) | 417 | if (counter_config) { |
| 417 | kfree(counter_config); | 418 | kfree(counter_config); |
| 419 | exit_driverfs(); | ||
| 420 | } | ||
| 418 | } | 421 | } |
| 419 | #else | 422 | #else |
| 420 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 423 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e39a417a368d..a92cb499313f 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
| @@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES | |||
| 33 | config OMAP_DEBUG_LEDS | 33 | config OMAP_DEBUG_LEDS |
| 34 | bool | 34 | bool |
| 35 | depends on OMAP_DEBUG_DEVICES | 35 | depends on OMAP_DEBUG_DEVICES |
| 36 | default y if LEDS | 36 | default y if LEDS_CLASS |
| 37 | 37 | ||
| 38 | config OMAP_RESET_CLOCKS | 38 | config OMAP_RESET_CLOCKS |
| 39 | bool "Reset unused clocks during boot" | 39 | bool "Reset unused clocks during boot" |
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index a202a2ce6e3d..6cd151b31bc5 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c | |||
| @@ -320,6 +320,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da) | |||
| 320 | if ((start <= da) && (da < start + bytes)) { | 320 | if ((start <= da) && (da < start + bytes)) { |
| 321 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", | 321 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", |
| 322 | __func__, start, da, bytes); | 322 | __func__, start, da, bytes); |
| 323 | iotlb_load_cr(obj, &cr); | ||
| 323 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | 324 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); |
| 324 | } | 325 | } |
| 325 | } | 326 | } |
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index e31496e35b0f..0c8612fd8312 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c | |||
| @@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id) | |||
| 156 | /* Writing zero to RSYNC_ERR clears the IRQ */ | 156 | /* Writing zero to RSYNC_ERR clears the IRQ */ |
| 157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); | 157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); |
| 158 | } else { | 158 | } else { |
| 159 | complete(&mcbsp_rx->tx_irq_completion); | 159 | complete(&mcbsp_rx->rx_irq_completion); |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | return IRQ_HANDLED; | 162 | return IRQ_HANDLED; |
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c index 04d9521ddc9f..e8f2be2d67f2 100644 --- a/arch/arm/plat-samsung/adc.c +++ b/arch/arm/plat-samsung/adc.c | |||
| @@ -435,7 +435,6 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state) | |||
| 435 | static int s3c_adc_resume(struct platform_device *pdev) | 435 | static int s3c_adc_resume(struct platform_device *pdev) |
| 436 | { | 436 | { |
| 437 | struct adc_device *adc = platform_get_drvdata(pdev); | 437 | struct adc_device *adc = platform_get_drvdata(pdev); |
| 438 | unsigned long flags; | ||
| 439 | 438 | ||
| 440 | clk_enable(adc->clk); | 439 | clk_enable(adc->clk); |
| 441 | enable_irq(adc->irq); | 440 | enable_irq(adc->irq); |
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c index 90a20512d68d..e8d20b0bc50e 100644 --- a/arch/arm/plat-samsung/clock.c +++ b/arch/arm/plat-samsung/clock.c | |||
| @@ -48,6 +48,9 @@ | |||
| 48 | #include <plat/clock.h> | 48 | #include <plat/clock.h> |
| 49 | #include <plat/cpu.h> | 49 | #include <plat/cpu.h> |
| 50 | 50 | ||
| 51 | #include <linux/serial_core.h> | ||
| 52 | #include <plat/regs-serial.h> /* for s3c24xx_uart_devs */ | ||
| 53 | |||
| 51 | /* clock information */ | 54 | /* clock information */ |
| 52 | 55 | ||
| 53 | static LIST_HEAD(clocks); | 56 | static LIST_HEAD(clocks); |
| @@ -65,6 +68,28 @@ static int clk_null_enable(struct clk *clk, int enable) | |||
| 65 | return 0; | 68 | return 0; |
| 66 | } | 69 | } |
| 67 | 70 | ||
| 71 | static int dev_is_s3c_uart(struct device *dev) | ||
| 72 | { | ||
| 73 | struct platform_device **pdev = s3c24xx_uart_devs; | ||
| 74 | int i; | ||
| 75 | for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++) | ||
| 76 | if (*pdev && dev == &(*pdev)->dev) | ||
| 77 | return 1; | ||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | /* | ||
| 82 | * Serial drivers call get_clock() very early, before platform bus | ||
| 83 | * has been set up, this requires a special check to let them get | ||
| 84 | * a proper clock | ||
| 85 | */ | ||
| 86 | |||
| 87 | static int dev_is_platform_device(struct device *dev) | ||
| 88 | { | ||
| 89 | return dev->bus == &platform_bus_type || | ||
| 90 | (dev->bus == NULL && dev_is_s3c_uart(dev)); | ||
| 91 | } | ||
| 92 | |||
| 68 | /* Clock API calls */ | 93 | /* Clock API calls */ |
| 69 | 94 | ||
| 70 | struct clk *clk_get(struct device *dev, const char *id) | 95 | struct clk *clk_get(struct device *dev, const char *id) |
| @@ -73,7 +98,7 @@ struct clk *clk_get(struct device *dev, const char *id) | |||
| 73 | struct clk *clk = ERR_PTR(-ENOENT); | 98 | struct clk *clk = ERR_PTR(-ENOENT); |
| 74 | int idno; | 99 | int idno; |
| 75 | 100 | ||
| 76 | if (dev == NULL || dev->bus != &platform_bus_type) | 101 | if (dev == NULL || !dev_is_platform_device(dev)) |
| 77 | idno = -1; | 102 | idno = -1; |
| 78 | else | 103 | else |
| 79 | idno = to_platform_device(dev)->id; | 104 | idno = to_platform_device(dev)->id; |
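The helpers added here exist because the Samsung serial code asks for its clock before the platform bus is registered, when dev->bus is still NULL; dev_is_s3c_uart() recognises those devices by comparing pointers against s3c24xx_uart_devs[]. A sketch of the early call pattern that has to keep working; the array index and the clock id string are illustrative, not taken from the driver:

    /* Hypothetical early console path: the device is one of the entries in
     * s3c24xx_uart_devs[], but platform_bus has not been registered yet. */
    struct platform_device *pdev = s3c24xx_uart_devs[0];
    struct clk *clk;

    clk = clk_get(&pdev->dev, "uart");     /* must still resolve by device id */
    if (!IS_ERR(clk))
            clk_enable(clk);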
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index 98f94d041d9c..a727f54d64d6 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c | |||
| @@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
| 314 | vfree(module->arch.syminfo); | 314 | vfree(module->arch.syminfo); |
| 315 | module->arch.syminfo = NULL; | 315 | module->arch.syminfo = NULL; |
| 316 | 316 | ||
| 317 | return module_bug_finalize(hdr, sechdrs, module); | 317 | return 0; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | void module_arch_cleanup(struct module *module) | 320 | void module_arch_cleanup(struct module *module) |
| 321 | { | 321 | { |
| 322 | module_bug_cleanup(module); | ||
| 323 | } | 322 | } |
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c index 0865e291c20d..db4953dc4e1b 100644 --- a/arch/h8300/kernel/module.c +++ b/arch/h8300/kernel/module.c | |||
| @@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 112 | const Elf_Shdr *sechdrs, | 112 | const Elf_Shdr *sechdrs, |
| 113 | struct module *me) | 113 | struct module *me) |
| 114 | { | 114 | { |
| 115 | return module_bug_finalize(hdr, sechdrs, me); | 115 | return 0; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | void module_arch_cleanup(struct module *mod) | 118 | void module_arch_cleanup(struct module *mod) |
| 119 | { | 119 | { |
| 120 | module_bug_cleanup(mod); | ||
| 121 | } | 120 | } |
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h index 2f85412ef730..b8da7d0574d2 100644 --- a/arch/m32r/include/asm/elf.h +++ b/arch/m32r/include/asm/elf.h | |||
| @@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t; | |||
| 82 | * These are used to set parameters in the core dumps. | 82 | * These are used to set parameters in the core dumps. |
| 83 | */ | 83 | */ |
| 84 | #define ELF_CLASS ELFCLASS32 | 84 | #define ELF_CLASS ELFCLASS32 |
| 85 | #if defined(__LITTLE_ENDIAN) | 85 | #if defined(__LITTLE_ENDIAN__) |
| 86 | #define ELF_DATA ELFDATA2LSB | 86 | #define ELF_DATA ELFDATA2LSB |
| 87 | #elif defined(__BIG_ENDIAN) | 87 | #elif defined(__BIG_ENDIAN__) |
| 88 | #define ELF_DATA ELFDATA2MSB | 88 | #define ELF_DATA ELFDATA2MSB |
| 89 | #else | 89 | #else |
| 90 | #error no endian defined | 90 | #error no endian defined |
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/m32r/kernel/.gitignore | |||
| @@ -0,0 +1 @@ | |||
| vmlinux.lds | |||
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 7bbe38645ed5..a08697f0886d 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | 28 | ||
| 29 | #define DEBUG_SIG 0 | 29 | #define DEBUG_SIG 0 |
| 30 | 30 | ||
| 31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
| 32 | |||
| 31 | asmlinkage int | 33 | asmlinkage int |
| 32 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 34 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
| 33 | unsigned long r2, unsigned long r3, unsigned long r4, | 35 | unsigned long r2, unsigned long r3, unsigned long r4, |
| @@ -254,7 +256,7 @@ give_sigsegv: | |||
| 254 | static int prev_insn(struct pt_regs *regs) | 256 | static int prev_insn(struct pt_regs *regs) |
| 255 | { | 257 | { |
| 256 | u16 inst; | 258 | u16 inst; |
| 257 | if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) | 259 | if (get_user(inst, (u16 __user *)(regs->bpc - 2))) |
| 258 | return -EFAULT; | 260 | return -EFAULT; |
| 259 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 261 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ |
| 260 | regs->bpc -= 2; | 262 | regs->bpc -= 2; |
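Dropping the '&' is the entire fix: get_user() takes the destination as an lvalue and generates the user-space access itself, so passing &inst read into the wrong object. For contrast, copy_from_user() does take an address. A sketch of the two conventions in kernel context, where uaddr is a hypothetical __user pointer:

    u16 inst;

    if (get_user(inst, (u16 __user *)uaddr))                  /* no '&' here */
            return -EFAULT;

    if (copy_from_user(&inst, (u16 __user *)uaddr, sizeof(inst)))
            return -EFAULT;                                    /* '&' here */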
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c index 8f0640847ad2..05285d08e547 100644 --- a/arch/m68k/mac/macboing.c +++ b/arch/m68k/mac/macboing.c | |||
| @@ -162,7 +162,7 @@ static void mac_init_asc( void ) | |||
| 162 | void mac_mksound( unsigned int freq, unsigned int length ) | 162 | void mac_mksound( unsigned int freq, unsigned int length ) |
| 163 | { | 163 | { |
| 164 | __u32 cfreq = ( freq << 5 ) / 468; | 164 | __u32 cfreq = ( freq << 5 ) / 468; |
| 165 | __u32 flags; | 165 | unsigned long flags; |
| 166 | int i; | 166 | int i; |
| 167 | 167 | ||
| 168 | if ( mac_special_bell == NULL ) | 168 | if ( mac_special_bell == NULL ) |
| @@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored ) | |||
| 224 | */ | 224 | */ |
| 225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) | 225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) |
| 226 | { | 226 | { |
| 227 | __u32 flags; | 227 | unsigned long flags; |
| 228 | 228 | ||
| 229 | /* if the bell is already ringing, ring longer */ | 229 | /* if the bell is already ringing, ring longer */ |
| 230 | if ( mac_bell_duration > 0 ) | 230 | if ( mac_bell_duration > 0 ) |
| @@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig | |||
| 271 | static void mac_quadra_ring_bell( unsigned long ignored ) | 271 | static void mac_quadra_ring_bell( unsigned long ignored ) |
| 272 | { | 272 | { |
| 273 | int i, count = mac_asc_samplespersec / HZ; | 273 | int i, count = mac_asc_samplespersec / HZ; |
| 274 | __u32 flags; | 274 | unsigned long flags; |
| 275 | 275 | ||
| 276 | /* | 276 | /* |
| 277 | * we neither want a sound buffer overflow nor underflow, so we need to match | 277 | * we neither want a sound buffer overflow nor underflow, so we need to match |
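All three hunks in this file make the same change because local_irq_save()/local_irq_restore() store the saved interrupt state in an unsigned long; a fixed-width __u32 is the wrong type on principle and breaks once the macros type-check or the word size grows. The idiom being protected, sketched in kernel context:

    unsigned long flags;

    local_irq_save(flags);      /* disable local interrupts, remember prior state */
    /* ... program the ASC sound hardware ... */
    local_irq_restore(flags);   /* restore the saved state */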
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild index e322d65f33a4..7dd65cfae837 100644 --- a/arch/mips/Kbuild +++ b/arch/mips/Kbuild | |||
| @@ -7,6 +7,10 @@ subdir-ccflags-y := -Werror | |||
| 7 | include arch/mips/Kbuild.platforms | 7 | include arch/mips/Kbuild.platforms |
| 8 | obj-y := $(platform-y) | 8 | obj-y := $(platform-y) |
| 9 | 9 | ||
| 10 | # make clean traverses $(obj-) without having included .config, so | ||
| 11 | # everything ends up here | ||
| 12 | obj- := $(platform-) | ||
| 13 | |||
| 10 | # mips object files | 14 | # mips object files |
| 11 | # The object files are linked as core-y files would be linked | 15 | # The object files are linked as core-y files would be linked |
| 12 | 16 | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3ad59dde4852..4c9f402295dd 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
| @@ -13,6 +13,7 @@ config MIPS | |||
| 13 | select HAVE_KPROBES | 13 | select HAVE_KPROBES |
| 14 | select HAVE_KRETPROBES | 14 | select HAVE_KRETPROBES |
| 15 | select RTC_LIB if !MACH_LOONGSON | 15 | select RTC_LIB if !MACH_LOONGSON |
| 16 | select GENERIC_ATOMIC64 if !64BIT | ||
| 16 | 17 | ||
| 17 | mainmenu "Linux/MIPS Kernel Configuration" | 18 | mainmenu "Linux/MIPS Kernel Configuration" |
| 18 | 19 | ||
| @@ -880,11 +881,15 @@ config NO_IOPORT | |||
| 880 | config GENERIC_ISA_DMA | 881 | config GENERIC_ISA_DMA |
| 881 | bool | 882 | bool |
| 882 | select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n | 883 | select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n |
| 884 | select ISA_DMA_API | ||
| 883 | 885 | ||
| 884 | config GENERIC_ISA_DMA_SUPPORT_BROKEN | 886 | config GENERIC_ISA_DMA_SUPPORT_BROKEN |
| 885 | bool | 887 | bool |
| 886 | select GENERIC_ISA_DMA | 888 | select GENERIC_ISA_DMA |
| 887 | 889 | ||
| 890 | config ISA_DMA_API | ||
| 891 | bool | ||
| 892 | |||
| 888 | config GENERIC_GPIO | 893 | config GENERIC_GPIO |
| 889 | bool | 894 | bool |
| 890 | 895 | ||
| @@ -1646,8 +1651,16 @@ config MIPS_MT_SMP | |||
| 1646 | select SYS_SUPPORTS_SMP | 1651 | select SYS_SUPPORTS_SMP |
| 1647 | select SMP_UP | 1652 | select SMP_UP |
| 1648 | help | 1653 | help |
| 1649 | This is a kernel model which is also known a VSMP or lately | 1654 | This is a kernel model which is known as VSMP but lately has been |
| 1650 | has been marketesed into SMVP. | 1655 | marketed as SMVP. |
| 1656 | Virtual SMP uses the processor's VPEs to implement virtual | ||
| 1657 | processors. In currently available configuration of the 34K processor | ||
| 1658 | this allows for a dual processor. Both processors will share the same | ||
| 1659 | primary caches; each will obtain half of the TLB for its own | ||
| 1660 | exclusive use. For a layman this model can be described as similar to | ||
| 1661 | what Intel calls Hyperthreading. | ||
| 1662 | |||
| 1663 | For further information see http://www.linux-mips.org/wiki/34K#VSMP | ||
| 1651 | 1664 | ||
| 1652 | config MIPS_MT_SMTC | 1665 | config MIPS_MT_SMTC |
| 1653 | bool "SMTC: Use all TCs on all VPEs for SMP" | 1666 | bool "SMTC: Use all TCs on all VPEs for SMP" |
| @@ -1664,6 +1677,14 @@ config MIPS_MT_SMTC | |||
| 1664 | help | 1677 | help |
| 1665 | This is a kernel model which is known a SMTC or lately has been | 1678 | This is a kernel model which is known a SMTC or lately has been |
| 1666 | marketesed into SMVP. | 1679 | marketesed into SMVP. |
| 1680 | It presents the available TCs of the core as processors to Linux. | ||
| 1681 | On currently available 34K processors this means a Linux system will | ||
| 1682 | see up to 5 processors. The implementation of the SMTC kernel differs | ||
| 1683 | significantly from VSMP and cannot efficiently coexist in the same | ||
| 1684 | kernel binary so the choice between VSMP and SMTC is a compile time | ||
| 1685 | decision. | ||
| 1686 | |||
| 1687 | For further information see http://www.linux-mips.org/wiki/34K#SMTC | ||
| 1667 | 1688 | ||
| 1668 | endchoice | 1689 | endchoice |
| 1669 | 1690 | ||
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c index c29511b11d44..534021059629 100644 --- a/arch/mips/alchemy/common/prom.c +++ b/arch/mips/alchemy/common/prom.c | |||
| @@ -43,7 +43,7 @@ int prom_argc; | |||
| 43 | char **prom_argv; | 43 | char **prom_argv; |
| 44 | char **prom_envp; | 44 | char **prom_envp; |
| 45 | 45 | ||
| 46 | void prom_init_cmdline(void) | 46 | void __init prom_init_cmdline(void) |
| 47 | { | 47 | { |
| 48 | int i; | 48 | int i; |
| 49 | 49 | ||
| @@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str) | |||
| 104 | } | 104 | } |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | int prom_get_ethernet_addr(char *ethernet_addr) | 107 | int __init prom_get_ethernet_addr(char *ethernet_addr) |
| 108 | { | 108 | { |
| 109 | char *ethaddr_str; | 109 | char *ethaddr_str; |
| 110 | 110 | ||
| @@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr) | |||
| 123 | 123 | ||
| 124 | return 0; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | EXPORT_SYMBOL(prom_get_ethernet_addr); | ||
| 127 | 126 | ||
| 128 | void __init prom_free_prom_memory(void) | 127 | void __init prom_free_prom_memory(void) |
| 129 | { | 128 | { |
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index ed9bb709c9a3..5042d51b0512 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile | |||
| @@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE | |||
| 59 | hostprogs-y := calc_vmlinuz_load_addr | 59 | hostprogs-y := calc_vmlinuz_load_addr |
| 60 | 60 | ||
| 61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ | 61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ |
| 62 | $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) | 62 | $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) |
| 63 | 63 | ||
| 64 | vmlinuzobjs-y += $(obj)/piggy.o | 64 | vmlinuzobjs-y += $(obj)/piggy.o |
| 65 | 65 | ||
| @@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec | |||
| 105 | vmlinuz.srec: vmlinuz | 105 | vmlinuz.srec: vmlinuz |
| 106 | $(call cmd,objcopy) | 106 | $(call cmd,objcopy) |
| 107 | 107 | ||
| 108 | clean-files := $(objtree)/vmlinuz.* | 108 | clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec} |
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 094c17e38e16..47323ca452dc 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
| @@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE | |||
| 83 | def_bool y | 83 | def_bool y |
| 84 | select SPARSEMEM_STATIC | 84 | select SPARSEMEM_STATIC |
| 85 | depends on CPU_CAVIUM_OCTEON | 85 | depends on CPU_CAVIUM_OCTEON |
| 86 | |||
| 87 | config CAVIUM_OCTEON_HELPER | ||
| 88 | def_bool y | ||
| 89 | depends on OCTEON_ETHERNET || PCI | ||
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index c664c8cc2b42..a5b427909b5c 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c | |||
| @@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
| 41 | return NOTIFY_OK; /* Let default notifier send signals */ | 41 | return NOTIFY_OK; /* Let default notifier send signals */ |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static int cnmips_cu2_setup(void) | 44 | static int __init cnmips_cu2_setup(void) |
| 45 | { | 45 | { |
| 46 | return cu2_notifier(cnmips_cu2_call, 0); | 46 | return cu2_notifier(cnmips_cu2_call, 0); |
| 47 | } | 47 | } |
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index 2fd66db6939e..7f41c5be2190 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile | |||
| @@ -11,4 +11,4 @@ | |||
| 11 | 11 | ||
| 12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o | 12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o |
| 13 | 13 | ||
| 14 | obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o | 14 | obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o |
diff --git a/arch/mips/dec/Platform b/arch/mips/dec/Platform index 3adbcbd95db1..cf55a6f4e720 100644 --- a/arch/mips/dec/Platform +++ b/arch/mips/dec/Platform | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # DECstation family | 2 | # DECstation family |
| 3 | # | 3 | # |
| 4 | platform-$(CONFIG_MACH_DECSTATION) = dec/ | 4 | platform-$(CONFIG_MACH_DECSTATION) += dec/ |
| 5 | cflags-$(CONFIG_MACH_DECSTATION) += \ | 5 | cflags-$(CONFIG_MACH_DECSTATION) += \ |
| 6 | -I$(srctree)/arch/mips/include/asm/mach-dec | 6 | -I$(srctree)/arch/mips/include/asm/mach-dec |
| 7 | libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/ | 7 | libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/ |
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index c63c56bfd184..47d87da379f9 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
| @@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
| 782 | */ | 782 | */ |
| 783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) | 783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) |
| 784 | 784 | ||
| 785 | #else /* !CONFIG_64BIT */ | ||
| 786 | |||
| 787 | #include <asm-generic/atomic64.h> | ||
| 788 | |||
| 785 | #endif /* CONFIG_64BIT */ | 789 | #endif /* CONFIG_64BIT */ |
| 786 | 790 | ||
| 787 | /* | 791 | /* |
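The new include gives 32-bit MIPS builds the spinlock-based atomic64_t from asm-generic, paired with the GENERIC_ATOMIC64 select added to Kconfig earlier in this series, so callers use the same 64-bit atomic API either way. A hypothetical kernel-context caller, for illustration only:

    #include <linux/kernel.h>
    #include <asm/atomic.h>

    static atomic64_t bytes_moved = ATOMIC64_INIT(0);

    static void account_io(long len)
    {
            atomic64_add(len, &bytes_moved);

            if (atomic64_read(&bytes_moved) > (1LL << 32))
                    pr_info("more than 4 GiB transferred\n");
    }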
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 2cb2f0c2c4f8..3532e2c5f098 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h | |||
| @@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v); | |||
| 24 | 24 | ||
| 25 | #define cu2_notifier(fn, pri) \ | 25 | #define cu2_notifier(fn, pri) \ |
| 26 | ({ \ | 26 | ({ \ |
| 27 | static struct notifier_block fn##_nb __cpuinitdata = { \ | 27 | static struct notifier_block fn##_nb = { \ |
| 28 | .notifier_call = fn, \ | 28 | .notifier_call = fn, \ |
| 29 | .priority = pri \ | 29 | .priority = pri \ |
| 30 | }; \ | 30 | }; \ |
diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h index e482fe90fe88..75eddedcfc3e 100644 --- a/arch/mips/include/asm/fcntl.h +++ b/arch/mips/include/asm/fcntl.h | |||
| @@ -56,6 +56,7 @@ | |||
| 56 | */ | 56 | */ |
| 57 | 57 | ||
| 58 | #ifdef CONFIG_32BIT | 58 | #ifdef CONFIG_32BIT |
| 59 | #include <linux/types.h> | ||
| 59 | 60 | ||
| 60 | struct flock { | 61 | struct flock { |
| 61 | short l_type; | 62 | short l_type; |
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 9b9436a4d816..86548da650e7 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h | |||
| @@ -321,6 +321,7 @@ struct gic_intrmask_regs { | |||
| 321 | */ | 321 | */ |
| 322 | struct gic_intr_map { | 322 | struct gic_intr_map { |
| 323 | unsigned int cpunum; /* Directed to this CPU */ | 323 | unsigned int cpunum; /* Directed to this CPU */ |
| 324 | #define GIC_UNUSED 0xdead /* Dummy data */ | ||
| 324 | unsigned int pin; /* Directed to this Pin */ | 325 | unsigned int pin; /* Directed to this Pin */ |
| 325 | unsigned int polarity; /* Polarity : +/- */ | 326 | unsigned int polarity; /* Polarity : +/- */ |
| 326 | unsigned int trigtype; /* Trigger : Edge/Levl */ | 327 | unsigned int trigtype; /* Trigger : Edge/Levl */ |
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h index b74caf65482b..ff9a8b86cb93 100644 --- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h +++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H | 1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H |
| 2 | #define __ASM_MACH_TX49XX_KMALLOC_H | 2 | #define __ASM_MACH_TX49XX_KMALLOC_H |
| 3 | 3 | ||
| 4 | #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES | 4 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES |
| 5 | 5 | ||
| 6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ | 6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ |
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index cea872fc6f5c..d11aa02a956a 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h | |||
| @@ -88,9 +88,6 @@ | |||
| 88 | 88 | ||
| 89 | #define GIC_EXT_INTR(x) x | 89 | #define GIC_EXT_INTR(x) x |
| 90 | 90 | ||
| 91 | /* Dummy data */ | ||
| 92 | #define X 0xdead | ||
| 93 | |||
| 94 | /* External Interrupts used for IPI */ | 91 | /* External Interrupts used for IPI */ |
| 95 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 | 92 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 |
| 96 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 | 93 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index a16beafcea91..e59cd1ac09c2 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
| @@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) |
| 151 | #endif | 151 | #endif |
| 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
| 153 | |||
| 154 | /* | ||
| 155 | * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad | ||
| 156 | * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The | ||
| 157 | * discussion can be found in lkml posting | ||
| 158 | * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is | ||
| 159 | * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html | ||
| 160 | * | ||
| 161 | * It is unclear if the miscompilations mentioned in | ||
| 162 | * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one | ||
| 163 | * until GCC 3.x has been retired before we can apply | ||
| 164 | * https://patchwork.linux-mips.org/patch/1541/ | ||
| 165 | */ | ||
| 166 | |||
| 153 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 167 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
| 154 | 168 | ||
| 155 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 169 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h index 96e28f18dad1..1ca64b4d33d9 100644 --- a/arch/mips/include/asm/siginfo.h +++ b/arch/mips/include/asm/siginfo.h | |||
| @@ -88,6 +88,7 @@ typedef struct siginfo { | |||
| 88 | #ifdef __ARCH_SI_TRAPNO | 88 | #ifdef __ARCH_SI_TRAPNO |
| 89 | int _trapno; /* TRAP # which caused the signal */ | 89 | int _trapno; /* TRAP # which caused the signal */ |
| 90 | #endif | 90 | #endif |
| 91 | short _addr_lsb; | ||
| 91 | } _sigfault; | 92 | } _sigfault; |
| 92 | 93 | ||
| 93 | /* SIGPOLL, SIGXFSZ (To do ...) */ | 94 | /* SIGPOLL, SIGXFSZ (To do ...) */ |
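Adding _addr_lsb to the _sigfault union lets MIPS report the same detail other architectures already do: for a hwpoison SIGBUS (si_code BUS_MCEERR_AR or BUS_MCEERR_AO), si_addr_lsb carries the log2 size of the damaged region around si_addr, typically the page shift. A user-space sketch of a handler that reads it; it assumes a libc whose headers expose si_addr_lsb and the BUS_MCEERR_* codes, and the printf() in the handler is tolerated only for illustration:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
    {
            if (info->si_code == BUS_MCEERR_AR || info->si_code == BUS_MCEERR_AO)
                    printf("memory failure at %p, granularity 2^%d bytes\n",
                           info->si_addr, info->si_addr_lsb);
    }

    int main(void)
    {
            struct sigaction sa = {
                    .sa_sigaction = sigbus_handler,
                    .sa_flags     = SA_SIGINFO,
            };

            sigaction(SIGBUS, &sa, NULL);
            pause();                /* wait for a (hopefully absent) SIGBUS */
            return 0;
    }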
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 2376f2e06e47..70df9c0d3c5b 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
| @@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
| 146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) | 146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) |
| 147 | 147 | ||
| 148 | /* work to do on interrupt/exception return */ | 148 | /* work to do on interrupt/exception return */ |
| 149 | #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) | 149 | #define _TIF_WORK_MASK (0x0000ffef & \ |
| 150 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) | ||
| 150 | /* work to do on any return to u-space */ | 151 | /* work to do on any return to u-space */ |
| 151 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) | 152 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) |
| 152 | 153 | ||
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index baa318a59c97..550725b881d5 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
| @@ -356,16 +356,19 @@ | |||
| 356 | #define __NR_perf_event_open (__NR_Linux + 333) | 356 | #define __NR_perf_event_open (__NR_Linux + 333) |
| 357 | #define __NR_accept4 (__NR_Linux + 334) | 357 | #define __NR_accept4 (__NR_Linux + 334) |
| 358 | #define __NR_recvmmsg (__NR_Linux + 335) | 358 | #define __NR_recvmmsg (__NR_Linux + 335) |
| 359 | #define __NR_fanotify_init (__NR_Linux + 336) | ||
| 360 | #define __NR_fanotify_mark (__NR_Linux + 337) | ||
| 361 | #define __NR_prlimit64 (__NR_Linux + 338) | ||
| 359 | 362 | ||
| 360 | /* | 363 | /* |
| 361 | * Offset of the last Linux o32 flavoured syscall | 364 | * Offset of the last Linux o32 flavoured syscall |
| 362 | */ | 365 | */ |
| 363 | #define __NR_Linux_syscalls 335 | 366 | #define __NR_Linux_syscalls 338 |
| 364 | 367 | ||
| 365 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 368 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
| 366 | 369 | ||
| 367 | #define __NR_O32_Linux 4000 | 370 | #define __NR_O32_Linux 4000 |
| 368 | #define __NR_O32_Linux_syscalls 335 | 371 | #define __NR_O32_Linux_syscalls 338 |
| 369 | 372 | ||
| 370 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 373 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
| 371 | 374 | ||
| @@ -668,16 +671,19 @@ | |||
| 668 | #define __NR_perf_event_open (__NR_Linux + 292) | 671 | #define __NR_perf_event_open (__NR_Linux + 292) |
| 669 | #define __NR_accept4 (__NR_Linux + 293) | 672 | #define __NR_accept4 (__NR_Linux + 293) |
| 670 | #define __NR_recvmmsg (__NR_Linux + 294) | 673 | #define __NR_recvmmsg (__NR_Linux + 294) |
| 674 | #define __NR_fanotify_init (__NR_Linux + 295) | ||
| 675 | #define __NR_fanotify_mark (__NR_Linux + 296) | ||
| 676 | #define __NR_prlimit64 (__NR_Linux + 297) | ||
| 671 | 677 | ||
| 672 | /* | 678 | /* |
| 673 | * Offset of the last Linux 64-bit flavoured syscall | 679 | * Offset of the last Linux 64-bit flavoured syscall |
| 674 | */ | 680 | */ |
| 675 | #define __NR_Linux_syscalls 294 | 681 | #define __NR_Linux_syscalls 297 |
| 676 | 682 | ||
| 677 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 683 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
| 678 | 684 | ||
| 679 | #define __NR_64_Linux 5000 | 685 | #define __NR_64_Linux 5000 |
| 680 | #define __NR_64_Linux_syscalls 294 | 686 | #define __NR_64_Linux_syscalls 297 |
| 681 | 687 | ||
| 682 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 688 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
| 683 | 689 | ||
| @@ -985,16 +991,19 @@ | |||
| 985 | #define __NR_accept4 (__NR_Linux + 297) | 991 | #define __NR_accept4 (__NR_Linux + 297) |
| 986 | #define __NR_recvmmsg (__NR_Linux + 298) | 992 | #define __NR_recvmmsg (__NR_Linux + 298) |
| 987 | #define __NR_getdents64 (__NR_Linux + 299) | 993 | #define __NR_getdents64 (__NR_Linux + 299) |
| 994 | #define __NR_fanotify_init (__NR_Linux + 300) | ||
| 995 | #define __NR_fanotify_mark (__NR_Linux + 301) | ||
| 996 | #define __NR_prlimit64 (__NR_Linux + 302) | ||
| 988 | 997 | ||
| 989 | /* | 998 | /* |
| 990 | * Offset of the last N32 flavoured syscall | 999 | * Offset of the last N32 flavoured syscall |
| 991 | */ | 1000 | */ |
| 992 | #define __NR_Linux_syscalls 299 | 1001 | #define __NR_Linux_syscalls 302 |
| 993 | 1002 | ||
| 994 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1003 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
| 995 | 1004 | ||
| 996 | #define __NR_N32_Linux 6000 | 1005 | #define __NR_N32_Linux 6000 |
| 997 | #define __NR_N32_Linux_syscalls 299 | 1006 | #define __NR_N32_Linux_syscalls 302 |
| 998 | 1007 | ||
| 999 | #ifdef __KERNEL__ | 1008 | #ifdef __KERNEL__ |
| 1000 | 1009 | ||
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform index 6a97230e3d05..ba91be9c21ef 100644 --- a/arch/mips/jz4740/Platform +++ b/arch/mips/jz4740/Platform | |||
| @@ -1,3 +1,3 @@ | |||
| 1 | core-$(CONFIG_MACH_JZ4740) += arch/mips/jz4740/ | 1 | platform-$(CONFIG_MACH_JZ4740) += jz4740/ |
| 2 | cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740 | 2 | cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740 |
| 3 | load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000 | 3 | load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000 |
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 0176ed015c89..32103cc2a257 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c | |||
| @@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs) | |||
| 40 | return -EFAULT; | 40 | return -EFAULT; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | regs->regs[0] = 0; | ||
| 44 | switch (insn.i_format.opcode) { | 43 | switch (insn.i_format.opcode) { |
| 45 | /* | 44 | /* |
| 46 | * jr and jalr are in r_format format. | 45 | * jr and jalr are in r_format format. |
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index b181f2f0ea8e..82ba9f62f49e 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | #include <asm/io.h> | 7 | #include <asm/io.h> |
| 8 | #include <asm/gic.h> | 8 | #include <asm/gic.h> |
| 9 | #include <asm/gcmpregs.h> | 9 | #include <asm/gcmpregs.h> |
| 10 | #include <asm/mips-boards/maltaint.h> | ||
| 11 | #include <asm/irq.h> | 10 | #include <asm/irq.h> |
| 12 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
| 13 | #include <asm-generic/bitops/find.h> | 12 | #include <asm-generic/bitops/find.h> |
| @@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 131 | int i; | 130 | int i; |
| 132 | 131 | ||
| 133 | irq -= _irqbase; | 132 | irq -= _irqbase; |
| 134 | pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); | 133 | pr_debug("%s(%d) called\n", __func__, irq); |
| 135 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 134 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
| 136 | if (cpus_empty(tmp)) | 135 | if (cpus_empty(tmp)) |
| 137 | return -1; | 136 | return -1; |
| @@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
| 222 | /* Setup specifics */ | 221 | /* Setup specifics */ |
| 223 | for (i = 0; i < mapsize; i++) { | 222 | for (i = 0; i < mapsize; i++) { |
| 224 | cpu = intrmap[i].cpunum; | 223 | cpu = intrmap[i].cpunum; |
| 225 | if (cpu == X) | 224 | if (cpu == GIC_UNUSED) |
| 226 | continue; | 225 | continue; |
| 227 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) | 226 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) |
| 228 | continue; | 227 | continue; |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 1f4e2fa64140..f4546e97c60d 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
| @@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, | |||
| 283 | struct pt_regs *regs = args->regs; | 283 | struct pt_regs *regs = args->regs; |
| 284 | int trap = (regs->cp0_cause & 0x7c) >> 2; | 284 | int trap = (regs->cp0_cause & 0x7c) >> 2; |
| 285 | 285 | ||
| 286 | /* Userpace events, ignore. */ | 286 | /* Userspace events, ignore. */ |
| 287 | if (user_mode(regs)) | 287 | if (user_mode(regs)) |
| 288 | return NOTIFY_DONE; | 288 | return NOTIFY_DONE; |
| 289 | 289 | ||
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 80e2ba694bab..29811f043399 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
| @@ -251,7 +251,7 @@ void sp_work_handle_request(void) | |||
| 251 | memset(&tz, 0, sizeof(tz)); | 251 | memset(&tz, 0, sizeof(tz)); |
| 252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, | 252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, |
| 253 | (int)&tz, 0, 0)) == 0) | 253 | (int)&tz, 0, 0)) == 0) |
| 254 | ret.retval = tv.tv_sec; | 254 | ret.retval = tv.tv_sec; |
| 255 | break; | 255 | break; |
| 256 | 256 | ||
| 257 | case MTSP_SYSCALL_EXIT: | 257 | case MTSP_SYSCALL_EXIT: |
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index c2dab140dc98..6343b4a5b835 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
| @@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, | |||
| 341 | { | 341 | { |
| 342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); | 342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); |
| 343 | } | 343 | } |
| 344 | |||
| 345 | SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, | ||
| 346 | u64, a3, u64, a4, int, dfd, const char __user *, pathname) | ||
| 347 | { | ||
| 348 | return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), | ||
| 349 | dfd, pathname); | ||
| 350 | } | ||
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 2340f11dc29c..9a526ba6f257 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
| @@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, | |||
| 103 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 103 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
| 104 | goto out_unlock; | 104 | goto out_unlock; |
| 105 | 105 | ||
| 106 | retval = security_task_setscheduler(p, 0, NULL); | 106 | retval = security_task_setscheduler(p); |
| 107 | if (retval) | 107 | if (retval) |
| 108 | goto out_unlock; | 108 | goto out_unlock; |
| 109 | 109 | ||
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index c51b95ff8644..c8777333e198 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
| @@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
| 536 | { | 536 | { |
| 537 | /* do the secure computing check first */ | 537 | /* do the secure computing check first */ |
| 538 | if (!entryexit) | 538 | if (!entryexit) |
| 539 | secure_computing(regs->regs[0]); | 539 | secure_computing(regs->regs[2]); |
| 540 | 540 | ||
| 541 | if (unlikely(current->audit_context) && entryexit) | 541 | if (unlikely(current->audit_context) && entryexit) |
| 542 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), | 542 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), |
| @@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
| 565 | 565 | ||
| 566 | out: | 566 | out: |
| 567 | if (unlikely(current->audit_context) && !entryexit) | 567 | if (unlikely(current->audit_context) && !entryexit) |
| 568 | audit_syscall_entry(audit_arch(), regs->regs[0], | 568 | audit_syscall_entry(audit_arch(), regs->regs[2], |
| 569 | regs->regs[4], regs->regs[5], | 569 | regs->regs[4], regs->regs[5], |
| 570 | regs->regs[6], regs->regs[7]); | 570 | regs->regs[6], regs->regs[7]); |
| 571 | } | 571 | } |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 17202bbe843f..fbaabad0e6e2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
| @@ -63,9 +63,9 @@ stack_done: | |||
| 63 | sw t0, PT_R7(sp) # set error flag | 63 | sw t0, PT_R7(sp) # set error flag |
| 64 | beqz t0, 1f | 64 | beqz t0, 1f |
| 65 | 65 | ||
| 66 | lw t1, PT_R2(sp) # syscall number | ||
| 66 | negu v0 # error | 67 | negu v0 # error |
| 67 | sw v0, PT_R0(sp) # set flag for syscall | 68 | sw t1, PT_R0(sp) # save it for syscall restarting |
| 68 | # restarting | ||
| 69 | 1: sw v0, PT_R2(sp) # result | 69 | 1: sw v0, PT_R2(sp) # result |
| 70 | 70 | ||
| 71 | o32_syscall_exit: | 71 | o32_syscall_exit: |
| @@ -104,9 +104,9 @@ syscall_trace_entry: | |||
| 104 | sw t0, PT_R7(sp) # set error flag | 104 | sw t0, PT_R7(sp) # set error flag |
| 105 | beqz t0, 1f | 105 | beqz t0, 1f |
| 106 | 106 | ||
| 107 | lw t1, PT_R2(sp) # syscall number | ||
| 107 | negu v0 # error | 108 | negu v0 # error |
| 108 | sw v0, PT_R0(sp) # set flag for syscall | 109 | sw t1, PT_R0(sp) # save it for syscall restarting |
| 109 | # restarting | ||
| 110 | 1: sw v0, PT_R2(sp) # result | 110 | 1: sw v0, PT_R2(sp) # result |
| 111 | 111 | ||
| 112 | j syscall_exit | 112 | j syscall_exit |
| @@ -169,8 +169,7 @@ stackargs: | |||
| 169 | * We probably should handle this case a bit more drastic. | 169 | * We probably should handle this case a bit more drastic. |
| 170 | */ | 170 | */ |
| 171 | bad_stack: | 171 | bad_stack: |
| 172 | negu v0 # error | 172 | li v0, EFAULT |
| 173 | sw v0, PT_R0(sp) | ||
| 174 | sw v0, PT_R2(sp) | 173 | sw v0, PT_R2(sp) |
| 175 | li t0, 1 # set error flag | 174 | li t0, 1 # set error flag |
| 176 | sw t0, PT_R7(sp) | 175 | sw t0, PT_R7(sp) |
| @@ -583,7 +582,10 @@ einval: li v0, -ENOSYS | |||
| 583 | sys sys_rt_tgsigqueueinfo 4 | 582 | sys sys_rt_tgsigqueueinfo 4 |
| 584 | sys sys_perf_event_open 5 | 583 | sys sys_perf_event_open 5 |
| 585 | sys sys_accept4 4 | 584 | sys sys_accept4 4 |
| 586 | sys sys_recvmmsg 5 | 585 | sys sys_recvmmsg 5 /* 4335 */ |
| 586 | sys sys_fanotify_init 2 | ||
| 587 | sys sys_fanotify_mark 6 | ||
| 588 | sys sys_prlimit64 4 | ||
| 587 | .endm | 589 | .endm |
| 588 | 590 | ||
| 589 | /* We pre-compute the number of _instruction_ bytes needed to | 591 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a8a6c596eb04..3f4179283207 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
| @@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
| 66 | sd t0, PT_R7(sp) # set error flag | 66 | sd t0, PT_R7(sp) # set error flag |
| 67 | beqz t0, 1f | 67 | beqz t0, 1f |
| 68 | 68 | ||
| 69 | ld t1, PT_R2(sp) # syscall number | ||
| 69 | dnegu v0 # error | 70 | dnegu v0 # error |
| 70 | sd v0, PT_R0(sp) # set flag for syscall | 71 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 71 | # restarting | ||
| 72 | 1: sd v0, PT_R2(sp) # result | 72 | 1: sd v0, PT_R2(sp) # result |
| 73 | 73 | ||
| 74 | n64_syscall_exit: | 74 | n64_syscall_exit: |
| @@ -109,8 +109,9 @@ syscall_trace_entry: | |||
| 109 | sd t0, PT_R7(sp) # set error flag | 109 | sd t0, PT_R7(sp) # set error flag |
| 110 | beqz t0, 1f | 110 | beqz t0, 1f |
| 111 | 111 | ||
| 112 | ld t1, PT_R2(sp) # syscall number | ||
| 112 | dnegu v0 # error | 113 | dnegu v0 # error |
| 113 | sd v0, PT_R0(sp) # set flag for syscall restarting | 114 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 114 | 1: sd v0, PT_R2(sp) # result | 115 | 1: sd v0, PT_R2(sp) # result |
| 115 | 116 | ||
| 116 | j syscall_exit | 117 | j syscall_exit |
| @@ -416,9 +417,12 @@ sys_call_table: | |||
| 416 | PTR sys_pipe2 | 417 | PTR sys_pipe2 |
| 417 | PTR sys_inotify_init1 | 418 | PTR sys_inotify_init1 |
| 418 | PTR sys_preadv | 419 | PTR sys_preadv |
| 419 | PTR sys_pwritev /* 5390 */ | 420 | PTR sys_pwritev /* 5290 */ |
| 420 | PTR sys_rt_tgsigqueueinfo | 421 | PTR sys_rt_tgsigqueueinfo |
| 421 | PTR sys_perf_event_open | 422 | PTR sys_perf_event_open |
| 422 | PTR sys_accept4 | 423 | PTR sys_accept4 |
| 423 | PTR sys_recvmmsg | 424 | PTR sys_recvmmsg |
| 425 | PTR sys_fanotify_init /* 5295 */ | ||
| 426 | PTR sys_fanotify_mark | ||
| 427 | PTR sys_prlimit64 | ||
| 424 | .size sys_call_table,.-sys_call_table | 428 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index a3d66137731a..f08ece6d8acc 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
| @@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp) | |||
| 65 | sd t0, PT_R7(sp) # set error flag | 65 | sd t0, PT_R7(sp) # set error flag |
| 66 | beqz t0, 1f | 66 | beqz t0, 1f |
| 67 | 67 | ||
| 68 | ld t1, PT_R2(sp) # syscall number | ||
| 68 | dnegu v0 # error | 69 | dnegu v0 # error |
| 69 | sd v0, PT_R0(sp) # set flag for syscall restarting | 70 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 70 | 1: sd v0, PT_R2(sp) # result | 71 | 1: sd v0, PT_R2(sp) # result |
| 71 | 72 | ||
| 72 | local_irq_disable # make sure need_resched and | 73 | local_irq_disable # make sure need_resched and |
| @@ -106,8 +107,9 @@ n32_syscall_trace_entry: | |||
| 106 | sd t0, PT_R7(sp) # set error flag | 107 | sd t0, PT_R7(sp) # set error flag |
| 107 | beqz t0, 1f | 108 | beqz t0, 1f |
| 108 | 109 | ||
| 110 | ld t1, PT_R2(sp) # syscall number | ||
| 109 | dnegu v0 # error | 111 | dnegu v0 # error |
| 110 | sd v0, PT_R0(sp) # set flag for syscall restarting | 112 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 111 | 1: sd v0, PT_R2(sp) # result | 113 | 1: sd v0, PT_R2(sp) # result |
| 112 | 114 | ||
| 113 | j syscall_exit | 115 | j syscall_exit |
| @@ -320,10 +322,10 @@ EXPORT(sysn32_call_table) | |||
| 320 | PTR sys_cacheflush | 322 | PTR sys_cacheflush |
| 321 | PTR sys_cachectl | 323 | PTR sys_cachectl |
| 322 | PTR sys_sysmips | 324 | PTR sys_sysmips |
| 323 | PTR sys_io_setup /* 6200 */ | 325 | PTR compat_sys_io_setup /* 6200 */ |
| 324 | PTR sys_io_destroy | 326 | PTR sys_io_destroy |
| 325 | PTR sys_io_getevents | 327 | PTR compat_sys_io_getevents |
| 326 | PTR sys_io_submit | 328 | PTR compat_sys_io_submit |
| 327 | PTR sys_io_cancel | 329 | PTR sys_io_cancel |
| 328 | PTR sys_exit_group /* 6205 */ | 330 | PTR sys_exit_group /* 6205 */ |
| 329 | PTR sys_lookup_dcookie | 331 | PTR sys_lookup_dcookie |
| @@ -419,5 +421,8 @@ EXPORT(sysn32_call_table) | |||
| 419 | PTR sys_perf_event_open | 421 | PTR sys_perf_event_open |
| 420 | PTR sys_accept4 | 422 | PTR sys_accept4 |
| 421 | PTR compat_sys_recvmmsg | 423 | PTR compat_sys_recvmmsg |
| 422 | PTR sys_getdents | 424 | PTR sys_getdents64 |
| 425 | PTR sys_fanotify_init /* 6300 */ | ||
| 426 | PTR sys_fanotify_mark | ||
| 427 | PTR sys_prlimit64 | ||
| 423 | .size sysn32_call_table,.-sysn32_call_table | 428 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 813689ef2384..78d768a3e19d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
| @@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp) | |||
| 93 | sd t0, PT_R7(sp) # set error flag | 93 | sd t0, PT_R7(sp) # set error flag |
| 94 | beqz t0, 1f | 94 | beqz t0, 1f |
| 95 | 95 | ||
| 96 | ld t1, PT_R2(sp) # syscall number | ||
| 96 | dnegu v0 # error | 97 | dnegu v0 # error |
| 97 | sd v0, PT_R0(sp) # flag for syscall restarting | 98 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 98 | 1: sd v0, PT_R2(sp) # result | 99 | 1: sd v0, PT_R2(sp) # result |
| 99 | 100 | ||
| 100 | o32_syscall_exit: | 101 | o32_syscall_exit: |
| @@ -142,8 +143,9 @@ trace_a_syscall: | |||
| 142 | sd t0, PT_R7(sp) # set error flag | 143 | sd t0, PT_R7(sp) # set error flag |
| 143 | beqz t0, 1f | 144 | beqz t0, 1f |
| 144 | 145 | ||
| 146 | ld t1, PT_R2(sp) # syscall number | ||
| 145 | dnegu v0 # error | 147 | dnegu v0 # error |
| 146 | sd v0, PT_R0(sp) # set flag for syscall restarting | 148 | sd t1, PT_R0(sp) # save it for syscall restarting |
| 147 | 1: sd v0, PT_R2(sp) # result | 149 | 1: sd v0, PT_R2(sp) # result |
| 148 | 150 | ||
| 149 | j syscall_exit | 151 | j syscall_exit |
| @@ -154,8 +156,7 @@ trace_a_syscall: | |||
| 154 | * The stackpointer for a call with more than 4 arguments is bad. | 156 | * The stackpointer for a call with more than 4 arguments is bad. |
| 155 | */ | 157 | */ |
| 156 | bad_stack: | 158 | bad_stack: |
| 157 | dnegu v0 # error | 159 | li v0, EFAULT |
| 158 | sd v0, PT_R0(sp) | ||
| 159 | sd v0, PT_R2(sp) | 160 | sd v0, PT_R2(sp) |
| 160 | li t0, 1 # set error flag | 161 | li t0, 1 # set error flag |
| 161 | sd t0, PT_R7(sp) | 162 | sd t0, PT_R7(sp) |
| @@ -444,10 +445,10 @@ sys_call_table: | |||
| 444 | PTR compat_sys_futex | 445 | PTR compat_sys_futex |
| 445 | PTR compat_sys_sched_setaffinity | 446 | PTR compat_sys_sched_setaffinity |
| 446 | PTR compat_sys_sched_getaffinity /* 4240 */ | 447 | PTR compat_sys_sched_getaffinity /* 4240 */ |
| 447 | PTR sys_io_setup | 448 | PTR compat_sys_io_setup |
| 448 | PTR sys_io_destroy | 449 | PTR sys_io_destroy |
| 449 | PTR sys_io_getevents | 450 | PTR compat_sys_io_getevents |
| 450 | PTR sys_io_submit | 451 | PTR compat_sys_io_submit |
| 451 | PTR sys_io_cancel /* 4245 */ | 452 | PTR sys_io_cancel /* 4245 */ |
| 452 | PTR sys_exit_group | 453 | PTR sys_exit_group |
| 453 | PTR sys32_lookup_dcookie | 454 | PTR sys32_lookup_dcookie |
| @@ -538,5 +539,8 @@ sys_call_table: | |||
| 538 | PTR compat_sys_rt_tgsigqueueinfo | 539 | PTR compat_sys_rt_tgsigqueueinfo |
| 539 | PTR sys_perf_event_open | 540 | PTR sys_perf_event_open |
| 540 | PTR sys_accept4 | 541 | PTR sys_accept4 |
| 541 | PTR compat_sys_recvmmsg | 542 | PTR compat_sys_recvmmsg /* 4335 */ |
| 543 | PTR sys_fanotify_init | ||
| 544 | PTR sys_32_fanotify_mark | ||
| 545 | PTR sys_prlimit64 | ||
| 542 | .size sys_call_table,.-sys_call_table | 546 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 2099d5a4c4b7..5922342bca39 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
| @@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 390 | { | 390 | { |
| 391 | struct rt_sigframe __user *frame; | 391 | struct rt_sigframe __user *frame; |
| 392 | sigset_t set; | 392 | sigset_t set; |
| 393 | stack_t st; | ||
| 394 | int sig; | 393 | int sig; |
| 395 | 394 | ||
| 396 | frame = (struct rt_sigframe __user *) regs.regs[29]; | 395 | frame = (struct rt_sigframe __user *) regs.regs[29]; |
| @@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 411 | else if (sig) | 410 | else if (sig) |
| 412 | force_sig(sig, current); | 411 | force_sig(sig, current); |
| 413 | 412 | ||
| 414 | if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) | ||
| 415 | goto badframe; | ||
| 416 | /* It is more difficult to avoid calling this function than to | 413 | /* It is more difficult to avoid calling this function than to |
| 417 | call it and ignore errors. */ | 414 | call it and ignore errors. */ |
| 418 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); | 415 | do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]); |
| 419 | 416 | ||
| 420 | /* | 417 | /* |
| 421 | * Don't let your children do this ... | 418 | * Don't let your children do this ... |
| @@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
| 550 | struct mips_abi *abi = current->thread.abi; | 547 | struct mips_abi *abi = current->thread.abi; |
| 551 | void *vdso = current->mm->context.vdso; | 548 | void *vdso = current->mm->context.vdso; |
| 552 | 549 | ||
| 553 | switch(regs->regs[0]) { | 550 | if (regs->regs[0]) { |
| 554 | case ERESTART_RESTARTBLOCK: | 551 | switch(regs->regs[2]) { |
| 555 | case ERESTARTNOHAND: | 552 | case ERESTART_RESTARTBLOCK: |
| 556 | regs->regs[2] = EINTR; | 553 | case ERESTARTNOHAND: |
| 557 | break; | ||
| 558 | case ERESTARTSYS: | ||
| 559 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
| 560 | regs->regs[2] = EINTR; | 554 | regs->regs[2] = EINTR; |
| 561 | break; | 555 | break; |
| 556 | case ERESTARTSYS: | ||
| 557 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
| 558 | regs->regs[2] = EINTR; | ||
| 559 | break; | ||
| 560 | } | ||
| 561 | /* fallthrough */ | ||
| 562 | case ERESTARTNOINTR: | ||
| 563 | regs->regs[7] = regs->regs[26]; | ||
| 564 | regs->regs[2] = regs->regs[0]; | ||
| 565 | regs->cp0_epc -= 4; | ||
| 562 | } | 566 | } |
| 563 | /* fallthrough */ | ||
| 564 | case ERESTARTNOINTR: /* Userland will reload $v0. */ | ||
| 565 | regs->regs[7] = regs->regs[26]; | ||
| 566 | regs->cp0_epc -= 8; | ||
| 567 | } | ||
| 568 | 567 | ||
| 569 | regs->regs[0] = 0; /* Don't deal with this again. */ | 568 | regs->regs[0] = 0; /* Don't deal with this again. */ |
| 569 | } | ||
| 570 | 570 | ||
| 571 | if (sig_uses_siginfo(ka)) | 571 | if (sig_uses_siginfo(ka)) |
| 572 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, | 572 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, |
| @@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
| 575 | ret = abi->setup_frame(vdso + abi->signal_return_offset, | 575 | ret = abi->setup_frame(vdso + abi->signal_return_offset, |
| 576 | ka, regs, sig, oldset); | 576 | ka, regs, sig, oldset); |
| 577 | 577 | ||
| 578 | if (ret) | ||
| 579 | return ret; | ||
| 580 | |||
| 578 | spin_lock_irq(&current->sighand->siglock); | 581 | spin_lock_irq(&current->sighand->siglock); |
| 579 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); | 582 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); |
| 580 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 583 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
| @@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs) | |||
| 622 | return; | 625 | return; |
| 623 | } | 626 | } |
| 624 | 627 | ||
| 625 | /* | ||
| 626 | * Who's code doesn't conform to the restartable syscall convention | ||
| 627 | * dies here!!! The li instruction, a single machine instruction, | ||
| 628 | * must directly be followed by the syscall instruction. | ||
| 629 | */ | ||
| 630 | if (regs->regs[0]) { | 628 | if (regs->regs[0]) { |
| 631 | if (regs->regs[2] == ERESTARTNOHAND || | 629 | if (regs->regs[2] == ERESTARTNOHAND || |
| 632 | regs->regs[2] == ERESTARTSYS || | 630 | regs->regs[2] == ERESTARTSYS || |
| 633 | regs->regs[2] == ERESTARTNOINTR) { | 631 | regs->regs[2] == ERESTARTNOINTR) { |
| 632 | regs->regs[2] = regs->regs[0]; | ||
| 634 | regs->regs[7] = regs->regs[26]; | 633 | regs->regs[7] = regs->regs[26]; |
| 635 | regs->cp0_epc -= 8; | 634 | regs->cp0_epc -= 4; |
| 636 | } | 635 | } |
| 637 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { | 636 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { |
| 638 | regs->regs[2] = current->thread.abi->restart; | 637 | regs->regs[2] = current->thread.abi->restart; |
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 2c5df818c65a..ee24d814d5b9 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
| @@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
| 109 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | 109 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
| 110 | { | 110 | { |
| 111 | struct rt_sigframe_n32 __user *frame; | 111 | struct rt_sigframe_n32 __user *frame; |
| 112 | mm_segment_t old_fs; | ||
| 112 | sigset_t set; | 113 | sigset_t set; |
| 113 | stack_t st; | 114 | stack_t st; |
| 114 | s32 sp; | 115 | s32 sp; |
| @@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 143 | 144 | ||
| 144 | /* It is more difficult to avoid calling this function than to | 145 | /* It is more difficult to avoid calling this function than to |
| 145 | call it and ignore errors. */ | 146 | call it and ignore errors. */ |
| 147 | old_fs = get_fs(); | ||
| 148 | set_fs(KERNEL_DS); | ||
| 146 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); | 149 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); |
| 150 | set_fs(old_fs); | ||
| 151 | |||
| 147 | 152 | ||
| 148 | /* | 153 | /* |
| 149 | * Don't let your children do this ... | 154 | * Don't let your children do this ... |
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 69b039ca8d83..33d5a5ce4a29 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
| @@ -109,8 +109,6 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
| 109 | unsigned long value; | 109 | unsigned long value; |
| 110 | unsigned int res; | 110 | unsigned int res; |
| 111 | 111 | ||
| 112 | regs->regs[0] = 0; | ||
| 113 | |||
| 114 | /* | 112 | /* |
| 115 | * This load never faults. | 113 | * This load never faults. |
| 116 | */ | 114 | */ |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 7ba890860d98..469d4019f795 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
| @@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev) | |||
| 44 | 44 | ||
| 45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) | 45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) |
| 46 | { | 46 | { |
| 47 | gfp_t dma_flag; | ||
| 48 | |||
| 47 | /* ignore region specifiers */ | 49 | /* ignore region specifiers */ |
| 48 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 50 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
| 49 | 51 | ||
| 50 | #ifdef CONFIG_ZONE_DMA | 52 | #ifdef CONFIG_ISA |
| 51 | if (dev == NULL) | 53 | if (dev == NULL) |
| 52 | gfp |= __GFP_DMA; | 54 | dma_flag = __GFP_DMA; |
| 53 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) | ||
| 54 | gfp |= __GFP_DMA; | ||
| 55 | else | 55 | else |
| 56 | #endif | 56 | #endif |
| 57 | #ifdef CONFIG_ZONE_DMA32 | 57 | #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) |
| 58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) | 58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
| 59 | gfp |= __GFP_DMA32; | 59 | dma_flag = __GFP_DMA; |
| 60 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
| 61 | dma_flag = __GFP_DMA32; | ||
| 62 | else | ||
| 63 | #endif | ||
| 64 | #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) | ||
| 65 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
| 66 | dma_flag = __GFP_DMA32; | ||
| 67 | else | ||
| 68 | #endif | ||
| 69 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) | ||
| 70 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
| 71 | dma_flag = __GFP_DMA; | ||
| 60 | else | 72 | else |
| 61 | #endif | 73 | #endif |
| 62 | ; | 74 | dma_flag = 0; |
| 63 | 75 | ||
| 64 | /* Don't invoke OOM killer */ | 76 | /* Don't invoke OOM killer */ |
| 65 | gfp |= __GFP_NORETRY; | 77 | gfp |= __GFP_NORETRY; |
| 66 | 78 | ||
| 67 | return gfp; | 79 | return gfp | dma_flag; |
| 68 | } | 80 | } |
| 69 | 81 | ||
| 70 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | 82 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 1ef75cd80a0d..274af3be1442 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | #define tc_lsize 32 | 30 | #define tc_lsize 32 |
| 31 | 31 | ||
| 32 | extern unsigned long icache_way_size, dcache_way_size; | 32 | extern unsigned long icache_way_size, dcache_way_size; |
| 33 | unsigned long tcache_size; | 33 | static unsigned long tcache_size; |
| 34 | 34 | ||
| 35 | #include <asm/r4kcache.h> | 35 | #include <asm/r4kcache.h> |
| 36 | 36 | ||
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 15949b0be811..b79b24afe3a2 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
| @@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap); | |||
| 385 | */ | 385 | */ |
| 386 | 386 | ||
| 387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK | 387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK |
| 388 | #define X GIC_UNUSED | ||
| 389 | |||
| 388 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | 390 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { |
| 389 | { X, X, X, X, 0 }, | 391 | { X, X, X, X, 0 }, |
| 390 | { X, X, X, X, 0 }, | 392 | { X, X, X, X, 0 }, |
| @@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | |||
| 404 | { X, X, X, X, 0 }, | 406 | { X, X, X, X, 0 }, |
| 405 | /* The remainder of this table is initialised by fill_ipi_map */ | 407 | /* The remainder of this table is initialised by fill_ipi_map */ |
| 406 | }; | 408 | }; |
| 409 | #undef X | ||
| 407 | 410 | ||
| 408 | /* | 411 | /* |
| 409 | * GCMP needs to be detected before any SMP initialisation | 412 | * GCMP needs to be detected before any SMP initialisation |
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 71f7d27b0d4c..f31218e17d3c 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c | |||
| @@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void) | |||
| 118 | if (!((pcicvalue == PCIM_H_EA) || | 118 | if (!((pcicvalue == PCIM_H_EA) || |
| 119 | (pcicvalue == PCIM_H_IA_FIX) || | 119 | (pcicvalue == PCIM_H_IA_FIX) || |
| 120 | (pcicvalue == PCIM_H_IA_RR))) { | 120 | (pcicvalue == PCIM_H_IA_RR))) { |
| 121 | pr_err(KERN_ERR "PCI init error!!!\n"); | 121 | pr_err("PCI init error!!!\n"); |
| 122 | /* Not in Host Mode, return ERROR */ | 122 | /* Not in Host Mode, return ERROR */ |
| 123 | return -1; | 123 | return -1; |
| 124 | } | 124 | } |
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c index fadd8744a6bc..e7a12ff304b9 100644 --- a/arch/mips/pnx8550/common/reset.c +++ b/arch/mips/pnx8550/common/reset.c | |||
| @@ -22,29 +22,19 @@ | |||
| 22 | */ | 22 | */ |
| 23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
| 24 | 24 | ||
| 25 | #include <asm/processor.h> | ||
| 25 | #include <asm/reboot.h> | 26 | #include <asm/reboot.h> |
| 26 | #include <glb.h> | 27 | #include <glb.h> |
| 27 | 28 | ||
| 28 | void pnx8550_machine_restart(char *command) | 29 | void pnx8550_machine_restart(char *command) |
| 29 | { | 30 | { |
| 30 | char head[] = "************* Machine restart *************"; | ||
| 31 | char foot[] = "*******************************************"; | ||
| 32 | |||
| 33 | printk("\n\n"); | ||
| 34 | printk("%s\n", head); | ||
| 35 | if (command != NULL) | ||
| 36 | printk("* %s\n", command); | ||
| 37 | printk("%s\n", foot); | ||
| 38 | |||
| 39 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; | 31 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; |
| 40 | } | 32 | } |
| 41 | 33 | ||
| 42 | void pnx8550_machine_halt(void) | 34 | void pnx8550_machine_halt(void) |
| 43 | { | 35 | { |
| 44 | printk("*** Machine halt. (Not implemented) ***\n"); | 36 | while (1) { |
| 45 | } | 37 | if (cpu_wait) |
| 46 | 38 | cpu_wait(); | |
| 47 | void pnx8550_machine_power_off(void) | 39 | } |
| 48 | { | ||
| 49 | printk("*** Machine power off. (Not implemented) ***\n"); | ||
| 50 | } | 40 | } |
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c index 64246c9c875c..43cb3945fdbf 100644 --- a/arch/mips/pnx8550/common/setup.c +++ b/arch/mips/pnx8550/common/setup.c | |||
| @@ -44,7 +44,6 @@ | |||
| 44 | extern void __init board_setup(void); | 44 | extern void __init board_setup(void); |
| 45 | extern void pnx8550_machine_restart(char *); | 45 | extern void pnx8550_machine_restart(char *); |
| 46 | extern void pnx8550_machine_halt(void); | 46 | extern void pnx8550_machine_halt(void); |
| 47 | extern void pnx8550_machine_power_off(void); | ||
| 48 | extern struct resource ioport_resource; | 47 | extern struct resource ioport_resource; |
| 49 | extern struct resource iomem_resource; | 48 | extern struct resource iomem_resource; |
| 50 | extern char *prom_getcmdline(void); | 49 | extern char *prom_getcmdline(void); |
| @@ -100,7 +99,7 @@ void __init plat_mem_setup(void) | |||
| 100 | 99 | ||
| 101 | _machine_restart = pnx8550_machine_restart; | 100 | _machine_restart = pnx8550_machine_restart; |
| 102 | _machine_halt = pnx8550_machine_halt; | 101 | _machine_halt = pnx8550_machine_halt; |
| 103 | pm_power_off = pnx8550_machine_power_off; | 102 | pm_power_off = pnx8550_machine_halt; |
| 104 | 103 | ||
| 105 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers | 104 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers |
| 106 | Bit 1:Enable DAC Powerdown | 105 | Bit 1:Enable DAC Powerdown |
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c index 6aea7fd76993..196a111e2e29 100644 --- a/arch/mn10300/kernel/module.c +++ b/arch/mn10300/kernel/module.c | |||
| @@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 206 | const Elf_Shdr *sechdrs, | 206 | const Elf_Shdr *sechdrs, |
| 207 | struct module *me) | 207 | struct module *me) |
| 208 | { | 208 | { |
| 209 | return module_bug_finalize(hdr, sechdrs, me); | 209 | return 0; |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | /* | 212 | /* |
| @@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 214 | */ | 214 | */ |
| 215 | void module_arch_cleanup(struct module *mod) | 215 | void module_arch_cleanup(struct module *mod) |
| 216 | { | 216 | { |
| 217 | module_bug_cleanup(mod); | ||
| 218 | } | 217 | } |
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c index 1b76719ec1c3..9261217e8d2c 100644 --- a/arch/mn10300/mm/cache.c +++ b/arch/mn10300/mm/cache.c | |||
| @@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page); | |||
| 54 | void flush_icache_range(unsigned long start, unsigned long end) | 54 | void flush_icache_range(unsigned long start, unsigned long end) |
| 55 | { | 55 | { |
| 56 | #ifdef CONFIG_MN10300_CACHE_WBACK | 56 | #ifdef CONFIG_MN10300_CACHE_WBACK |
| 57 | unsigned long addr, size, off; | 57 | unsigned long addr, size, base, off; |
| 58 | struct page *page; | 58 | struct page *page; |
| 59 | pgd_t *pgd; | 59 | pgd_t *pgd; |
| 60 | pud_t *pud; | 60 | pud_t *pud; |
| 61 | pmd_t *pmd; | 61 | pmd_t *pmd; |
| 62 | pte_t *ppte, pte; | 62 | pte_t *ppte, pte; |
| 63 | 63 | ||
| 64 | if (end > 0x80000000UL) { | ||
| 65 | /* addresses above 0xa0000000 do not go through the cache */ | ||
| 66 | if (end > 0xa0000000UL) { | ||
| 67 | end = 0xa0000000UL; | ||
| 68 | if (start >= end) | ||
| 69 | return; | ||
| 70 | } | ||
| 71 | |||
| 72 | /* kernel addresses between 0x80000000 and 0x9fffffff do not | ||
| 73 | * require page tables, so we just map such addresses directly */ | ||
| 74 | base = (start >= 0x80000000UL) ? start : 0x80000000UL; | ||
| 75 | mn10300_dcache_flush_range(base, end); | ||
| 76 | if (base == start) | ||
| 77 | goto invalidate; | ||
| 78 | end = base; | ||
| 79 | } | ||
| 80 | |||
| 64 | for (; start < end; start += size) { | 81 | for (; start < end; start += size) { |
| 65 | /* work out how much of the page to flush */ | 82 | /* work out how much of the page to flush */ |
| 66 | off = start & (PAGE_SIZE - 1); | 83 | off = start & (PAGE_SIZE - 1); |
| @@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
| 104 | } | 121 | } |
| 105 | #endif | 122 | #endif |
| 106 | 123 | ||
| 124 | invalidate: | ||
| 107 | mn10300_icache_inv(); | 125 | mn10300_icache_inv(); |
| 108 | } | 126 | } |
| 109 | EXPORT_SYMBOL(flush_icache_range); | 127 | EXPORT_SYMBOL(flush_icache_range); |
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 159a2b81e90c..6e81bb596e5b 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
| @@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; | 941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; |
| 942 | DEBUGP("NEW num_symtab %lu\n", nsyms); | 942 | DEBUGP("NEW num_symtab %lu\n", nsyms); |
| 943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); | 943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); |
| 944 | return module_bug_finalize(hdr, sechdrs, me); | 944 | return 0; |
| 945 | } | 945 | } |
| 946 | 946 | ||
| 947 | void module_arch_cleanup(struct module *mod) | 947 | void module_arch_cleanup(struct module *mod) |
| 948 | { | 948 | { |
| 949 | deregister_unwind_table(mod); | 949 | deregister_unwind_table(mod); |
| 950 | module_bug_cleanup(mod); | ||
| 951 | } | 950 | } |
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 477c663e0140..49cee9df225b 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c | |||
| @@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 63 | const Elf_Shdr *sechdrs, struct module *me) | 63 | const Elf_Shdr *sechdrs, struct module *me) |
| 64 | { | 64 | { |
| 65 | const Elf_Shdr *sect; | 65 | const Elf_Shdr *sect; |
| 66 | int err; | ||
| 67 | |||
| 68 | err = module_bug_finalize(hdr, sechdrs, me); | ||
| 69 | if (err) | ||
| 70 | return err; | ||
| 71 | 66 | ||
| 72 | /* Apply feature fixups */ | 67 | /* Apply feature fixups */ |
| 73 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); | 68 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); |
| @@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 101 | 96 | ||
| 102 | void module_arch_cleanup(struct module *mod) | 97 | void module_arch_cleanup(struct module *mod) |
| 103 | { | 98 | { |
| 104 | module_bug_cleanup(mod); | ||
| 105 | } | 99 | } |
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c index 5b243bd3eb3b..3dc2a8d262b8 100644 --- a/arch/powerpc/platforms/512x/clock.c +++ b/arch/powerpc/platforms/512x/clock.c | |||
| @@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id) | |||
| 57 | int id_match = 0; | 57 | int id_match = 0; |
| 58 | 58 | ||
| 59 | if (dev == NULL || id == NULL) | 59 | if (dev == NULL || id == NULL) |
| 60 | return NULL; | 60 | return clk; |
| 61 | 61 | ||
| 62 | mutex_lock(&clocks_mutex); | 62 | mutex_lock(&clocks_mutex); |
| 63 | list_for_each_entry(p, &clocks, node) { | 63 | list_for_each_entry(p, &clocks, node) { |
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 45c0cb9b67e6..18c104820198 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c | |||
| @@ -99,7 +99,7 @@ static void __init efika_pcisetup(void) | |||
| 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { |
| 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
| 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); | 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); |
| 102 | return; | 102 | goto out_put; |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | if (bus_range[1] == bus_range[0]) | 105 | if (bus_range[1] == bus_range[0]) |
| @@ -111,12 +111,12 @@ static void __init efika_pcisetup(void) | |||
| 111 | printk(" controlled by %s\n", pcictrl->full_name); | 111 | printk(" controlled by %s\n", pcictrl->full_name); |
| 112 | printk("\n"); | 112 | printk("\n"); |
| 113 | 113 | ||
| 114 | hose = pcibios_alloc_controller(of_node_get(pcictrl)); | 114 | hose = pcibios_alloc_controller(pcictrl); |
| 115 | if (!hose) { | 115 | if (!hose) { |
| 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
| 117 | ": Can't allocate PCI controller structure for %s\n", | 117 | ": Can't allocate PCI controller structure for %s\n", |
| 118 | pcictrl->full_name); | 118 | pcictrl->full_name); |
| 119 | return; | 119 | goto out_put; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | hose->first_busno = bus_range[0]; | 122 | hose->first_busno = bus_range[0]; |
| @@ -124,6 +124,9 @@ static void __init efika_pcisetup(void) | |||
| 124 | hose->ops = &rtas_pci_ops; | 124 | hose->ops = &rtas_pci_ops; |
| 125 | 125 | ||
| 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); | 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); |
| 127 | return; | ||
| 128 | out_put: | ||
| 129 | of_node_put(pcictrl); | ||
| 127 | } | 130 | } |
| 128 | 131 | ||
| 129 | #else | 132 | #else |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 6e905314ad5d..41f3a7eda1de 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
| @@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number) | |||
| 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); | 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); |
| 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); | 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); |
| 327 | 327 | ||
| 328 | /* wait at lease 1 us */ | 328 | /* wait for 1 us */ |
| 329 | udelay(2); | 329 | udelay(1); |
| 330 | 330 | ||
| 331 | /* Deassert reset */ | 331 | /* Deassert reset */ |
| 332 | setbits8(&wkup_gpio->wkup_dvo, reset); | 332 | setbits8(&wkup_gpio->wkup_dvo, reset); |
| 333 | 333 | ||
| 334 | /* wait at least 200ns */ | ||
| 335 | /* 7 ~= (200ns * timebase) / ns2sec */ | ||
| 336 | __delay(7); | ||
| 337 | |||
| 334 | /* Restore pin-muxing */ | 338 | /* Restore pin-muxing */ |
| 335 | out_be32(&simple_gpio->port_config, mux); | 339 | out_be32(&simple_gpio->port_config, mux); |
| 336 | 340 | ||
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 22cfd634c355..f7167ee4604c 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
| @@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 407 | { | 407 | { |
| 408 | vfree(me->arch.syminfo); | 408 | vfree(me->arch.syminfo); |
| 409 | me->arch.syminfo = NULL; | 409 | me->arch.syminfo = NULL; |
| 410 | return module_bug_finalize(hdr, sechdrs, me); | 410 | return 0; |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | void module_arch_cleanup(struct module *mod) | 413 | void module_arch_cleanup(struct module *mod) |
| 414 | { | 414 | { |
| 415 | module_bug_cleanup(mod); | ||
| 416 | } | 415 | } |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index 43adddfe4c04..ae0be697a89e 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
| @@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 149 | int ret = 0; | 149 | int ret = 0; |
| 150 | 150 | ||
| 151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); | 151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); |
| 152 | ret |= module_bug_finalize(hdr, sechdrs, me); | ||
| 153 | 152 | ||
| 154 | return ret; | 153 | return ret; |
| 155 | } | 154 | } |
| 156 | 155 | ||
| 157 | void module_arch_cleanup(struct module *mod) | 156 | void module_arch_cleanup(struct module *mod) |
| 158 | { | 157 | { |
| 159 | module_bug_cleanup(mod); | ||
| 160 | module_dwarf_cleanup(mod); | 158 | module_dwarf_cleanup(mod); |
| 161 | } | 159 | } |
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c index 0c46e398cd8f..63c740a85b4c 100644 --- a/arch/um/drivers/hostaudio_kern.c +++ b/arch/um/drivers/hostaudio_kern.c | |||
| @@ -40,6 +40,11 @@ static char *mixer = HOSTAUDIO_DEV_MIXER; | |||
| 40 | " This is used to specify the host mixer device to the hostaudio driver.\n"\ | 40 | " This is used to specify the host mixer device to the hostaudio driver.\n"\ |
| 41 | " The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" | 41 | " The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" |
| 42 | 42 | ||
| 43 | module_param(dsp, charp, 0644); | ||
| 44 | MODULE_PARM_DESC(dsp, DSP_HELP); | ||
| 45 | module_param(mixer, charp, 0644); | ||
| 46 | MODULE_PARM_DESC(mixer, MIXER_HELP); | ||
| 47 | |||
| 43 | #ifndef MODULE | 48 | #ifndef MODULE |
| 44 | static int set_dsp(char *name, int *add) | 49 | static int set_dsp(char *name, int *add) |
| 45 | { | 50 | { |
| @@ -56,15 +61,6 @@ static int set_mixer(char *name, int *add) | |||
| 56 | } | 61 | } |
| 57 | 62 | ||
| 58 | __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); | 63 | __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); |
| 59 | |||
| 60 | #else /*MODULE*/ | ||
| 61 | |||
| 62 | module_param(dsp, charp, 0644); | ||
| 63 | MODULE_PARM_DESC(dsp, DSP_HELP); | ||
| 64 | |||
| 65 | module_param(mixer, charp, 0644); | ||
| 66 | MODULE_PARM_DESC(mixer, MIXER_HELP); | ||
| 67 | |||
| 68 | #endif | 64 | #endif |
| 69 | 65 | ||
| 70 | /* /dev/dsp file operations */ | 66 | /* /dev/dsp file operations */ |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 2ab233ba32c1..47d0c37897d5 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
| @@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev) | |||
| 255 | netif_wake_queue(dev); | 255 | netif_wake_queue(dev); |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | static int uml_net_set_mac(struct net_device *dev, void *addr) | ||
| 259 | { | ||
| 260 | struct uml_net_private *lp = netdev_priv(dev); | ||
| 261 | struct sockaddr *hwaddr = addr; | ||
| 262 | |||
| 263 | spin_lock_irq(&lp->lock); | ||
| 264 | eth_mac_addr(dev, hwaddr->sa_data); | ||
| 265 | spin_unlock_irq(&lp->lock); | ||
| 266 | |||
| 267 | return 0; | ||
| 268 | } | ||
| 269 | |||
| 270 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) | 258 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) |
| 271 | { | 259 | { |
| 272 | dev->mtu = new_mtu; | 260 | dev->mtu = new_mtu; |
| @@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = { | |||
| 373 | .ndo_start_xmit = uml_net_start_xmit, | 361 | .ndo_start_xmit = uml_net_start_xmit, |
| 374 | .ndo_set_multicast_list = uml_net_set_multicast_list, | 362 | .ndo_set_multicast_list = uml_net_set_multicast_list, |
| 375 | .ndo_tx_timeout = uml_net_tx_timeout, | 363 | .ndo_tx_timeout = uml_net_tx_timeout, |
| 376 | .ndo_set_mac_address = uml_net_set_mac, | 364 | .ndo_set_mac_address = eth_mac_addr, |
| 377 | .ndo_change_mtu = uml_net_change_mtu, | 365 | .ndo_change_mtu = uml_net_change_mtu, |
| 378 | .ndo_validate_addr = eth_validate_addr, | 366 | .ndo_validate_addr = eth_validate_addr, |
| 379 | }; | 367 | }; |
| @@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac, | |||
| 472 | ((*transport->user->init)(&lp->user, dev) != 0)) | 460 | ((*transport->user->init)(&lp->user, dev) != 0)) |
| 473 | goto out_unregister; | 461 | goto out_unregister; |
| 474 | 462 | ||
| 475 | eth_mac_addr(dev, device->mac); | 463 | /* don't use eth_mac_addr, it will not work here */ |
| 464 | memcpy(dev->dev_addr, device->mac, ETH_ALEN); | ||
| 476 | dev->mtu = transport->user->mtu; | 465 | dev->mtu = transport->user->mtu; |
| 477 | dev->netdev_ops = ¨_netdev_ops; | 466 | dev->netdev_ops = ¨_netdev_ops; |
| 478 | dev->ethtool_ops = ¨_net_ethtool_ops; | 467 | dev->ethtool_ops = ¨_net_ethtool_ops; |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 1bcd208c459f..9734994cba1e 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
| @@ -163,6 +163,7 @@ struct ubd { | |||
| 163 | struct scatterlist sg[MAX_SG]; | 163 | struct scatterlist sg[MAX_SG]; |
| 164 | struct request *request; | 164 | struct request *request; |
| 165 | int start_sg, end_sg; | 165 | int start_sg, end_sg; |
| 166 | sector_t rq_pos; | ||
| 166 | }; | 167 | }; |
| 167 | 168 | ||
| 168 | #define DEFAULT_COW { \ | 169 | #define DEFAULT_COW { \ |
| @@ -187,6 +188,7 @@ struct ubd { | |||
| 187 | .request = NULL, \ | 188 | .request = NULL, \ |
| 188 | .start_sg = 0, \ | 189 | .start_sg = 0, \ |
| 189 | .end_sg = 0, \ | 190 | .end_sg = 0, \ |
| 191 | .rq_pos = 0, \ | ||
| 190 | } | 192 | } |
| 191 | 193 | ||
| 192 | /* Protected by ubd_lock */ | 194 | /* Protected by ubd_lock */ |
| @@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q) | |||
| 1228 | { | 1230 | { |
| 1229 | struct io_thread_req *io_req; | 1231 | struct io_thread_req *io_req; |
| 1230 | struct request *req; | 1232 | struct request *req; |
| 1231 | sector_t sector; | ||
| 1232 | int n; | 1233 | int n; |
| 1233 | 1234 | ||
| 1234 | while(1){ | 1235 | while(1){ |
| @@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q) | |||
| 1239 | return; | 1240 | return; |
| 1240 | 1241 | ||
| 1241 | dev->request = req; | 1242 | dev->request = req; |
| 1243 | dev->rq_pos = blk_rq_pos(req); | ||
| 1242 | dev->start_sg = 0; | 1244 | dev->start_sg = 0; |
| 1243 | dev->end_sg = blk_rq_map_sg(q, req, dev->sg); | 1245 | dev->end_sg = blk_rq_map_sg(q, req, dev->sg); |
| 1244 | } | 1246 | } |
| 1245 | 1247 | ||
| 1246 | req = dev->request; | 1248 | req = dev->request; |
| 1247 | sector = blk_rq_pos(req); | ||
| 1248 | while(dev->start_sg < dev->end_sg){ | 1249 | while(dev->start_sg < dev->end_sg){ |
| 1249 | struct scatterlist *sg = &dev->sg[dev->start_sg]; | 1250 | struct scatterlist *sg = &dev->sg[dev->start_sg]; |
| 1250 | 1251 | ||
| @@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q) | |||
| 1256 | return; | 1257 | return; |
| 1257 | } | 1258 | } |
| 1258 | prepare_request(req, io_req, | 1259 | prepare_request(req, io_req, |
| 1259 | (unsigned long long)sector << 9, | 1260 | (unsigned long long)dev->rq_pos << 9, |
| 1260 | sg->offset, sg->length, sg_page(sg)); | 1261 | sg->offset, sg->length, sg_page(sg)); |
| 1261 | 1262 | ||
| 1262 | sector += sg->length >> 9; | ||
| 1263 | n = os_write_file(thread_fd, &io_req, | 1263 | n = os_write_file(thread_fd, &io_req, |
| 1264 | sizeof(struct io_thread_req *)); | 1264 | sizeof(struct io_thread_req *)); |
| 1265 | if(n != sizeof(struct io_thread_req *)){ | 1265 | if(n != sizeof(struct io_thread_req *)){ |
| @@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q) | |||
| 1272 | return; | 1272 | return; |
| 1273 | } | 1273 | } |
| 1274 | 1274 | ||
| 1275 | dev->rq_pos += sg->length >> 9; | ||
| 1275 | dev->start_sg++; | 1276 | dev->start_sg++; |
| 1276 | } | 1277 | } |
| 1277 | dev->end_sg = 0; | 1278 | dev->end_sg = 0; |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 0350311906ae..2d93bdbc9ac0 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | #include <asm/ia32.h> | 34 | #include <asm/ia32.h> |
| 35 | 35 | ||
| 36 | #undef WARN_OLD | 36 | #undef WARN_OLD |
| 37 | #undef CORE_DUMP /* probably broken */ | 37 | #undef CORE_DUMP /* definitely broken */ |
| 38 | 38 | ||
| 39 | static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs); | 39 | static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs); |
| 40 | static int load_aout_library(struct file *); | 40 | static int load_aout_library(struct file *); |
| @@ -131,21 +131,15 @@ static void set_brk(unsigned long start, unsigned long end) | |||
| 131 | * macros to write out all the necessary info. | 131 | * macros to write out all the necessary info. |
| 132 | */ | 132 | */ |
| 133 | 133 | ||
| 134 | static int dump_write(struct file *file, const void *addr, int nr) | 134 | #include <linux/coredump.h> |
| 135 | { | ||
| 136 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | ||
| 137 | } | ||
| 138 | 135 | ||
| 139 | #define DUMP_WRITE(addr, nr) \ | 136 | #define DUMP_WRITE(addr, nr) \ |
| 140 | if (!dump_write(file, (void *)(addr), (nr))) \ | 137 | if (!dump_write(file, (void *)(addr), (nr))) \ |
| 141 | goto end_coredump; | 138 | goto end_coredump; |
| 142 | 139 | ||
| 143 | #define DUMP_SEEK(offset) \ | 140 | #define DUMP_SEEK(offset) \ |
| 144 | if (file->f_op->llseek) { \ | 141 | if (!dump_seek(file, offset)) \ |
| 145 | if (file->f_op->llseek(file, (offset), 0) != (offset)) \ | 142 | goto end_coredump; |
| 146 | goto end_coredump; \ | ||
| 147 | } else \ | ||
| 148 | file->f_pos = (offset) | ||
| 149 | 143 | ||
| 150 | #define START_DATA() (u.u_tsize << PAGE_SHIFT) | 144 | #define START_DATA() (u.u_tsize << PAGE_SHIFT) |
| 151 | #define START_STACK(u) (u.start_stack) | 145 | #define START_STACK(u) (u.start_stack) |
| @@ -217,12 +211,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, | |||
| 217 | dump_size = dump.u_ssize << PAGE_SHIFT; | 211 | dump_size = dump.u_ssize << PAGE_SHIFT; |
| 218 | DUMP_WRITE(dump_start, dump_size); | 212 | DUMP_WRITE(dump_start, dump_size); |
| 219 | } | 213 | } |
| 220 | /* | ||
| 221 | * Finally dump the task struct. Not be used by gdb, but | ||
| 222 | * could be useful | ||
| 223 | */ | ||
| 224 | set_fs(KERNEL_DS); | ||
| 225 | DUMP_WRITE(current, sizeof(*current)); | ||
| 226 | end_coredump: | 214 | end_coredump: |
| 227 | set_fs(fs); | 215 | set_fs(fs); |
| 228 | return has_dumped; | 216 | return has_dumped; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 502e53f999cf..c52e2eb40a1e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | |||
| 652 | return (struct kvm_mmu_page *)page_private(page); | 652 | return (struct kvm_mmu_page *)page_private(page); |
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | static inline u16 kvm_read_fs(void) | ||
| 656 | { | ||
| 657 | u16 seg; | ||
| 658 | asm("mov %%fs, %0" : "=g"(seg)); | ||
| 659 | return seg; | ||
| 660 | } | ||
| 661 | |||
| 662 | static inline u16 kvm_read_gs(void) | ||
| 663 | { | ||
| 664 | u16 seg; | ||
| 665 | asm("mov %%gs, %0" : "=g"(seg)); | ||
| 666 | return seg; | ||
| 667 | } | ||
| 668 | |||
| 669 | static inline u16 kvm_read_ldt(void) | 655 | static inline u16 kvm_read_ldt(void) |
| 670 | { | 656 | { |
| 671 | u16 ldt; | 657 | u16 ldt; |
| @@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void) | |||
| 673 | return ldt; | 659 | return ldt; |
| 674 | } | 660 | } |
| 675 | 661 | ||
| 676 | static inline void kvm_load_fs(u16 sel) | ||
| 677 | { | ||
| 678 | asm("mov %0, %%fs" : : "rm"(sel)); | ||
| 679 | } | ||
| 680 | |||
| 681 | static inline void kvm_load_gs(u16 sel) | ||
| 682 | { | ||
| 683 | asm("mov %0, %%gs" : : "rm"(sel)); | ||
| 684 | } | ||
| 685 | |||
| 686 | static inline void kvm_load_ldt(u16 sel) | 662 | static inline void kvm_load_ldt(u16 sel) |
| 687 | { | 663 | { |
| 688 | asm("lldt %0" : : "rm"(sel)); | 664 | asm("lldt %0" : : "rm"(sel)); |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index fb7a5f052e2b..fb16f17e59be 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
| @@ -61,7 +61,7 @@ struct cstate_entry { | |||
| 61 | unsigned int ecx; | 61 | unsigned int ecx; |
| 62 | } states[ACPI_PROCESSOR_MAX_POWER]; | 62 | } states[ACPI_PROCESSOR_MAX_POWER]; |
| 63 | }; | 63 | }; |
| 64 | static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ | 64 | static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ |
| 65 | 65 | ||
| 66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
| 67 | 67 | ||
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f1efebaf5510..5c5b8f3dddb5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, | |||
| 306 | 306 | ||
| 307 | old_cfg = old_desc->chip_data; | 307 | old_cfg = old_desc->chip_data; |
| 308 | 308 | ||
| 309 | memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); | 309 | cfg->vector = old_cfg->vector; |
| 310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
| 311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
| 312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
| 310 | 313 | ||
| 311 | init_copy_irq_2_pin(old_cfg, cfg, node); | 314 | init_copy_irq_2_pin(old_cfg, cfg, node); |
| 312 | } | 315 | } |
| 313 | 316 | ||
| 314 | static void free_irq_cfg(struct irq_cfg *old_cfg) | 317 | static void free_irq_cfg(struct irq_cfg *cfg) |
| 315 | { | 318 | { |
| 316 | kfree(old_cfg); | 319 | free_cpumask_var(cfg->domain); |
| 320 | free_cpumask_var(cfg->old_domain); | ||
| 321 | kfree(cfg); | ||
| 317 | } | 322 | } |
| 318 | 323 | ||
| 319 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) |
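Note on the io_apic.c hunk above: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer to separately allocated storage, so a whole-struct memcpy() of irq_cfg would overwrite the destination's cpumask pointers with the source's, and a bare kfree() of the struct would leak them. A minimal sketch of the copy-by-field and free-by-field pattern the hunk switches to; the struct layout is abridged and the helper names are hypothetical:

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct irq_cfg_sketch {                 /* abridged stand-in for struct irq_cfg */
            cpumask_var_t domain;           /* a pointer when CPUMASK_OFFSTACK=y */
            cpumask_var_t old_domain;
            u8 vector;
            u8 move_in_progress : 1;
    };

    /* copy field by field so the destination keeps its own cpumask storage */
    static void irq_cfg_sketch_copy(struct irq_cfg_sketch *dst,
                                    const struct irq_cfg_sketch *src)
    {
            dst->vector = src->vector;
            dst->move_in_progress = src->move_in_progress;
            cpumask_copy(dst->domain, src->domain);
            cpumask_copy(dst->old_domain, src->old_domain);
    }

    /* release the off-stack cpumasks before freeing the struct itself */
    static void irq_cfg_sketch_free(struct irq_cfg_sketch *cfg)
    {
            free_cpumask_var(cfg->domain);
            free_cpumask_var(cfg->old_domain);
            kfree(cfg);
    }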
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 490dac63c2d2..f2f9ac7da25c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |||
| 545 | } | 545 | } |
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | 548 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) |
| 549 | { | 549 | { |
| 550 | u32 tfms, xlvl; | 550 | u32 tfms, xlvl; |
| 551 | u32 ebx; | 551 | u32 ebx; |
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 3624e8a0f71b..f668bb1f7d43 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
| @@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], | |||
| 33 | *const __x86_cpu_dev_end[]; | 33 | *const __x86_cpu_dev_end[]; |
| 34 | 34 | ||
| 35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); | 35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
| 36 | extern void get_cpu_cap(struct cpuinfo_x86 *c); | ||
| 36 | 37 | ||
| 37 | #endif | 38 | #endif |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 994230d4dc4e..4f6f679f2799 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
| @@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle) | |||
| 368 | return -ENODEV; | 368 | return -ENODEV; |
| 369 | 369 | ||
| 370 | out_obj = output.pointer; | 370 | out_obj = output.pointer; |
| 371 | if (out_obj->type != ACPI_TYPE_BUFFER) | 371 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
| 372 | return -ENODEV; | 372 | ret = -ENODEV; |
| 373 | goto out_free; | ||
| 374 | } | ||
| 373 | 375 | ||
| 374 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | 376 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); |
| 375 | if (errors) | 377 | if (errors) { |
| 376 | return -ENODEV; | 378 | ret = -ENODEV; |
| 379 | goto out_free; | ||
| 380 | } | ||
| 377 | 381 | ||
| 378 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); | 382 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); |
| 379 | if (!(supported & 0x1)) | 383 | if (!(supported & 0x1)) { |
| 380 | return -ENODEV; | 384 | ret = -ENODEV; |
| 385 | goto out_free; | ||
| 386 | } | ||
| 381 | 387 | ||
| 382 | out_free: | 388 | out_free: |
| 383 | kfree(output.pointer); | 389 | kfree(output.pointer); |
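The pcc-cpufreq hunk above turns the early returns into jumps to the existing out_free label so the buffer allocated by ACPI_ALLOCATE_BUFFER is freed on every exit path. A minimal sketch of that single-exit cleanup shape, reduced to the parts visible in the hunk (the function name is hypothetical):

    #include <linux/acpi.h>
    #include <linux/slab.h>

    static int pcc_osc_check_sketch(struct acpi_buffer *output)
    {
            union acpi_object *out_obj = output->pointer;
            int ret = 0;

            if (out_obj->type != ACPI_TYPE_BUFFER) {
                    ret = -ENODEV;
                    goto out_free;          /* never return without freeing */
            }

            /* ... further checks, each failure setting ret and jumping below ... */

    out_free:
            kfree(output->pointer);         /* came from ACPI_ALLOCATE_BUFFER */
            return ret;
    }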
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 85f69cdeae10..b4389441efbb 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
| 39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; | 39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; |
| 40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
| 41 | c->cpuid_level = cpuid_eax(0); | 41 | c->cpuid_level = cpuid_eax(0); |
| 42 | get_cpu_cap(c); | ||
| 42 | } | 43 | } |
| 43 | } | 44 | } |
| 44 | 45 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5e975298fa81..39aaee5c1ab2 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 141 | address = (low & MASK_BLKPTR_LO) >> 21; | 141 | address = (low & MASK_BLKPTR_LO) >> 21; |
| 142 | if (!address) | 142 | if (!address) |
| 143 | break; | 143 | break; |
| 144 | |||
| 144 | address += MCG_XBLK_ADDR; | 145 | address += MCG_XBLK_ADDR; |
| 145 | } else | 146 | } else |
| 146 | ++address; | 147 | ++address; |
| @@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 148 | if (rdmsr_safe(address, &low, &high)) | 149 | if (rdmsr_safe(address, &low, &high)) |
| 149 | break; | 150 | break; |
| 150 | 151 | ||
| 151 | if (!(high & MASK_VALID_HI)) { | 152 | if (!(high & MASK_VALID_HI)) |
| 152 | if (block) | 153 | continue; |
| 153 | continue; | ||
| 154 | else | ||
| 155 | break; | ||
| 156 | } | ||
| 157 | 154 | ||
| 158 | if (!(high & MASK_CNTP_HI) || | 155 | if (!(high & MASK_CNTP_HI) || |
| 159 | (high & MASK_LOCKED_HI)) | 156 | (high & MASK_LOCKED_HI)) |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index d9368eeda309..169d8804a9f8 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
| @@ -216,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, | |||
| 216 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 216 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
| 217 | &attr_core_power_limit_count.attr, | 217 | &attr_core_power_limit_count.attr, |
| 218 | thermal_attr_group.name); | 218 | thermal_attr_group.name); |
| 219 | if (cpu_has(c, X86_FEATURE_PTS)) | 219 | if (cpu_has(c, X86_FEATURE_PTS)) { |
| 220 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 220 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
| 221 | &attr_package_throttle_count.attr, | 221 | &attr_package_throttle_count.attr, |
| 222 | thermal_attr_group.name); | 222 | thermal_attr_group.name); |
| @@ -224,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, | |||
| 224 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 224 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
| 225 | &attr_package_power_limit_count.attr, | 225 | &attr_package_power_limit_count.attr, |
| 226 | thermal_attr_group.name); | 226 | thermal_attr_group.name); |
| 227 | } | ||
| 227 | 228 | ||
| 228 | return err; | 229 | return err; |
| 229 | } | 230 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index b560db3305be..249015173992 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
| @@ -660,8 +660,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
| 660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
| 661 | int overflow; | 661 | int overflow; |
| 662 | 662 | ||
| 663 | if (!test_bit(idx, cpuc->active_mask)) | 663 | if (!test_bit(idx, cpuc->active_mask)) { |
| 664 | /* catch in-flight IRQs */ | ||
| 665 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
| 666 | handled++; | ||
| 664 | continue; | 667 | continue; |
| 668 | } | ||
| 665 | 669 | ||
| 666 | event = cpuc->events[idx]; | 670 | event = cpuc->events[idx]; |
| 667 | hwc = &event->hw; | 671 | hwc = &event->hw; |
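The perf_event_p4.c hunk above handles NMIs that were already in flight when their counter was disabled: such interrupts must still be claimed or they end up reported as unknown NMIs. The fragment below is the same check annotated with comments, assuming a per-cpu running bitmask that is set when a counter is started (added elsewhere in the same series):

            if (!test_bit(idx, cpuc->active_mask)) {
                    /* counter already stopped, but its final NMI may be in flight */
                    if (__test_and_clear_bit(idx, cpuc->running))
                            handled++;      /* claim it so it is not "unknown" */
                    continue;
            }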
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 410fdb3f1939..7494999141b3 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
| 506 | { | 506 | { |
| 507 | unsigned int irq; | 507 | unsigned int irq; |
| 508 | 508 | ||
| 509 | irq = create_irq(); | 509 | irq = create_irq_nr(0, -1); |
| 510 | if (!irq) | 510 | if (!irq) |
| 511 | return -EINVAL; | 511 | return -EINVAL; |
| 512 | 512 | ||
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e0bc186d7501..1c355c550960 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
| @@ -239,11 +239,10 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 239 | apply_paravirt(pseg, pseg + para->sh_size); | 239 | apply_paravirt(pseg, pseg + para->sh_size); |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | return module_bug_finalize(hdr, sechdrs, me); | 242 | return 0; |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | void module_arch_cleanup(struct module *mod) | 245 | void module_arch_cleanup(struct module *mod) |
| 246 | { | 246 | { |
| 247 | alternatives_smp_module_del(mod); | 247 | alternatives_smp_module_del(mod); |
| 248 | module_bug_cleanup(mod); | ||
| 249 | } | 248 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bc5b9b8d4a33..8a3f9f64f86f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
| 766 | 766 | ||
| 767 | control->iopm_base_pa = iopm_base; | 767 | control->iopm_base_pa = iopm_base; |
| 768 | control->msrpm_base_pa = __pa(svm->msrpm); | 768 | control->msrpm_base_pa = __pa(svm->msrpm); |
| 769 | control->tsc_offset = 0; | ||
| 770 | control->int_ctl = V_INTR_MASKING_MASK; | 769 | control->int_ctl = V_INTR_MASKING_MASK; |
| 771 | 770 | ||
| 772 | init_seg(&save->es); | 771 | init_seg(&save->es); |
| @@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
| 902 | svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; | 901 | svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; |
| 903 | svm->asid_generation = 0; | 902 | svm->asid_generation = 0; |
| 904 | init_vmcb(svm); | 903 | init_vmcb(svm); |
| 904 | svm->vmcb->control.tsc_offset = 0-native_read_tsc(); | ||
| 905 | 905 | ||
| 906 | err = fx_init(&svm->vcpu); | 906 | err = fx_init(&svm->vcpu); |
| 907 | if (err) | 907 | if (err) |
| @@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 3163 | sync_lapic_to_cr8(vcpu); | 3163 | sync_lapic_to_cr8(vcpu); |
| 3164 | 3164 | ||
| 3165 | save_host_msrs(vcpu); | 3165 | save_host_msrs(vcpu); |
| 3166 | fs_selector = kvm_read_fs(); | 3166 | savesegment(fs, fs_selector); |
| 3167 | gs_selector = kvm_read_gs(); | 3167 | savesegment(gs, gs_selector); |
| 3168 | ldt_selector = kvm_read_ldt(); | 3168 | ldt_selector = kvm_read_ldt(); |
| 3169 | svm->vmcb->save.cr2 = vcpu->arch.cr2; | 3169 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
| 3170 | /* required for live migration with NPT */ | 3170 | /* required for live migration with NPT */ |
| @@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 3251 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; | 3251 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
| 3252 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; | 3252 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
| 3253 | 3253 | ||
| 3254 | kvm_load_fs(fs_selector); | ||
| 3255 | kvm_load_gs(gs_selector); | ||
| 3256 | kvm_load_ldt(ldt_selector); | ||
| 3257 | load_host_msrs(vcpu); | 3254 | load_host_msrs(vcpu); |
| 3255 | loadsegment(fs, fs_selector); | ||
| 3256 | #ifdef CONFIG_X86_64 | ||
| 3257 | load_gs_index(gs_selector); | ||
| 3258 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
| 3259 | #else | ||
| 3260 | loadsegment(gs, gs_selector); | ||
| 3261 | #endif | ||
| 3262 | kvm_load_ldt(ldt_selector); | ||
| 3258 | 3263 | ||
| 3259 | reload_tss(vcpu); | 3264 | reload_tss(vcpu); |
| 3260 | 3265 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 49b25eee25ac..7bddfab12013 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 803 | */ | 803 | */ |
| 804 | vmx->host_state.ldt_sel = kvm_read_ldt(); | 804 | vmx->host_state.ldt_sel = kvm_read_ldt(); |
| 805 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | 805 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
| 806 | vmx->host_state.fs_sel = kvm_read_fs(); | 806 | savesegment(fs, vmx->host_state.fs_sel); |
| 807 | if (!(vmx->host_state.fs_sel & 7)) { | 807 | if (!(vmx->host_state.fs_sel & 7)) { |
| 808 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | 808 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
| 809 | vmx->host_state.fs_reload_needed = 0; | 809 | vmx->host_state.fs_reload_needed = 0; |
| @@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 811 | vmcs_write16(HOST_FS_SELECTOR, 0); | 811 | vmcs_write16(HOST_FS_SELECTOR, 0); |
| 812 | vmx->host_state.fs_reload_needed = 1; | 812 | vmx->host_state.fs_reload_needed = 1; |
| 813 | } | 813 | } |
| 814 | vmx->host_state.gs_sel = kvm_read_gs(); | 814 | savesegment(gs, vmx->host_state.gs_sel); |
| 815 | if (!(vmx->host_state.gs_sel & 7)) | 815 | if (!(vmx->host_state.gs_sel & 7)) |
| 816 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | 816 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); |
| 817 | else { | 817 | else { |
| @@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 841 | 841 | ||
| 842 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) | 842 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) |
| 843 | { | 843 | { |
| 844 | unsigned long flags; | ||
| 845 | |||
| 846 | if (!vmx->host_state.loaded) | 844 | if (!vmx->host_state.loaded) |
| 847 | return; | 845 | return; |
| 848 | 846 | ||
| 849 | ++vmx->vcpu.stat.host_state_reload; | 847 | ++vmx->vcpu.stat.host_state_reload; |
| 850 | vmx->host_state.loaded = 0; | 848 | vmx->host_state.loaded = 0; |
| 851 | if (vmx->host_state.fs_reload_needed) | 849 | if (vmx->host_state.fs_reload_needed) |
| 852 | kvm_load_fs(vmx->host_state.fs_sel); | 850 | loadsegment(fs, vmx->host_state.fs_sel); |
| 853 | if (vmx->host_state.gs_ldt_reload_needed) { | 851 | if (vmx->host_state.gs_ldt_reload_needed) { |
| 854 | kvm_load_ldt(vmx->host_state.ldt_sel); | 852 | kvm_load_ldt(vmx->host_state.ldt_sel); |
| 855 | /* | ||
| 856 | * If we have to reload gs, we must take care to | ||
| 857 | * preserve our gs base. | ||
| 858 | */ | ||
| 859 | local_irq_save(flags); | ||
| 860 | kvm_load_gs(vmx->host_state.gs_sel); | ||
| 861 | #ifdef CONFIG_X86_64 | 853 | #ifdef CONFIG_X86_64 |
| 862 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | 854 | load_gs_index(vmx->host_state.gs_sel); |
| 855 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
| 856 | #else | ||
| 857 | loadsegment(gs, vmx->host_state.gs_sel); | ||
| 863 | #endif | 858 | #endif |
| 864 | local_irq_restore(flags); | ||
| 865 | } | 859 | } |
| 866 | reload_tss(); | 860 | reload_tss(); |
| 867 | #ifdef CONFIG_X86_64 | 861 | #ifdef CONFIG_X86_64 |
| @@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
| 2589 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | 2583 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
| 2590 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2584 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2591 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2585 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2592 | vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ | 2586 | vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ |
| 2593 | vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ | 2587 | vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ |
| 2594 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2588 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2595 | #ifdef CONFIG_X86_64 | 2589 | #ifdef CONFIG_X86_64 |
| 2596 | rdmsrl(MSR_FS_BASE, a); | 2590 | rdmsrl(MSR_FS_BASE, a); |
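The svm.c and vmx.c hunks above drop the open-coded kvm_read_fs()/kvm_read_gs()/kvm_load_fs()/kvm_load_gs() helpers in favour of the generic savesegment() and loadsegment() macros; on 64-bit hosts %gs is reloaded with load_gs_index() and the kernel GS base MSR is rewritten, because reloading the selector clobbers it. A minimal sketch of the resulting save/restore sequence, assuming an x86-64 host and the kernel's segment and MSR helpers; it is a fragment of a larger vcpu-run path, not a standalone function:

            u16 fs_sel, gs_sel;

            /* save the host selectors before switching to guest state */
            savesegment(fs, fs_sel);
            savesegment(gs, gs_sel);

            /* ... enter and leave the guest ... */

            /* restore the host selectors afterwards */
            loadsegment(fs, fs_sel);
    #ifdef CONFIG_X86_64
            load_gs_index(gs_sel);
            /* the %gs reload clobbered MSR_KERNEL_GS_BASE; restore the task's base */
            wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
    #else
            loadsegment(gs, gs_sel);
    #endif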
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7a9ef1..9c0d0d399c30 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
| @@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) | |||
| 420 | return -1; | 420 | return -1; |
| 421 | } | 421 | } |
| 422 | 422 | ||
| 423 | for_each_node_mask(i, nodes_parsed) | 423 | for (i = 0; i < num_node_memblks; i++) |
| 424 | e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, | 424 | e820_register_active_regions(memblk_nodeid[i], |
| 425 | nodes[i].end >> PAGE_SHIFT); | 425 | node_memblk_range[i].start >> PAGE_SHIFT, |
| 426 | node_memblk_range[i].end >> PAGE_SHIFT); | ||
| 427 | |||
| 426 | /* for out of order entries in SRAT */ | 428 | /* for out of order entries in SRAT */ |
| 427 | sort_node_map(); | 429 | sort_node_map(); |
| 428 | if (!nodes_cover_memory(nodes)) { | 430 | if (!nodes_cover_memory(nodes)) { |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 009b819f48d0..f1575c9a2572 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
| @@ -674,6 +674,7 @@ static int __init ppro_init(char **cpu_type) | |||
| 674 | case 0x0f: | 674 | case 0x0f: |
| 675 | case 0x16: | 675 | case 0x16: |
| 676 | case 0x17: | 676 | case 0x17: |
| 677 | case 0x1d: | ||
| 677 | *cpu_type = "i386/core_2"; | 678 | *cpu_type = "i386/core_2"; |
| 678 | break; | 679 | break; |
| 679 | case 0x1a: | 680 | case 0x1a: |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 1a5353a753fc..b2bb5aa3b054 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
| @@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void) | |||
| 489 | __init void xen_hvm_init_time_ops(void) | 489 | __init void xen_hvm_init_time_ops(void) |
| 490 | { | 490 | { |
| 491 | /* vector callback is needed otherwise we cannot receive interrupts | 491 | /* vector callback is needed otherwise we cannot receive interrupts |
| 492 | * on cpu > 0 */ | 492 | * on cpu > 0 and at this point we don't know how many cpus are |
| 493 | if (!xen_have_vector_callback && num_present_cpus() > 1) | 493 | * available */ |
| 494 | if (!xen_have_vector_callback) | ||
| 494 | return; | 495 | return; |
| 495 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { | 496 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { |
| 496 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," | 497 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," |
diff --git a/block/bsg.c b/block/bsg.c index 82d58829ba59..0c00870553a3 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
| @@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, | |||
| 426 | /* | 426 | /* |
| 427 | * fill in all the output members | 427 | * fill in all the output members |
| 428 | */ | 428 | */ |
| 429 | hdr->device_status = status_byte(rq->errors); | 429 | hdr->device_status = rq->errors & 0xff; |
| 430 | hdr->transport_status = host_byte(rq->errors); | 430 | hdr->transport_status = host_byte(rq->errors); |
| 431 | hdr->driver_status = driver_byte(rq->errors); | 431 | hdr->driver_status = driver_byte(rq->errors); |
| 432 | hdr->info = 0; | 432 | hdr->info = 0; |
diff --git a/block/elevator.c b/block/elevator.c index 205b09a5bd9e..4e11559aa2b0 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -938,6 +938,7 @@ int elv_register_queue(struct request_queue *q) | |||
| 938 | } | 938 | } |
| 939 | } | 939 | } |
| 940 | kobject_uevent(&e->kobj, KOBJ_ADD); | 940 | kobject_uevent(&e->kobj, KOBJ_ADD); |
| 941 | e->registered = 1; | ||
| 941 | } | 942 | } |
| 942 | return error; | 943 | return error; |
| 943 | } | 944 | } |
| @@ -947,6 +948,7 @@ static void __elv_unregister_queue(struct elevator_queue *e) | |||
| 947 | { | 948 | { |
| 948 | kobject_uevent(&e->kobj, KOBJ_REMOVE); | 949 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
| 949 | kobject_del(&e->kobj); | 950 | kobject_del(&e->kobj); |
| 951 | e->registered = 0; | ||
| 950 | } | 952 | } |
| 951 | 953 | ||
| 952 | void elv_unregister_queue(struct request_queue *q) | 954 | void elv_unregister_queue(struct request_queue *q) |
| @@ -1042,11 +1044,13 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 1042 | 1044 | ||
| 1043 | spin_unlock_irq(q->queue_lock); | 1045 | spin_unlock_irq(q->queue_lock); |
| 1044 | 1046 | ||
| 1045 | __elv_unregister_queue(old_elevator); | 1047 | if (old_elevator->registered) { |
| 1048 | __elv_unregister_queue(old_elevator); | ||
| 1046 | 1049 | ||
| 1047 | err = elv_register_queue(q); | 1050 | err = elv_register_queue(q); |
| 1048 | if (err) | 1051 | if (err) |
| 1049 | goto fail_register; | 1052 | goto fail_register; |
| 1053 | } | ||
| 1050 | 1054 | ||
| 1051 | /* | 1055 | /* |
| 1052 | * finally exit old elevator and turn off BYPASS. | 1056 | * finally exit old elevator and turn off BYPASS. |
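The elevator.c hunk above adds a registered flag so the sysfs kobject is only unregistered, and then re-registered, when it was actually exposed; without the flag, switching schedulers on a queue that never registered its elevator would tear down a kobject that does not exist. A minimal sketch of the flag-guarded register/unregister pattern (type and function names are hypothetical):

    #include <linux/kobject.h>

    struct elv_sketch {
            struct kobject kobj;
            unsigned int registered:1;      /* tracks whether sysfs state exists */
    };

    static void elv_sketch_register(struct elv_sketch *e)
    {
            /* ... kobject_add() and attribute setup go here ... */
            kobject_uevent(&e->kobj, KOBJ_ADD);
            e->registered = 1;              /* set only once registration succeeded */
    }

    static void elv_sketch_unregister(struct elv_sketch *e)
    {
            if (!e->registered)             /* nothing to tear down */
                    return;
            kobject_uevent(&e->kobj, KOBJ_REMOVE);
            kobject_del(&e->kobj);
            e->registered = 0;
    }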
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index b811f2173f6f..88681aca88c5 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
| @@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS | |||
| 105 | 105 | ||
| 106 | Be aware that using this interface can confuse your Embedded | 106 | Be aware that using this interface can confuse your Embedded |
| 107 | Controller in a way that a normal reboot is not enough. You then | 107 | Controller in a way that a normal reboot is not enough. You then |
| 108 | have to power of your system, and remove the laptop battery for | 108 | have to power off your system, and remove the laptop battery for |
| 109 | some seconds. | 109 | some seconds. |
| 110 | An Embedded Controller typically is available on laptops and reads | 110 | An Embedded Controller typically is available on laptops and reads |
| 111 | sensor values like battery state and temperature. | 111 | sensor values like battery state and temperature. |
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index b76848c80be3..6b115f6c4313 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c | |||
| @@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device) | |||
| 382 | device_remove_file(&device->dev, &dev_attr_rrtime); | 382 | device_remove_file(&device->dev, &dev_attr_rrtime); |
| 383 | } | 383 | } |
| 384 | 384 | ||
| 385 | /* Query firmware how many CPUs should be idle */ | 385 | /* |
| 386 | static int acpi_pad_pur(acpi_handle handle, int *num_cpus) | 386 | * Query firmware how many CPUs should be idle |
| 387 | * return -1 on failure | ||
| 388 | */ | ||
| 389 | static int acpi_pad_pur(acpi_handle handle) | ||
| 387 | { | 390 | { |
| 388 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 391 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
| 389 | union acpi_object *package; | 392 | union acpi_object *package; |
| 390 | int rev, num, ret = -EINVAL; | 393 | int num = -1; |
| 391 | 394 | ||
| 392 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) | 395 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) |
| 393 | return -EINVAL; | 396 | return num; |
| 394 | 397 | ||
| 395 | if (!buffer.length || !buffer.pointer) | 398 | if (!buffer.length || !buffer.pointer) |
| 396 | return -EINVAL; | 399 | return num; |
| 397 | 400 | ||
| 398 | package = buffer.pointer; | 401 | package = buffer.pointer; |
| 399 | if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) | 402 | |
| 400 | goto out; | 403 | if (package->type == ACPI_TYPE_PACKAGE && |
| 401 | rev = package->package.elements[0].integer.value; | 404 | package->package.count == 2 && |
| 402 | num = package->package.elements[1].integer.value; | 405 | package->package.elements[0].integer.value == 1) /* rev 1 */ |
| 403 | if (rev != 1 || num < 0) | 406 | |
| 404 | goto out; | 407 | num = package->package.elements[1].integer.value; |
| 405 | *num_cpus = num; | 408 | |
| 406 | ret = 0; | ||
| 407 | out: | ||
| 408 | kfree(buffer.pointer); | 409 | kfree(buffer.pointer); |
| 409 | return ret; | 410 | return num; |
| 410 | } | 411 | } |
| 411 | 412 | ||
| 412 | /* Notify firmware how many CPUs are idle */ | 413 | /* Notify firmware how many CPUs are idle */ |
| @@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle) | |||
| 433 | uint32_t idle_cpus; | 434 | uint32_t idle_cpus; |
| 434 | 435 | ||
| 435 | mutex_lock(&isolated_cpus_lock); | 436 | mutex_lock(&isolated_cpus_lock); |
| 436 | if (acpi_pad_pur(handle, &num_cpus)) { | 437 | num_cpus = acpi_pad_pur(handle); |
| 438 | if (num_cpus < 0) { | ||
| 437 | mutex_unlock(&isolated_cpus_lock); | 439 | mutex_unlock(&isolated_cpus_lock); |
| 438 | return; | 440 | return; |
| 439 | } | 441 | } |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index df85b53a674f..7dad9160f209 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
| @@ -854,6 +854,7 @@ struct acpi_bit_register_info { | |||
| 854 | ACPI_BITMASK_POWER_BUTTON_STATUS | \ | 854 | ACPI_BITMASK_POWER_BUTTON_STATUS | \ |
| 855 | ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ | 855 | ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ |
| 856 | ACPI_BITMASK_RT_CLOCK_STATUS | \ | 856 | ACPI_BITMASK_RT_CLOCK_STATUS | \ |
| 857 | ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ | ||
| 857 | ACPI_BITMASK_WAKE_STATUS) | 858 | ACPI_BITMASK_WAKE_STATUS) |
| 858 | 859 | ||
| 859 | #define ACPI_BITMASK_TIMER_ENABLE 0x0001 | 860 | #define ACPI_BITMASK_TIMER_ENABLE 0x0001 |
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 74c24d517f81..4093522eed45 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
| @@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void) | |||
| 109 | * | 109 | * |
| 110 | * DESCRIPTION: Reacquire the interpreter execution region from within the | 110 | * DESCRIPTION: Reacquire the interpreter execution region from within the |
| 111 | * interpreter code. Failure to enter the interpreter region is a | 111 | * interpreter code. Failure to enter the interpreter region is a |
| 112 | * fatal system error. Used in conjuction with | 112 | * fatal system error. Used in conjunction with |
| 113 | * relinquish_interpreter | 113 | * relinquish_interpreter |
| 114 | * | 114 | * |
| 115 | ******************************************************************************/ | 115 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 22cfcfbd9fff..491191e6cf69 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c | |||
| @@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) | |||
| 149 | 149 | ||
| 150 | /* | 150 | /* |
| 151 | * 16-, 32-, and 64-bit cases must use the move macros that perform | 151 | * 16-, 32-, and 64-bit cases must use the move macros that perform |
| 152 | * endian conversion and/or accomodate hardware that cannot perform | 152 | * endian conversion and/or accommodate hardware that cannot perform |
| 153 | * misaligned memory transfers | 153 | * misaligned memory transfers |
| 154 | */ | 154 | */ |
| 155 | case ACPI_RSC_MOVE16: | 155 | case ACPI_RSC_MOVE16: |
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 907e350f1c7d..fca34ccfd294 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig | |||
| @@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG | |||
| 34 | depends on ACPI_APEI | 34 | depends on ACPI_APEI |
| 35 | help | 35 | help |
| 36 | ERST is a way provided by APEI to save and retrieve hardware | 36 | ERST is a way provided by APEI to save and retrieve hardware |
| 37 | error infomation to and from a persistent store. Enable this | 37 | error information to and from a persistent store. Enable this |
| 38 | if you want to debug and test the ERST kernel support | 38 | if you want to debug and test the ERST kernel support |
| 39 | and firmware implementation. | 39 | and firmware implementation. |
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 73fd0c7487c1..4a904a4bf05f 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c | |||
| @@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub); | |||
| 445 | int apei_resources_request(struct apei_resources *resources, | 445 | int apei_resources_request(struct apei_resources *resources, |
| 446 | const char *desc) | 446 | const char *desc) |
| 447 | { | 447 | { |
| 448 | struct apei_res *res, *res_bak; | 448 | struct apei_res *res, *res_bak = NULL; |
| 449 | struct resource *r; | 449 | struct resource *r; |
| 450 | int rc; | ||
| 450 | 451 | ||
| 451 | apei_resources_sub(resources, &apei_resources_all); | 452 | rc = apei_resources_sub(resources, &apei_resources_all); |
| 453 | if (rc) | ||
| 454 | return rc; | ||
| 452 | 455 | ||
| 456 | rc = -EINVAL; | ||
| 453 | list_for_each_entry(res, &resources->iomem, list) { | 457 | list_for_each_entry(res, &resources->iomem, list) { |
| 454 | r = request_mem_region(res->start, res->end - res->start, | 458 | r = request_mem_region(res->start, res->end - res->start, |
| 455 | desc); | 459 | desc); |
| @@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources, | |||
| 475 | } | 479 | } |
| 476 | } | 480 | } |
| 477 | 481 | ||
| 478 | apei_resources_merge(&apei_resources_all, resources); | 482 | rc = apei_resources_merge(&apei_resources_all, resources); |
| 483 | if (rc) { | ||
| 484 | pr_err(APEI_PFX "Fail to merge resources!\n"); | ||
| 485 | goto err_unmap_ioport; | ||
| 486 | } | ||
| 479 | 487 | ||
| 480 | return 0; | 488 | return 0; |
| 481 | err_unmap_ioport: | 489 | err_unmap_ioport: |
| @@ -491,12 +499,13 @@ err_unmap_iomem: | |||
| 491 | break; | 499 | break; |
| 492 | release_mem_region(res->start, res->end - res->start); | 500 | release_mem_region(res->start, res->end - res->start); |
| 493 | } | 501 | } |
| 494 | return -EINVAL; | 502 | return rc; |
| 495 | } | 503 | } |
| 496 | EXPORT_SYMBOL_GPL(apei_resources_request); | 504 | EXPORT_SYMBOL_GPL(apei_resources_request); |
| 497 | 505 | ||
| 498 | void apei_resources_release(struct apei_resources *resources) | 506 | void apei_resources_release(struct apei_resources *resources) |
| 499 | { | 507 | { |
| 508 | int rc; | ||
| 500 | struct apei_res *res; | 509 | struct apei_res *res; |
| 501 | 510 | ||
| 502 | list_for_each_entry(res, &resources->iomem, list) | 511 | list_for_each_entry(res, &resources->iomem, list) |
| @@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources) | |||
| 504 | list_for_each_entry(res, &resources->ioport, list) | 513 | list_for_each_entry(res, &resources->ioport, list) |
| 505 | release_region(res->start, res->end - res->start); | 514 | release_region(res->start, res->end - res->start); |
| 506 | 515 | ||
| 507 | apei_resources_sub(&apei_resources_all, resources); | 516 | rc = apei_resources_sub(&apei_resources_all, resources); |
| 517 | if (rc) | ||
| 518 | pr_err(APEI_PFX "Fail to sub resources!\n"); | ||
| 508 | } | 519 | } |
| 509 | EXPORT_SYMBOL_GPL(apei_resources_release); | 520 | EXPORT_SYMBOL_GPL(apei_resources_release); |
| 510 | 521 | ||
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 465c885938ee..cf29df69380b 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c | |||
| @@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, | |||
| 426 | 426 | ||
| 427 | static int einj_check_table(struct acpi_table_einj *einj_tab) | 427 | static int einj_check_table(struct acpi_table_einj *einj_tab) |
| 428 | { | 428 | { |
| 429 | if (einj_tab->header_length != sizeof(struct acpi_table_einj)) | 429 | if ((einj_tab->header_length != |
| 430 | (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) | ||
| 431 | && (einj_tab->header_length != sizeof(struct acpi_table_einj))) | ||
| 430 | return -EINVAL; | 432 | return -EINVAL; |
| 431 | if (einj_tab->header.length < sizeof(struct acpi_table_einj)) | 433 | if (einj_tab->header.length < sizeof(struct acpi_table_einj)) |
| 432 | return -EINVAL; | 434 | return -EINVAL; |
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 5281ddda2777..da1228a9a544 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * APEI Error Record Serialization Table debug support | 2 | * APEI Error Record Serialization Table debug support |
| 3 | * | 3 | * |
| 4 | * ERST is a way provided by APEI to save and retrieve hardware error | 4 | * ERST is a way provided by APEI to save and retrieve hardware error |
| 5 | * infomation to and from a persistent store. This file provide the | 5 | * information to and from a persistent store. This file provide the |
| 6 | * debugging/testing support for ERST kernel support and firmware | 6 | * debugging/testing support for ERST kernel support and firmware |
| 7 | * implementation. | 7 | * implementation. |
| 8 | * | 8 | * |
| @@ -111,11 +111,13 @@ retry: | |||
| 111 | goto out; | 111 | goto out; |
| 112 | } | 112 | } |
| 113 | if (len > erst_dbg_buf_len) { | 113 | if (len > erst_dbg_buf_len) { |
| 114 | kfree(erst_dbg_buf); | 114 | void *p; |
| 115 | rc = -ENOMEM; | 115 | rc = -ENOMEM; |
| 116 | erst_dbg_buf = kmalloc(len, GFP_KERNEL); | 116 | p = kmalloc(len, GFP_KERNEL); |
| 117 | if (!erst_dbg_buf) | 117 | if (!p) |
| 118 | goto out; | 118 | goto out; |
| 119 | kfree(erst_dbg_buf); | ||
| 120 | erst_dbg_buf = p; | ||
| 119 | erst_dbg_buf_len = len; | 121 | erst_dbg_buf_len = len; |
| 120 | goto retry; | 122 | goto retry; |
| 121 | } | 123 | } |
| @@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, | |||
| 150 | if (mutex_lock_interruptible(&erst_dbg_mutex)) | 152 | if (mutex_lock_interruptible(&erst_dbg_mutex)) |
| 151 | return -EINTR; | 153 | return -EINTR; |
| 152 | if (usize > erst_dbg_buf_len) { | 154 | if (usize > erst_dbg_buf_len) { |
| 153 | kfree(erst_dbg_buf); | 155 | void *p; |
| 154 | rc = -ENOMEM; | 156 | rc = -ENOMEM; |
| 155 | erst_dbg_buf = kmalloc(usize, GFP_KERNEL); | 157 | p = kmalloc(usize, GFP_KERNEL); |
| 156 | if (!erst_dbg_buf) | 158 | if (!p) |
| 157 | goto out; | 159 | goto out; |
| 160 | kfree(erst_dbg_buf); | ||
| 161 | erst_dbg_buf = p; | ||
| 158 | erst_dbg_buf_len = usize; | 162 | erst_dbg_buf_len = usize; |
| 159 | } | 163 | } |
| 160 | rc = copy_from_user(erst_dbg_buf, ubuf, usize); | 164 | rc = copy_from_user(erst_dbg_buf, ubuf, usize); |
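Both erst-dbg.c hunks above grow the buffer by allocating the new one before freeing the old one: if kmalloc() fails, erst_dbg_buf and erst_dbg_buf_len are left untouched instead of pointing at freed memory. A minimal sketch of that allocate-then-swap pattern built around the same two globals (the helper name is hypothetical):

    #include <linux/slab.h>

    static void *erst_dbg_buf;
    static size_t erst_dbg_buf_len;

    static int erst_dbg_grow(size_t len)
    {
            void *p;

            if (len <= erst_dbg_buf_len)
                    return 0;

            p = kmalloc(len, GFP_KERNEL);
            if (!p)
                    return -ENOMEM;         /* old buffer is still valid */

            kfree(erst_dbg_buf);            /* discard only once the new one exists */
            erst_dbg_buf = p;
            erst_dbg_buf_len = len;
            return 0;
    }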
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 18645f4e83cd..1211c03149e8 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * APEI Error Record Serialization Table support | 2 | * APEI Error Record Serialization Table support |
| 3 | * | 3 | * |
| 4 | * ERST is a way provided by APEI to save and retrieve hardware error | 4 | * ERST is a way provided by APEI to save and retrieve hardware error |
| 5 | * infomation to and from a persistent store. | 5 | * information to and from a persistent store. |
| 6 | * | 6 | * |
| 7 | * For more information about ERST, please refer to ACPI Specification | 7 | * For more information about ERST, please refer to ACPI Specification |
| 8 | * version 4.0, section 17.4. | 8 | * version 4.0, section 17.4. |
| @@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx, | |||
| 266 | { | 266 | { |
| 267 | int rc; | 267 | int rc; |
| 268 | u64 offset; | 268 | u64 offset; |
| 269 | void *src, *dst; | ||
| 270 | |||
| 271 | /* ioremap does not work in interrupt context */ | ||
| 272 | if (in_interrupt()) { | ||
| 273 | pr_warning(ERST_PFX | ||
| 274 | "MOVE_DATA can not be used in interrupt context"); | ||
| 275 | return -EBUSY; | ||
| 276 | } | ||
| 269 | 277 | ||
| 270 | rc = __apei_exec_read_register(entry, &offset); | 278 | rc = __apei_exec_read_register(entry, &offset); |
| 271 | if (rc) | 279 | if (rc) |
| 272 | return rc; | 280 | return rc; |
| 273 | memmove((void *)ctx->dst_base + offset, | 281 | |
| 274 | (void *)ctx->src_base + offset, | 282 | src = ioremap(ctx->src_base + offset, ctx->var2); |
| 275 | ctx->var2); | 283 | if (!src) |
| 284 | return -ENOMEM; | ||
| 285 | dst = ioremap(ctx->dst_base + offset, ctx->var2); | ||
| 286 | if (!dst) | ||
| 287 | return -ENOMEM; | ||
| 288 | |||
| 289 | memmove(dst, src, ctx->var2); | ||
| 290 | |||
| 291 | iounmap(src); | ||
| 292 | iounmap(dst); | ||
| 276 | 293 | ||
| 277 | return 0; | 294 | return 0; |
| 278 | } | 295 | } |
| @@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable); | |||
| 750 | 767 | ||
| 751 | static int erst_check_table(struct acpi_table_erst *erst_tab) | 768 | static int erst_check_table(struct acpi_table_erst *erst_tab) |
| 752 | { | 769 | { |
| 753 | if (erst_tab->header_length != sizeof(struct acpi_table_erst)) | 770 | if ((erst_tab->header_length != |
| 771 | (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) | ||
| 772 | && (erst_tab->header_length != sizeof(struct acpi_table_erst))) | ||
| 754 | return -EINVAL; | 773 | return -EINVAL; |
| 755 | if (erst_tab->header.length < sizeof(struct acpi_table_erst)) | 774 | if (erst_tab->header.length < sizeof(struct acpi_table_erst)) |
| 756 | return -EINVAL; | 775 | return -EINVAL; |
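The erst_exec_move_data() hunk above stops treating the firmware-supplied physical base addresses as directly dereferenceable kernel pointers: each range is mapped with ioremap() for the duration of the copy and unmapped afterwards, and the operation is refused in interrupt context because ioremap() cannot be used there. A minimal sketch of the map, copy, unmap shape, with a hypothetical helper name; unlike the hunk, it also unmaps the source if mapping the destination fails:

    #include <linux/hardirq.h>
    #include <linux/io.h>
    #include <linux/string.h>

    static int move_data_sketch(u64 src_base, u64 dst_base, u64 offset, u64 size)
    {
            void *src, *dst;
            int rc = 0;

            if (in_interrupt())
                    return -EBUSY;          /* ioremap() may sleep */

            src = ioremap(src_base + offset, size);
            if (!src)
                    return -ENOMEM;
            dst = ioremap(dst_base + offset, size);
            if (!dst) {
                    rc = -ENOMEM;
                    goto unmap_src;
            }

            memmove(dst, src, size);

            iounmap(dst);
    unmap_src:
            iounmap(src);
            return rc;
    }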
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 385a6059714a..0d505e59214d 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
| @@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) | |||
| 302 | struct ghes *ghes = NULL; | 302 | struct ghes *ghes = NULL; |
| 303 | int rc = -EINVAL; | 303 | int rc = -EINVAL; |
| 304 | 304 | ||
| 305 | generic = ghes_dev->dev.platform_data; | 305 | generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; |
| 306 | if (!generic->enabled) | 306 | if (!generic->enabled) |
| 307 | return -ENODEV; | 307 | return -ENODEV; |
| 308 | 308 | ||
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 343168d18266..1a3508a7fe03 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c | |||
| @@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) | |||
| 137 | 137 | ||
| 138 | static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) | 138 | static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) |
| 139 | { | 139 | { |
| 140 | struct acpi_hest_generic *generic; | ||
| 141 | struct platform_device *ghes_dev; | 140 | struct platform_device *ghes_dev; |
| 142 | struct ghes_arr *ghes_arr = data; | 141 | struct ghes_arr *ghes_arr = data; |
| 143 | int rc; | 142 | int rc; |
| 144 | 143 | ||
| 145 | if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) | 144 | if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) |
| 146 | return 0; | 145 | return 0; |
| 147 | generic = (struct acpi_hest_generic *)hest_hdr; | 146 | |
| 148 | if (!generic->enabled) | 147 | if (!((struct acpi_hest_generic *)hest_hdr)->enabled) |
| 149 | return 0; | 148 | return 0; |
| 150 | ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); | 149 | ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); |
| 151 | if (!ghes_dev) | 150 | if (!ghes_dev) |
| 152 | return -ENOMEM; | 151 | return -ENOMEM; |
| 153 | ghes_dev->dev.platform_data = generic; | 152 | |
| 153 | rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); | ||
| 154 | if (rc) | ||
| 155 | goto err; | ||
| 156 | |||
| 154 | rc = platform_device_add(ghes_dev); | 157 | rc = platform_device_add(ghes_dev); |
| 155 | if (rc) | 158 | if (rc) |
| 156 | goto err; | 159 | goto err; |
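The ghes.c and hest.c hunks above stop pointing dev.platform_data straight at the HEST table entry; instead platform_device_add_data() copies a pointer-sized blob that the platform core owns and releases together with the device, and the probe routine dereferences the stored pointer to get back to the entry. A minimal sketch of passing a pointer through platform data this way (the wrapper names are hypothetical):

    #include <linux/acpi.h>
    #include <linux/platform_device.h>

    static int hest_publish_sketch(struct platform_device *ghes_dev,
                                   struct acpi_hest_header *hest_hdr)
    {
            /* copy the pointer value itself; the core frees the copy with the device */
            return platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
    }

    static struct acpi_hest_generic *ghes_consume_sketch(struct platform_device *pdev)
    {
            /* platform_data now holds a copy of the pointer stored above */
            return *(struct acpi_hest_generic **)pdev->dev.platform_data;
    }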
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c index 8f8bd736d4ff..542e53903891 100644 --- a/drivers/acpi/atomicio.c +++ b/drivers/acpi/atomicio.c | |||
| @@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr, | |||
| 142 | list_add_tail_rcu(&map->list, &acpi_iomaps); | 142 | list_add_tail_rcu(&map->list, &acpi_iomaps); |
| 143 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); | 143 | spin_unlock_irqrestore(&acpi_iomaps_lock, flags); |
| 144 | 144 | ||
| 145 | return vaddr + (paddr - pg_off); | 145 | return map->vaddr + (paddr - map->paddr); |
| 146 | err_unmap: | 146 | err_unmap: |
| 147 | iounmap(vaddr); | 147 | iounmap(vaddr); |
| 148 | return NULL; | 148 | return NULL; |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index dc58402b0a17..98417201e9ce 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
| @@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = { | |||
| 273 | POWER_SUPPLY_PROP_CYCLE_COUNT, | 273 | POWER_SUPPLY_PROP_CYCLE_COUNT, |
| 274 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | 274 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, |
| 275 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | 275 | POWER_SUPPLY_PROP_VOLTAGE_NOW, |
| 276 | POWER_SUPPLY_PROP_CURRENT_NOW, | ||
| 277 | POWER_SUPPLY_PROP_POWER_NOW, | 276 | POWER_SUPPLY_PROP_POWER_NOW, |
| 278 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, | 277 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, |
| 279 | POWER_SUPPLY_PROP_ENERGY_FULL, | 278 | POWER_SUPPLY_PROP_ENERGY_FULL, |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 2bb28b9d91c4..af308d03f492 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c | |||
| @@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d) | |||
| 183 | { | 183 | { |
| 184 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); | 184 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); |
| 185 | acpi_osi_setup("!Windows 2006"); | 185 | acpi_osi_setup("!Windows 2006"); |
| 186 | acpi_osi_setup("!Windows 2006 SP1"); | ||
| 187 | acpi_osi_setup("!Windows 2006 SP2"); | ||
| 186 | return 0; | 188 | return 0; |
| 187 | } | 189 | } |
| 188 | static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) | 190 | static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) |
| @@ -202,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
| 202 | }, | 204 | }, |
| 203 | }, | 205 | }, |
| 204 | { | 206 | { |
| 207 | /* | ||
| 208 | * The MSI GX723 DSDT contains an NVIF method that must be handled | ||
| 209 | * by the Nvidia driver (e.g. nouveau) when the brightness hotkey | ||
| 210 | * is pressed. nouveau does not implement this yet, so pressing | ||
| 211 | * the hotkey sends the DSDT into an infinite loop. | ||
| 212 | * Add the MSI GX723's DMI information to this table to work | ||
| 213 | * around the issue. | ||
| 214 | * Remove the MSI GX723 entry once nouveau grows the needed support. | ||
| 215 | */ | ||
| 216 | .callback = dmi_disable_osi_vista, | ||
| 217 | .ident = "MSI GX723", | ||
| 218 | .matches = { | ||
| 219 | DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), | ||
| 220 | DMI_MATCH(DMI_PRODUCT_NAME, "GX723"), | ||
| 221 | }, | ||
| 222 | }, | ||
| 223 | { | ||
| 205 | .callback = dmi_disable_osi_vista, | 224 | .callback = dmi_disable_osi_vista, |
| 206 | .ident = "Sony VGN-NS10J_S", | 225 | .ident = "Sony VGN-NS10J_S", |
| 207 | .matches = { | 226 | .matches = { |
| @@ -226,6 +245,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
| 226 | }, | 245 | }, |
| 227 | }, | 246 | }, |
| 228 | { | 247 | { |
| 248 | .callback = dmi_disable_osi_vista, | ||
| 249 | .ident = "Toshiba Satellite L355", | ||
| 250 | .matches = { | ||
| 251 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 252 | DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), | ||
| 253 | }, | ||
| 254 | }, | ||
| 255 | { | ||
| 229 | .callback = dmi_disable_osi_win7, | 256 | .callback = dmi_disable_osi_win7, |
| 230 | .ident = "ASUS K50IJ", | 257 | .ident = "ASUS K50IJ", |
| 231 | .matches = { | 258 | .matches = { |
| @@ -233,6 +260,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
| 233 | DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), | 260 | DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), |
| 234 | }, | 261 | }, |
| 235 | }, | 262 | }, |
| 263 | { | ||
| 264 | .callback = dmi_disable_osi_vista, | ||
| 265 | .ident = "Toshiba P305D", | ||
| 266 | .matches = { | ||
| 267 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 268 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), | ||
| 269 | }, | ||
| 270 | }, | ||
| 236 | 271 | ||
| 237 | /* | 272 | /* |
| 238 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. | 273 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c221ab535d5..310e3b9749cb 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir); | |||
| 55 | static int set_power_nocheck(const struct dmi_system_id *id) | 55 | static int set_power_nocheck(const struct dmi_system_id *id) |
| 56 | { | 56 | { |
| 57 | printk(KERN_NOTICE PREFIX "%s detected - " | 57 | printk(KERN_NOTICE PREFIX "%s detected - " |
| 58 | "disable power check in power transistion\n", id->ident); | 58 | "disable power check in power transition\n", id->ident); |
| 59 | acpi_power_nocheck = 1; | 59 | acpi_power_nocheck = 1; |
| 60 | return 0; | 60 | return 0; |
| 61 | } | 61 | } |
| @@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id) | |||
| 80 | 80 | ||
| 81 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { | 81 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { |
| 82 | /* | 82 | /* |
| 83 | * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. | 83 | * Invoke DSDT corruption work-around on all Toshiba Satellite. |
| 84 | * https://bugzilla.kernel.org/show_bug.cgi?id=14679 | 84 | * https://bugzilla.kernel.org/show_bug.cgi?id=14679 |
| 85 | */ | 85 | */ |
| 86 | { | 86 | { |
| 87 | .callback = set_copy_dsdt, | 87 | .callback = set_copy_dsdt, |
| 88 | .ident = "TOSHIBA Satellite A505", | 88 | .ident = "TOSHIBA Satellite", |
| 89 | .matches = { | 89 | .matches = { |
| 90 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 90 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
| 91 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), | 91 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), |
| 92 | }, | ||
| 93 | }, | ||
| 94 | { | ||
| 95 | .callback = set_copy_dsdt, | ||
| 96 | .ident = "TOSHIBA Satellite L505D", | ||
| 97 | .matches = { | ||
| 98 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 99 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), | ||
| 100 | }, | 92 | }, |
| 101 | }, | 93 | }, |
| 102 | {} | 94 | {} |
| @@ -1027,7 +1019,7 @@ static int __init acpi_init(void) | |||
| 1027 | 1019 | ||
| 1028 | /* | 1020 | /* |
| 1029 | * If the laptop falls into the DMI check table, the power state check | 1021 | * If the laptop falls into the DMI check table, the power state check |
| 1030 | * will be disabled in the course of device power transistion. | 1022 | * will be disabled in the course of device power transition. |
| 1031 | */ | 1023 | */ |
| 1032 | dmi_check_system(power_nocheck_dmi_table); | 1024 | dmi_check_system(power_nocheck_dmi_table); |
| 1033 | 1025 | ||
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 8a3b840c0bb2..d94d2953c974 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
| @@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void) | |||
| 369 | 369 | ||
| 370 | acpi_bus_unregister_driver(&acpi_fan_driver); | 370 | acpi_bus_unregister_driver(&acpi_fan_driver); |
| 371 | 371 | ||
| 372 | #ifdef CONFIG_ACPI_PROCFS | ||
| 372 | remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); | 373 | remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); |
| 374 | #endif | ||
| 373 | 375 | ||
| 374 | return; | 376 | return; |
| 375 | } | 377 | } |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e9699aaed109..bec561c14beb 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
| @@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id) | |||
| 29 | 29 | ||
| 30 | static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { | 30 | static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { |
| 31 | { | 31 | { |
| 32 | set_no_mwait, "IFL91 board", { | ||
| 33 | DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), | ||
| 34 | DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), | ||
| 35 | DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), | ||
| 36 | DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, | ||
| 37 | { | ||
| 38 | set_no_mwait, "Extensa 5220", { | 32 | set_no_mwait, "Extensa 5220", { |
| 39 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), | 33 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), |
| 40 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 34 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
| @@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void) | |||
| 352 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | 346 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
| 353 | ACPI_UINT32_MAX, | 347 | ACPI_UINT32_MAX, |
| 354 | early_init_pdc, NULL, NULL, NULL); | 348 | early_init_pdc, NULL, NULL, NULL); |
| 349 | acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL); | ||
| 355 | } | 350 | } |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 156021892389..347eb21b2353 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
| @@ -850,7 +850,7 @@ static int __init acpi_processor_init(void) | |||
| 850 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | 850 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", |
| 851 | acpi_idle_driver.name); | 851 | acpi_idle_driver.name); |
| 852 | } else { | 852 | } else { |
| 853 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", | 853 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", |
| 854 | cpuidle_get_driver()->name); | 854 | cpuidle_get_driver()->name); |
| 855 | } | 855 | } |
| 856 | 856 | ||
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index ba1bd263d903..3a73a93596e8 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
| @@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module) | |||
| 447 | if (!try_module_get(calling_module)) | 447 | if (!try_module_get(calling_module)) |
| 448 | return -EINVAL; | 448 | return -EINVAL; |
| 449 | 449 | ||
| 450 | /* is_done is set to negative if an error occured, | 450 | /* is_done is set to negative if an error occurred, |
| 451 | * and to postitive if _no_ error occured, but SMM | 451 | * and to positive if _no_ error occurred, but SMM |
| 452 | * was already notified. This avoids double notification | 452 | * was already notified. This avoids double notification |
| 453 | * which might lead to unexpected results... | 453 | * which might lead to unexpected results... |
| 454 | */ | 454 | */ |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index cf82989ae756..4754ff6e70e6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
| @@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) | |||
| 363 | return 0; | 363 | return 0; |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | static int __init init_nvs_nosave(const struct dmi_system_id *d) | ||
| 367 | { | ||
| 368 | acpi_nvs_nosave(); | ||
| 369 | return 0; | ||
| 370 | } | ||
| 371 | |||
| 366 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | 372 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { |
| 367 | { | 373 | { |
| 368 | .callback = init_old_suspend_ordering, | 374 | .callback = init_old_suspend_ordering, |
| @@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 397 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | 403 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), |
| 398 | }, | 404 | }, |
| 399 | }, | 405 | }, |
| 406 | { | ||
| 407 | .callback = init_nvs_nosave, | ||
| 408 | .ident = "Sony Vaio VGN-SR11M", | ||
| 409 | .matches = { | ||
| 410 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
| 411 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), | ||
| 412 | }, | ||
| 413 | }, | ||
| 414 | { | ||
| 415 | .callback = init_nvs_nosave, | ||
| 416 | .ident = "Everex StepNote Series", | ||
| 417 | .matches = { | ||
| 418 | DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), | ||
| 419 | DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), | ||
| 420 | }, | ||
| 421 | }, | ||
| 400 | {}, | 422 | {}, |
| 401 | }; | 423 | }; |
| 402 | #endif /* CONFIG_SUSPEND */ | 424 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 68e2e4582fa2..f8588f81048a 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
| @@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = { | |||
| 100 | ACPI_DEBUG_INIT(ACPI_LV_EVENTS), | 100 | ACPI_DEBUG_INIT(ACPI_LV_EVENTS), |
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | static int param_get_debug_layer(char *buffer, struct kernel_param *kp) | 103 | static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) |
| 104 | { | 104 | { |
| 105 | int result = 0; | 105 | int result = 0; |
| 106 | int i; | 106 | int i; |
| @@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp) | |||
| 128 | return result; | 128 | return result; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static int param_get_debug_level(char *buffer, struct kernel_param *kp) | 131 | static int param_get_debug_level(char *buffer, const struct kernel_param *kp) |
| 132 | { | 132 | { |
| 133 | int result = 0; | 133 | int result = 0; |
| 134 | int i; | 134 | int i; |
| @@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) | |||
| 149 | return result; | 149 | return result; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | module_param_call(debug_layer, param_set_uint, param_get_debug_layer, | 152 | static struct kernel_param_ops param_ops_debug_layer = { |
| 153 | &acpi_dbg_layer, 0644); | 153 | .set = param_set_uint, |
| 154 | module_param_call(debug_level, param_set_uint, param_get_debug_level, | 154 | .get = param_get_debug_layer, |
| 155 | &acpi_dbg_level, 0644); | 155 | }; |
| 156 | |||
| 157 | static struct kernel_param_ops param_ops_debug_level = { | ||
| 158 | .set = param_set_uint, | ||
| 159 | .get = param_get_debug_level, | ||
| 160 | }; | ||
| 161 | |||
| 162 | module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644); | ||
| 163 | module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644); | ||
| 156 | 164 | ||
| 157 | static char trace_method_name[6]; | 165 | static char trace_method_name[6]; |
| 158 | module_param_string(trace_method_name, trace_method_name, 6, 0644); | 166 | module_param_string(trace_method_name, trace_method_name, 6, 0644); |
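The sysfs.c hunk above moves from module_param_call(), which takes bare set/get function pointers, to module_param_cb() with a struct kernel_param_ops, matching the newer const struct kernel_param * prototypes. A minimal sketch of pairing the stock uint setter with a custom getter this way; the getter body is simplified here, while the one in the hunk enumerates the individual debug level names:

    #include <linux/moduleparam.h>

    extern u32 acpi_dbg_level;              /* ACPICA's global debug level */

    static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
    {
            /* write a human-readable value into the page-sized buffer */
            return sprintf(buffer, "0x%08x\n", acpi_dbg_level);
    }

    static struct kernel_param_ops param_ops_debug_level = {
            .set = param_set_uint,          /* reuse the generic uint parser */
            .get = param_get_debug_level,   /* custom formatted output */
    };

    module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);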
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c5fef01b3c95..b83676126598 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
| @@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, | |||
| 59 | "support\n")); | 59 | "support\n")); |
| 60 | *cap |= ACPI_VIDEO_BACKLIGHT; | 60 | *cap |= ACPI_VIDEO_BACKLIGHT; |
| 61 | if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) | 61 | if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) |
| 62 | printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " | 62 | printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " |
| 63 | "control misses _BQC function\n"); | 63 | "cannot determine initial brightness\n"); |
| 64 | /* We have backlight support, no need to scan further */ | 64 | /* We have backlight support, no need to scan further */ |
| 65 | return AE_CTRL_TERMINATE; | 65 | return AE_CTRL_TERMINATE; |
| 66 | } | 66 | } |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ee9ddeb53417..8cb0347dec28 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
| @@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev, | |||
| 3156 | { | 3156 | { |
| 3157 | struct atm_dev *dev; | 3157 | struct atm_dev *dev; |
| 3158 | IADEV *iadev; | 3158 | IADEV *iadev; |
| 3159 | unsigned long flags; | ||
| 3160 | int ret; | 3159 | int ret; |
| 3161 | 3160 | ||
| 3162 | iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); | 3161 | iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); |
| @@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev, | |||
| 3188 | ia_dev[iadev_count] = iadev; | 3187 | ia_dev[iadev_count] = iadev; |
| 3189 | _ia_dev[iadev_count] = dev; | 3188 | _ia_dev[iadev_count] = dev; |
| 3190 | iadev_count++; | 3189 | iadev_count++; |
| 3191 | spin_lock_init(&iadev->misc_lock); | ||
| 3192 | /* First fixes first. I don't want to think about this now. */ | ||
| 3193 | spin_lock_irqsave(&iadev->misc_lock, flags); | ||
| 3194 | if (ia_init(dev) || ia_start(dev)) { | 3190 | if (ia_init(dev) || ia_start(dev)) { |
| 3195 | IF_INIT(printk("IA register failed!\n");) | 3191 | IF_INIT(printk("IA register failed!\n");) |
| 3196 | iadev_count--; | 3192 | iadev_count--; |
| 3197 | ia_dev[iadev_count] = NULL; | 3193 | ia_dev[iadev_count] = NULL; |
| 3198 | _ia_dev[iadev_count] = NULL; | 3194 | _ia_dev[iadev_count] = NULL; |
| 3199 | spin_unlock_irqrestore(&iadev->misc_lock, flags); | ||
| 3200 | ret = -EINVAL; | 3195 | ret = -EINVAL; |
| 3201 | goto err_out_deregister_dev; | 3196 | goto err_out_deregister_dev; |
| 3202 | } | 3197 | } |
| 3203 | spin_unlock_irqrestore(&iadev->misc_lock, flags); | ||
| 3204 | IF_EVENT(printk("iadev_count = %d\n", iadev_count);) | 3198 | IF_EVENT(printk("iadev_count = %d\n", iadev_count);) |
| 3205 | 3199 | ||
| 3206 | iadev->next_board = ia_boards; | 3200 | iadev->next_board = ia_boards; |
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index b2cd20f549cb..077735e0e04b 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h | |||
| @@ -1022,7 +1022,7 @@ typedef struct iadev_t { | |||
| 1022 | struct dle_q rx_dle_q; | 1022 | struct dle_q rx_dle_q; |
| 1023 | struct free_desc_q *rx_free_desc_qhead; | 1023 | struct free_desc_q *rx_free_desc_qhead; |
| 1024 | struct sk_buff_head rx_dma_q; | 1024 | struct sk_buff_head rx_dma_q; |
| 1025 | spinlock_t rx_lock, misc_lock; | 1025 | spinlock_t rx_lock; |
| 1026 | struct atm_vcc **rx_open; /* list of all open VCs */ | 1026 | struct atm_vcc **rx_open; /* list of all open VCs */ |
| 1027 | u16 num_rx_desc, rx_buf_sz, rxing; | 1027 | u16 num_rx_desc, rx_buf_sz, rxing; |
| 1028 | u32 rx_pkt_ram, rx_tmp_cnt; | 1028 | u32 rx_pkt_ram, rx_tmp_cnt; |
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index f916ddf63938..f46138ab38b6 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
| @@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, | |||
| 444 | struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); | 444 | struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); |
| 445 | struct solos_card *card = atmdev->dev_data; | 445 | struct solos_card *card = atmdev->dev_data; |
| 446 | struct sk_buff *skb; | 446 | struct sk_buff *skb; |
| 447 | unsigned int len; | ||
| 447 | 448 | ||
| 448 | spin_lock(&card->cli_queue_lock); | 449 | spin_lock(&card->cli_queue_lock); |
| 449 | skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); | 450 | skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); |
| @@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, | |||
| 451 | if(skb == NULL) | 452 | if(skb == NULL) |
| 452 | return sprintf(buf, "No data.\n"); | 453 | return sprintf(buf, "No data.\n"); |
| 453 | 454 | ||
| 454 | memcpy(buf, skb->data, skb->len); | 455 | len = skb->len; |
| 455 | dev_dbg(&card->dev->dev, "len: %d\n", skb->len); | 456 | memcpy(buf, skb->data, len); |
| 457 | dev_dbg(&card->dev->dev, "len: %d\n", len); | ||
| 456 | 458 | ||
| 457 | kfree_skb(skb); | 459 | kfree_skb(skb); |
| 458 | return skb->len; | 460 | return len; |
| 459 | } | 461 | } |
| 460 | 462 | ||
| 461 | static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) | 463 | static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index de277689da61..4b9359a6f6ca 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
| @@ -488,4 +488,21 @@ config BLK_DEV_HD | |||
| 488 | 488 | ||
| 489 | If unsure, say N. | 489 | If unsure, say N. |
| 490 | 490 | ||
| 491 | config BLK_DEV_RBD | ||
| 492 | tristate "Rados block device (RBD)" | ||
| 493 | depends on INET && EXPERIMENTAL && BLOCK | ||
| 494 | select CEPH_LIB | ||
| 495 | select LIBCRC32C | ||
| 496 | select CRYPTO_AES | ||
| 497 | select CRYPTO | ||
| 498 | default n | ||
| 499 | help | ||
| 500 | Say Y here if you want to include the Rados block device, which stripes | ||
| 501 | a block device over objects stored in the Ceph distributed object | ||
| 502 | store. | ||
| 503 | |||
| 504 | More information at http://ceph.newdream.net/. | ||
| 505 | |||
| 506 | If unsure, say N. | ||
| 507 | |||
| 491 | endif # BLK_DEV | 508 | endif # BLK_DEV |
diff --git a/drivers/block/Makefile b/drivers/block/Makefile index aff5ac925c34..d7f463d6312d 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile | |||
| @@ -37,5 +37,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o | |||
| 37 | 37 | ||
| 38 | obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o | 38 | obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o |
| 39 | obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ | 39 | obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ |
| 40 | obj-$(CONFIG_BLK_DEV_RBD) += rbd.o | ||
| 40 | 41 | ||
| 41 | swim_mod-objs := swim.o swim_asm.o | 42 | swim_mod-objs := swim.o swim_asm.o |
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index e9da874d0419..03688c2da319 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
| @@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, | |||
| 113 | memcpy(buf, dev->bounce_buf+offset, size); | 113 | memcpy(buf, dev->bounce_buf+offset, size); |
| 114 | offset += size; | 114 | offset += size; |
| 115 | flush_kernel_dcache_page(bvec->bv_page); | 115 | flush_kernel_dcache_page(bvec->bv_page); |
| 116 | bvec_kunmap_irq(bvec, &flags); | 116 | bvec_kunmap_irq(buf, &flags); |
| 117 | i++; | 117 | i++; |
| 118 | } | 118 | } |
| 119 | } | 119 | } |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c new file mode 100644 index 000000000000..6ec9d53806c5 --- /dev/null +++ b/drivers/block/rbd.c | |||
| @@ -0,0 +1,1841 @@ | |||
| 1 | /* | ||
| 2 | rbd.c -- Export ceph rados objects as a Linux block device | ||
| 3 | |||
| 4 | |||
| 5 | based on drivers/block/osdblk.c: | ||
| 6 | |||
| 7 | Copyright 2009 Red Hat, Inc. | ||
| 8 | |||
| 9 | This program is free software; you can redistribute it and/or modify | ||
| 10 | it under the terms of the GNU General Public License as published by | ||
| 11 | the Free Software Foundation. | ||
| 12 | |||
| 13 | This program is distributed in the hope that it will be useful, | ||
| 14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | GNU General Public License for more details. | ||
| 17 | |||
| 18 | You should have received a copy of the GNU General Public License | ||
| 19 | along with this program; see the file COPYING. If not, write to | ||
| 20 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 21 | |||
| 22 | |||
| 23 | |||
| 24 | Instructions for use | ||
| 25 | -------------------- | ||
| 26 | |||
| 27 | 1) Map a Linux block device to an existing rbd image. | ||
| 28 | |||
| 29 | Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name] | ||
| 30 | |||
| 31 | $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add | ||
| 32 | |||
| 33 | The snapshot name can be "-" or omitted to map the image read/write. | ||
| 34 | |||
| 35 | 2) List all active blkdev<->object mappings. | ||
| 36 | |||
| 37 | In this example, we have performed step #1 twice, creating two blkdevs, | ||
| 38 | mapped to two separate rados objects in the rados rbd pool. | ||
| 39 | |||
| 40 | $ cat /sys/class/rbd/list | ||
| 41 | #id major client_name pool name snap KB | ||
| 42 | 0 254 client4143 rbd foo - 1024000 | ||
| 43 | |||
| 44 | The columns, in order, are: | ||
| 45 | - blkdev unique id | ||
| 46 | - blkdev assigned major | ||
| 47 | - rados client id | ||
| 48 | - rados pool name | ||
| 49 | - rados block device name | ||
| 50 | - mapped snapshot ("-" if none) | ||
| 51 | - device size in KB | ||
| 52 | |||
| 53 | |||
| 54 | 3) Create a snapshot. | ||
| 55 | |||
| 56 | Usage: <blkdev id> <snapname> | ||
| 57 | |||
| 58 | $ echo "0 mysnap" > /sys/class/rbd/snap_create | ||
| 59 | |||
| 60 | |||
| 61 | 4) List snapshots. | ||
| 62 | |||
| 63 | $ cat /sys/class/rbd/snaps_list | ||
| 64 | #id snap KB | ||
| 65 | 0 - 1024000 (*) | ||
| 66 | 0 foo 1024000 | ||
| 67 | |||
| 68 | The columns, in order, are: | ||
| 69 | - blkdev unique id | ||
| 70 | - snapshot name, '-' means none (active read/write version) | ||
| 71 | - size of device at time of snapshot | ||
| 72 | - the (*) indicates this is the active version | ||
| 73 | |||
| 74 | 5) Roll back to a snapshot. | ||
| 75 | |||
| 76 | Usage: <blkdev id> <snapname> | ||
| 77 | |||
| 78 | $ echo "0 mysnap" > /sys/class/rbd/snap_rollback | ||
| 79 | |||
| 80 | |||
| 81 | 6) Map an image using a snapshot. | ||
| 82 | |||
| 83 | A snapshot mapping is read-only. This is done by passing | ||
| 84 | snap=<snapname> to the options when adding a device. | ||
| 85 | |||
| 86 | $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add | ||
| 87 | |||
| 88 | |||
| 89 | 7) Remove an active blkdev<->rbd image mapping. | ||
| 90 | |||
| 91 | In this example, we remove the mapping with blkdev unique id 1. | ||
| 92 | |||
| 93 | $ echo 1 > /sys/class/rbd/remove | ||
| 94 | |||
| 95 | |||
| 96 | NOTE: The actual creation and deletion of rados objects is outside the scope | ||
| 97 | of this driver. | ||
| 98 | |||
| 99 | */ | ||
| 100 | |||
| 101 | #include <linux/ceph/libceph.h> | ||
| 102 | #include <linux/ceph/osd_client.h> | ||
| 103 | #include <linux/ceph/mon_client.h> | ||
| 104 | #include <linux/ceph/decode.h> | ||
| 105 | |||
| 106 | #include <linux/kernel.h> | ||
| 107 | #include <linux/device.h> | ||
| 108 | #include <linux/module.h> | ||
| 109 | #include <linux/fs.h> | ||
| 110 | #include <linux/blkdev.h> | ||
| 111 | |||
| 112 | #include "rbd_types.h" | ||
| 113 | |||
| 114 | #define DRV_NAME "rbd" | ||
| 115 | #define DRV_NAME_LONG "rbd (rados block device)" | ||
| 116 | |||
| 117 | #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ | ||
| 118 | |||
| 119 | #define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX)) | ||
| 120 | #define RBD_MAX_POOL_NAME_LEN 64 | ||
| 121 | #define RBD_MAX_SNAP_NAME_LEN 32 | ||
| 122 | #define RBD_MAX_OPT_LEN 1024 | ||
| 123 | |||
| 124 | #define RBD_SNAP_HEAD_NAME "-" | ||
| 125 | |||
| 126 | #define DEV_NAME_LEN 32 | ||
| 127 | |||
| 128 | /* | ||
| 129 | * block device image metadata (in-memory version) | ||
| 130 | */ | ||
| 131 | struct rbd_image_header { | ||
| 132 | u64 image_size; | ||
| 133 | char block_name[32]; | ||
| 134 | __u8 obj_order; | ||
| 135 | __u8 crypt_type; | ||
| 136 | __u8 comp_type; | ||
| 137 | struct rw_semaphore snap_rwsem; | ||
| 138 | struct ceph_snap_context *snapc; | ||
| 139 | size_t snap_names_len; | ||
| 140 | u64 snap_seq; | ||
| 141 | u32 total_snaps; | ||
| 142 | |||
| 143 | char *snap_names; | ||
| 144 | u64 *snap_sizes; | ||
| 145 | }; | ||
| 146 | |||
| 147 | /* | ||
| 148 | * an instance of the client. multiple devices may share a client. | ||
| 149 | */ | ||
| 150 | struct rbd_client { | ||
| 151 | struct ceph_client *client; | ||
| 152 | struct kref kref; | ||
| 153 | struct list_head node; | ||
| 154 | }; | ||
| 155 | |||
| 156 | /* | ||
| 157 | * a single io request | ||
| 158 | */ | ||
| 159 | struct rbd_request { | ||
| 160 | struct request *rq; /* blk layer request */ | ||
| 161 | struct bio *bio; /* cloned bio */ | ||
| 162 | struct page **pages; /* list of used pages */ | ||
| 163 | u64 len; | ||
| 164 | }; | ||
| 165 | |||
| 166 | /* | ||
| 167 | * a single device | ||
| 168 | */ | ||
| 169 | struct rbd_device { | ||
| 170 | int id; /* blkdev unique id */ | ||
| 171 | |||
| 172 | int major; /* blkdev assigned major */ | ||
| 173 | struct gendisk *disk; /* blkdev's gendisk and rq */ | ||
| 174 | struct request_queue *q; | ||
| 175 | |||
| 176 | struct ceph_client *client; | ||
| 177 | struct rbd_client *rbd_client; | ||
| 178 | |||
| 179 | char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ | ||
| 180 | |||
| 181 | spinlock_t lock; /* queue lock */ | ||
| 182 | |||
| 183 | struct rbd_image_header header; | ||
| 184 | char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */ | ||
| 185 | int obj_len; | ||
| 186 | char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */ | ||
| 187 | char pool_name[RBD_MAX_POOL_NAME_LEN]; | ||
| 188 | int poolid; | ||
| 189 | |||
| 190 | char snap_name[RBD_MAX_SNAP_NAME_LEN]; | ||
| 191 | u32 cur_snap; /* index+1 of current snapshot within snap context | ||
| 192 | 0 - for the head */ | ||
| 193 | int read_only; | ||
| 194 | |||
| 195 | struct list_head node; | ||
| 196 | }; | ||
| 197 | |||
| 198 | static spinlock_t node_lock; /* protects client get/put */ | ||
| 199 | |||
| 200 | static struct class *class_rbd; /* /sys/class/rbd */ | ||
| 201 | static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ | ||
| 202 | static LIST_HEAD(rbd_dev_list); /* devices */ | ||
| 203 | static LIST_HEAD(rbd_client_list); /* clients */ | ||
| 204 | |||
| 205 | |||
| 206 | static int rbd_open(struct block_device *bdev, fmode_t mode) | ||
| 207 | { | ||
| 208 | struct gendisk *disk = bdev->bd_disk; | ||
| 209 | struct rbd_device *rbd_dev = disk->private_data; | ||
| 210 | |||
| 211 | set_device_ro(bdev, rbd_dev->read_only); | ||
| 212 | |||
| 213 | if ((mode & FMODE_WRITE) && rbd_dev->read_only) | ||
| 214 | return -EROFS; | ||
| 215 | |||
| 216 | return 0; | ||
| 217 | } | ||
| 218 | |||
| 219 | static const struct block_device_operations rbd_bd_ops = { | ||
| 220 | .owner = THIS_MODULE, | ||
| 221 | .open = rbd_open, | ||
| 222 | }; | ||
| 223 | |||
| 224 | /* | ||
| 225 | * Initialize an rbd client instance. | ||
| 226 | * We own *opt. | ||
| 227 | */ | ||
| 228 | static struct rbd_client *rbd_client_create(struct ceph_options *opt) | ||
| 229 | { | ||
| 230 | struct rbd_client *rbdc; | ||
| 231 | int ret = -ENOMEM; | ||
| 232 | |||
| 233 | dout("rbd_client_create\n"); | ||
| 234 | rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); | ||
| 235 | if (!rbdc) | ||
| 236 | goto out_opt; | ||
| 237 | |||
| 238 | kref_init(&rbdc->kref); | ||
| 239 | INIT_LIST_HEAD(&rbdc->node); | ||
| 240 | |||
| 241 | rbdc->client = ceph_create_client(opt, rbdc); | ||
| 242 | if (IS_ERR(rbdc->client)) | ||
| 243 | goto out_rbdc; | ||
| 244 | opt = NULL; /* Now rbdc->client is responsible for opt */ | ||
| 245 | |||
| 246 | ret = ceph_open_session(rbdc->client); | ||
| 247 | if (ret < 0) | ||
| 248 | goto out_err; | ||
| 249 | |||
| 250 | spin_lock(&node_lock); | ||
| 251 | list_add_tail(&rbdc->node, &rbd_client_list); | ||
| 252 | spin_unlock(&node_lock); | ||
| 253 | |||
| 254 | dout("rbd_client_create created %p\n", rbdc); | ||
| 255 | return rbdc; | ||
| 256 | |||
| 257 | out_err: | ||
| 258 | ceph_destroy_client(rbdc->client); | ||
| 259 | out_rbdc: | ||
| 260 | kfree(rbdc); | ||
| 261 | out_opt: | ||
| 262 | if (opt) | ||
| 263 | ceph_destroy_options(opt); | ||
| 264 | return ERR_PTR(ret); | ||
| 265 | } | ||
| 266 | |||
| 267 | /* | ||
| 268 | * Find a ceph client with specific addr and configuration. | ||
| 269 | */ | ||
| 270 | static struct rbd_client *__rbd_client_find(struct ceph_options *opt) | ||
| 271 | { | ||
| 272 | struct rbd_client *client_node; | ||
| 273 | |||
| 274 | if (opt->flags & CEPH_OPT_NOSHARE) | ||
| 275 | return NULL; | ||
| 276 | |||
| 277 | list_for_each_entry(client_node, &rbd_client_list, node) | ||
| 278 | if (ceph_compare_options(opt, client_node->client) == 0) | ||
| 279 | return client_node; | ||
| 280 | return NULL; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* | ||
| 284 | * Get a ceph client with specific addr and configuration, if one does | ||
| 285 | * not exist create it. | ||
| 286 | */ | ||
| 287 | static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, | ||
| 288 | char *options) | ||
| 289 | { | ||
| 290 | struct rbd_client *rbdc; | ||
| 291 | struct ceph_options *opt; | ||
| 292 | int ret; | ||
| 293 | |||
| 294 | ret = ceph_parse_options(&opt, options, mon_addr, | ||
| 295 | mon_addr + strlen(mon_addr), NULL, NULL); | ||
| 296 | if (ret < 0) | ||
| 297 | return ret; | ||
| 298 | |||
| 299 | spin_lock(&node_lock); | ||
| 300 | rbdc = __rbd_client_find(opt); | ||
| 301 | if (rbdc) { | ||
| 302 | ceph_destroy_options(opt); | ||
| 303 | |||
| 304 | /* using an existing client */ | ||
| 305 | kref_get(&rbdc->kref); | ||
| 306 | rbd_dev->rbd_client = rbdc; | ||
| 307 | rbd_dev->client = rbdc->client; | ||
| 308 | spin_unlock(&node_lock); | ||
| 309 | return 0; | ||
| 310 | } | ||
| 311 | spin_unlock(&node_lock); | ||
| 312 | |||
| 313 | rbdc = rbd_client_create(opt); | ||
| 314 | if (IS_ERR(rbdc)) | ||
| 315 | return PTR_ERR(rbdc); | ||
| 316 | |||
| 317 | rbd_dev->rbd_client = rbdc; | ||
| 318 | rbd_dev->client = rbdc->client; | ||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* | ||
| 323 | * Destroy ceph client | ||
| 324 | */ | ||
| 325 | static void rbd_client_release(struct kref *kref) | ||
| 326 | { | ||
| 327 | struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); | ||
| 328 | |||
| 329 | dout("rbd_release_client %p\n", rbdc); | ||
| 330 | spin_lock(&node_lock); | ||
| 331 | list_del(&rbdc->node); | ||
| 332 | spin_unlock(&node_lock); | ||
| 333 | |||
| 334 | ceph_destroy_client(rbdc->client); | ||
| 335 | kfree(rbdc); | ||
| 336 | } | ||
| 337 | |||
| 338 | /* | ||
| 339 | * Drop reference to ceph client node. If it's not referenced anymore, release | ||
| 340 | * it. | ||
| 341 | */ | ||
| 342 | static void rbd_put_client(struct rbd_device *rbd_dev) | ||
| 343 | { | ||
| 344 | kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); | ||
| 345 | rbd_dev->rbd_client = NULL; | ||
| 346 | rbd_dev->client = NULL; | ||
| 347 | } | ||
| 348 | |||
| 349 | |||
| 350 | /* | ||
| 351 | * Create a new header structure, translate header format from the on-disk | ||
| 352 | * header. | ||
| 353 | */ | ||
| 354 | static int rbd_header_from_disk(struct rbd_image_header *header, | ||
| 355 | struct rbd_image_header_ondisk *ondisk, | ||
| 356 | int allocated_snaps, | ||
| 357 | gfp_t gfp_flags) | ||
| 358 | { | ||
| 359 | int i; | ||
| 360 | u32 snap_count = le32_to_cpu(ondisk->snap_count); | ||
| 361 | int ret = -ENOMEM; | ||
| 362 | |||
| 363 | init_rwsem(&header->snap_rwsem); | ||
| 364 | |||
| 365 | header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); | ||
| 366 | header->snapc = kmalloc(sizeof(struct ceph_snap_context) + | ||
| 367 | snap_count * | ||
| 368 | sizeof(struct rbd_image_snap_ondisk), | ||
| 369 | gfp_flags); | ||
| 370 | if (!header->snapc) | ||
| 371 | return -ENOMEM; | ||
| 372 | if (snap_count) { | ||
| 373 | header->snap_names = kmalloc(header->snap_names_len, | ||
| 374 | GFP_KERNEL); | ||
| 375 | if (!header->snap_names) | ||
| 376 | goto err_snapc; | ||
| 377 | header->snap_sizes = kmalloc(snap_count * sizeof(u64), | ||
| 378 | GFP_KERNEL); | ||
| 379 | if (!header->snap_sizes) | ||
| 380 | goto err_names; | ||
| 381 | } else { | ||
| 382 | header->snap_names = NULL; | ||
| 383 | header->snap_sizes = NULL; | ||
| 384 | } | ||
| 385 | memcpy(header->block_name, ondisk->block_name, | ||
| 386 | sizeof(ondisk->block_name)); | ||
| 387 | |||
| 388 | header->image_size = le64_to_cpu(ondisk->image_size); | ||
| 389 | header->obj_order = ondisk->options.order; | ||
| 390 | header->crypt_type = ondisk->options.crypt_type; | ||
| 391 | header->comp_type = ondisk->options.comp_type; | ||
| 392 | |||
| 393 | atomic_set(&header->snapc->nref, 1); | ||
| 394 | header->snap_seq = le64_to_cpu(ondisk->snap_seq); | ||
| 395 | header->snapc->num_snaps = snap_count; | ||
| 396 | header->total_snaps = snap_count; | ||
| 397 | |||
| 398 | if (snap_count && | ||
| 399 | allocated_snaps == snap_count) { | ||
| 400 | for (i = 0; i < snap_count; i++) { | ||
| 401 | header->snapc->snaps[i] = | ||
| 402 | le64_to_cpu(ondisk->snaps[i].id); | ||
| 403 | header->snap_sizes[i] = | ||
| 404 | le64_to_cpu(ondisk->snaps[i].image_size); | ||
| 405 | } | ||
| 406 | |||
| 407 | /* copy snapshot names */ | ||
| 408 | memcpy(header->snap_names, &ondisk->snaps[i], | ||
| 409 | header->snap_names_len); | ||
| 410 | } | ||
| 411 | |||
| 412 | return 0; | ||
| 413 | |||
| 414 | err_names: | ||
| 415 | kfree(header->snap_names); | ||
| 416 | err_snapc: | ||
| 417 | kfree(header->snapc); | ||
| 418 | return ret; | ||
| 419 | } | ||
| 420 | |||
| 421 | static int snap_index(struct rbd_image_header *header, int snap_num) | ||
| 422 | { | ||
| 423 | return header->total_snaps - snap_num; | ||
| 424 | } | ||
| 425 | |||
| 426 | static u64 cur_snap_id(struct rbd_device *rbd_dev) | ||
| 427 | { | ||
| 428 | struct rbd_image_header *header = &rbd_dev->header; | ||
| 429 | |||
| 430 | if (!rbd_dev->cur_snap) | ||
| 431 | return 0; | ||
| 432 | |||
| 433 | return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)]; | ||
| 434 | } | ||
| 435 | |||
| 436 | static int snap_by_name(struct rbd_image_header *header, const char *snap_name, | ||
| 437 | u64 *seq, u64 *size) | ||
| 438 | { | ||
| 439 | int i; | ||
| 440 | char *p = header->snap_names; | ||
| 441 | |||
| 442 | for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { | ||
| 443 | if (strcmp(snap_name, p) == 0) | ||
| 444 | break; | ||
| 445 | } | ||
| 446 | if (i == header->total_snaps) | ||
| 447 | return -ENOENT; | ||
| 448 | if (seq) | ||
| 449 | *seq = header->snapc->snaps[i]; | ||
| 450 | |||
| 451 | if (size) | ||
| 452 | *size = header->snap_sizes[i]; | ||
| 453 | |||
| 454 | return i; | ||
| 455 | } | ||
| 456 | |||
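snap_by_name() walks header->snap_names, where the snapshot names sit back to back as NUL-terminated strings whose combined length is snap_names_len. A small userspace sketch of that walk, with made-up buffer contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical contents; in the driver this is header->snap_names */
	const char names[] = "first\0second\0third\0";
	size_t names_len = sizeof(names) - 1;	/* analogous to snap_names_len */
	const char *p = names;
	int i = 0;

	while (p < names + names_len) {
		printf("snap %d: %s\n", i++, p);
		p += strlen(p) + 1;		/* step past the name and its NUL */
	}
	return 0;
}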
| 457 | static int rbd_header_set_snap(struct rbd_device *dev, | ||
| 458 | const char *snap_name, | ||
| 459 | u64 *size) | ||
| 460 | { | ||
| 461 | struct rbd_image_header *header = &dev->header; | ||
| 462 | struct ceph_snap_context *snapc = header->snapc; | ||
| 463 | int ret = -ENOENT; | ||
| 464 | |||
| 465 | down_write(&header->snap_rwsem); | ||
| 466 | |||
| 467 | if (!snap_name || | ||
| 468 | !*snap_name || | ||
| 469 | strcmp(snap_name, "-") == 0 || | ||
| 470 | strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) { | ||
| 471 | if (header->total_snaps) | ||
| 472 | snapc->seq = header->snap_seq; | ||
| 473 | else | ||
| 474 | snapc->seq = 0; | ||
| 475 | dev->cur_snap = 0; | ||
| 476 | dev->read_only = 0; | ||
| 477 | if (size) | ||
| 478 | *size = header->image_size; | ||
| 479 | } else { | ||
| 480 | ret = snap_by_name(header, snap_name, &snapc->seq, size); | ||
| 481 | if (ret < 0) | ||
| 482 | goto done; | ||
| 483 | |||
| 484 | dev->cur_snap = header->total_snaps - ret; | ||
| 485 | dev->read_only = 1; | ||
| 486 | } | ||
| 487 | |||
| 488 | ret = 0; | ||
| 489 | done: | ||
| 490 | up_write(&header->snap_rwsem); | ||
| 491 | return ret; | ||
| 492 | } | ||
| 493 | |||
| 494 | static void rbd_header_free(struct rbd_image_header *header) | ||
| 495 | { | ||
| 496 | kfree(header->snapc); | ||
| 497 | kfree(header->snap_names); | ||
| 498 | kfree(header->snap_sizes); | ||
| 499 | } | ||
| 500 | |||
| 501 | /* | ||
| 502 | * get the actual striped segment name, offset and length | ||
| 503 | */ | ||
| 504 | static u64 rbd_get_segment(struct rbd_image_header *header, | ||
| 505 | const char *block_name, | ||
| 506 | u64 ofs, u64 len, | ||
| 507 | char *seg_name, u64 *segofs) | ||
| 508 | { | ||
| 509 | u64 seg = ofs >> header->obj_order; | ||
| 510 | |||
| 511 | if (seg_name) | ||
| 512 | snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, | ||
| 513 | "%s.%012llx", block_name, seg); | ||
| 514 | |||
| 515 | ofs = ofs & ((1 << header->obj_order) - 1); | ||
| 516 | len = min_t(u64, len, (1 << header->obj_order) - ofs); | ||
| 517 | |||
| 518 | if (segofs) | ||
| 519 | *segofs = ofs; | ||
| 520 | |||
| 521 | return len; | ||
| 522 | } | ||
| 523 | |||
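A worked example of the striping arithmetic in rbd_get_segment() above; this is illustrative only, and the 4 MiB object order and the "foo" block name are assumptions rather than values from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t obj_order = 22;		/* 4 MiB objects (assumed) */
	uint64_t ofs = 10ULL << 20;		/* request offset: 10 MiB */
	uint64_t len = 1ULL << 20;		/* request length: 1 MiB */

	uint64_t seg = ofs >> obj_order;			/* segment 2 */
	uint64_t seg_ofs = ofs & ((1ULL << obj_order) - 1);	/* 2 MiB into it */
	uint64_t room = (1ULL << obj_order) - seg_ofs;		/* space left */
	uint64_t seg_len = len < room ? len : room;		/* 1 MiB, no split */

	/* same naming scheme as the driver: <block_name>.<segment, 12 hex digits> */
	printf("foo.%012llx ofs=%llu len=%llu\n",
	       (unsigned long long)seg,
	       (unsigned long long)seg_ofs,
	       (unsigned long long)seg_len);
	return 0;
}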
| 524 | /* | ||
| 525 | * bio helpers | ||
| 526 | */ | ||
| 527 | |||
| 528 | static void bio_chain_put(struct bio *chain) | ||
| 529 | { | ||
| 530 | struct bio *tmp; | ||
| 531 | |||
| 532 | while (chain) { | ||
| 533 | tmp = chain; | ||
| 534 | chain = chain->bi_next; | ||
| 535 | bio_put(tmp); | ||
| 536 | } | ||
| 537 | } | ||
| 538 | |||
| 539 | /* | ||
| 540 | * zeros a bio chain, starting at a specific offset | ||
| 541 | */ | ||
| 542 | static void zero_bio_chain(struct bio *chain, int start_ofs) | ||
| 543 | { | ||
| 544 | struct bio_vec *bv; | ||
| 545 | unsigned long flags; | ||
| 546 | void *buf; | ||
| 547 | int i; | ||
| 548 | int pos = 0; | ||
| 549 | |||
| 550 | while (chain) { | ||
| 551 | bio_for_each_segment(bv, chain, i) { | ||
| 552 | if (pos + bv->bv_len > start_ofs) { | ||
| 553 | int remainder = max(start_ofs - pos, 0); | ||
| 554 | buf = bvec_kmap_irq(bv, &flags); | ||
| 555 | memset(buf + remainder, 0, | ||
| 556 | bv->bv_len - remainder); | ||
| 557 | bvec_kunmap_irq(buf, &flags); | ||
| 558 | } | ||
| 559 | pos += bv->bv_len; | ||
| 560 | } | ||
| 561 | |||
| 562 | chain = chain->bi_next; | ||
| 563 | } | ||
| 564 | } | ||
| 565 | |||
| 566 | /* | ||
| 567 | * bio_chain_clone - clone a chain of bios up to a certain length. | ||
| 568 | * might return a bio_pair that will need to be released. | ||
| 569 | */ | ||
| 570 | static struct bio *bio_chain_clone(struct bio **old, struct bio **next, | ||
| 571 | struct bio_pair **bp, | ||
| 572 | int len, gfp_t gfpmask) | ||
| 573 | { | ||
| 574 | struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL; | ||
| 575 | int total = 0; | ||
| 576 | |||
| 577 | if (*bp) { | ||
| 578 | bio_pair_release(*bp); | ||
| 579 | *bp = NULL; | ||
| 580 | } | ||
| 581 | |||
| 582 | while (old_chain && (total < len)) { | ||
| 583 | tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); | ||
| 584 | if (!tmp) | ||
| 585 | goto err_out; | ||
| 586 | |||
| 587 | if (total + old_chain->bi_size > len) { | ||
| 588 | struct bio_pair *bp; | ||
| 589 | |||
| 590 | /* | ||
| 591 | * this split can only happen with a single-page bio; | ||
| 592 | * bio_split will BUG_ON if this is not the case | ||
| 593 | */ | ||
| 594 | dout("bio_chain_clone split! total=%d remaining=%d" | ||
| 595 | "bi_size=%d\n", | ||
| 596 | (int)total, (int)len-total, | ||
| 597 | (int)old_chain->bi_size); | ||
| 598 | |||
| 599 | /* split the bio. We'll release it either in the next | ||
| 600 | call, or it will have to be released outside */ | ||
| 601 | bp = bio_split(old_chain, (len - total) / 512ULL); | ||
| 602 | if (!bp) | ||
| 603 | goto err_out; | ||
| 604 | |||
| 605 | __bio_clone(tmp, &bp->bio1); | ||
| 606 | |||
| 607 | *next = &bp->bio2; | ||
| 608 | } else { | ||
| 609 | __bio_clone(tmp, old_chain); | ||
| 610 | *next = old_chain->bi_next; | ||
| 611 | } | ||
| 612 | |||
| 613 | tmp->bi_bdev = NULL; | ||
| 614 | gfpmask &= ~__GFP_WAIT; | ||
| 615 | tmp->bi_next = NULL; | ||
| 616 | |||
| 617 | if (!new_chain) { | ||
| 618 | new_chain = tail = tmp; | ||
| 619 | } else { | ||
| 620 | tail->bi_next = tmp; | ||
| 621 | tail = tmp; | ||
| 622 | } | ||
| 623 | old_chain = old_chain->bi_next; | ||
| 624 | |||
| 625 | total += tmp->bi_size; | ||
| 626 | } | ||
| 627 | |||
| 628 | BUG_ON(total < len); | ||
| 629 | |||
| 630 | if (tail) | ||
| 631 | tail->bi_next = NULL; | ||
| 632 | |||
| 633 | *old = old_chain; | ||
| 634 | |||
| 635 | return new_chain; | ||
| 636 | |||
| 637 | err_out: | ||
| 638 | dout("bio_chain_clone with err\n"); | ||
| 639 | bio_chain_put(new_chain); | ||
| 640 | return NULL; | ||
| 641 | } | ||
| 642 | |||
| 643 | /* | ||
| 644 | * helpers for osd request op vectors. | ||
| 645 | */ | ||
| 646 | static int rbd_create_rw_ops(struct ceph_osd_req_op **ops, | ||
| 647 | int num_ops, | ||
| 648 | int opcode, | ||
| 649 | u32 payload_len) | ||
| 650 | { | ||
| 651 | *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1), | ||
| 652 | GFP_NOIO); | ||
| 653 | if (!*ops) | ||
| 654 | return -ENOMEM; | ||
| 655 | (*ops)[0].op = opcode; | ||
| 656 | /* | ||
| 657 | * op extent offset and length will be set later on | ||
| 658 | * in calc_raw_layout() | ||
| 659 | */ | ||
| 660 | (*ops)[0].payload_len = payload_len; | ||
| 661 | return 0; | ||
| 662 | } | ||
| 663 | |||
| 664 | static void rbd_destroy_ops(struct ceph_osd_req_op *ops) | ||
| 665 | { | ||
| 666 | kfree(ops); | ||
| 667 | } | ||
| 668 | |||
| 669 | /* | ||
| 670 | * Send ceph osd request | ||
| 671 | */ | ||
| 672 | static int rbd_do_request(struct request *rq, | ||
| 673 | struct rbd_device *dev, | ||
| 674 | struct ceph_snap_context *snapc, | ||
| 675 | u64 snapid, | ||
| 676 | const char *obj, u64 ofs, u64 len, | ||
| 677 | struct bio *bio, | ||
| 678 | struct page **pages, | ||
| 679 | int num_pages, | ||
| 680 | int flags, | ||
| 681 | struct ceph_osd_req_op *ops, | ||
| 682 | int num_reply, | ||
| 683 | void (*rbd_cb)(struct ceph_osd_request *req, | ||
| 684 | struct ceph_msg *msg)) | ||
| 685 | { | ||
| 686 | struct ceph_osd_request *req; | ||
| 687 | struct ceph_file_layout *layout; | ||
| 688 | int ret; | ||
| 689 | u64 bno; | ||
| 690 | struct timespec mtime = CURRENT_TIME; | ||
| 691 | struct rbd_request *req_data; | ||
| 692 | struct ceph_osd_request_head *reqhead; | ||
| 693 | struct rbd_image_header *header = &dev->header; | ||
| 694 | |||
| 695 | ret = -ENOMEM; | ||
| 696 | req_data = kzalloc(sizeof(*req_data), GFP_NOIO); | ||
| 697 | if (!req_data) | ||
| 698 | goto done; | ||
| 699 | |||
| 700 | dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); | ||
| 701 | |||
| 702 | down_read(&header->snap_rwsem); | ||
| 703 | |||
| 704 | req = ceph_osdc_alloc_request(&dev->client->osdc, flags, | ||
| 705 | snapc, | ||
| 706 | ops, | ||
| 707 | false, | ||
| 708 | GFP_NOIO, pages, bio); | ||
| 709 | if (IS_ERR(req)) { | ||
| 710 | up_read(&header->snap_rwsem); | ||
| 711 | ret = PTR_ERR(req); | ||
| 712 | goto done_pages; | ||
| 713 | } | ||
| 714 | |||
| 715 | req->r_callback = rbd_cb; | ||
| 716 | |||
| 717 | req_data->rq = rq; | ||
| 718 | req_data->bio = bio; | ||
| 719 | req_data->pages = pages; | ||
| 720 | req_data->len = len; | ||
| 721 | |||
| 722 | req->r_priv = req_data; | ||
| 723 | |||
| 724 | reqhead = req->r_request->front.iov_base; | ||
| 725 | reqhead->snapid = cpu_to_le64(CEPH_NOSNAP); | ||
| 726 | |||
| 727 | strncpy(req->r_oid, obj, sizeof(req->r_oid)); | ||
| 728 | req->r_oid_len = strlen(req->r_oid); | ||
| 729 | |||
| 730 | layout = &req->r_file_layout; | ||
| 731 | memset(layout, 0, sizeof(*layout)); | ||
| 732 | layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); | ||
| 733 | layout->fl_stripe_count = cpu_to_le32(1); | ||
| 734 | layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); | ||
| 735 | layout->fl_pg_preferred = cpu_to_le32(-1); | ||
| 736 | layout->fl_pg_pool = cpu_to_le32(dev->poolid); | ||
| 737 | ceph_calc_raw_layout(&dev->client->osdc, layout, snapid, | ||
| 738 | ofs, &len, &bno, req, ops); | ||
| 739 | |||
| 740 | ceph_osdc_build_request(req, ofs, &len, | ||
| 741 | ops, | ||
| 742 | snapc, | ||
| 743 | &mtime, | ||
| 744 | req->r_oid, req->r_oid_len); | ||
| 745 | up_read(&header->snap_rwsem); | ||
| 746 | |||
| 747 | ret = ceph_osdc_start_request(&dev->client->osdc, req, false); | ||
| 748 | if (ret < 0) | ||
| 749 | goto done_err; | ||
| 750 | |||
| 751 | if (!rbd_cb) { | ||
| 752 | ret = ceph_osdc_wait_request(&dev->client->osdc, req); | ||
| 753 | ceph_osdc_put_request(req); | ||
| 754 | } | ||
| 755 | return ret; | ||
| 756 | |||
| 757 | done_err: | ||
| 758 | bio_chain_put(req_data->bio); | ||
| 759 | ceph_osdc_put_request(req); | ||
| 760 | done_pages: | ||
| 761 | kfree(req_data); | ||
| 762 | done: | ||
| 763 | if (rq) | ||
| 764 | blk_end_request(rq, ret, len); | ||
| 765 | return ret; | ||
| 766 | } | ||
| 767 | |||
| 768 | /* | ||
| 769 | * Ceph osd op callback | ||
| 770 | */ | ||
| 771 | static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) | ||
| 772 | { | ||
| 773 | struct rbd_request *req_data = req->r_priv; | ||
| 774 | struct ceph_osd_reply_head *replyhead; | ||
| 775 | struct ceph_osd_op *op; | ||
| 776 | __s32 rc; | ||
| 777 | u64 bytes; | ||
| 778 | int read_op; | ||
| 779 | |||
| 780 | /* parse reply */ | ||
| 781 | replyhead = msg->front.iov_base; | ||
| 782 | WARN_ON(le32_to_cpu(replyhead->num_ops) == 0); | ||
| 783 | op = (void *)(replyhead + 1); | ||
| 784 | rc = le32_to_cpu(replyhead->result); | ||
| 785 | bytes = le64_to_cpu(op->extent.length); | ||
| 786 | read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ); | ||
| 787 | |||
| 788 | dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc); | ||
| 789 | |||
| 790 | if (rc == -ENOENT && read_op) { | ||
| 791 | zero_bio_chain(req_data->bio, 0); | ||
| 792 | rc = 0; | ||
| 793 | } else if (rc == 0 && read_op && bytes < req_data->len) { | ||
| 794 | zero_bio_chain(req_data->bio, bytes); | ||
| 795 | bytes = req_data->len; | ||
| 796 | } | ||
| 797 | |||
| 798 | blk_end_request(req_data->rq, rc, bytes); | ||
| 799 | |||
| 800 | if (req_data->bio) | ||
| 801 | bio_chain_put(req_data->bio); | ||
| 802 | |||
| 803 | ceph_osdc_put_request(req); | ||
| 804 | kfree(req_data); | ||
| 805 | } | ||
| 806 | |||
| 807 | /* | ||
| 808 | * Do a synchronous ceph osd operation | ||
| 809 | */ | ||
| 810 | static int rbd_req_sync_op(struct rbd_device *dev, | ||
| 811 | struct ceph_snap_context *snapc, | ||
| 812 | u64 snapid, | ||
| 813 | int opcode, | ||
| 814 | int flags, | ||
| 815 | struct ceph_osd_req_op *orig_ops, | ||
| 816 | int num_reply, | ||
| 817 | const char *obj, | ||
| 818 | u64 ofs, u64 len, | ||
| 819 | char *buf) | ||
| 820 | { | ||
| 821 | int ret; | ||
| 822 | struct page **pages; | ||
| 823 | int num_pages; | ||
| 824 | struct ceph_osd_req_op *ops = orig_ops; | ||
| 825 | u32 payload_len; | ||
| 826 | |||
| 827 | num_pages = calc_pages_for(ofs , len); | ||
| 828 | pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); | ||
| 829 | if (IS_ERR(pages)) | ||
| 830 | return PTR_ERR(pages); | ||
| 831 | |||
| 832 | if (!orig_ops) { | ||
| 833 | payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0); | ||
| 834 | ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); | ||
| 835 | if (ret < 0) | ||
| 836 | goto done; | ||
| 837 | |||
| 838 | if ((flags & CEPH_OSD_FLAG_WRITE) && buf) { | ||
| 839 | ret = ceph_copy_to_page_vector(pages, buf, ofs, len); | ||
| 840 | if (ret < 0) | ||
| 841 | goto done_ops; | ||
| 842 | } | ||
| 843 | } | ||
| 844 | |||
| 845 | ret = rbd_do_request(NULL, dev, snapc, snapid, | ||
| 846 | obj, ofs, len, NULL, | ||
| 847 | pages, num_pages, | ||
| 848 | flags, | ||
| 849 | ops, | ||
| 850 | 2, | ||
| 851 | NULL); | ||
| 852 | if (ret < 0) | ||
| 853 | goto done_ops; | ||
| 854 | |||
| 855 | if ((flags & CEPH_OSD_FLAG_READ) && buf) | ||
| 856 | ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); | ||
| 857 | |||
| 858 | done_ops: | ||
| 859 | if (!orig_ops) | ||
| 860 | rbd_destroy_ops(ops); | ||
| 861 | done: | ||
| 862 | ceph_release_page_vector(pages, num_pages); | ||
| 863 | return ret; | ||
| 864 | } | ||
| 865 | |||
| 866 | /* | ||
| 867 | * Do an asynchronous ceph osd operation | ||
| 868 | */ | ||
| 869 | static int rbd_do_op(struct request *rq, | ||
| 870 | struct rbd_device *rbd_dev , | ||
| 871 | struct ceph_snap_context *snapc, | ||
| 872 | u64 snapid, | ||
| 873 | int opcode, int flags, int num_reply, | ||
| 874 | u64 ofs, u64 len, | ||
| 875 | struct bio *bio) | ||
| 876 | { | ||
| 877 | char *seg_name; | ||
| 878 | u64 seg_ofs; | ||
| 879 | u64 seg_len; | ||
| 880 | int ret; | ||
| 881 | struct ceph_osd_req_op *ops; | ||
| 882 | u32 payload_len; | ||
| 883 | |||
| 884 | seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); | ||
| 885 | if (!seg_name) | ||
| 886 | return -ENOMEM; | ||
| 887 | |||
| 888 | seg_len = rbd_get_segment(&rbd_dev->header, | ||
| 889 | rbd_dev->header.block_name, | ||
| 890 | ofs, len, | ||
| 891 | seg_name, &seg_ofs); | ||
| 892 | |||
| 893 | payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); | ||
| 894 | |||
| 895 | ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); | ||
| 896 | if (ret < 0) | ||
| 897 | goto done; | ||
| 898 | |||
| 899 | /* we've taken care of segment sizes earlier when we | ||
| 900 | cloned the bios. We should never have a segment | ||
| 901 | truncated at this point */ | ||
| 902 | BUG_ON(seg_len < len); | ||
| 903 | |||
| 904 | ret = rbd_do_request(rq, rbd_dev, snapc, snapid, | ||
| 905 | seg_name, seg_ofs, seg_len, | ||
| 906 | bio, | ||
| 907 | NULL, 0, | ||
| 908 | flags, | ||
| 909 | ops, | ||
| 910 | num_reply, | ||
| 911 | rbd_req_cb); | ||
| 912 | done: | ||
| 913 | kfree(seg_name); | ||
| 914 | return ret; | ||
| 915 | } | ||
| 916 | |||
| 917 | /* | ||
| 918 | * Request async osd write | ||
| 919 | */ | ||
| 920 | static int rbd_req_write(struct request *rq, | ||
| 921 | struct rbd_device *rbd_dev, | ||
| 922 | struct ceph_snap_context *snapc, | ||
| 923 | u64 ofs, u64 len, | ||
| 924 | struct bio *bio) | ||
| 925 | { | ||
| 926 | return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, | ||
| 927 | CEPH_OSD_OP_WRITE, | ||
| 928 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, | ||
| 929 | 2, | ||
| 930 | ofs, len, bio); | ||
| 931 | } | ||
| 932 | |||
| 933 | /* | ||
| 934 | * Request async osd read | ||
| 935 | */ | ||
| 936 | static int rbd_req_read(struct request *rq, | ||
| 937 | struct rbd_device *rbd_dev, | ||
| 938 | u64 snapid, | ||
| 939 | u64 ofs, u64 len, | ||
| 940 | struct bio *bio) | ||
| 941 | { | ||
| 942 | return rbd_do_op(rq, rbd_dev, NULL, | ||
| 943 | (snapid ? snapid : CEPH_NOSNAP), | ||
| 944 | CEPH_OSD_OP_READ, | ||
| 945 | CEPH_OSD_FLAG_READ, | ||
| 946 | 2, | ||
| 947 | ofs, len, bio); | ||
| 948 | } | ||
| 949 | |||
| 950 | /* | ||
| 951 | * Request sync osd read | ||
| 952 | */ | ||
| 953 | static int rbd_req_sync_read(struct rbd_device *dev, | ||
| 954 | struct ceph_snap_context *snapc, | ||
| 955 | u64 snapid, | ||
| 956 | const char *obj, | ||
| 957 | u64 ofs, u64 len, | ||
| 958 | char *buf) | ||
| 959 | { | ||
| 960 | return rbd_req_sync_op(dev, NULL, | ||
| 961 | (snapid ? snapid : CEPH_NOSNAP), | ||
| 962 | CEPH_OSD_OP_READ, | ||
| 963 | CEPH_OSD_FLAG_READ, | ||
| 964 | NULL, | ||
| 965 | 1, obj, ofs, len, buf); | ||
| 966 | } | ||
| 967 | |||
| 968 | /* | ||
| 969 | * Request sync osd rollback of an object to a snapshot | ||
| 970 | */ | ||
| 971 | static int rbd_req_sync_rollback_obj(struct rbd_device *dev, | ||
| 972 | u64 snapid, | ||
| 973 | const char *obj) | ||
| 974 | { | ||
| 975 | struct ceph_osd_req_op *ops; | ||
| 976 | int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0); | ||
| 977 | if (ret < 0) | ||
| 978 | return ret; | ||
| 979 | |||
| 980 | ops[0].snap.snapid = snapid; | ||
| 981 | |||
| 982 | ret = rbd_req_sync_op(dev, NULL, | ||
| 983 | CEPH_NOSNAP, | ||
| 984 | 0, | ||
| 985 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, | ||
| 986 | ops, | ||
| 987 | 1, obj, 0, 0, NULL); | ||
| 988 | |||
| 989 | rbd_destroy_ops(ops); | ||
| 990 | |||
| 991 | if (ret < 0) | ||
| 992 | return ret; | ||
| 993 | |||
| 994 | return ret; | ||
| 995 | } | ||
| 996 | |||
| 997 | /* | ||
| 999 | * Request sync osd class method call (exec) | ||
| 999 | */ | ||
| 1000 | static int rbd_req_sync_exec(struct rbd_device *dev, | ||
| 1001 | const char *obj, | ||
| 1002 | const char *cls, | ||
| 1003 | const char *method, | ||
| 1004 | const char *data, | ||
| 1005 | int len) | ||
| 1006 | { | ||
| 1007 | struct ceph_osd_req_op *ops; | ||
| 1008 | int cls_len = strlen(cls); | ||
| 1009 | int method_len = strlen(method); | ||
| 1010 | int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL, | ||
| 1011 | cls_len + method_len + len); | ||
| 1012 | if (ret < 0) | ||
| 1013 | return ret; | ||
| 1014 | |||
| 1015 | ops[0].cls.class_name = cls; | ||
| 1016 | ops[0].cls.class_len = (__u8)cls_len; | ||
| 1017 | ops[0].cls.method_name = method; | ||
| 1018 | ops[0].cls.method_len = (__u8)method_len; | ||
| 1019 | ops[0].cls.argc = 0; | ||
| 1020 | ops[0].cls.indata = data; | ||
| 1021 | ops[0].cls.indata_len = len; | ||
| 1022 | |||
| 1023 | ret = rbd_req_sync_op(dev, NULL, | ||
| 1024 | CEPH_NOSNAP, | ||
| 1025 | 0, | ||
| 1026 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, | ||
| 1027 | ops, | ||
| 1028 | 1, obj, 0, 0, NULL); | ||
| 1029 | |||
| 1030 | rbd_destroy_ops(ops); | ||
| 1031 | |||
| 1032 | dout("cls_exec returned %d\n", ret); | ||
| 1033 | return ret; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | /* | ||
| 1037 | * block device queue callback | ||
| 1038 | */ | ||
| 1039 | static void rbd_rq_fn(struct request_queue *q) | ||
| 1040 | { | ||
| 1041 | struct rbd_device *rbd_dev = q->queuedata; | ||
| 1042 | struct request *rq; | ||
| 1043 | struct bio_pair *bp = NULL; | ||
| 1044 | |||
| 1045 | rq = blk_fetch_request(q); | ||
| 1046 | |||
| 1047 | while (1) { | ||
| 1048 | struct bio *bio; | ||
| 1049 | struct bio *rq_bio, *next_bio = NULL; | ||
| 1050 | bool do_write; | ||
| 1051 | int size, op_size = 0; | ||
| 1052 | u64 ofs; | ||
| 1053 | |||
| 1054 | /* peek at request from block layer */ | ||
| 1055 | if (!rq) | ||
| 1056 | break; | ||
| 1057 | |||
| 1058 | dout("fetched request\n"); | ||
| 1059 | |||
| 1060 | /* filter out block requests we don't understand */ | ||
| 1061 | if ((rq->cmd_type != REQ_TYPE_FS)) { | ||
| 1062 | __blk_end_request_all(rq, 0); | ||
| 1063 | goto next; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | /* deduce our operation (read, write) */ | ||
| 1067 | do_write = (rq_data_dir(rq) == WRITE); | ||
| 1068 | |||
| 1069 | size = blk_rq_bytes(rq); | ||
| 1070 | ofs = blk_rq_pos(rq) * 512ULL; | ||
| 1071 | rq_bio = rq->bio; | ||
| 1072 | if (do_write && rbd_dev->read_only) { | ||
| 1073 | __blk_end_request_all(rq, -EROFS); | ||
| 1074 | goto next; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | spin_unlock_irq(q->queue_lock); | ||
| 1078 | |||
| 1079 | dout("%s 0x%x bytes at 0x%llx\n", | ||
| 1080 | do_write ? "write" : "read", | ||
| 1081 | size, blk_rq_pos(rq) * 512ULL); | ||
| 1082 | |||
| 1083 | do { | ||
| 1084 | /* a bio clone to be passed down to OSD req */ | ||
| 1085 | dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); | ||
| 1086 | op_size = rbd_get_segment(&rbd_dev->header, | ||
| 1087 | rbd_dev->header.block_name, | ||
| 1088 | ofs, size, | ||
| 1089 | NULL, NULL); | ||
| 1090 | bio = bio_chain_clone(&rq_bio, &next_bio, &bp, | ||
| 1091 | op_size, GFP_ATOMIC); | ||
| 1092 | if (!bio) { | ||
| 1093 | spin_lock_irq(q->queue_lock); | ||
| 1094 | __blk_end_request_all(rq, -ENOMEM); | ||
| 1095 | goto next; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | /* init OSD command: write or read */ | ||
| 1099 | if (do_write) | ||
| 1100 | rbd_req_write(rq, rbd_dev, | ||
| 1101 | rbd_dev->header.snapc, | ||
| 1102 | ofs, | ||
| 1103 | op_size, bio); | ||
| 1104 | else | ||
| 1105 | rbd_req_read(rq, rbd_dev, | ||
| 1106 | cur_snap_id(rbd_dev), | ||
| 1107 | ofs, | ||
| 1108 | op_size, bio); | ||
| 1109 | |||
| 1110 | size -= op_size; | ||
| 1111 | ofs += op_size; | ||
| 1112 | |||
| 1113 | rq_bio = next_bio; | ||
| 1114 | } while (size > 0); | ||
| 1115 | |||
| 1116 | if (bp) | ||
| 1117 | bio_pair_release(bp); | ||
| 1118 | |||
| 1119 | spin_lock_irq(q->queue_lock); | ||
| 1120 | next: | ||
| 1121 | rq = blk_fetch_request(q); | ||
| 1122 | } | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | /* | ||
| 1126 | * a queue callback. Makes sure that we don't create a bio that spans across | ||
| 1127 | * multiple osd objects. One exception would be with a single page bios, | ||
| 1128 | * which we handle later at bio_chain_clone | ||
| 1129 | */ | ||
| 1130 | static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, | ||
| 1131 | struct bio_vec *bvec) | ||
| 1132 | { | ||
| 1133 | struct rbd_device *rbd_dev = q->queuedata; | ||
| 1134 | unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9); | ||
| 1135 | sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev); | ||
| 1136 | unsigned int bio_sectors = bmd->bi_size >> 9; | ||
| 1137 | int max; | ||
| 1138 | |||
| 1139 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) | ||
| 1140 | + bio_sectors)) << 9; | ||
| 1141 | if (max < 0) | ||
| 1142 | max = 0; /* bio_add cannot handle a negative return */ | ||
| 1143 | if (max <= bvec->bv_len && bio_sectors == 0) | ||
| 1144 | return bvec->bv_len; | ||
| 1145 | return max; | ||
| 1146 | } | ||
| 1147 | |||
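To make the boundary check in rbd_merge_bvec() concrete, the same arithmetic can be evaluated in a small userspace sketch. The 4 MiB object order is an assumption, and merge_max() simply restates the driver's computation of how many bytes may still be added before the bio would cross an object boundary:

#include <stdio.h>

static long long merge_max(unsigned int chunk_sectors,
			   unsigned long long sector,
			   unsigned int bio_sectors)
{
	long long room = (long long)chunk_sectors
			 - (long long)((sector & (chunk_sectors - 1)) + bio_sectors);
	return room < 0 ? 0 : room << 9;	/* sectors -> bytes */
}

int main(void)
{
	unsigned int chunk_sectors = 1u << (22 - 9);	/* 8192 sectors = 4 MiB */

	/* empty bio at the start of an object: the whole object is available */
	printf("%lld\n", merge_max(chunk_sectors, 0, 0));	/* 4194304 */

	/* bio ending 1 KiB short of the boundary: only 1 KiB may be added */
	printf("%lld\n", merge_max(chunk_sectors, 8182, 8));	/* 1024 */

	/* bio that already reaches the boundary: nothing can be added */
	printf("%lld\n", merge_max(chunk_sectors, 8188, 8));	/* 0 */
	return 0;
}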
| 1148 | static void rbd_free_disk(struct rbd_device *rbd_dev) | ||
| 1149 | { | ||
| 1150 | struct gendisk *disk = rbd_dev->disk; | ||
| 1151 | |||
| 1152 | if (!disk) | ||
| 1153 | return; | ||
| 1154 | |||
| 1155 | rbd_header_free(&rbd_dev->header); | ||
| 1156 | |||
| 1157 | if (disk->flags & GENHD_FL_UP) | ||
| 1158 | del_gendisk(disk); | ||
| 1159 | if (disk->queue) | ||
| 1160 | blk_cleanup_queue(disk->queue); | ||
| 1161 | put_disk(disk); | ||
| 1162 | } | ||
| 1163 | |||
| 1164 | /* | ||
| 1165 | * reload the ondisk header | ||
| 1166 | */ | ||
| 1167 | static int rbd_read_header(struct rbd_device *rbd_dev, | ||
| 1168 | struct rbd_image_header *header) | ||
| 1169 | { | ||
| 1170 | ssize_t rc; | ||
| 1171 | struct rbd_image_header_ondisk *dh; | ||
| 1172 | int snap_count = 0; | ||
| 1173 | u64 snap_names_len = 0; | ||
| 1174 | |||
| 1175 | while (1) { | ||
| 1176 | int len = sizeof(*dh) + | ||
| 1177 | snap_count * sizeof(struct rbd_image_snap_ondisk) + | ||
| 1178 | snap_names_len; | ||
| 1179 | |||
| 1180 | rc = -ENOMEM; | ||
| 1181 | dh = kmalloc(len, GFP_KERNEL); | ||
| 1182 | if (!dh) | ||
| 1183 | return -ENOMEM; | ||
| 1184 | |||
| 1185 | rc = rbd_req_sync_read(rbd_dev, | ||
| 1186 | NULL, CEPH_NOSNAP, | ||
| 1187 | rbd_dev->obj_md_name, | ||
| 1188 | 0, len, | ||
| 1189 | (char *)dh); | ||
| 1190 | if (rc < 0) | ||
| 1191 | goto out_dh; | ||
| 1192 | |||
| 1193 | rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); | ||
| 1194 | if (rc < 0) | ||
| 1195 | goto out_dh; | ||
| 1196 | |||
| 1197 | if (snap_count != header->total_snaps) { | ||
| 1198 | snap_count = header->total_snaps; | ||
| 1199 | snap_names_len = header->snap_names_len; | ||
| 1200 | rbd_header_free(header); | ||
| 1201 | kfree(dh); | ||
| 1202 | continue; | ||
| 1203 | } | ||
| 1204 | break; | ||
| 1205 | } | ||
| 1206 | |||
| 1207 | out_dh: | ||
| 1208 | kfree(dh); | ||
| 1209 | return rc; | ||
| 1210 | } | ||
| 1211 | |||
| 1212 | /* | ||
| 1213 | * create a snapshot | ||
| 1214 | */ | ||
| 1215 | static int rbd_header_add_snap(struct rbd_device *dev, | ||
| 1216 | const char *snap_name, | ||
| 1217 | gfp_t gfp_flags) | ||
| 1218 | { | ||
| 1219 | int name_len = strlen(snap_name); | ||
| 1220 | u64 new_snapid; | ||
| 1221 | int ret; | ||
| 1222 | void *data, *data_start, *data_end; | ||
| 1223 | |||
| 1224 | /* we should create a snapshot only if we're pointing at the head */ | ||
| 1225 | if (dev->cur_snap) | ||
| 1226 | return -EINVAL; | ||
| 1227 | |||
| 1228 | ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid, | ||
| 1229 | &new_snapid); | ||
| 1230 | dout("created snapid=%lld\n", new_snapid); | ||
| 1231 | if (ret < 0) | ||
| 1232 | return ret; | ||
| 1233 | |||
| 1234 | data = kmalloc(name_len + 16, gfp_flags); | ||
| 1235 | if (!data) | ||
| 1236 | return -ENOMEM; | ||
| 1237 | |||
| 1238 | data_start = data; | ||
| 1239 | data_end = data + name_len + 16; | ||
| 1240 | |||
| 1241 | ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad); | ||
| 1242 | ceph_encode_64_safe(&data, data_end, new_snapid, bad); | ||
| 1243 | |||
| 1244 | ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", | ||
| 1245 | data_start, data - data_start); | ||
| 1246 | |||
| 1247 | kfree(data_start); | ||
| 1248 | |||
| 1249 | if (ret < 0) | ||
| 1250 | return ret; | ||
| 1251 | |||
| 1252 | dev->header.snapc->seq = new_snapid; | ||
| 1253 | |||
| 1254 | return 0; | ||
| 1255 | bad: | ||
| 1256 | return -ERANGE; | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | /* | ||
| 1260 | * only read the first part of the ondisk header, without the snaps info | ||
| 1261 | */ | ||
| 1262 | static int rbd_update_snaps(struct rbd_device *rbd_dev) | ||
| 1263 | { | ||
| 1264 | int ret; | ||
| 1265 | struct rbd_image_header h; | ||
| 1266 | u64 snap_seq; | ||
| 1267 | |||
| 1268 | ret = rbd_read_header(rbd_dev, &h); | ||
| 1269 | if (ret < 0) | ||
| 1270 | return ret; | ||
| 1271 | |||
| 1272 | down_write(&rbd_dev->header.snap_rwsem); | ||
| 1273 | |||
| 1274 | snap_seq = rbd_dev->header.snapc->seq; | ||
| 1275 | |||
| 1276 | kfree(rbd_dev->header.snapc); | ||
| 1277 | kfree(rbd_dev->header.snap_names); | ||
| 1278 | kfree(rbd_dev->header.snap_sizes); | ||
| 1279 | |||
| 1280 | rbd_dev->header.total_snaps = h.total_snaps; | ||
| 1281 | rbd_dev->header.snapc = h.snapc; | ||
| 1282 | rbd_dev->header.snap_names = h.snap_names; | ||
| 1283 | rbd_dev->header.snap_sizes = h.snap_sizes; | ||
| 1284 | rbd_dev->header.snapc->seq = snap_seq; | ||
| 1285 | |||
| 1286 | up_write(&rbd_dev->header.snap_rwsem); | ||
| 1287 | |||
| 1288 | return 0; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | static int rbd_init_disk(struct rbd_device *rbd_dev) | ||
| 1292 | { | ||
| 1293 | struct gendisk *disk; | ||
| 1294 | struct request_queue *q; | ||
| 1295 | int rc; | ||
| 1296 | u64 total_size = 0; | ||
| 1297 | |||
| 1298 | /* contact OSD, request size info about the object being mapped */ | ||
| 1299 | rc = rbd_read_header(rbd_dev, &rbd_dev->header); | ||
| 1300 | if (rc) | ||
| 1301 | return rc; | ||
| 1302 | |||
| 1303 | rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size); | ||
| 1304 | if (rc) | ||
| 1305 | return rc; | ||
| 1306 | |||
| 1307 | /* create gendisk info */ | ||
| 1308 | rc = -ENOMEM; | ||
| 1309 | disk = alloc_disk(RBD_MINORS_PER_MAJOR); | ||
| 1310 | if (!disk) | ||
| 1311 | goto out; | ||
| 1312 | |||
| 1313 | sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id); | ||
| 1314 | disk->major = rbd_dev->major; | ||
| 1315 | disk->first_minor = 0; | ||
| 1316 | disk->fops = &rbd_bd_ops; | ||
| 1317 | disk->private_data = rbd_dev; | ||
| 1318 | |||
| 1319 | /* init rq */ | ||
| 1320 | rc = -ENOMEM; | ||
| 1321 | q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); | ||
| 1322 | if (!q) | ||
| 1323 | goto out_disk; | ||
| 1324 | blk_queue_merge_bvec(q, rbd_merge_bvec); | ||
| 1325 | disk->queue = q; | ||
| 1326 | |||
| 1327 | q->queuedata = rbd_dev; | ||
| 1328 | |||
| 1329 | rbd_dev->disk = disk; | ||
| 1330 | rbd_dev->q = q; | ||
| 1331 | |||
| 1332 | /* finally, announce the disk to the world */ | ||
| 1333 | set_capacity(disk, total_size / 512ULL); | ||
| 1334 | add_disk(disk); | ||
| 1335 | |||
| 1336 | pr_info("%s: added with size 0x%llx\n", | ||
| 1337 | disk->disk_name, (unsigned long long)total_size); | ||
| 1338 | return 0; | ||
| 1339 | |||
| 1340 | out_disk: | ||
| 1341 | put_disk(disk); | ||
| 1342 | out: | ||
| 1343 | return rc; | ||
| 1344 | } | ||
| 1345 | |||
| 1346 | /******************************************************************** | ||
| 1347 | * /sys/class/rbd/ | ||
| 1348 | * add map rados objects to blkdev | ||
| 1349 | * remove unmap rados objects | ||
| 1350 | * list show mappings | ||
| 1351 | *******************************************************************/ | ||
| 1352 | |||
| 1353 | static void class_rbd_release(struct class *cls) | ||
| 1354 | { | ||
| 1355 | kfree(cls); | ||
| 1356 | } | ||
| 1357 | |||
| 1358 | static ssize_t class_rbd_list(struct class *c, | ||
| 1359 | struct class_attribute *attr, | ||
| 1360 | char *data) | ||
| 1361 | { | ||
| 1362 | int n = 0; | ||
| 1363 | struct list_head *tmp; | ||
| 1364 | int max = PAGE_SIZE; | ||
| 1365 | |||
| 1366 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1367 | |||
| 1368 | n += snprintf(data, max, | ||
| 1369 | "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n"); | ||
| 1370 | |||
| 1371 | list_for_each(tmp, &rbd_dev_list) { | ||
| 1372 | struct rbd_device *rbd_dev; | ||
| 1373 | |||
| 1374 | rbd_dev = list_entry(tmp, struct rbd_device, node); | ||
| 1375 | n += snprintf(data+n, max-n, | ||
| 1376 | "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n", | ||
| 1377 | rbd_dev->id, | ||
| 1378 | rbd_dev->major, | ||
| 1379 | ceph_client_id(rbd_dev->client), | ||
| 1380 | rbd_dev->pool_name, | ||
| 1381 | rbd_dev->obj, rbd_dev->snap_name, | ||
| 1382 | rbd_dev->header.image_size >> 10); | ||
| 1383 | if (n == max) | ||
| 1384 | break; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | mutex_unlock(&ctl_mutex); | ||
| 1388 | return n; | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | static ssize_t class_rbd_add(struct class *c, | ||
| 1392 | struct class_attribute *attr, | ||
| 1393 | const char *buf, size_t count) | ||
| 1394 | { | ||
| 1395 | struct ceph_osd_client *osdc; | ||
| 1396 | struct rbd_device *rbd_dev; | ||
| 1397 | ssize_t rc = -ENOMEM; | ||
| 1398 | int irc, new_id = 0; | ||
| 1399 | struct list_head *tmp; | ||
| 1400 | char *mon_dev_name; | ||
| 1401 | char *options; | ||
| 1402 | |||
| 1403 | if (!try_module_get(THIS_MODULE)) | ||
| 1404 | return -ENODEV; | ||
| 1405 | |||
| 1406 | mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); | ||
| 1407 | if (!mon_dev_name) | ||
| 1408 | goto err_out_mod; | ||
| 1409 | |||
| 1410 | options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); | ||
| 1411 | if (!options) | ||
| 1412 | goto err_mon_dev; | ||
| 1413 | |||
| 1414 | /* new rbd_device object */ | ||
| 1415 | rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); | ||
| 1416 | if (!rbd_dev) | ||
| 1417 | goto err_out_opt; | ||
| 1418 | |||
| 1419 | /* static rbd_device initialization */ | ||
| 1420 | spin_lock_init(&rbd_dev->lock); | ||
| 1421 | INIT_LIST_HEAD(&rbd_dev->node); | ||
| 1422 | |||
| 1423 | /* generate unique id: find highest unique id, add one */ | ||
| 1424 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1425 | |||
| 1426 | list_for_each(tmp, &rbd_dev_list) { | ||
| 1427 | struct rbd_device *rbd_dev; | ||
| 1428 | |||
| 1429 | rbd_dev = list_entry(tmp, struct rbd_device, node); | ||
| 1430 | if (rbd_dev->id >= new_id) | ||
| 1431 | new_id = rbd_dev->id + 1; | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | rbd_dev->id = new_id; | ||
| 1435 | |||
| 1436 | /* add to global list */ | ||
| 1437 | list_add_tail(&rbd_dev->node, &rbd_dev_list); | ||
| 1438 | |||
| 1439 | /* parse add command */ | ||
| 1440 | if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s " | ||
| 1441 | "%" __stringify(RBD_MAX_OPT_LEN) "s " | ||
| 1442 | "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s " | ||
| 1443 | "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s" | ||
| 1444 | "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", | ||
| 1445 | mon_dev_name, options, rbd_dev->pool_name, | ||
| 1446 | rbd_dev->obj, rbd_dev->snap_name) < 4) { | ||
| 1447 | rc = -EINVAL; | ||
| 1448 | goto err_out_slot; | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | if (rbd_dev->snap_name[0] == 0) | ||
| 1452 | rbd_dev->snap_name[0] = '-'; | ||
| 1453 | |||
| 1454 | rbd_dev->obj_len = strlen(rbd_dev->obj); | ||
| 1455 | snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s", | ||
| 1456 | rbd_dev->obj, RBD_SUFFIX); | ||
| 1457 | |||
| 1458 | /* initialize rest of new object */ | ||
| 1459 | snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id); | ||
| 1460 | rc = rbd_get_client(rbd_dev, mon_dev_name, options); | ||
| 1461 | if (rc < 0) | ||
| 1462 | goto err_out_slot; | ||
| 1463 | |||
| 1464 | mutex_unlock(&ctl_mutex); | ||
| 1465 | |||
| 1466 | /* pick the pool */ | ||
| 1467 | osdc = &rbd_dev->client->osdc; | ||
| 1468 | rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); | ||
| 1469 | if (rc < 0) | ||
| 1470 | goto err_out_client; | ||
| 1471 | rbd_dev->poolid = rc; | ||
| 1472 | |||
| 1473 | /* register our block device */ | ||
| 1474 | irc = register_blkdev(0, rbd_dev->name); | ||
| 1475 | if (irc < 0) { | ||
| 1476 | rc = irc; | ||
| 1477 | goto err_out_client; | ||
| 1478 | } | ||
| 1479 | rbd_dev->major = irc; | ||
| 1480 | |||
| 1481 | /* set up and announce blkdev mapping */ | ||
| 1482 | rc = rbd_init_disk(rbd_dev); | ||
| 1483 | if (rc) | ||
| 1484 | goto err_out_blkdev; | ||
| 1485 | |||
| 1486 | return count; | ||
| 1487 | |||
| 1488 | err_out_blkdev: | ||
| 1489 | unregister_blkdev(rbd_dev->major, rbd_dev->name); | ||
| 1490 | err_out_client: | ||
| 1491 | rbd_put_client(rbd_dev); | ||
| 1492 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1493 | err_out_slot: | ||
| 1494 | list_del_init(&rbd_dev->node); | ||
| 1495 | mutex_unlock(&ctl_mutex); | ||
| 1496 | |||
| 1497 | kfree(rbd_dev); | ||
| 1498 | err_out_opt: | ||
| 1499 | kfree(options); | ||
| 1500 | err_mon_dev: | ||
| 1501 | kfree(mon_dev_name); | ||
| 1502 | err_out_mod: | ||
| 1503 | dout("Error adding device %s\n", buf); | ||
| 1504 | module_put(THIS_MODULE); | ||
| 1505 | return rc; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static struct rbd_device *__rbd_get_dev(unsigned long id) | ||
| 1509 | { | ||
| 1510 | struct list_head *tmp; | ||
| 1511 | struct rbd_device *rbd_dev; | ||
| 1512 | |||
| 1513 | list_for_each(tmp, &rbd_dev_list) { | ||
| 1514 | rbd_dev = list_entry(tmp, struct rbd_device, node); | ||
| 1515 | if (rbd_dev->id == id) | ||
| 1516 | return rbd_dev; | ||
| 1517 | } | ||
| 1518 | return NULL; | ||
| 1519 | } | ||
| 1520 | |||
| 1521 | static ssize_t class_rbd_remove(struct class *c, | ||
| 1522 | struct class_attribute *attr, | ||
| 1523 | const char *buf, | ||
| 1524 | size_t count) | ||
| 1525 | { | ||
| 1526 | struct rbd_device *rbd_dev = NULL; | ||
| 1527 | int target_id, rc; | ||
| 1528 | unsigned long ul; | ||
| 1529 | |||
| 1530 | rc = strict_strtoul(buf, 10, &ul); | ||
| 1531 | if (rc) | ||
| 1532 | return rc; | ||
| 1533 | |||
| 1534 | /* convert to int; abort if we lost anything in the conversion */ | ||
| 1535 | target_id = (int) ul; | ||
| 1536 | if (target_id != ul) | ||
| 1537 | return -EINVAL; | ||
| 1538 | |||
| 1539 | /* remove object from list immediately */ | ||
| 1540 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1541 | |||
| 1542 | rbd_dev = __rbd_get_dev(target_id); | ||
| 1543 | if (rbd_dev) | ||
| 1544 | list_del_init(&rbd_dev->node); | ||
| 1545 | |||
| 1546 | mutex_unlock(&ctl_mutex); | ||
| 1547 | |||
| 1548 | if (!rbd_dev) | ||
| 1549 | return -ENOENT; | ||
| 1550 | |||
| 1551 | rbd_put_client(rbd_dev); | ||
| 1552 | |||
| 1553 | /* clean up and free blkdev */ | ||
| 1554 | rbd_free_disk(rbd_dev); | ||
| 1555 | unregister_blkdev(rbd_dev->major, rbd_dev->name); | ||
| 1556 | kfree(rbd_dev); | ||
| 1557 | |||
| 1558 | /* release module ref */ | ||
| 1559 | module_put(THIS_MODULE); | ||
| 1560 | |||
| 1561 | return count; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | static ssize_t class_rbd_snaps_list(struct class *c, | ||
| 1565 | struct class_attribute *attr, | ||
| 1566 | char *data) | ||
| 1567 | { | ||
| 1568 | struct rbd_device *rbd_dev = NULL; | ||
| 1569 | struct list_head *tmp; | ||
| 1570 | struct rbd_image_header *header; | ||
| 1571 | int i, n = 0, max = PAGE_SIZE; | ||
| 1572 | int ret; | ||
| 1573 | |||
| 1574 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1575 | |||
| 1576 | n += snprintf(data, max, "#id\tsnap\tKB\n"); | ||
| 1577 | |||
| 1578 | list_for_each(tmp, &rbd_dev_list) { | ||
| 1579 | char *names, *p; | ||
| 1580 | struct ceph_snap_context *snapc; | ||
| 1581 | |||
| 1582 | rbd_dev = list_entry(tmp, struct rbd_device, node); | ||
| 1583 | header = &rbd_dev->header; | ||
| 1584 | |||
| 1585 | down_read(&header->snap_rwsem); | ||
| 1586 | |||
| 1587 | names = header->snap_names; | ||
| 1588 | snapc = header->snapc; | ||
| 1589 | |||
| 1590 | n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", | ||
| 1591 | rbd_dev->id, RBD_SNAP_HEAD_NAME, | ||
| 1592 | header->image_size >> 10, | ||
| 1593 | (!rbd_dev->cur_snap ? " (*)" : "")); | ||
| 1594 | if (n == max) | ||
| 1595 | break; | ||
| 1596 | |||
| 1597 | p = names; | ||
| 1598 | for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { | ||
| 1599 | n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", | ||
| 1600 | rbd_dev->id, p, header->snap_sizes[i] >> 10, | ||
| 1601 | (rbd_dev->cur_snap && | ||
| 1602 | (snap_index(header, i) == rbd_dev->cur_snap) ? | ||
| 1603 | " (*)" : "")); | ||
| 1604 | if (n == max) | ||
| 1605 | break; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | up_read(&header->snap_rwsem); | ||
| 1609 | } | ||
| 1610 | |||
| 1611 | |||
| 1612 | ret = n; | ||
| 1613 | mutex_unlock(&ctl_mutex); | ||
| 1614 | return ret; | ||
| 1615 | } | ||
| 1616 | |||
| 1617 | static ssize_t class_rbd_snaps_refresh(struct class *c, | ||
| 1618 | struct class_attribute *attr, | ||
| 1619 | const char *buf, | ||
| 1620 | size_t count) | ||
| 1621 | { | ||
| 1622 | struct rbd_device *rbd_dev = NULL; | ||
| 1623 | int target_id, rc; | ||
| 1624 | unsigned long ul; | ||
| 1625 | int ret = count; | ||
| 1626 | |||
| 1627 | rc = strict_strtoul(buf, 10, &ul); | ||
| 1628 | if (rc) | ||
| 1629 | return rc; | ||
| 1630 | |||
| 1631 | /* convert to int; abort if we lost anything in the conversion */ | ||
| 1632 | target_id = (int) ul; | ||
| 1633 | if (target_id != ul) | ||
| 1634 | return -EINVAL; | ||
| 1635 | |||
| 1636 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1637 | |||
| 1638 | rbd_dev = __rbd_get_dev(target_id); | ||
| 1639 | if (!rbd_dev) { | ||
| 1640 | ret = -ENOENT; | ||
| 1641 | goto done; | ||
| 1642 | } | ||
| 1643 | |||
| 1644 | rc = rbd_update_snaps(rbd_dev); | ||
| 1645 | if (rc < 0) | ||
| 1646 | ret = rc; | ||
| 1647 | |||
| 1648 | done: | ||
| 1649 | mutex_unlock(&ctl_mutex); | ||
| 1650 | return ret; | ||
| 1651 | } | ||
| 1652 | |||
| 1653 | static ssize_t class_rbd_snap_create(struct class *c, | ||
| 1654 | struct class_attribute *attr, | ||
| 1655 | const char *buf, | ||
| 1656 | size_t count) | ||
| 1657 | { | ||
| 1658 | struct rbd_device *rbd_dev = NULL; | ||
| 1659 | int target_id, ret; | ||
| 1660 | char *name; | ||
| 1661 | |||
| 1662 | name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL); | ||
| 1663 | if (!name) | ||
| 1664 | return -ENOMEM; | ||
| 1665 | |||
| 1666 | /* parse snaps add command */ | ||
| 1667 | if (sscanf(buf, "%d " | ||
| 1668 | "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", | ||
| 1669 | &target_id, | ||
| 1670 | name) != 2) { | ||
| 1671 | ret = -EINVAL; | ||
| 1672 | goto done; | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1676 | |||
| 1677 | rbd_dev = __rbd_get_dev(target_id); | ||
| 1678 | if (!rbd_dev) { | ||
| 1679 | ret = -ENOENT; | ||
| 1680 | goto done_unlock; | ||
| 1681 | } | ||
| 1682 | |||
| 1683 | ret = rbd_header_add_snap(rbd_dev, | ||
| 1684 | name, GFP_KERNEL); | ||
| 1685 | if (ret < 0) | ||
| 1686 | goto done_unlock; | ||
| 1687 | |||
| 1688 | ret = rbd_update_snaps(rbd_dev); | ||
| 1689 | if (ret < 0) | ||
| 1690 | goto done_unlock; | ||
| 1691 | |||
| 1692 | ret = count; | ||
| 1693 | done_unlock: | ||
| 1694 | mutex_unlock(&ctl_mutex); | ||
| 1695 | done: | ||
| 1696 | kfree(name); | ||
| 1697 | return ret; | ||
| 1698 | } | ||
| 1699 | |||
| 1700 | static ssize_t class_rbd_rollback(struct class *c, | ||
| 1701 | struct class_attribute *attr, | ||
| 1702 | const char *buf, | ||
| 1703 | size_t count) | ||
| 1704 | { | ||
| 1705 | struct rbd_device *rbd_dev = NULL; | ||
| 1706 | int target_id, ret; | ||
| 1707 | u64 snapid; | ||
| 1708 | char snap_name[RBD_MAX_SNAP_NAME_LEN]; | ||
| 1709 | u64 cur_ofs; | ||
| 1710 | char *seg_name; | ||
| 1711 | |||
| 1712 | /* parse snap rollback command */ | ||
| 1713 | if (sscanf(buf, "%d " | ||
| 1714 | "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", | ||
| 1715 | &target_id, | ||
| 1716 | snap_name) != 2) { | ||
| 1717 | return -EINVAL; | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | ret = -ENOMEM; | ||
| 1721 | seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); | ||
| 1722 | if (!seg_name) | ||
| 1723 | return ret; | ||
| 1724 | |||
| 1725 | mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); | ||
| 1726 | |||
| 1727 | rbd_dev = __rbd_get_dev(target_id); | ||
| 1728 | if (!rbd_dev) { | ||
| 1729 | ret = -ENOENT; | ||
| 1730 | goto done_unlock; | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL); | ||
| 1734 | if (ret < 0) | ||
| 1735 | goto done_unlock; | ||
| 1736 | |||
| 1737 | dout("snapid=%lld\n", snapid); | ||
| 1738 | |||
| 1739 | cur_ofs = 0; | ||
| 1740 | while (cur_ofs < rbd_dev->header.image_size) { | ||
| 1741 | cur_ofs += rbd_get_segment(&rbd_dev->header, | ||
| 1742 | rbd_dev->obj, | ||
| 1743 | cur_ofs, (u64)-1, | ||
| 1744 | seg_name, NULL); | ||
| 1745 | dout("seg_name=%s\n", seg_name); | ||
| 1746 | |||
| 1747 | ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name); | ||
| 1748 | if (ret < 0) | ||
| 1749 | pr_warning("could not roll back obj %s err=%d\n", | ||
| 1750 | seg_name, ret); | ||
| 1751 | } | ||
| 1752 | |||
| 1753 | ret = rbd_update_snaps(rbd_dev); | ||
| 1754 | if (ret < 0) | ||
| 1755 | goto done_unlock; | ||
| 1756 | |||
| 1757 | ret = count; | ||
| 1758 | |||
| 1759 | done_unlock: | ||
| 1760 | mutex_unlock(&ctl_mutex); | ||
| 1761 | kfree(seg_name); | ||
| 1762 | |||
| 1763 | return ret; | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | static struct class_attribute class_rbd_attrs[] = { | ||
| 1767 | __ATTR(add, 0200, NULL, class_rbd_add), | ||
| 1768 | __ATTR(remove, 0200, NULL, class_rbd_remove), | ||
| 1769 | __ATTR(list, 0444, class_rbd_list, NULL), | ||
| 1770 | __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh), | ||
| 1771 | __ATTR(snap_create, 0200, NULL, class_rbd_snap_create), | ||
| 1772 | __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL), | ||
| 1773 | __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback), | ||
| 1774 | __ATTR_NULL | ||
| 1775 | }; | ||
| 1776 | |||
| 1777 | /* | ||
| 1778 | * create control files in sysfs | ||
| 1779 | * /sys/class/rbd/... | ||
| 1780 | */ | ||
| 1781 | static int rbd_sysfs_init(void) | ||
| 1782 | { | ||
| 1783 | int ret = -ENOMEM; | ||
| 1784 | |||
| 1785 | class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL); | ||
| 1786 | if (!class_rbd) | ||
| 1787 | goto out; | ||
| 1788 | |||
| 1789 | class_rbd->name = DRV_NAME; | ||
| 1790 | class_rbd->owner = THIS_MODULE; | ||
| 1791 | class_rbd->class_release = class_rbd_release; | ||
| 1792 | class_rbd->class_attrs = class_rbd_attrs; | ||
| 1793 | |||
| 1794 | ret = class_register(class_rbd); | ||
| 1795 | if (ret) | ||
| 1796 | goto out_class; | ||
| 1797 | return 0; | ||
| 1798 | |||
| 1799 | out_class: | ||
| 1800 | kfree(class_rbd); | ||
| 1801 | class_rbd = NULL; | ||
| 1802 | pr_err(DRV_NAME ": failed to create class rbd\n"); | ||
| 1803 | out: | ||
| 1804 | return ret; | ||
| 1805 | } | ||
| 1806 | |||
| 1807 | static void rbd_sysfs_cleanup(void) | ||
| 1808 | { | ||
| 1809 | if (class_rbd) | ||
| 1810 | class_destroy(class_rbd); | ||
| 1811 | class_rbd = NULL; | ||
| 1812 | } | ||
| 1813 | |||
| 1814 | int __init rbd_init(void) | ||
| 1815 | { | ||
| 1816 | int rc; | ||
| 1817 | |||
| 1818 | rc = rbd_sysfs_init(); | ||
| 1819 | if (rc) | ||
| 1820 | return rc; | ||
| 1821 | spin_lock_init(&node_lock); | ||
| 1822 | pr_info("loaded " DRV_NAME_LONG "\n"); | ||
| 1823 | return 0; | ||
| 1824 | } | ||
| 1825 | |||
| 1826 | void __exit rbd_exit(void) | ||
| 1827 | { | ||
| 1828 | rbd_sysfs_cleanup(); | ||
| 1829 | } | ||
| 1830 | |||
| 1831 | module_init(rbd_init); | ||
| 1832 | module_exit(rbd_exit); | ||
| 1833 | |||
| 1834 | MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); | ||
| 1835 | MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); | ||
| 1836 | MODULE_DESCRIPTION("rados block device"); | ||
| 1837 | |||
| 1838 | /* following authorship retained from original osdblk.c */ | ||
| 1839 | MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); | ||
| 1840 | |||
| 1841 | MODULE_LICENSE("GPL"); | ||
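For orientation only: the class_rbd_attrs table above is the driver's whole userspace-facing control surface, a handful of write-only and read-only files under /sys/class/rbd. The sketch below is not part of the patch; it just shows how userspace might drive the "add" file, mirroring the field order expected by the sscanf() in class_rbd_add() ("<mon_addr> <options> <pool> <image> [<snap>]"). The monitor address, option string, pool and image names are invented for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * Made-up monitor address, pool and image.  The trailing "-" selects
	 * the image head rather than a snapshot, matching what class_rbd_add()
	 * substitutes when the snapshot name is left empty.
	 */
	const char *cmd = "192.168.0.1:6789 name=admin rbd testimage -";
	FILE *f = fopen("/sys/class/rbd/add", "w");

	if (!f) {
		perror("open /sys/class/rbd/add");
		return 1;
	}
	if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd))
		perror("write add command");
	fclose(f);
	return 0;
}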
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h new file mode 100644 index 000000000000..fc6c678aa2cb --- /dev/null +++ b/drivers/block/rbd_types.h | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* | ||
| 2 | * Ceph - scalable distributed file system | ||
| 3 | * | ||
| 4 | * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net> | ||
| 5 | * | ||
| 6 | * This is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU Lesser General Public | ||
| 8 | * License version 2.1, as published by the Free Software | ||
| 9 | * Foundation. See file COPYING. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef CEPH_RBD_TYPES_H | ||
| 14 | #define CEPH_RBD_TYPES_H | ||
| 15 | |||
| 16 | #include <linux/types.h> | ||
| 17 | |||
| 18 | /* | ||
| 19 | * rbd image 'foo' consists of objects | ||
| 20 | * foo.rbd - image metadata | ||
| 21 | * foo.00000000 | ||
| 22 | * foo.00000001 | ||
| 23 | * ... - data | ||
| 24 | */ | ||
| 25 | |||
| 26 | #define RBD_SUFFIX ".rbd" | ||
| 27 | #define RBD_DIRECTORY "rbd_directory" | ||
| 28 | #define RBD_INFO "rbd_info" | ||
| 29 | |||
| 30 | #define RBD_DEFAULT_OBJ_ORDER 22 /* 4MB */ | ||
| 31 | #define RBD_MIN_OBJ_ORDER 16 | ||
| 32 | #define RBD_MAX_OBJ_ORDER 30 | ||
| 33 | |||
| 34 | #define RBD_MAX_OBJ_NAME_LEN 96 | ||
| 35 | #define RBD_MAX_SEG_NAME_LEN 128 | ||
| 36 | |||
| 37 | #define RBD_COMP_NONE 0 | ||
| 38 | #define RBD_CRYPT_NONE 0 | ||
| 39 | |||
| 40 | #define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n" | ||
| 41 | #define RBD_HEADER_SIGNATURE "RBD" | ||
| 42 | #define RBD_HEADER_VERSION "001.005" | ||
| 43 | |||
| 44 | struct rbd_info { | ||
| 45 | __le64 max_id; | ||
| 46 | } __attribute__ ((packed)); | ||
| 47 | |||
| 48 | struct rbd_image_snap_ondisk { | ||
| 49 | __le64 id; | ||
| 50 | __le64 image_size; | ||
| 51 | } __attribute__((packed)); | ||
| 52 | |||
| 53 | struct rbd_image_header_ondisk { | ||
| 54 | char text[40]; | ||
| 55 | char block_name[24]; | ||
| 56 | char signature[4]; | ||
| 57 | char version[8]; | ||
| 58 | struct { | ||
| 59 | __u8 order; | ||
| 60 | __u8 crypt_type; | ||
| 61 | __u8 comp_type; | ||
| 62 | __u8 unused; | ||
| 63 | } __attribute__((packed)) options; | ||
| 64 | __le64 image_size; | ||
| 65 | __le64 snap_seq; | ||
| 66 | __le32 snap_count; | ||
| 67 | __le32 reserved; | ||
| 68 | __le64 snap_names_len; | ||
| 69 | struct rbd_image_snap_ondisk snaps[0]; | ||
| 70 | } __attribute__((packed)); | ||
| 71 | |||
| 72 | |||
| 73 | #endif | ||
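As a reading aid for the on-disk layout above, here is a rough sketch (not part of the patch, and not the driver's actual validation code) of how the header would be decoded. The helper name rbd_header_sanity() is hypothetical; the field names and macros come from rbd_types.h. The on-disk integers are little-endian, and options.order is the log2 of the per-object size.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#include "rbd_types.h"

static int rbd_header_sanity(const struct rbd_image_header_ondisk *h,
			     u64 *image_size, u64 *obj_size)
{
	/* every header object starts with the fixed magic text */
	if (memcmp(h->text, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
		return -ENXIO;

	/* on-disk fields are little-endian */
	*image_size = le64_to_cpu(h->image_size);

	/* objects are 2^order bytes; the default order of 22 gives 4 MB */
	*obj_size = 1ULL << h->options.order;

	return 0;
}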
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2aafafca2b13..8320490226b7 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | #include <linux/spinlock.h> | 2 | #include <linux/spinlock.h> |
| 3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
| 4 | #include <linux/blkdev.h> | 4 | #include <linux/blkdev.h> |
| 5 | #include <linux/smp_lock.h> | ||
| 6 | #include <linux/hdreg.h> | 5 | #include <linux/hdreg.h> |
| 7 | #include <linux/virtio.h> | 6 | #include <linux/virtio.h> |
| 8 | #include <linux/virtio_blk.h> | 7 | #include <linux/virtio_blk.h> |
| @@ -202,6 +201,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) | |||
| 202 | struct virtio_blk *vblk = disk->private_data; | 201 | struct virtio_blk *vblk = disk->private_data; |
| 203 | struct request *req; | 202 | struct request *req; |
| 204 | struct bio *bio; | 203 | struct bio *bio; |
| 204 | int err; | ||
| 205 | 205 | ||
| 206 | bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, | 206 | bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, |
| 207 | GFP_KERNEL); | 207 | GFP_KERNEL); |
| @@ -215,11 +215,14 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) | |||
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | req->cmd_type = REQ_TYPE_SPECIAL; | 217 | req->cmd_type = REQ_TYPE_SPECIAL; |
| 218 | return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); | 218 | err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); |
| 219 | blk_put_request(req); | ||
| 220 | |||
| 221 | return err; | ||
| 219 | } | 222 | } |
| 220 | 223 | ||
| 221 | static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, | 224 | static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, |
| 222 | unsigned cmd, unsigned long data) | 225 | unsigned int cmd, unsigned long data) |
| 223 | { | 226 | { |
| 224 | struct gendisk *disk = bdev->bd_disk; | 227 | struct gendisk *disk = bdev->bd_disk; |
| 225 | struct virtio_blk *vblk = disk->private_data; | 228 | struct virtio_blk *vblk = disk->private_data; |
| @@ -234,18 +237,6 @@ static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 234 | (void __user *)data); | 237 | (void __user *)data); |
| 235 | } | 238 | } |
| 236 | 239 | ||
| 237 | static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, | ||
| 238 | unsigned int cmd, unsigned long param) | ||
| 239 | { | ||
| 240 | int ret; | ||
| 241 | |||
| 242 | lock_kernel(); | ||
| 243 | ret = virtblk_locked_ioctl(bdev, mode, cmd, param); | ||
| 244 | unlock_kernel(); | ||
| 245 | |||
| 246 | return ret; | ||
| 247 | } | ||
| 248 | |||
| 249 | /* We provide getgeo only to please some old bootloader/partitioning tools */ | 240 | /* We provide getgeo only to please some old bootloader/partitioning tools */ |
| 250 | static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) | 241 | static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) |
| 251 | { | 242 | { |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 05ad4a17a28f..7c4133582dba 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
| @@ -47,6 +47,16 @@ enum tpm_duration { | |||
| 47 | #define TPM_MAX_PROTECTED_ORDINAL 12 | 47 | #define TPM_MAX_PROTECTED_ORDINAL 12 |
| 48 | #define TPM_PROTECTED_ORDINAL_MASK 0xFF | 48 | #define TPM_PROTECTED_ORDINAL_MASK 0xFF |
| 49 | 49 | ||
| 50 | /* | ||
| 51 | * Bug workaround - some TPMs don't flush the most | ||
| 52 | * recently changed pcr on suspend, so force the flush | ||
| 53 | * with an extend to the selected _unused_ non-volatile pcr. | ||
| 54 | */ | ||
| 55 | static int tpm_suspend_pcr; | ||
| 56 | module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); | ||
| 57 | MODULE_PARM_DESC(suspend_pcr, | ||
| 58 | "PCR to use for dummy writes to faciltate flush on suspend."); | ||
| 59 | |||
| 50 | static LIST_HEAD(tpm_chip_list); | 60 | static LIST_HEAD(tpm_chip_list); |
| 51 | static DEFINE_SPINLOCK(driver_lock); | 61 | static DEFINE_SPINLOCK(driver_lock); |
| 52 | static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); | 62 | static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); |
| @@ -1077,18 +1087,6 @@ static struct tpm_input_header savestate_header = { | |||
| 1077 | .ordinal = TPM_ORD_SAVESTATE | 1087 | .ordinal = TPM_ORD_SAVESTATE |
| 1078 | }; | 1088 | }; |
| 1079 | 1089 | ||
| 1080 | /* Bug workaround - some TPM's don't flush the most | ||
| 1081 | * recently changed pcr on suspend, so force the flush | ||
| 1082 | * with an extend to the selected _unused_ non-volatile pcr. | ||
| 1083 | */ | ||
| 1084 | static int tpm_suspend_pcr; | ||
| 1085 | static int __init tpm_suspend_setup(char *str) | ||
| 1086 | { | ||
| 1087 | get_option(&str, &tpm_suspend_pcr); | ||
| 1088 | return 1; | ||
| 1089 | } | ||
| 1090 | __setup("tpm_suspend_pcr=", tpm_suspend_setup); | ||
| 1091 | |||
| 1092 | /* | 1090 | /* |
| 1093 | * We are about to suspend. Save the TPM state | 1091 | * We are about to suspend. Save the TPM state |
| 1094 | * so that it can be restored. | 1092 | * so that it can be restored. |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index c810481a5bc2..6c1b676643a9 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -48,6 +48,9 @@ struct ports_driver_data { | |||
| 48 | /* Used for exporting per-port information to debugfs */ | 48 | /* Used for exporting per-port information to debugfs */ |
| 49 | struct dentry *debugfs_dir; | 49 | struct dentry *debugfs_dir; |
| 50 | 50 | ||
| 51 | /* List of all the devices we're handling */ | ||
| 52 | struct list_head portdevs; | ||
| 53 | |||
| 51 | /* Number of devices this driver is handling */ | 54 | /* Number of devices this driver is handling */ |
| 52 | unsigned int index; | 55 | unsigned int index; |
| 53 | 56 | ||
| @@ -108,6 +111,9 @@ struct port_buffer { | |||
| 108 | * ports for that device (vdev->priv). | 111 | * ports for that device (vdev->priv). |
| 109 | */ | 112 | */ |
| 110 | struct ports_device { | 113 | struct ports_device { |
| 114 | /* Next portdev in the list, head is in the pdrvdata struct */ | ||
| 115 | struct list_head list; | ||
| 116 | |||
| 111 | /* | 117 | /* |
| 112 | * Workqueue handlers where we process deferred work after | 118 | * Workqueue handlers where we process deferred work after |
| 113 | * notification | 119 | * notification |
| @@ -178,15 +184,21 @@ struct port { | |||
| 178 | struct console cons; | 184 | struct console cons; |
| 179 | 185 | ||
| 180 | /* Each port associates with a separate char device */ | 186 | /* Each port associates with a separate char device */ |
| 181 | struct cdev cdev; | 187 | struct cdev *cdev; |
| 182 | struct device *dev; | 188 | struct device *dev; |
| 183 | 189 | ||
| 190 | /* Reference-counting to handle port hot-unplugs and file operations */ | ||
| 191 | struct kref kref; | ||
| 192 | |||
| 184 | /* A waitqueue for poll() or blocking read operations */ | 193 | /* A waitqueue for poll() or blocking read operations */ |
| 185 | wait_queue_head_t waitqueue; | 194 | wait_queue_head_t waitqueue; |
| 186 | 195 | ||
| 187 | /* The 'name' of the port that we expose via sysfs properties */ | 196 | /* The 'name' of the port that we expose via sysfs properties */ |
| 188 | char *name; | 197 | char *name; |
| 189 | 198 | ||
| 199 | /* We can notify apps of host connect / disconnect events via SIGIO */ | ||
| 200 | struct fasync_struct *async_queue; | ||
| 201 | |||
| 190 | /* The 'id' to identify the port with the Host */ | 202 | /* The 'id' to identify the port with the Host */ |
| 191 | u32 id; | 203 | u32 id; |
| 192 | 204 | ||
| @@ -221,6 +233,41 @@ out: | |||
| 221 | return port; | 233 | return port; |
| 222 | } | 234 | } |
| 223 | 235 | ||
| 236 | static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, | ||
| 237 | dev_t dev) | ||
| 238 | { | ||
| 239 | struct port *port; | ||
| 240 | unsigned long flags; | ||
| 241 | |||
| 242 | spin_lock_irqsave(&portdev->ports_lock, flags); | ||
| 243 | list_for_each_entry(port, &portdev->ports, list) | ||
| 244 | if (port->cdev->dev == dev) | ||
| 245 | goto out; | ||
| 246 | port = NULL; | ||
| 247 | out: | ||
| 248 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | ||
| 249 | |||
| 250 | return port; | ||
| 251 | } | ||
| 252 | |||
| 253 | static struct port *find_port_by_devt(dev_t dev) | ||
| 254 | { | ||
| 255 | struct ports_device *portdev; | ||
| 256 | struct port *port; | ||
| 257 | unsigned long flags; | ||
| 258 | |||
| 259 | spin_lock_irqsave(&pdrvdata_lock, flags); | ||
| 260 | list_for_each_entry(portdev, &pdrvdata.portdevs, list) { | ||
| 261 | port = find_port_by_devt_in_portdev(portdev, dev); | ||
| 262 | if (port) | ||
| 263 | goto out; | ||
| 264 | } | ||
| 265 | port = NULL; | ||
| 266 | out: | ||
| 267 | spin_unlock_irqrestore(&pdrvdata_lock, flags); | ||
| 268 | return port; | ||
| 269 | } | ||
| 270 | |||
| 224 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) | 271 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) |
| 225 | { | 272 | { |
| 226 | struct port *port; | 273 | struct port *port; |
| @@ -410,7 +457,10 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | |||
| 410 | static ssize_t send_control_msg(struct port *port, unsigned int event, | 457 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
| 411 | unsigned int value) | 458 | unsigned int value) |
| 412 | { | 459 | { |
| 413 | return __send_control_msg(port->portdev, port->id, event, value); | 460 | /* Did the port get unplugged before userspace closed it? */ |
| 461 | if (port->portdev) | ||
| 462 | return __send_control_msg(port->portdev, port->id, event, value); | ||
| 463 | return 0; | ||
| 414 | } | 464 | } |
| 415 | 465 | ||
| 416 | /* Callers must take the port->outvq_lock */ | 466 | /* Callers must take the port->outvq_lock */ |
| @@ -459,9 +509,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, | |||
| 459 | 509 | ||
| 460 | /* | 510 | /* |
| 461 | * Wait till the host acknowledges it pushed out the data we | 511 | * Wait till the host acknowledges it pushed out the data we |
| 462 | * sent. This is done for ports in blocking mode or for data | 512 | * sent. This is done for data from the hvc_console; the tty |
| 463 | * from the hvc_console; the tty operations are performed with | 513 | * operations are performed with spinlocks held so we can't |
| 464 | * spinlocks held so we can't sleep here. | 514 | * sleep here. An alternative would be to copy the data to a |
| 515 | * buffer and relax the spinning requirement. The downside is | ||
| 516 | * we need to kmalloc a GFP_ATOMIC buffer each time the | ||
| 517 | * console driver writes something out. | ||
| 465 | */ | 518 | */ |
| 466 | while (!virtqueue_get_buf(out_vq, &len)) | 519 | while (!virtqueue_get_buf(out_vq, &len)) |
| 467 | cpu_relax(); | 520 | cpu_relax(); |
| @@ -522,6 +575,10 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | |||
| 522 | /* The condition that must be true for polling to end */ | 575 | /* The condition that must be true for polling to end */ |
| 523 | static bool will_read_block(struct port *port) | 576 | static bool will_read_block(struct port *port) |
| 524 | { | 577 | { |
| 578 | if (!port->guest_connected) { | ||
| 579 | /* Port got hot-unplugged. Let's exit. */ | ||
| 580 | return false; | ||
| 581 | } | ||
| 525 | return !port_has_data(port) && port->host_connected; | 582 | return !port_has_data(port) && port->host_connected; |
| 526 | } | 583 | } |
| 527 | 584 | ||
| @@ -572,6 +629,9 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
| 572 | if (ret < 0) | 629 | if (ret < 0) |
| 573 | return ret; | 630 | return ret; |
| 574 | } | 631 | } |
| 632 | /* Port got hot-unplugged. */ | ||
| 633 | if (!port->guest_connected) | ||
| 634 | return -ENODEV; | ||
| 575 | /* | 635 | /* |
| 576 | * We could've received a disconnection message while we were | 636 | * We could've received a disconnection message while we were |
| 577 | * waiting for more data. | 637 | * waiting for more data. |
| @@ -613,6 +673,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
| 613 | if (ret < 0) | 673 | if (ret < 0) |
| 614 | return ret; | 674 | return ret; |
| 615 | } | 675 | } |
| 676 | /* Port got hot-unplugged. */ | ||
| 677 | if (!port->guest_connected) | ||
| 678 | return -ENODEV; | ||
| 616 | 679 | ||
| 617 | count = min((size_t)(32 * 1024), count); | 680 | count = min((size_t)(32 * 1024), count); |
| 618 | 681 | ||
| @@ -626,6 +689,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
| 626 | goto free_buf; | 689 | goto free_buf; |
| 627 | } | 690 | } |
| 628 | 691 | ||
| 692 | /* | ||
| 693 | * We now ask send_buf() to not spin for generic ports -- we | ||
| 694 | * can re-use the same code path that non-blocking file | ||
| 695 | * descriptors take for blocking file descriptors since the | ||
| 696 | * wait is already done and we're certain the write will go | ||
| 697 | * through to the host. | ||
| 698 | */ | ||
| 699 | nonblock = true; | ||
| 629 | ret = send_buf(port, buf, count, nonblock); | 700 | ret = send_buf(port, buf, count, nonblock); |
| 630 | 701 | ||
| 631 | if (nonblock && ret > 0) | 702 | if (nonblock && ret > 0) |
| @@ -645,6 +716,10 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | |||
| 645 | port = filp->private_data; | 716 | port = filp->private_data; |
| 646 | poll_wait(filp, &port->waitqueue, wait); | 717 | poll_wait(filp, &port->waitqueue, wait); |
| 647 | 718 | ||
| 719 | if (!port->guest_connected) { | ||
| 720 | /* Port got unplugged */ | ||
| 721 | return POLLHUP; | ||
| 722 | } | ||
| 648 | ret = 0; | 723 | ret = 0; |
| 649 | if (!will_read_block(port)) | 724 | if (!will_read_block(port)) |
| 650 | ret |= POLLIN | POLLRDNORM; | 725 | ret |= POLLIN | POLLRDNORM; |
| @@ -656,6 +731,8 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | |||
| 656 | return ret; | 731 | return ret; |
| 657 | } | 732 | } |
| 658 | 733 | ||
| 734 | static void remove_port(struct kref *kref); | ||
| 735 | |||
| 659 | static int port_fops_release(struct inode *inode, struct file *filp) | 736 | static int port_fops_release(struct inode *inode, struct file *filp) |
| 660 | { | 737 | { |
| 661 | struct port *port; | 738 | struct port *port; |
| @@ -676,6 +753,16 @@ static int port_fops_release(struct inode *inode, struct file *filp) | |||
| 676 | reclaim_consumed_buffers(port); | 753 | reclaim_consumed_buffers(port); |
| 677 | spin_unlock_irq(&port->outvq_lock); | 754 | spin_unlock_irq(&port->outvq_lock); |
| 678 | 755 | ||
| 756 | /* | ||
| 757 | * Locks aren't necessary here as a port can't be opened after | ||
| 758 | * unplug, and if a port isn't unplugged, a kref would already | ||
| 759 | * exist for the port. Plus, taking ports_lock here would | ||
| 760 | * create a dependency on other locks taken by functions | ||
| 761 | * inside remove_port if we're the last holder of the port, | ||
| 762 | * creating many problems. | ||
| 763 | */ | ||
| 764 | kref_put(&port->kref, remove_port); | ||
| 765 | |||
| 679 | return 0; | 766 | return 0; |
| 680 | } | 767 | } |
| 681 | 768 | ||
| @@ -683,22 +770,31 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
| 683 | { | 770 | { |
| 684 | struct cdev *cdev = inode->i_cdev; | 771 | struct cdev *cdev = inode->i_cdev; |
| 685 | struct port *port; | 772 | struct port *port; |
| 773 | int ret; | ||
| 686 | 774 | ||
| 687 | port = container_of(cdev, struct port, cdev); | 775 | port = find_port_by_devt(cdev->dev); |
| 688 | filp->private_data = port; | 776 | filp->private_data = port; |
| 689 | 777 | ||
| 778 | /* Prevent against a port getting hot-unplugged at the same time */ | ||
| 779 | spin_lock_irq(&port->portdev->ports_lock); | ||
| 780 | kref_get(&port->kref); | ||
| 781 | spin_unlock_irq(&port->portdev->ports_lock); | ||
| 782 | |||
| 690 | /* | 783 | /* |
| 691 | * Don't allow opening of console port devices -- that's done | 784 | * Don't allow opening of console port devices -- that's done |
| 692 | * via /dev/hvc | 785 | * via /dev/hvc |
| 693 | */ | 786 | */ |
| 694 | if (is_console_port(port)) | 787 | if (is_console_port(port)) { |
| 695 | return -ENXIO; | 788 | ret = -ENXIO; |
| 789 | goto out; | ||
| 790 | } | ||
| 696 | 791 | ||
| 697 | /* Allow only one process to open a particular port at a time */ | 792 | /* Allow only one process to open a particular port at a time */ |
| 698 | spin_lock_irq(&port->inbuf_lock); | 793 | spin_lock_irq(&port->inbuf_lock); |
| 699 | if (port->guest_connected) { | 794 | if (port->guest_connected) { |
| 700 | spin_unlock_irq(&port->inbuf_lock); | 795 | spin_unlock_irq(&port->inbuf_lock); |
| 701 | return -EMFILE; | 796 | ret = -EMFILE; |
| 797 | goto out; | ||
| 702 | } | 798 | } |
| 703 | 799 | ||
| 704 | port->guest_connected = true; | 800 | port->guest_connected = true; |
| @@ -713,10 +809,23 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
| 713 | reclaim_consumed_buffers(port); | 809 | reclaim_consumed_buffers(port); |
| 714 | spin_unlock_irq(&port->outvq_lock); | 810 | spin_unlock_irq(&port->outvq_lock); |
| 715 | 811 | ||
| 812 | nonseekable_open(inode, filp); | ||
| 813 | |||
| 716 | /* Notify host of port being opened */ | 814 | /* Notify host of port being opened */ |
| 717 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); | 815 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
| 718 | 816 | ||
| 719 | return 0; | 817 | return 0; |
| 818 | out: | ||
| 819 | kref_put(&port->kref, remove_port); | ||
| 820 | return ret; | ||
| 821 | } | ||
| 822 | |||
| 823 | static int port_fops_fasync(int fd, struct file *filp, int mode) | ||
| 824 | { | ||
| 825 | struct port *port; | ||
| 826 | |||
| 827 | port = filp->private_data; | ||
| 828 | return fasync_helper(fd, filp, mode, &port->async_queue); | ||
| 720 | } | 829 | } |
| 721 | 830 | ||
| 722 | /* | 831 | /* |
| @@ -732,6 +841,8 @@ static const struct file_operations port_fops = { | |||
| 732 | .write = port_fops_write, | 841 | .write = port_fops_write, |
| 733 | .poll = port_fops_poll, | 842 | .poll = port_fops_poll, |
| 734 | .release = port_fops_release, | 843 | .release = port_fops_release, |
| 844 | .fasync = port_fops_fasync, | ||
| 845 | .llseek = no_llseek, | ||
| 735 | }; | 846 | }; |
| 736 | 847 | ||
| 737 | /* | 848 | /* |
| @@ -990,6 +1101,12 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | |||
| 990 | return nr_added_bufs; | 1101 | return nr_added_bufs; |
| 991 | } | 1102 | } |
| 992 | 1103 | ||
| 1104 | static void send_sigio_to_port(struct port *port) | ||
| 1105 | { | ||
| 1106 | if (port->async_queue && port->guest_connected) | ||
| 1107 | kill_fasync(&port->async_queue, SIGIO, POLL_OUT); | ||
| 1108 | } | ||
| 1109 | |||
| 993 | static int add_port(struct ports_device *portdev, u32 id) | 1110 | static int add_port(struct ports_device *portdev, u32 id) |
| 994 | { | 1111 | { |
| 995 | char debugfs_name[16]; | 1112 | char debugfs_name[16]; |
| @@ -1004,6 +1121,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
| 1004 | err = -ENOMEM; | 1121 | err = -ENOMEM; |
| 1005 | goto fail; | 1122 | goto fail; |
| 1006 | } | 1123 | } |
| 1124 | kref_init(&port->kref); | ||
| 1007 | 1125 | ||
| 1008 | port->portdev = portdev; | 1126 | port->portdev = portdev; |
| 1009 | port->id = id; | 1127 | port->id = id; |
| @@ -1011,6 +1129,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
| 1011 | port->name = NULL; | 1129 | port->name = NULL; |
| 1012 | port->inbuf = NULL; | 1130 | port->inbuf = NULL; |
| 1013 | port->cons.hvc = NULL; | 1131 | port->cons.hvc = NULL; |
| 1132 | port->async_queue = NULL; | ||
| 1014 | 1133 | ||
| 1015 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; | 1134 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; |
| 1016 | 1135 | ||
| @@ -1021,14 +1140,20 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
| 1021 | port->in_vq = portdev->in_vqs[port->id]; | 1140 | port->in_vq = portdev->in_vqs[port->id]; |
| 1022 | port->out_vq = portdev->out_vqs[port->id]; | 1141 | port->out_vq = portdev->out_vqs[port->id]; |
| 1023 | 1142 | ||
| 1024 | cdev_init(&port->cdev, &port_fops); | 1143 | port->cdev = cdev_alloc(); |
| 1144 | if (!port->cdev) { | ||
| 1145 | dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); | ||
| 1146 | err = -ENOMEM; | ||
| 1147 | goto free_port; | ||
| 1148 | } | ||
| 1149 | port->cdev->ops = &port_fops; | ||
| 1025 | 1150 | ||
| 1026 | devt = MKDEV(portdev->chr_major, id); | 1151 | devt = MKDEV(portdev->chr_major, id); |
| 1027 | err = cdev_add(&port->cdev, devt, 1); | 1152 | err = cdev_add(port->cdev, devt, 1); |
| 1028 | if (err < 0) { | 1153 | if (err < 0) { |
| 1029 | dev_err(&port->portdev->vdev->dev, | 1154 | dev_err(&port->portdev->vdev->dev, |
| 1030 | "Error %d adding cdev for port %u\n", err, id); | 1155 | "Error %d adding cdev for port %u\n", err, id); |
| 1031 | goto free_port; | 1156 | goto free_cdev; |
| 1032 | } | 1157 | } |
| 1033 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | 1158 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, |
| 1034 | devt, port, "vport%up%u", | 1159 | devt, port, "vport%up%u", |
| @@ -1093,7 +1218,7 @@ free_inbufs: | |||
| 1093 | free_device: | 1218 | free_device: |
| 1094 | device_destroy(pdrvdata.class, port->dev->devt); | 1219 | device_destroy(pdrvdata.class, port->dev->devt); |
| 1095 | free_cdev: | 1220 | free_cdev: |
| 1096 | cdev_del(&port->cdev); | 1221 | cdev_del(port->cdev); |
| 1097 | free_port: | 1222 | free_port: |
| 1098 | kfree(port); | 1223 | kfree(port); |
| 1099 | fail: | 1224 | fail: |
| @@ -1102,21 +1227,45 @@ fail: | |||
| 1102 | return err; | 1227 | return err; |
| 1103 | } | 1228 | } |
| 1104 | 1229 | ||
| 1105 | /* Remove all port-specific data. */ | 1230 | /* No users remain, remove all port-specific data. */ |
| 1106 | static int remove_port(struct port *port) | 1231 | static void remove_port(struct kref *kref) |
| 1232 | { | ||
| 1233 | struct port *port; | ||
| 1234 | |||
| 1235 | port = container_of(kref, struct port, kref); | ||
| 1236 | |||
| 1237 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
| 1238 | device_destroy(pdrvdata.class, port->dev->devt); | ||
| 1239 | cdev_del(port->cdev); | ||
| 1240 | |||
| 1241 | kfree(port->name); | ||
| 1242 | |||
| 1243 | debugfs_remove(port->debugfs_file); | ||
| 1244 | |||
| 1245 | kfree(port); | ||
| 1246 | } | ||
| 1247 | |||
| 1248 | /* | ||
| 1249 | * Port got unplugged. Remove port from portdev's list and drop the | ||
| 1250 | * kref reference. If no userspace has this port opened, it will | ||
| 1251 | * result in immediate removal of the port. | ||
| 1252 | */ | ||
| 1253 | static void unplug_port(struct port *port) | ||
| 1107 | { | 1254 | { |
| 1108 | struct port_buffer *buf; | 1255 | struct port_buffer *buf; |
| 1109 | 1256 | ||
| 1257 | spin_lock_irq(&port->portdev->ports_lock); | ||
| 1258 | list_del(&port->list); | ||
| 1259 | spin_unlock_irq(&port->portdev->ports_lock); | ||
| 1260 | |||
| 1110 | if (port->guest_connected) { | 1261 | if (port->guest_connected) { |
| 1111 | port->guest_connected = false; | 1262 | port->guest_connected = false; |
| 1112 | port->host_connected = false; | 1263 | port->host_connected = false; |
| 1113 | wake_up_interruptible(&port->waitqueue); | 1264 | wake_up_interruptible(&port->waitqueue); |
| 1114 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | ||
| 1115 | } | ||
| 1116 | 1265 | ||
| 1117 | spin_lock_irq(&port->portdev->ports_lock); | 1266 | /* Let the app know the port is going down. */ |
| 1118 | list_del(&port->list); | 1267 | send_sigio_to_port(port); |
| 1119 | spin_unlock_irq(&port->portdev->ports_lock); | 1268 | } |
| 1120 | 1269 | ||
| 1121 | if (is_console_port(port)) { | 1270 | if (is_console_port(port)) { |
| 1122 | spin_lock_irq(&pdrvdata_lock); | 1271 | spin_lock_irq(&pdrvdata_lock); |
| @@ -1135,9 +1284,6 @@ static int remove_port(struct port *port) | |||
| 1135 | hvc_remove(port->cons.hvc); | 1284 | hvc_remove(port->cons.hvc); |
| 1136 | #endif | 1285 | #endif |
| 1137 | } | 1286 | } |
| 1138 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
| 1139 | device_destroy(pdrvdata.class, port->dev->devt); | ||
| 1140 | cdev_del(&port->cdev); | ||
| 1141 | 1287 | ||
| 1142 | /* Remove unused data this port might have received. */ | 1288 | /* Remove unused data this port might have received. */ |
| 1143 | discard_port_data(port); | 1289 | discard_port_data(port); |
| @@ -1148,12 +1294,19 @@ static int remove_port(struct port *port) | |||
| 1148 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | 1294 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) |
| 1149 | free_buf(buf); | 1295 | free_buf(buf); |
| 1150 | 1296 | ||
| 1151 | kfree(port->name); | 1297 | /* |
| 1152 | 1298 | * We should just assume the device itself has gone off -- | |
| 1153 | debugfs_remove(port->debugfs_file); | 1299 | * else a close on an open port later will try to send out a |
| 1300 | * control message. | ||
| 1301 | */ | ||
| 1302 | port->portdev = NULL; | ||
| 1154 | 1303 | ||
| 1155 | kfree(port); | 1304 | /* |
| 1156 | return 0; | 1305 | * Locks around here are not necessary - a port can't be |
| 1306 | * opened after we removed the port struct from ports_list | ||
| 1307 | * above. | ||
| 1308 | */ | ||
| 1309 | kref_put(&port->kref, remove_port); | ||
| 1157 | } | 1310 | } |
| 1158 | 1311 | ||
| 1159 | /* Any private messages that the Host and Guest want to share */ | 1312 | /* Any private messages that the Host and Guest want to share */ |
| @@ -1192,7 +1345,7 @@ static void handle_control_message(struct ports_device *portdev, | |||
| 1192 | add_port(portdev, cpkt->id); | 1345 | add_port(portdev, cpkt->id); |
| 1193 | break; | 1346 | break; |
| 1194 | case VIRTIO_CONSOLE_PORT_REMOVE: | 1347 | case VIRTIO_CONSOLE_PORT_REMOVE: |
| 1195 | remove_port(port); | 1348 | unplug_port(port); |
| 1196 | break; | 1349 | break; |
| 1197 | case VIRTIO_CONSOLE_CONSOLE_PORT: | 1350 | case VIRTIO_CONSOLE_CONSOLE_PORT: |
| 1198 | if (!cpkt->value) | 1351 | if (!cpkt->value) |
| @@ -1234,6 +1387,12 @@ static void handle_control_message(struct ports_device *portdev, | |||
| 1234 | spin_lock_irq(&port->outvq_lock); | 1387 | spin_lock_irq(&port->outvq_lock); |
| 1235 | reclaim_consumed_buffers(port); | 1388 | reclaim_consumed_buffers(port); |
| 1236 | spin_unlock_irq(&port->outvq_lock); | 1389 | spin_unlock_irq(&port->outvq_lock); |
| 1390 | |||
| 1391 | /* | ||
| 1392 | * If the guest is connected, it'll be interested in | ||
| 1393 | * knowing the host connection state changed. | ||
| 1394 | */ | ||
| 1395 | send_sigio_to_port(port); | ||
| 1237 | break; | 1396 | break; |
| 1238 | case VIRTIO_CONSOLE_PORT_NAME: | 1397 | case VIRTIO_CONSOLE_PORT_NAME: |
| 1239 | /* | 1398 | /* |
| @@ -1330,6 +1489,9 @@ static void in_intr(struct virtqueue *vq) | |||
| 1330 | 1489 | ||
| 1331 | wake_up_interruptible(&port->waitqueue); | 1490 | wake_up_interruptible(&port->waitqueue); |
| 1332 | 1491 | ||
| 1492 | /* Send a SIGIO indicating new data in case the process asked for it */ | ||
| 1493 | send_sigio_to_port(port); | ||
| 1494 | |||
| 1333 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) | 1495 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) |
| 1334 | hvc_kick(); | 1496 | hvc_kick(); |
| 1335 | } | 1497 | } |
| @@ -1566,6 +1728,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
| 1566 | add_port(portdev, 0); | 1728 | add_port(portdev, 0); |
| 1567 | } | 1729 | } |
| 1568 | 1730 | ||
| 1731 | spin_lock_irq(&pdrvdata_lock); | ||
| 1732 | list_add_tail(&portdev->list, &pdrvdata.portdevs); | ||
| 1733 | spin_unlock_irq(&pdrvdata_lock); | ||
| 1734 | |||
| 1569 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, | 1735 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, |
| 1570 | VIRTIO_CONSOLE_DEVICE_READY, 1); | 1736 | VIRTIO_CONSOLE_DEVICE_READY, 1); |
| 1571 | return 0; | 1737 | return 0; |
| @@ -1589,23 +1755,41 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
| 1589 | { | 1755 | { |
| 1590 | struct ports_device *portdev; | 1756 | struct ports_device *portdev; |
| 1591 | struct port *port, *port2; | 1757 | struct port *port, *port2; |
| 1592 | struct port_buffer *buf; | ||
| 1593 | unsigned int len; | ||
| 1594 | 1758 | ||
| 1595 | portdev = vdev->priv; | 1759 | portdev = vdev->priv; |
| 1596 | 1760 | ||
| 1761 | spin_lock_irq(&pdrvdata_lock); | ||
| 1762 | list_del(&portdev->list); | ||
| 1763 | spin_unlock_irq(&pdrvdata_lock); | ||
| 1764 | |||
| 1765 | /* Disable interrupts for vqs */ | ||
| 1766 | vdev->config->reset(vdev); | ||
| 1767 | /* Finish up work that's lined up */ | ||
| 1597 | cancel_work_sync(&portdev->control_work); | 1768 | cancel_work_sync(&portdev->control_work); |
| 1598 | 1769 | ||
| 1599 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 1770 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
| 1600 | remove_port(port); | 1771 | unplug_port(port); |
| 1601 | 1772 | ||
| 1602 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | 1773 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); |
| 1603 | 1774 | ||
| 1604 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) | 1775 | /* |
| 1605 | free_buf(buf); | 1776 | * When yanking out a device, we immediately lose the |
| 1777 | * (device-side) queues. So there's no point in keeping the | ||
| 1778 | * guest side around till we drop our final reference. This | ||
| 1779 | * also means that any ports which are in an open state will | ||
| 1780 | * have to just stop using the port, as the vqs are going | ||
| 1781 | * away. | ||
| 1782 | */ | ||
| 1783 | if (use_multiport(portdev)) { | ||
| 1784 | struct port_buffer *buf; | ||
| 1785 | unsigned int len; | ||
| 1606 | 1786 | ||
| 1607 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) | 1787 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) |
| 1608 | free_buf(buf); | 1788 | free_buf(buf); |
| 1789 | |||
| 1790 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) | ||
| 1791 | free_buf(buf); | ||
| 1792 | } | ||
| 1609 | 1793 | ||
| 1610 | vdev->config->del_vqs(vdev); | 1794 | vdev->config->del_vqs(vdev); |
| 1611 | kfree(portdev->in_vqs); | 1795 | kfree(portdev->in_vqs); |
| @@ -1652,6 +1836,7 @@ static int __init init(void) | |||
| 1652 | PTR_ERR(pdrvdata.debugfs_dir)); | 1836 | PTR_ERR(pdrvdata.debugfs_dir)); |
| 1653 | } | 1837 | } |
| 1654 | INIT_LIST_HEAD(&pdrvdata.consoles); | 1838 | INIT_LIST_HEAD(&pdrvdata.consoles); |
| 1839 | INIT_LIST_HEAD(&pdrvdata.portdevs); | ||
| 1655 | 1840 | ||
| 1656 | return register_virtio_driver(&virtio_console); | 1841 | return register_virtio_driver(&virtio_console); |
| 1657 | } | 1842 | } |
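Since the virtio_console changes above add fasync support and SIGIO notification (port_fops_fasync() and send_sigio_to_port()), a guest-side usage sketch may help. It is illustrative only: the device path /dev/vport0p1 is just an example, and the signal handler body is deliberately empty.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	/* host connect/disconnect or new data arrived: go poll()/read() */
	(void)sig;
}

int main(void)
{
	int fd = open("/dev/vport0p1", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());                     /* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);  /* enable async notification */
	pause();                                           /* wait for a SIGIO */
	close(fd);
	return 0;
}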
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c2408bbe9c2e..f508690eb958 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -80,7 +80,7 @@ | |||
| 80 | * Limiting Performance Impact | 80 | * Limiting Performance Impact |
| 81 | * --------------------------- | 81 | * --------------------------- |
| 82 | * C states, especially those with large exit latencies, can have a real | 82 | * C states, especially those with large exit latencies, can have a real |
| 83 | * noticable impact on workloads, which is not acceptable for most sysadmins, | 83 | * noticeable impact on workloads, which is not acceptable for most sysadmins, |
| 84 | * and in addition, less performance has a power price of its own. | 84 | * and in addition, less performance has a power price of its own. |
| 85 | * | 85 | * |
| 86 | * As a general rule of thumb, menu assumes that the following heuristic | 86 | * As a general rule of thumb, menu assumes that the following heuristic |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 216f9d383b5b..effd140fc042 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | |||
| 879 | dma->device_issue_pending = ioat2_issue_pending; | 879 | dma->device_issue_pending = ioat2_issue_pending; |
| 880 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 880 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
| 881 | dma->device_free_chan_resources = ioat2_free_chan_resources; | 881 | dma->device_free_chan_resources = ioat2_free_chan_resources; |
| 882 | dma->device_tx_status = ioat_tx_status; | 882 | dma->device_tx_status = ioat_dma_tx_status; |
| 883 | 883 | ||
| 884 | err = ioat_probe(device); | 884 | err = ioat_probe(device); |
| 885 | if (err) | 885 | if (err) |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index fb64cf36ba61..eb6b54dbb806 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
| @@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | |||
| 580 | 580 | ||
| 581 | sh_chan = to_sh_chan(chan); | 581 | sh_chan = to_sh_chan(chan); |
| 582 | param = chan->private; | 582 | param = chan->private; |
| 583 | slave_addr = param->config->addr; | ||
| 584 | 583 | ||
| 585 | /* Someone calling slave DMA on a public channel? */ | 584 | /* Someone calling slave DMA on a public channel? */ |
| 586 | if (!param || !sg_len) { | 585 | if (!param || !sg_len) { |
| @@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | |||
| 589 | return NULL; | 588 | return NULL; |
| 590 | } | 589 | } |
| 591 | 590 | ||
| 591 | slave_addr = param->config->addr; | ||
| 592 | |||
| 592 | /* | 593 | /* |
| 593 | * if (param != NULL), this is a successfully requested slave channel, | 594 | * if (param != NULL), this is a successfully requested slave channel, |
| 594 | * therefore param->config != NULL too. | 595 | * therefore param->config != NULL too. |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e0187d16dd7c..0fd5b85a0f75 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
| @@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { | |||
| 1140 | ATTR_COUNTER(0), | 1140 | ATTR_COUNTER(0), |
| 1141 | ATTR_COUNTER(1), | 1141 | ATTR_COUNTER(1), |
| 1142 | ATTR_COUNTER(2), | 1142 | ATTR_COUNTER(2), |
| 1143 | { .attr = { .name = NULL } } | ||
| 1143 | }; | 1144 | }; |
| 1144 | 1145 | ||
| 1145 | static struct mcidev_sysfs_group i7core_udimm_counters = { | 1146 | static struct mcidev_sysfs_group i7core_udimm_counters = { |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 1b05896648bc..9dcb17d51aee 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
| @@ -2840,7 +2840,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
| 2840 | const struct pci_device_id *ent) | 2840 | const struct pci_device_id *ent) |
| 2841 | { | 2841 | { |
| 2842 | struct fw_ohci *ohci; | 2842 | struct fw_ohci *ohci; |
| 2843 | u32 bus_options, max_receive, link_speed, version, link_enh; | 2843 | u32 bus_options, max_receive, link_speed, version; |
| 2844 | u64 guid; | 2844 | u64 guid; |
| 2845 | int i, err, n_ir, n_it; | 2845 | int i, err, n_ir, n_it; |
| 2846 | size_t size; | 2846 | size_t size; |
| @@ -2894,23 +2894,6 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
| 2894 | if (param_quirks) | 2894 | if (param_quirks) |
| 2895 | ohci->quirks = param_quirks; | 2895 | ohci->quirks = param_quirks; |
| 2896 | 2896 | ||
| 2897 | /* TI OHCI-Lynx and compatible: set recommended configuration bits. */ | ||
| 2898 | if (dev->vendor == PCI_VENDOR_ID_TI) { | ||
| 2899 | pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh); | ||
| 2900 | |||
| 2901 | /* adjust latency of ATx FIFO: use 1.7 KB threshold */ | ||
| 2902 | link_enh &= ~TI_LinkEnh_atx_thresh_mask; | ||
| 2903 | link_enh |= TI_LinkEnh_atx_thresh_1_7K; | ||
| 2904 | |||
| 2905 | /* use priority arbitration for asynchronous responses */ | ||
| 2906 | link_enh |= TI_LinkEnh_enab_unfair; | ||
| 2907 | |||
| 2908 | /* required for aPhyEnhanceEnable to work */ | ||
| 2909 | link_enh |= TI_LinkEnh_enab_accel; | ||
| 2910 | |||
| 2911 | pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh); | ||
| 2912 | } | ||
| 2913 | |||
| 2914 | ar_context_init(&ohci->ar_request_ctx, ohci, | 2897 | ar_context_init(&ohci->ar_request_ctx, ohci, |
| 2915 | OHCI1394_AsReqRcvContextControlSet); | 2898 | OHCI1394_AsReqRcvContextControlSet); |
| 2916 | 2899 | ||
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 0e6c5a466908..ef5e7336da68 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h | |||
| @@ -155,12 +155,4 @@ | |||
| 155 | 155 | ||
| 156 | #define OHCI1394_phy_tcode 0xe | 156 | #define OHCI1394_phy_tcode 0xe |
| 157 | 157 | ||
| 158 | /* TI extensions */ | ||
| 159 | |||
| 160 | #define PCI_CFG_TI_LinkEnh 0xf4 | ||
| 161 | #define TI_LinkEnh_enab_accel 0x00000002 | ||
| 162 | #define TI_LinkEnh_enab_unfair 0x00000080 | ||
| 163 | #define TI_LinkEnh_atx_thresh_mask 0x00003000 | ||
| 164 | #define TI_LinkEnh_atx_thresh_1_7K 0x00001000 | ||
| 165 | |||
| 166 | #endif /* _FIREWIRE_OHCI_H */ | 158 | #endif /* _FIREWIRE_OHCI_H */ |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf92d07510df..5663d2719063 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
| @@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev, | |||
| 148 | return -ENOMEM; | 148 | return -ENOMEM; |
| 149 | 149 | ||
| 150 | kref_init(&obj->refcount); | 150 | kref_init(&obj->refcount); |
| 151 | kref_init(&obj->handlecount); | 151 | atomic_set(&obj->handle_count, 0); |
| 152 | obj->size = size; | 152 | obj->size = size; |
| 153 | 153 | ||
| 154 | atomic_inc(&dev->object_count); | 154 | atomic_inc(&dev->object_count); |
| @@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref) | |||
| 462 | } | 462 | } |
| 463 | EXPORT_SYMBOL(drm_gem_object_free); | 463 | EXPORT_SYMBOL(drm_gem_object_free); |
| 464 | 464 | ||
| 465 | /** | ||
| 466 | * Called after the last reference to the object has been lost. | ||
| 467 | * Must be called without holding struct_mutex | ||
| 468 | * | ||
| 469 | * Frees the object | ||
| 470 | */ | ||
| 471 | void | ||
| 472 | drm_gem_object_free_unlocked(struct kref *kref) | ||
| 473 | { | ||
| 474 | struct drm_gem_object *obj = (struct drm_gem_object *) kref; | ||
| 475 | struct drm_device *dev = obj->dev; | ||
| 476 | |||
| 477 | if (dev->driver->gem_free_object_unlocked != NULL) | ||
| 478 | dev->driver->gem_free_object_unlocked(obj); | ||
| 479 | else if (dev->driver->gem_free_object != NULL) { | ||
| 480 | mutex_lock(&dev->struct_mutex); | ||
| 481 | dev->driver->gem_free_object(obj); | ||
| 482 | mutex_unlock(&dev->struct_mutex); | ||
| 483 | } | ||
| 484 | } | ||
| 485 | EXPORT_SYMBOL(drm_gem_object_free_unlocked); | ||
| 486 | |||
| 487 | static void drm_gem_object_ref_bug(struct kref *list_kref) | 465 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
| 488 | { | 466 | { |
| 489 | BUG(); | 467 | BUG(); |
| @@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) | |||
| 496 | * called before drm_gem_object_free or we'll be touching | 474 | * called before drm_gem_object_free or we'll be touching |
| 497 | * freed memory | 475 | * freed memory |
| 498 | */ | 476 | */ |
| 499 | void | 477 | void drm_gem_object_handle_free(struct drm_gem_object *obj) |
| 500 | drm_gem_object_handle_free(struct kref *kref) | ||
| 501 | { | 478 | { |
| 502 | struct drm_gem_object *obj = container_of(kref, | ||
| 503 | struct drm_gem_object, | ||
| 504 | handlecount); | ||
| 505 | struct drm_device *dev = obj->dev; | 479 | struct drm_device *dev = obj->dev; |
| 506 | 480 | ||
| 507 | /* Remove any name for this object */ | 481 | /* Remove any name for this object */ |
| @@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) | |||
| 528 | struct drm_gem_object *obj = vma->vm_private_data; | 502 | struct drm_gem_object *obj = vma->vm_private_data; |
| 529 | 503 | ||
| 530 | drm_gem_object_reference(obj); | 504 | drm_gem_object_reference(obj); |
| 505 | |||
| 506 | mutex_lock(&obj->dev->struct_mutex); | ||
| 507 | drm_vm_open_locked(vma); | ||
| 508 | mutex_unlock(&obj->dev->struct_mutex); | ||
| 531 | } | 509 | } |
| 532 | EXPORT_SYMBOL(drm_gem_vm_open); | 510 | EXPORT_SYMBOL(drm_gem_vm_open); |
| 533 | 511 | ||
| @@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) | |||
| 535 | { | 513 | { |
| 536 | struct drm_gem_object *obj = vma->vm_private_data; | 514 | struct drm_gem_object *obj = vma->vm_private_data; |
| 537 | 515 | ||
| 538 | drm_gem_object_unreference_unlocked(obj); | 516 | mutex_lock(&obj->dev->struct_mutex); |
| 517 | drm_vm_close_locked(vma); | ||
| 518 | drm_gem_object_unreference(obj); | ||
| 519 | mutex_unlock(&obj->dev->struct_mutex); | ||
| 539 | } | 520 | } |
| 540 | EXPORT_SYMBOL(drm_gem_vm_close); | 521 | EXPORT_SYMBOL(drm_gem_vm_close); |
| 541 | 522 | ||
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 2ef2c7827243..974e970ce3f8 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c | |||
| @@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) | |||
| 255 | 255 | ||
| 256 | seq_printf(m, "%6d %8zd %7d %8d\n", | 256 | seq_printf(m, "%6d %8zd %7d %8d\n", |
| 257 | obj->name, obj->size, | 257 | obj->name, obj->size, |
| 258 | atomic_read(&obj->handlecount.refcount), | 258 | atomic_read(&obj->handle_count), |
| 259 | atomic_read(&obj->refcount.refcount)); | 259 | atomic_read(&obj->refcount.refcount)); |
| 260 | return 0; | 260 | return 0; |
| 261 | } | 261 | } |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index fda67468e603..5df450683aab 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
| @@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) | |||
| 433 | mutex_unlock(&dev->struct_mutex); | 433 | mutex_unlock(&dev->struct_mutex); |
| 434 | } | 434 | } |
| 435 | 435 | ||
| 436 | /** | 436 | void drm_vm_close_locked(struct vm_area_struct *vma) |
| 437 | * \c close method for all virtual memory types. | ||
| 438 | * | ||
| 439 | * \param vma virtual memory area. | ||
| 440 | * | ||
| 441 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
| 442 | * free it. | ||
| 443 | */ | ||
| 444 | static void drm_vm_close(struct vm_area_struct *vma) | ||
| 445 | { | 437 | { |
| 446 | struct drm_file *priv = vma->vm_file->private_data; | 438 | struct drm_file *priv = vma->vm_file->private_data; |
| 447 | struct drm_device *dev = priv->minor->dev; | 439 | struct drm_device *dev = priv->minor->dev; |
| @@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
| 451 | vma->vm_start, vma->vm_end - vma->vm_start); | 443 | vma->vm_start, vma->vm_end - vma->vm_start); |
| 452 | atomic_dec(&dev->vma_count); | 444 | atomic_dec(&dev->vma_count); |
| 453 | 445 | ||
| 454 | mutex_lock(&dev->struct_mutex); | ||
| 455 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | 446 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
| 456 | if (pt->vma == vma) { | 447 | if (pt->vma == vma) { |
| 457 | list_del(&pt->head); | 448 | list_del(&pt->head); |
| @@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) | |||
| 459 | break; | 450 | break; |
| 460 | } | 451 | } |
| 461 | } | 452 | } |
| 453 | } | ||
| 454 | |||
| 455 | /** | ||
| 456 | * \c close method for all virtual memory types. | ||
| 457 | * | ||
| 458 | * \param vma virtual memory area. | ||
| 459 | * | ||
| 460 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
| 461 | * free it. | ||
| 462 | */ | ||
| 463 | static void drm_vm_close(struct vm_area_struct *vma) | ||
| 464 | { | ||
| 465 | struct drm_file *priv = vma->vm_file->private_data; | ||
| 466 | struct drm_device *dev = priv->minor->dev; | ||
| 467 | |||
| 468 | mutex_lock(&dev->struct_mutex); | ||
| 469 | drm_vm_close_locked(vma); | ||
| 462 | mutex_unlock(&dev->struct_mutex); | 470 | mutex_unlock(&dev->struct_mutex); |
| 463 | } | 471 | } |
| 464 | 472 | ||
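This is the usual locked/unlocked split: the body that walks dev->vmalist and frees the matching entry becomes drm_vm_close_locked(), which expects dev->struct_mutex to be held by the caller, while drm_vm_close() shrinks to a wrapper that takes the mutex itself. The split is what lets drm_gem_vm_close() above prune the vma list and drop the object reference inside one critical section. The wrapper as it reads after the patch:

    static void drm_vm_close(struct vm_area_struct *vma)
    {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
    }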
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 61b4caf220fa..fb07e73581e8 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
| @@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
| 116 | static const struct file_operations i810_buffer_fops = { | 116 | static const struct file_operations i810_buffer_fops = { |
| 117 | .open = drm_open, | 117 | .open = drm_open, |
| 118 | .release = drm_release, | 118 | .release = drm_release, |
| 119 | .unlocked_ioctl = drm_ioctl, | 119 | .unlocked_ioctl = i810_ioctl, |
| 120 | .mmap = i810_mmap_buffers, | 120 | .mmap = i810_mmap_buffers, |
| 121 | .fasync = drm_fasync, | 121 | .fasync = drm_fasync, |
| 122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 671aa18415ac..cc92c7e6236f 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
| @@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
| 118 | static const struct file_operations i830_buffer_fops = { | 118 | static const struct file_operations i830_buffer_fops = { |
| 119 | .open = drm_open, | 119 | .open = drm_open, |
| 120 | .release = drm_release, | 120 | .release = drm_release, |
| 121 | .unlocked_ioctl = drm_ioctl, | 121 | .unlocked_ioctl = i830_ioctl, |
| 122 | .mmap = i830_mmap_buffers, | 122 | .mmap = i830_mmap_buffers, |
| 123 | .fasync = drm_fasync, | 123 | .fasync = drm_fasync, |
| 124 | }; | 124 | }; |
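Both one-line hunks fix the same wiring bug: the i810 and i830 buffer-mapping file_operations dispatched ioctls through the generic drm_ioctl() instead of the driver's own entry point, bypassing whatever per-driver serialization i810_ioctl()/i830_ioctl() provide. The patch only changes the .unlocked_ioctl pointer; the wrapper itself is not shown here, but presumably looks roughly like the sketch below (the lock name is hypothetical):

    static long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
        long ret;

        mutex_lock(&i810_lock);    /* hypothetical per-driver lock */
        ret = drm_ioctl(file, cmd, arg);
        mutex_unlock(&i810_lock);
        return ret;
    }

    static const struct file_operations i810_buffer_fops = {
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = i810_ioctl,    /* was drm_ioctl */
        .mmap           = i810_mmap_buffers,
        .fasync         = drm_fasync,
    };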
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9d67b4853030..2dd2c93ebfa3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |||
| 1787 | } | 1787 | } |
| 1788 | } | 1788 | } |
| 1789 | 1789 | ||
| 1790 | div_u64(diff, diff1); | 1790 | diff = div_u64(diff, diff1); |
| 1791 | ret = ((m * diff) + c); | 1791 | ret = ((m * diff) + c); |
| 1792 | div_u64(ret, 10); | 1792 | ret = div_u64(ret, 10); |
| 1793 | 1793 | ||
| 1794 | dev_priv->last_count1 = total_count; | 1794 | dev_priv->last_count1 = total_count; |
| 1795 | dev_priv->last_time1 = now; | 1795 | dev_priv->last_time1 = now; |
| @@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |||
| 1858 | 1858 | ||
| 1859 | /* More magic constants... */ | 1859 | /* More magic constants... */ |
| 1860 | diff = diff * 1181; | 1860 | diff = diff * 1181; |
| 1861 | div_u64(diff, diffms * 10); | 1861 | diff = div_u64(diff, diffms * 10); |
| 1862 | dev_priv->gfx_power = diff; | 1862 | dev_priv->gfx_power = diff; |
| 1863 | } | 1863 | } |
| 1864 | 1864 | ||
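div_u64() returns the quotient and never modifies its dividend in place, so the three old calls performed the division and discarded the result; the chipset and graphics power estimates were effectively computed without the division. A plain-C stand-in (not the kernel helper itself) makes the difference obvious:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's div_u64(): returns the quotient,
     * leaves the dividend untouched. */
    static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
    {
        return dividend / divisor;
    }

    int main(void)
    {
        uint64_t diff = 1000, diff1 = 3;

        div_u64(diff, diff1);          /* old style: result dropped, diff still 1000 */
        diff = div_u64(diff, diff1);   /* fixed style: diff is now 333 */
        printf("%llu\n", (unsigned long long)diff);
        return 0;
    }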
| @@ -2231,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 2231 | dev_priv->mchdev_lock = &mchdev_lock; | 2231 | dev_priv->mchdev_lock = &mchdev_lock; |
| 2232 | spin_unlock(&mchdev_lock); | 2232 | spin_unlock(&mchdev_lock); |
| 2233 | 2233 | ||
| 2234 | /* XXX Prevent module unload due to memory corruption bugs. */ | ||
| 2235 | __module_get(THIS_MODULE); | ||
| 2236 | |||
| 2234 | return 0; | 2237 | return 0; |
| 2235 | 2238 | ||
| 2236 | out_workqueue_free: | 2239 | out_workqueue_free: |
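The added __module_get(THIS_MODULE) pins i915 permanently: it bumps the module reference count with no matching module_put(), so the module always appears busy to rmmod. As the XXX comment says, it is a deliberate stopgap against use-after-unload corruption, not the normal balanced pattern, which would look like:

    if (!try_module_get(THIS_MODULE))
        return -ENODEV;
    /* ... code that must not outlive the module ... */
    module_put(THIS_MODULE);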
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bced9b25c71e..90b1d6753b9d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
| 136 | return -ENOMEM; | 136 | return -ENOMEM; |
| 137 | 137 | ||
| 138 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 138 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
| 139 | /* drop reference from allocate - handle holds it now */ | ||
| 140 | drm_gem_object_unreference_unlocked(obj); | ||
| 139 | if (ret) { | 141 | if (ret) { |
| 140 | drm_gem_object_unreference_unlocked(obj); | ||
| 141 | return ret; | 142 | return ret; |
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | /* Sink the floating reference from kref_init(handlecount) */ | ||
| 145 | drm_gem_object_handle_unreference_unlocked(obj); | ||
| 146 | |||
| 147 | args->handle = handle; | 145 | args->handle = handle; |
| 148 | return 0; | 146 | return 0; |
| 149 | } | 147 | } |
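drm_gem_handle_create() takes its own reference on behalf of the handle, so the reference held since allocation can now be dropped unconditionally right after the call; when handle creation fails, that drop is also the last one and frees the object. This replaces the old two-step scheme of returning early on error and then sinking a floating handlecount reference. The tail of the function as it reads after the patch:

    ret = drm_gem_handle_create(file_priv, obj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(obj);
    if (ret) {
        return ret;
    }

    args->handle = handle;
    return 0;

The same pattern is applied to nouveau_gem_ioctl_new() and radeon_gem_create_ioctl() further down.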
| @@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
| 471 | return -ENOENT; | 469 | return -ENOENT; |
| 472 | obj_priv = to_intel_bo(obj); | 470 | obj_priv = to_intel_bo(obj); |
| 473 | 471 | ||
| 474 | /* Bounds check source. | 472 | /* Bounds check source. */ |
| 475 | * | 473 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
| 476 | * XXX: This could use review for overflow issues... | 474 | ret = -EINVAL; |
| 477 | */ | 475 | goto err; |
| 478 | if (args->offset > obj->size || args->size > obj->size || | 476 | } |
| 479 | args->offset + args->size > obj->size) { | 477 | |
| 480 | drm_gem_object_unreference_unlocked(obj); | 478 | if (!access_ok(VERIFY_WRITE, |
| 481 | return -EINVAL; | 479 | (char __user *)(uintptr_t)args->data_ptr, |
| 480 | args->size)) { | ||
| 481 | ret = -EFAULT; | ||
| 482 | goto err; | ||
| 482 | } | 483 | } |
| 483 | 484 | ||
| 484 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | 485 | if (i915_gem_object_needs_bit17_swizzle(obj)) { |
| @@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
| 490 | file_priv); | 491 | file_priv); |
| 491 | } | 492 | } |
| 492 | 493 | ||
| 494 | err: | ||
| 493 | drm_gem_object_unreference_unlocked(obj); | 495 | drm_gem_object_unreference_unlocked(obj); |
| 494 | |||
| 495 | return ret; | 496 | return ret; |
| 496 | } | 497 | } |
| 497 | 498 | ||
| @@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 580 | 581 | ||
| 581 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 582 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
| 582 | remain = args->size; | 583 | remain = args->size; |
| 583 | if (!access_ok(VERIFY_READ, user_data, remain)) | ||
| 584 | return -EFAULT; | ||
| 585 | 584 | ||
| 586 | 585 | ||
| 587 | mutex_lock(&dev->struct_mutex); | 586 | mutex_lock(&dev->struct_mutex); |
| @@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 934 | return -ENOENT; | 933 | return -ENOENT; |
| 935 | obj_priv = to_intel_bo(obj); | 934 | obj_priv = to_intel_bo(obj); |
| 936 | 935 | ||
| 937 | /* Bounds check destination. | 936 | /* Bounds check destination. */ |
| 938 | * | 937 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
| 939 | * XXX: This could use review for overflow issues... | 938 | ret = -EINVAL; |
| 940 | */ | 939 | goto err; |
| 941 | if (args->offset > obj->size || args->size > obj->size || | 940 | } |
| 942 | args->offset + args->size > obj->size) { | 941 | |
| 943 | drm_gem_object_unreference_unlocked(obj); | 942 | if (!access_ok(VERIFY_READ, |
| 944 | return -EINVAL; | 943 | (char __user *)(uintptr_t)args->data_ptr, |
| 944 | args->size)) { | ||
| 945 | ret = -EFAULT; | ||
| 946 | goto err; | ||
| 945 | } | 947 | } |
| 946 | 948 | ||
| 947 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 949 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
| @@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 975 | DRM_INFO("pwrite failed %d\n", ret); | 977 | DRM_INFO("pwrite failed %d\n", ret); |
| 976 | #endif | 978 | #endif |
| 977 | 979 | ||
| 980 | err: | ||
| 978 | drm_gem_object_unreference_unlocked(obj); | 981 | drm_gem_object_unreference_unlocked(obj); |
| 979 | |||
| 980 | return ret; | 982 | return ret; |
| 981 | } | 983 | } |
| 982 | 984 | ||
| @@ -3258,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3258 | (int) reloc->offset, | 3260 | (int) reloc->offset, |
| 3259 | reloc->read_domains, | 3261 | reloc->read_domains, |
| 3260 | reloc->write_domain); | 3262 | reloc->write_domain); |
| 3263 | drm_gem_object_unreference(target_obj); | ||
| 3264 | i915_gem_object_unpin(obj); | ||
| 3261 | return -EINVAL; | 3265 | return -EINVAL; |
| 3262 | } | 3266 | } |
| 3263 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3267 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
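The two added lines plug a leak in the relocation error path: on an invalid read/write domain combination the function used to return -EINVAL while still holding the reference taken on target_obj and the pin on obj. After the patch the branch releases both before bailing out, presumably matching the cleanup done by the neighbouring error exits:

    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return -EINVAL;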
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index e85246ef691c..5c428fa3e0b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
| 93 | { | 93 | { |
| 94 | drm_i915_private_t *dev_priv = dev->dev_private; | 94 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 95 | struct list_head eviction_list, unwind_list; | 95 | struct list_head eviction_list, unwind_list; |
| 96 | struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; | 96 | struct drm_i915_gem_object *obj_priv; |
| 97 | struct list_head *render_iter, *bsd_iter; | 97 | struct list_head *render_iter, *bsd_iter; |
| 98 | int ret = 0; | 98 | int ret = 0; |
| 99 | 99 | ||
| @@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
| 175 | return -ENOSPC; | 175 | return -ENOSPC; |
| 176 | 176 | ||
| 177 | found: | 177 | found: |
| 178 | /* drm_mm doesn't allow any other other operations while | ||
| 179 | * scanning, therefore store to be evicted objects on a | ||
| 180 | * temporary list. */ | ||
| 178 | INIT_LIST_HEAD(&eviction_list); | 181 | INIT_LIST_HEAD(&eviction_list); |
| 179 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 182 | while (!list_empty(&unwind_list)) { |
| 180 | &unwind_list, evict_list) { | 183 | obj_priv = list_first_entry(&unwind_list, |
| 184 | struct drm_i915_gem_object, | ||
| 185 | evict_list); | ||
| 181 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 186 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { |
| 182 | /* drm_mm doesn't allow any other other operations while | ||
| 183 | * scanning, therefore store to be evicted objects on a | ||
| 184 | * temporary list. */ | ||
| 185 | list_move(&obj_priv->evict_list, &eviction_list); | 187 | list_move(&obj_priv->evict_list, &eviction_list); |
| 186 | } else | 188 | continue; |
| 187 | drm_gem_object_unreference(&obj_priv->base); | 189 | } |
| 190 | list_del(&obj_priv->evict_list); | ||
| 191 | drm_gem_object_unreference(&obj_priv->base); | ||
| 188 | } | 192 | } |
| 189 | 193 | ||
| 190 | /* Unbinding will emit any required flushes */ | 194 | /* Unbinding will emit any required flushes */ |
| 191 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | 195 | while (!list_empty(&eviction_list)) { |
| 192 | &eviction_list, evict_list) { | 196 | obj_priv = list_first_entry(&eviction_list, |
| 193 | #if WATCH_LRU | 197 | struct drm_i915_gem_object, |
| 194 | DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base); | 198 | evict_list); |
| 195 | #endif | 199 | if (ret == 0) |
| 196 | ret = i915_gem_object_unbind(&obj_priv->base); | 200 | ret = i915_gem_object_unbind(&obj_priv->base); |
| 197 | if (ret) | 201 | list_del(&obj_priv->evict_list); |
| 198 | return ret; | ||
| 199 | |||
| 200 | drm_gem_object_unreference(&obj_priv->base); | 202 | drm_gem_object_unreference(&obj_priv->base); |
| 201 | } | 203 | } |
| 202 | 204 | ||
| 203 | /* The just created free hole should be on the top of the free stack | 205 | return ret; |
| 204 | * maintained by drm_mm, so this BUG_ON actually executes in O(1). | ||
| 205 | * Furthermore all accessed data has just recently been used, so it | ||
| 206 | * should be really fast, too. */ | ||
| 207 | BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, | ||
| 208 | alignment, 0)); | ||
| 209 | |||
| 210 | return 0; | ||
| 211 | } | 206 | } |
| 212 | 207 | ||
| 213 | int | 208 | int |
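The eviction unwind is restructured from list_for_each_entry_safe() walks into explicit drains: each list is consumed with list_first_entry() in a while loop, every node is list_del()'d, and an unbind failure no longer returns early (which previously leaked the references still held by the rest of eviction_list); instead the loop keeps draining, only the first error is remembered, and the post-eviction BUG_ON sanity check goes away in favour of returning that error. The drain idiom, in self-contained plain C rather than the kernel list API:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int id;
        struct node *next;
    };

    /* Drain the list front-to-back: keep freeing every node even if
     * work() fails, and report only the first error at the end. */
    static int drain(struct node **head, int (*work)(struct node *))
    {
        int ret = 0;

        while (*head) {
            struct node *n = *head;

            *head = n->next;    /* analogous to list_del() */
            if (ret == 0)
                ret = work(n);
            free(n);            /* analogous to dropping the reference */
        }
        return ret;
    }

    static int work_ok(struct node *n) { (void)n; return 0; }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }
        printf("%d\n", drain(&head, work_ok));
        return 0;
    }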
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b5bf51a4502d..979228594599 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
| 1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
| 1014 | } | 1014 | } |
| 1015 | 1015 | ||
| 1016 | /** | 1016 | /* |
| 1017 | * intel_wait_for_vblank_off - wait for vblank after disabling a pipe | 1017 | * intel_wait_for_pipe_off - wait for pipe to turn off |
| 1018 | * @dev: drm device | 1018 | * @dev: drm device |
| 1019 | * @pipe: pipe to wait for | 1019 | * @pipe: pipe to wait for |
| 1020 | * | 1020 | * |
| @@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
| 1022 | * spinning on the vblank interrupt status bit, since we won't actually | 1022 | * spinning on the vblank interrupt status bit, since we won't actually |
| 1023 | * see an interrupt when the pipe is disabled. | 1023 | * see an interrupt when the pipe is disabled. |
| 1024 | * | 1024 | * |
| 1025 | * So this function waits for the display line value to settle (it | 1025 | * On Gen4 and above: |
| 1026 | * usually ends up stopping at the start of the next frame). | 1026 | * wait for the pipe register state bit to turn off |
| 1027 | * | ||
| 1028 | * Otherwise: | ||
| 1029 | * wait for the display line value to settle (it usually | ||
| 1030 | * ends up stopping at the start of the next frame). | ||
| 1031 | * | ||
| 1027 | */ | 1032 | */ |
| 1028 | void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) | 1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
| 1029 | { | 1034 | { |
| 1030 | struct drm_i915_private *dev_priv = dev->dev_private; | 1035 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1031 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 1036 | |
| 1032 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 1037 | if (INTEL_INFO(dev)->gen >= 4) { |
| 1033 | u32 last_line; | 1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); |
| 1034 | 1039 | ||
| 1035 | /* Wait for the display line to settle */ | 1040 | /* Wait for the Pipe State to go off */ |
| 1036 | do { | 1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, |
| 1037 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 1042 | 100, 0)) |
| 1038 | mdelay(5); | 1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
| 1039 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 1044 | } else { |
| 1040 | time_after(timeout, jiffies)); | 1045 | u32 last_line; |
| 1041 | 1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | |
| 1042 | if (time_after(jiffies, timeout)) | 1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
| 1043 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 1048 | |
| 1049 | /* Wait for the display line to settle */ | ||
| 1050 | do { | ||
| 1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | ||
| 1052 | mdelay(5); | ||
| 1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | ||
| 1054 | time_after(timeout, jiffies)); | ||
| 1055 | if (time_after(jiffies, timeout)) | ||
| 1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | ||
| 1057 | } | ||
| 1044 | } | 1058 | } |
| 1045 | 1059 | ||
| 1046 | /* Parameters have changed, update FBC info */ | 1060 | /* Parameters have changed, update FBC info */ |
| @@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2328 | I915_READ(dspbase_reg); | 2342 | I915_READ(dspbase_reg); |
| 2329 | } | 2343 | } |
| 2330 | 2344 | ||
| 2331 | /* Wait for vblank for the disable to take effect */ | ||
| 2332 | intel_wait_for_vblank_off(dev, pipe); | ||
| 2333 | |||
| 2334 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2345 | /* Don't disable pipe A or pipe A PLLs if needed */ |
| 2335 | if (pipeconf_reg == PIPEACONF && | 2346 | if (pipeconf_reg == PIPEACONF && |
| 2336 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { |
| 2348 | /* Wait for vblank for the disable to take effect */ | ||
| 2349 | intel_wait_for_vblank(dev, pipe); | ||
| 2337 | goto skip_pipe_off; | 2350 | goto skip_pipe_off; |
| 2351 | } | ||
| 2338 | 2352 | ||
| 2339 | /* Next, disable display pipes */ | 2353 | /* Next, disable display pipes */ |
| 2340 | temp = I915_READ(pipeconf_reg); | 2354 | temp = I915_READ(pipeconf_reg); |
| @@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2343 | I915_READ(pipeconf_reg); | 2357 | I915_READ(pipeconf_reg); |
| 2344 | } | 2358 | } |
| 2345 | 2359 | ||
| 2346 | /* Wait for vblank for the disable to take effect. */ | 2360 | /* Wait for the pipe to turn off */ |
| 2347 | intel_wait_for_vblank_off(dev, pipe); | 2361 | intel_wait_for_pipe_off(dev, pipe); |
| 2348 | 2362 | ||
| 2349 | temp = I915_READ(dpll_reg); | 2363 | temp = I915_READ(dpll_reg); |
| 2350 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
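intel_wait_for_vblank_off() is replaced by intel_wait_for_pipe_off(), which waits for what the code actually cares about: on gen4 and newer the pipe exposes an active-state bit in PIPEACONF/PIPEBCONF, so the helper just polls that bit through wait_for() with a 100 ms budget; older hardware keeps the display-line-settle loop. The DPMS-off path then only waits for a vblank in the quirky keep-pipe-A-running case and otherwise waits for the pipe itself to stop. The poll-until-timeout shape, as a standalone sketch rather than the kernel's wait_for() macro:

    #define _POSIX_C_SOURCE 200809L
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Poll cond() every ~5 ms until it holds or timeout_ms elapses. */
    static bool poll_until(bool (*cond)(void), long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond())
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                              (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return false;
            nanosleep(&(struct timespec){ .tv_nsec = 5 * 1000000 }, NULL);
        }
    }

    static bool pipe_inactive(void) { return true; }  /* stand-in for the register read */

    int main(void)
    {
        printf("%s\n", poll_until(pipe_inactive, 100) ? "pipe off" : "timed out");
        return 0;
    }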
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1a51ee07de3e..9ab8708ac6ba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1138,18 +1138,14 @@ static bool | |||
| 1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
| 1139 | uint32_t dp_reg_value, | 1139 | uint32_t dp_reg_value, |
| 1140 | uint8_t dp_train_pat, | 1140 | uint8_t dp_train_pat, |
| 1141 | uint8_t train_set[4], | 1141 | uint8_t train_set[4]) |
| 1142 | bool first) | ||
| 1143 | { | 1142 | { |
| 1144 | struct drm_device *dev = intel_dp->base.enc.dev; | 1143 | struct drm_device *dev = intel_dp->base.enc.dev; |
| 1145 | struct drm_i915_private *dev_priv = dev->dev_private; | 1144 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1146 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
| 1147 | int ret; | 1145 | int ret; |
| 1148 | 1146 | ||
| 1149 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1147 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
| 1150 | POSTING_READ(intel_dp->output_reg); | 1148 | POSTING_READ(intel_dp->output_reg); |
| 1151 | if (first) | ||
| 1152 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
| 1153 | 1149 | ||
| 1154 | intel_dp_aux_native_write_1(intel_dp, | 1150 | intel_dp_aux_native_write_1(intel_dp, |
| 1155 | DP_TRAINING_PATTERN_SET, | 1151 | DP_TRAINING_PATTERN_SET, |
| @@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
| 1174 | uint8_t voltage; | 1170 | uint8_t voltage; |
| 1175 | bool clock_recovery = false; | 1171 | bool clock_recovery = false; |
| 1176 | bool channel_eq = false; | 1172 | bool channel_eq = false; |
| 1177 | bool first = true; | ||
| 1178 | int tries; | 1173 | int tries; |
| 1179 | u32 reg; | 1174 | u32 reg; |
| 1180 | uint32_t DP = intel_dp->DP; | 1175 | uint32_t DP = intel_dp->DP; |
| 1176 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
| 1177 | |||
| 1178 | /* Enable output, wait for it to become active */ | ||
| 1179 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
| 1180 | POSTING_READ(intel_dp->output_reg); | ||
| 1181 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
| 1181 | 1182 | ||
| 1182 | /* Write the link configuration data */ | 1183 | /* Write the link configuration data */ |
| 1183 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1184 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
| @@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
| 1210 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1211 | reg = DP | DP_LINK_TRAIN_PAT_1; |
| 1211 | 1212 | ||
| 1212 | if (!intel_dp_set_link_train(intel_dp, reg, | 1213 | if (!intel_dp_set_link_train(intel_dp, reg, |
| 1213 | DP_TRAINING_PATTERN_1, train_set, first)) | 1214 | DP_TRAINING_PATTERN_1, train_set)) |
| 1214 | break; | 1215 | break; |
| 1215 | first = false; | ||
| 1216 | /* Set training pattern 1 */ | 1216 | /* Set training pattern 1 */ |
| 1217 | 1217 | ||
| 1218 | udelay(100); | 1218 | udelay(100); |
| @@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
| 1266 | 1266 | ||
| 1267 | /* channel eq pattern */ | 1267 | /* channel eq pattern */ |
| 1268 | if (!intel_dp_set_link_train(intel_dp, reg, | 1268 | if (!intel_dp_set_link_train(intel_dp, reg, |
| 1269 | DP_TRAINING_PATTERN_2, train_set, | 1269 | DP_TRAINING_PATTERN_2, train_set)) |
| 1270 | false)) | ||
| 1271 | break; | 1270 | break; |
| 1272 | 1271 | ||
| 1273 | udelay(400); | 1272 | udelay(400); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ad312ca6b3e5..8828b3ac6414 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 229 | struct drm_crtc *crtc); | 229 | struct drm_crtc *crtc); |
| 230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
| 231 | struct drm_file *file_priv); | 231 | struct drm_file *file_priv); |
| 232 | extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); | ||
| 233 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 232 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
| 234 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 233 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
| 235 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 234 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7bdc96256bf5..b61966c126d3 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
| 237 | drm_fb_helper_fini(&ifbdev->helper); | 237 | drm_fb_helper_fini(&ifbdev->helper); |
| 238 | 238 | ||
| 239 | drm_framebuffer_cleanup(&ifb->base); | 239 | drm_framebuffer_cleanup(&ifb->base); |
| 240 | if (ifb->obj) | 240 | if (ifb->obj) { |
| 241 | drm_gem_object_unreference(ifb->obj); | 241 | drm_gem_object_unreference(ifb->obj); |
| 242 | ifb->obj = NULL; | ||
| 243 | } | ||
| 242 | 244 | ||
| 243 | return 0; | 245 | return 0; |
| 244 | } | 246 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ead7b8fc53fc..19620a6709f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
| 167 | goto out; | 167 | goto out; |
| 168 | 168 | ||
| 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 169 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
| 170 | /* drop reference from allocate - handle holds it now */ | ||
| 171 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
| 170 | out: | 172 | out: |
| 171 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); | ||
| 172 | |||
| 173 | if (ret) | ||
| 174 | drm_gem_object_unreference_unlocked(nvbo->gem); | ||
| 175 | return ret; | 173 | return ret; |
| 176 | } | 174 | } |
| 177 | 175 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 79082d4398ae..2f93d46ae69a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -1137,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1137 | 1137 | ||
| 1138 | WREG32(RCU_IND_INDEX, 0x203); | 1138 | WREG32(RCU_IND_INDEX, 0x203); |
| 1139 | efuse_straps_3 = RREG32(RCU_IND_DATA); | 1139 | efuse_straps_3 = RREG32(RCU_IND_DATA); |
| 1140 | efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; | 1140 | efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); |
| 1141 | 1141 | ||
| 1142 | switch(efuse_box_bit_127_124) { | 1142 | switch(efuse_box_bit_127_124) { |
| 1143 | case 0x0: | 1143 | case 0x0: |
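The efuse change fixes a cast-precedence bug: in "(u8)(efuse_straps_3 & 0xF0000000) >> 28" the cast binds tighter than the shift, so the masked value (whose low 8 bits are always zero) is truncated to 0 before being shifted, and the switch below could only ever hit case 0x0. Moving the shift inside the parentheses extracts bits 31:28 as intended. A two-line demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t straps = 0x30000000;  /* bits 31:28 = 0x3 */

        uint8_t wrong = (uint8_t)(straps & 0xF0000000) >> 28;    /* cast first: 0 */
        uint8_t right = (uint8_t)((straps & 0xF0000000) >> 28);  /* shift first: 3 */

        printf("wrong=%u right=%u\n", (unsigned)wrong, (unsigned)right);
        return 0;
    }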
| @@ -1407,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
| 1407 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 1407 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; |
| 1408 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 1408 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; |
| 1409 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1409 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 1410 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 1410 | r600_vram_gtt_location(rdev, &rdev->mc); | 1411 | r600_vram_gtt_location(rdev, &rdev->mc); |
| 1411 | radeon_update_bandwidth_info(rdev); | 1412 | radeon_update_bandwidth_info(rdev); |
| 1412 | 1413 | ||
| @@ -1520,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
| 1520 | { | 1521 | { |
| 1521 | u32 tmp; | 1522 | u32 tmp; |
| 1522 | 1523 | ||
| 1523 | WREG32(CP_INT_CNTL, 0); | 1524 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
| 1524 | WREG32(GRBM_INT_CNTL, 0); | 1525 | WREG32(GRBM_INT_CNTL, 0); |
| 1525 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 1526 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
| 1526 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 1527 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e151f16a8f86..e59422320bb6 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 1030 | return r; | 1030 | return r; |
| 1031 | } | 1031 | } |
| 1032 | rdev->cp.ready = true; | 1032 | rdev->cp.ready = true; |
| 1033 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | ||
| 1033 | return 0; | 1034 | return 0; |
| 1034 | } | 1035 | } |
| 1035 | 1036 | ||
| @@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev) | |||
| 1047 | void r100_cp_disable(struct radeon_device *rdev) | 1048 | void r100_cp_disable(struct radeon_device *rdev) |
| 1048 | { | 1049 | { |
| 1049 | /* Disable ring */ | 1050 | /* Disable ring */ |
| 1051 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 1050 | rdev->cp.ready = false; | 1052 | rdev->cp.ready = false; |
| 1051 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1053 | WREG32(RADEON_CP_CSQ_MODE, 0); |
| 1052 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1054 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
| @@ -2295,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev) | |||
| 2295 | /* FIXME we don't use the second aperture yet when we could use it */ | 2297 | /* FIXME we don't use the second aperture yet when we could use it */ |
| 2296 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) | 2298 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
| 2297 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2299 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 2300 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 2298 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | 2301 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
| 2299 | if (rdev->flags & RADEON_IS_IGP) { | 2302 | if (rdev->flags & RADEON_IS_IGP) { |
| 2300 | uint32_t tom; | 2303 | uint32_t tom; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index ddc3adea1dda..7b65e4efe8af 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 1248 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1248 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
| 1249 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1249 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
| 1250 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1250 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 1251 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 1251 | r600_vram_gtt_location(rdev, &rdev->mc); | 1252 | r600_vram_gtt_location(rdev, &rdev->mc); |
| 1252 | 1253 | ||
| 1253 | if (rdev->flags & RADEON_IS_IGP) { | 1254 | if (rdev->flags & RADEON_IS_IGP) { |
| @@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
| 1917 | */ | 1918 | */ |
| 1918 | void r600_cp_stop(struct radeon_device *rdev) | 1919 | void r600_cp_stop(struct radeon_device *rdev) |
| 1919 | { | 1920 | { |
| 1921 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 1920 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1922 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
| 1921 | } | 1923 | } |
| 1922 | 1924 | ||
| @@ -2910,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) | |||
| 2910 | { | 2912 | { |
| 2911 | u32 tmp; | 2913 | u32 tmp; |
| 2912 | 2914 | ||
| 2913 | WREG32(CP_INT_CNTL, 0); | 2915 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
| 2914 | WREG32(GRBM_INT_CNTL, 0); | 2916 | WREG32(GRBM_INT_CNTL, 0); |
| 2915 | WREG32(DxMODE_INT_MASK, 0); | 2917 | WREG32(DxMODE_INT_MASK, 0); |
| 2916 | if (ASIC_IS_DCE3(rdev)) { | 2918 | if (ASIC_IS_DCE3(rdev)) { |
| @@ -3528,7 +3530,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
| 3528 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3530 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
| 3529 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3531 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
| 3530 | */ | 3532 | */ |
| 3531 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 3533 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
| 3534 | rdev->vram_scratch.ptr) { | ||
| 3532 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | 3535 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
| 3533 | u32 tmp; | 3536 | u32 tmp; |
| 3534 | 3537 | ||
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9ceb2a1ce799..3473c00781ff 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -532,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
| 532 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 532 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); |
| 533 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 533 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
| 534 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 534 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
| 535 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | ||
| 535 | return 0; | 536 | return 0; |
| 536 | } | 537 | } |
| 537 | 538 | ||
| @@ -539,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
| 539 | { | 540 | { |
| 540 | int r; | 541 | int r; |
| 541 | 542 | ||
| 543 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 542 | if (rdev->r600_blit.shader_obj == NULL) | 544 | if (rdev->r600_blit.shader_obj == NULL) |
| 543 | return; | 545 | return; |
| 544 | /* If we can't reserve the bo, unref should be enough to destroy | 546 | /* If we can't reserve the bo, unref should be enough to destroy |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a168d644bf9e..9ff38c99a6ea 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -344,6 +344,7 @@ struct radeon_mc { | |||
| 344 | * about vram size near mc fb location */ | 344 | * about vram size near mc fb location */ |
| 345 | u64 mc_vram_size; | 345 | u64 mc_vram_size; |
| 346 | u64 visible_vram_size; | 346 | u64 visible_vram_size; |
| 347 | u64 active_vram_size; | ||
| 347 | u64 gtt_size; | 348 | u64 gtt_size; |
| 348 | u64 gtt_start; | 349 | u64 gtt_start; |
| 349 | u64 gtt_end; | 350 | u64 gtt_end; |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ebae14c4b768..8e43ddae70cc 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 317 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ | ||
| 321 | if ((dev->pdev->device == 0x796e) && | ||
| 322 | (dev->pdev->subsystem_vendor == 0x1462) && | ||
| 323 | (dev->pdev->subsystem_device == 0x7302)) { | ||
| 324 | if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || | ||
| 325 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
| 326 | return false; | ||
| 327 | } | ||
| 328 | |||
| 320 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 329 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
| 321 | if ((dev->pdev->device == 0x7941) && | 330 | if ((dev->pdev->device == 0x7941) && |
| 322 | (dev->pdev->subsystem_vendor == 0x147b) && | 331 | (dev->pdev->subsystem_vendor == 0x147b) && |
| @@ -1549,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev) | |||
| 1549 | switch (tv_info->ucTV_BootUpDefaultStandard) { | 1558 | switch (tv_info->ucTV_BootUpDefaultStandard) { |
| 1550 | case ATOM_TV_NTSC: | 1559 | case ATOM_TV_NTSC: |
| 1551 | tv_std = TV_STD_NTSC; | 1560 | tv_std = TV_STD_NTSC; |
| 1552 | DRM_INFO("Default TV standard: NTSC\n"); | 1561 | DRM_DEBUG_KMS("Default TV standard: NTSC\n"); |
| 1553 | break; | 1562 | break; |
| 1554 | case ATOM_TV_NTSCJ: | 1563 | case ATOM_TV_NTSCJ: |
| 1555 | tv_std = TV_STD_NTSC_J; | 1564 | tv_std = TV_STD_NTSC_J; |
| 1556 | DRM_INFO("Default TV standard: NTSC-J\n"); | 1565 | DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); |
| 1557 | break; | 1566 | break; |
| 1558 | case ATOM_TV_PAL: | 1567 | case ATOM_TV_PAL: |
| 1559 | tv_std = TV_STD_PAL; | 1568 | tv_std = TV_STD_PAL; |
| 1560 | DRM_INFO("Default TV standard: PAL\n"); | 1569 | DRM_DEBUG_KMS("Default TV standard: PAL\n"); |
| 1561 | break; | 1570 | break; |
| 1562 | case ATOM_TV_PALM: | 1571 | case ATOM_TV_PALM: |
| 1563 | tv_std = TV_STD_PAL_M; | 1572 | tv_std = TV_STD_PAL_M; |
| 1564 | DRM_INFO("Default TV standard: PAL-M\n"); | 1573 | DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); |
| 1565 | break; | 1574 | break; |
| 1566 | case ATOM_TV_PALN: | 1575 | case ATOM_TV_PALN: |
| 1567 | tv_std = TV_STD_PAL_N; | 1576 | tv_std = TV_STD_PAL_N; |
| 1568 | DRM_INFO("Default TV standard: PAL-N\n"); | 1577 | DRM_DEBUG_KMS("Default TV standard: PAL-N\n"); |
| 1569 | break; | 1578 | break; |
| 1570 | case ATOM_TV_PALCN: | 1579 | case ATOM_TV_PALCN: |
| 1571 | tv_std = TV_STD_PAL_CN; | 1580 | tv_std = TV_STD_PAL_CN; |
| 1572 | DRM_INFO("Default TV standard: PAL-CN\n"); | 1581 | DRM_DEBUG_KMS("Default TV standard: PAL-CN\n"); |
| 1573 | break; | 1582 | break; |
| 1574 | case ATOM_TV_PAL60: | 1583 | case ATOM_TV_PAL60: |
| 1575 | tv_std = TV_STD_PAL_60; | 1584 | tv_std = TV_STD_PAL_60; |
| 1576 | DRM_INFO("Default TV standard: PAL-60\n"); | 1585 | DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); |
| 1577 | break; | 1586 | break; |
| 1578 | case ATOM_TV_SECAM: | 1587 | case ATOM_TV_SECAM: |
| 1579 | tv_std = TV_STD_SECAM; | 1588 | tv_std = TV_STD_SECAM; |
| 1580 | DRM_INFO("Default TV standard: SECAM\n"); | 1589 | DRM_DEBUG_KMS("Default TV standard: SECAM\n"); |
| 1581 | break; | 1590 | break; |
| 1582 | default: | 1591 | default: |
| 1583 | tv_std = TV_STD_NTSC; | 1592 | tv_std = TV_STD_NTSC; |
| 1584 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); | 1593 | DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n"); |
| 1585 | break; | 1594 | break; |
| 1586 | } | 1595 | } |
| 1587 | } | 1596 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a04b7a6ad95f..7b7ea269549c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev) | |||
| 913 | switch (RBIOS8(tv_info + 7) & 0xf) { | 913 | switch (RBIOS8(tv_info + 7) & 0xf) { |
| 914 | case 1: | 914 | case 1: |
| 915 | tv_std = TV_STD_NTSC; | 915 | tv_std = TV_STD_NTSC; |
| 916 | DRM_INFO("Default TV standard: NTSC\n"); | 916 | DRM_DEBUG_KMS("Default TV standard: NTSC\n"); |
| 917 | break; | 917 | break; |
| 918 | case 2: | 918 | case 2: |
| 919 | tv_std = TV_STD_PAL; | 919 | tv_std = TV_STD_PAL; |
| 920 | DRM_INFO("Default TV standard: PAL\n"); | 920 | DRM_DEBUG_KMS("Default TV standard: PAL\n"); |
| 921 | break; | 921 | break; |
| 922 | case 3: | 922 | case 3: |
| 923 | tv_std = TV_STD_PAL_M; | 923 | tv_std = TV_STD_PAL_M; |
| 924 | DRM_INFO("Default TV standard: PAL-M\n"); | 924 | DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); |
| 925 | break; | 925 | break; |
| 926 | case 4: | 926 | case 4: |
| 927 | tv_std = TV_STD_PAL_60; | 927 | tv_std = TV_STD_PAL_60; |
| 928 | DRM_INFO("Default TV standard: PAL-60\n"); | 928 | DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); |
| 929 | break; | 929 | break; |
| 930 | case 5: | 930 | case 5: |
| 931 | tv_std = TV_STD_NTSC_J; | 931 | tv_std = TV_STD_NTSC_J; |
| 932 | DRM_INFO("Default TV standard: NTSC-J\n"); | 932 | DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); |
| 933 | break; | 933 | break; |
| 934 | case 6: | 934 | case 6: |
| 935 | tv_std = TV_STD_SCART_PAL; | 935 | tv_std = TV_STD_SCART_PAL; |
| 936 | DRM_INFO("Default TV standard: SCART-PAL\n"); | 936 | DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n"); |
| 937 | break; | 937 | break; |
| 938 | default: | 938 | default: |
| 939 | tv_std = TV_STD_NTSC; | 939 | tv_std = TV_STD_NTSC; |
| 940 | DRM_INFO | 940 | DRM_DEBUG_KMS |
| 941 | ("Unknown TV standard; defaulting to NTSC\n"); | 941 | ("Unknown TV standard; defaulting to NTSC\n"); |
| 942 | break; | 942 | break; |
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) { | 945 | switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) { |
| 946 | case 0: | 946 | case 0: |
| 947 | DRM_INFO("29.498928713 MHz TV ref clk\n"); | 947 | DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n"); |
| 948 | break; | 948 | break; |
| 949 | case 1: | 949 | case 1: |
| 950 | DRM_INFO("28.636360000 MHz TV ref clk\n"); | 950 | DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n"); |
| 951 | break; | 951 | break; |
| 952 | case 2: | 952 | case 2: |
| 953 | DRM_INFO("14.318180000 MHz TV ref clk\n"); | 953 | DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n"); |
| 954 | break; | 954 | break; |
| 955 | case 3: | 955 | case 3: |
| 956 | DRM_INFO("27.000000000 MHz TV ref clk\n"); | 956 | DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n"); |
| 957 | break; | 957 | break; |
| 958 | default: | 958 | default: |
| 959 | break; | 959 | break; |
| @@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | |||
| 1324 | 1324 | ||
| 1325 | if (tmds_info) { | 1325 | if (tmds_info) { |
| 1326 | ver = RBIOS8(tmds_info); | 1326 | ver = RBIOS8(tmds_info); |
| 1327 | DRM_INFO("DFP table revision: %d\n", ver); | 1327 | DRM_DEBUG_KMS("DFP table revision: %d\n", ver); |
| 1328 | if (ver == 3) { | 1328 | if (ver == 3) { |
| 1329 | n = RBIOS8(tmds_info + 5) + 1; | 1329 | n = RBIOS8(tmds_info + 5) + 1; |
| 1330 | if (n > 4) | 1330 | if (n > 4) |
| @@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder | |||
| 1408 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | 1408 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); |
| 1409 | if (offset) { | 1409 | if (offset) { |
| 1410 | ver = RBIOS8(offset); | 1410 | ver = RBIOS8(offset); |
| 1411 | DRM_INFO("External TMDS Table revision: %d\n", ver); | 1411 | DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver); |
| 1412 | tmds->slave_addr = RBIOS8(offset + 4 + 2); | 1412 | tmds->slave_addr = RBIOS8(offset + 4 + 2); |
| 1413 | tmds->slave_addr >>= 1; /* 7 bit addressing */ | 1413 | tmds->slave_addr >>= 1; /* 7 bit addressing */ |
| 1414 | gpio = RBIOS8(offset + 4 + 3); | 1414 | gpio = RBIOS8(offset + 4 + 3); |
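The long runs of DRM_INFO to DRM_DEBUG_KMS conversions in radeon_atombios.c and radeon_combios.c change no behaviour; they only demote the BIOS-table chatter (default TV standard, TV reference clock, DFP and external-TMDS table revisions) from the default kernel log to KMS debug output, which is printed only when DRM debugging is enabled (for example via the drm.debug module parameter). The one functional addition in the same area is the quirk that filters out the DFP2/DFP3 outputs the MSI K9A2GM V2/V3 board does not actually have.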
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5731fc9b1ae3..3eef567b0421 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
| @@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 203 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 203 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 204 | struct radeon_device *rdev = crtc->dev->dev_private; | 204 | struct radeon_device *rdev = crtc->dev->dev_private; |
| 205 | int xorigin = 0, yorigin = 0; | 205 | int xorigin = 0, yorigin = 0; |
| 206 | int w = radeon_crtc->cursor_width; | ||
| 206 | 207 | ||
| 207 | if (x < 0) | 208 | if (x < 0) |
| 208 | xorigin = -x + 1; | 209 | xorigin = -x + 1; |
| @@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 213 | if (yorigin >= CURSOR_HEIGHT) | 214 | if (yorigin >= CURSOR_HEIGHT) |
| 214 | yorigin = CURSOR_HEIGHT - 1; | 215 | yorigin = CURSOR_HEIGHT - 1; |
| 215 | 216 | ||
| 216 | radeon_lock_cursor(crtc, true); | 217 | if (ASIC_IS_AVIVO(rdev)) { |
| 217 | if (ASIC_IS_DCE4(rdev)) { | ||
| 218 | /* cursors are offset into the total surface */ | ||
| 219 | x += crtc->x; | ||
| 220 | y += crtc->y; | ||
| 221 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
| 222 | |||
| 223 | /* XXX: check if evergreen has the same issues as avivo chips */ | ||
| 224 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | ||
| 225 | ((xorigin ? 0 : x) << 16) | | ||
| 226 | (yorigin ? 0 : y)); | ||
| 227 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | ||
| 228 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | ||
| 229 | ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1)); | ||
| 230 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
| 231 | int w = radeon_crtc->cursor_width; | ||
| 232 | int i = 0; | 218 | int i = 0; |
| 233 | struct drm_crtc *crtc_p; | 219 | struct drm_crtc *crtc_p; |
| 234 | 220 | ||
| @@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 260 | if (w <= 0) | 246 | if (w <= 0) |
| 261 | w = 1; | 247 | w = 1; |
| 262 | } | 248 | } |
| 249 | } | ||
| 263 | 250 | ||
| 251 | radeon_lock_cursor(crtc, true); | ||
| 252 | if (ASIC_IS_DCE4(rdev)) { | ||
| 253 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | ||
| 254 | ((xorigin ? 0 : x) << 16) | | ||
| 255 | (yorigin ? 0 : y)); | ||
| 256 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | ||
| 257 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | ||
| 258 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | ||
| 259 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
| 264 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, | 260 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, |
| 265 | ((xorigin ? 0 : x) << 16) | | 261 | ((xorigin ? 0 : x) << 16) | |
| 266 | (yorigin ? 0 : y)); | 262 | (yorigin ? 0 : y)); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 127a395f70fb..b92d2f2fcbed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
| 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); | 349 | DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); |
| 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | 350 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) |
| 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); | 351 | DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); |
| 352 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
| 353 | DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); | ||
| 352 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | 354 | if (devices & ATOM_DEVICE_TV1_SUPPORT) |
| 353 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); | 355 | DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); |
| 354 | if (devices & ATOM_DEVICE_CV_SUPPORT) | 356 | if (devices & ATOM_DEVICE_CV_SUPPORT) |
| @@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
| 841 | { | 843 | { |
| 842 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 844 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
| 843 | 845 | ||
| 844 | if (radeon_fb->obj) | 846 | if (radeon_fb->obj) { |
| 845 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 847 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
| 848 | } | ||
| 846 | drm_framebuffer_cleanup(fb); | 849 | drm_framebuffer_cleanup(fb); |
| 847 | kfree(radeon_fb); | 850 | kfree(radeon_fb); |
| 848 | } | 851 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d941..40b0c087b592 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -94,6 +94,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
| 94 | ret = radeon_bo_reserve(rbo, false); | 94 | ret = radeon_bo_reserve(rbo, false); |
| 95 | if (likely(ret == 0)) { | 95 | if (likely(ret == 0)) { |
| 96 | radeon_bo_kunmap(rbo); | 96 | radeon_bo_kunmap(rbo); |
| 97 | radeon_bo_unpin(rbo); | ||
| 97 | radeon_bo_unreserve(rbo); | 98 | radeon_bo_unreserve(rbo); |
| 98 | } | 99 | } |
| 99 | drm_gem_object_unreference_unlocked(gobj); | 100 | drm_gem_object_unreference_unlocked(gobj); |
| @@ -325,8 +326,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
| 325 | { | 326 | { |
| 326 | struct fb_info *info; | 327 | struct fb_info *info; |
| 327 | struct radeon_framebuffer *rfb = &rfbdev->rfb; | 328 | struct radeon_framebuffer *rfb = &rfbdev->rfb; |
| 328 | struct radeon_bo *rbo; | ||
| 329 | int r; | ||
| 330 | 329 | ||
| 331 | if (rfbdev->helper.fbdev) { | 330 | if (rfbdev->helper.fbdev) { |
| 332 | info = rfbdev->helper.fbdev; | 331 | info = rfbdev->helper.fbdev; |
| @@ -338,14 +337,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb | |||
| 338 | } | 337 | } |
| 339 | 338 | ||
| 340 | if (rfb->obj) { | 339 | if (rfb->obj) { |
| 341 | rbo = rfb->obj->driver_private; | 340 | radeonfb_destroy_pinned_object(rfb->obj); |
| 342 | r = radeon_bo_reserve(rbo, false); | 341 | rfb->obj = NULL; |
| 343 | if (likely(r == 0)) { | ||
| 344 | radeon_bo_kunmap(rbo); | ||
| 345 | radeon_bo_unpin(rbo); | ||
| 346 | radeon_bo_unreserve(rbo); | ||
| 347 | } | ||
| 348 | drm_gem_object_unreference_unlocked(rfb->obj); | ||
| 349 | } | 342 | } |
| 350 | drm_fb_helper_fini(&rfbdev->helper); | 343 | drm_fb_helper_fini(&rfbdev->helper); |
| 351 | drm_framebuffer_cleanup(&rfb->base); | 344 | drm_framebuffer_cleanup(&rfb->base); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24c..d1e595d91723 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
| 201 | return r; | 201 | return r; |
| 202 | } | 202 | } |
| 203 | r = drm_gem_handle_create(filp, gobj, &handle); | 203 | r = drm_gem_handle_create(filp, gobj, &handle); |
| 204 | /* drop reference from allocate - handle holds it now */ | ||
| 205 | drm_gem_object_unreference_unlocked(gobj); | ||
| 204 | if (r) { | 206 | if (r) { |
| 205 | drm_gem_object_unreference_unlocked(gobj); | ||
| 206 | return r; | 207 | return r; |
| 207 | } | 208 | } |
| 208 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
| 209 | args->handle = handle; | 209 | args->handle = handle; |
| 210 | return 0; | 210 | return 0; |
| 211 | } | 211 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0afd1e62347d..b3b5306bb578 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
| 69 | u32 c = 0; | 69 | u32 c = 0; |
| 70 | 70 | ||
| 71 | rbo->placement.fpfn = 0; | 71 | rbo->placement.fpfn = 0; |
| 72 | rbo->placement.lpfn = 0; | 72 | rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; |
| 73 | rbo->placement.placement = rbo->placements; | 73 | rbo->placement.placement = rbo->placements; |
| 74 | rbo->placement.busy_placement = rbo->placements; | 74 | rbo->placement.busy_placement = rbo->placements; |
| 75 | if (domain & RADEON_GEM_DOMAIN_VRAM) | 75 | if (domain & RADEON_GEM_DOMAIN_VRAM) |
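The new radeon_mc::active_vram_size field added in radeon.h ties all of these hunks together: it starts out equal to visible_vram_size in the various *_mc_init() functions, is raised to real_vram_size once the CP (or the r600 blit shader) is running, and is dropped back when the CP is stopped. radeon_ttm_placement_from_domain() then uses it as the upper placement bound, so buffer objects only land in VRAM the engines can currently reach:

    rbo->placement.fpfn = 0;
    rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;  /* was 0, i.e. unbounded */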
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 353998dc2c03..3481bc7f6f58 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
| @@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
| 124 | int r; | 124 | int r; |
| 125 | 125 | ||
| 126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | 126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); |
| 127 | if (unlikely(r != 0)) { | 127 | if (unlikely(r != 0)) |
| 128 | if (r != -ERESTARTSYS) | ||
| 129 | dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo); | ||
| 130 | return r; | 128 | return r; |
| 131 | } | ||
| 132 | spin_lock(&bo->tbo.lock); | 129 | spin_lock(&bo->tbo.lock); |
| 133 | if (mem_type) | 130 | if (mem_type) |
| 134 | *mem_type = bo->tbo.mem.mem_type; | 131 | *mem_type = bo->tbo.mem.mem_type; |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index cc05b230d7ef..51d5f7b5ab21 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
| 693 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 693 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
| 694 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 694 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
| 695 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 695 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 696 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 696 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 697 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 697 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 698 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
| 698 | base = G_000004_MC_FB_START(base) << 16; | 699 | base = G_000004_MC_FB_START(base) << 16; |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3e3f75718be3..4dc2a87ea680 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
| 157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
| 158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
| 159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 160 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 160 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 161 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
| 161 | base = G_000100_MC_FB_START(base) << 16; | 162 | base = G_000100_MC_FB_START(base) << 16; |
| 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bfa59db374d2..9490da700749 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
| 267 | */ | 267 | */ |
| 268 | void r700_cp_stop(struct radeon_device *rdev) | 268 | void r700_cp_stop(struct radeon_device *rdev) |
| 269 | { | 269 | { |
| 270 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 270 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 271 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
| 271 | } | 272 | } |
| 272 | 273 | ||
| @@ -992,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
| 992 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 993 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
| 993 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 994 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
| 994 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 995 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 996 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
| 995 | r600_vram_gtt_location(rdev, &rdev->mc); | 997 | r600_vram_gtt_location(rdev, &rdev->mc); |
| 996 | radeon_update_bandwidth_info(rdev); | 998 | radeon_update_bandwidth_info(rdev); |
| 997 | 999 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cb4cf7ef4d1e..db809e034cc4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -442,6 +442,43 @@ out_err: | |||
| 442 | } | 442 | } |
| 443 | 443 | ||
| 444 | /** | 444 | /** |
| 445 | * Call bo::reserved and with the lru lock held. | ||
| 446 | * Will release GPU memory type usage on destruction. | ||
| 447 | * This is the place to put in driver specific hooks. | ||
| 448 | * Will release the bo::reserved lock and the | ||
| 449 | * lru lock on exit. | ||
| 450 | */ | ||
| 451 | |||
| 452 | static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | ||
| 453 | { | ||
| 454 | struct ttm_bo_global *glob = bo->glob; | ||
| 455 | |||
| 456 | if (bo->ttm) { | ||
| 457 | |||
| 458 | /** | ||
| 459 | * Release the lru_lock, since we don't want to have | ||
| 460 | * an atomic requirement on ttm_tt[unbind|destroy]. | ||
| 461 | */ | ||
| 462 | |||
| 463 | spin_unlock(&glob->lru_lock); | ||
| 464 | ttm_tt_unbind(bo->ttm); | ||
| 465 | ttm_tt_destroy(bo->ttm); | ||
| 466 | bo->ttm = NULL; | ||
| 467 | spin_lock(&glob->lru_lock); | ||
| 468 | } | ||
| 469 | |||
| 470 | if (bo->mem.mm_node) { | ||
| 471 | drm_mm_put_block(bo->mem.mm_node); | ||
| 472 | bo->mem.mm_node = NULL; | ||
| 473 | } | ||
| 474 | |||
| 475 | atomic_set(&bo->reserved, 0); | ||
| 476 | wake_up_all(&bo->event_queue); | ||
| 477 | spin_unlock(&glob->lru_lock); | ||
| 478 | } | ||
| 479 | |||
| 480 | |||
| 481 | /** | ||
| 445 | * If bo idle, remove from delayed- and lru lists, and unref. | 482 | * If bo idle, remove from delayed- and lru lists, and unref. |
| 446 | * If not idle, and already on delayed list, do nothing. | 483 | * If not idle, and already on delayed list, do nothing. |
| 447 | * If not idle, and not on delayed list, put on delayed list, | 484 | * If not idle, and not on delayed list, put on delayed list, |
| @@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
| 456 | int ret; | 493 | int ret; |
| 457 | 494 | ||
| 458 | spin_lock(&bo->lock); | 495 | spin_lock(&bo->lock); |
| 496 | retry: | ||
| 459 | (void) ttm_bo_wait(bo, false, false, !remove_all); | 497 | (void) ttm_bo_wait(bo, false, false, !remove_all); |
| 460 | 498 | ||
| 461 | if (!bo->sync_obj) { | 499 | if (!bo->sync_obj) { |
| @@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
| 464 | spin_unlock(&bo->lock); | 502 | spin_unlock(&bo->lock); |
| 465 | 503 | ||
| 466 | spin_lock(&glob->lru_lock); | 504 | spin_lock(&glob->lru_lock); |
| 467 | put_count = ttm_bo_del_from_lru(bo); | 505 | ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); |
| 506 | |||
| 507 | /** | ||
| 508 | * Someone else has the object reserved. Bail and retry. | ||
| 509 | */ | ||
| 468 | 510 | ||
| 469 | ret = ttm_bo_reserve_locked(bo, false, false, false, 0); | 511 | if (unlikely(ret == -EBUSY)) { |
| 470 | BUG_ON(ret); | 512 | spin_unlock(&glob->lru_lock); |
| 471 | if (bo->ttm) | 513 | spin_lock(&bo->lock); |
| 472 | ttm_tt_unbind(bo->ttm); | 514 | goto requeue; |
| 515 | } | ||
| 516 | |||
| 517 | /** | ||
| 518 | * We can re-check for sync object without taking | ||
| 519 | * the bo::lock since setting the sync object requires | ||
| 520 | * also bo::reserved. A busy object at this point may | ||
| 521 | * be caused by another thread starting an accelerated | ||
| 522 | * eviction. | ||
| 523 | */ | ||
| 524 | |||
| 525 | if (unlikely(bo->sync_obj)) { | ||
| 526 | atomic_set(&bo->reserved, 0); | ||
| 527 | wake_up_all(&bo->event_queue); | ||
| 528 | spin_unlock(&glob->lru_lock); | ||
| 529 | spin_lock(&bo->lock); | ||
| 530 | if (remove_all) | ||
| 531 | goto retry; | ||
| 532 | else | ||
| 533 | goto requeue; | ||
| 534 | } | ||
| 535 | |||
| 536 | put_count = ttm_bo_del_from_lru(bo); | ||
| 473 | 537 | ||
| 474 | if (!list_empty(&bo->ddestroy)) { | 538 | if (!list_empty(&bo->ddestroy)) { |
| 475 | list_del_init(&bo->ddestroy); | 539 | list_del_init(&bo->ddestroy); |
| 476 | ++put_count; | 540 | ++put_count; |
| 477 | } | 541 | } |
| 478 | if (bo->mem.mm_node) { | ||
| 479 | drm_mm_put_block(bo->mem.mm_node); | ||
| 480 | bo->mem.mm_node = NULL; | ||
| 481 | } | ||
| 482 | spin_unlock(&glob->lru_lock); | ||
| 483 | 542 | ||
| 484 | atomic_set(&bo->reserved, 0); | 543 | ttm_bo_cleanup_memtype_use(bo); |
| 485 | 544 | ||
| 486 | while (put_count--) | 545 | while (put_count--) |
| 487 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | 546 | kref_put(&bo->list_kref, ttm_bo_ref_bug); |
| 488 | 547 | ||
| 489 | return 0; | 548 | return 0; |
| 490 | } | 549 | } |
| 491 | 550 | requeue: | |
| 492 | spin_lock(&glob->lru_lock); | 551 | spin_lock(&glob->lru_lock); |
| 493 | if (list_empty(&bo->ddestroy)) { | 552 | if (list_empty(&bo->ddestroy)) { |
| 494 | void *sync_obj = bo->sync_obj; | 553 | void *sync_obj = bo->sync_obj; |
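
The ttm_bo_cleanup_refs() hunk above replaces an unconditional reserve (followed by BUG_ON) with a trylock-style reserve that bails out and requeues the object when another thread already holds it. Below is a minimal userspace sketch of that retry-or-requeue idea using pthreads rather than the TTM reservation primitives; cleanup_one() and requeue_for_later() are illustrative names, not part of the driver.

#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t reserve;   /* stands in for bo::reserved */
	int destroyed;
};

/* Illustrative stand-in for putting the object back on a delayed-destroy list. */
static void requeue_for_later(struct object *obj)
{
	printf("object %p busy, requeueing\n", (void *)obj);
}

/* Try to reserve; if somebody else holds the object, requeue instead of asserting. */
static int cleanup_one(struct object *obj)
{
	if (pthread_mutex_trylock(&obj->reserve) != 0) {
		requeue_for_later(obj);   /* before the patch this path was a BUG_ON */
		return -1;
	}
	obj->destroyed = 1;               /* release resources while reserved */
	pthread_mutex_unlock(&obj->reserve);
	return 0;
}

int main(void)
{
	struct object obj = { .reserve = PTHREAD_MUTEX_INITIALIZER, .destroyed = 0 };

	return cleanup_one(&obj) ? 1 : 0;
}

The point of the restructuring is the same in both settings: contention on the reservation is an expected condition to be retried later, not a fatal invariant violation.
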
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 72ec2e2b6e97..a96ed6d9d010 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { | |||
| 148 | {0, 0, 0} | 148 | {0, 0, 0} |
| 149 | }; | 149 | }; |
| 150 | 150 | ||
| 151 | static char *vmw_devname = "vmwgfx"; | 151 | static int enable_fbdev; |
| 152 | 152 | ||
| 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
| 154 | static void vmw_master_init(struct vmw_master *); | 154 | static void vmw_master_init(struct vmw_master *); |
| 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
| 156 | void *ptr); | 156 | void *ptr); |
| 157 | 157 | ||
| 158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | ||
| 159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | ||
| 160 | |||
| 158 | static void vmw_print_capabilities(uint32_t capabilities) | 161 | static void vmw_print_capabilities(uint32_t capabilities) |
| 159 | { | 162 | { |
| 160 | DRM_INFO("Capabilities:\n"); | 163 | DRM_INFO("Capabilities:\n"); |
| @@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
| 192 | { | 195 | { |
| 193 | int ret; | 196 | int ret; |
| 194 | 197 | ||
| 195 | vmw_kms_save_vga(dev_priv); | ||
| 196 | |||
| 197 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); | 198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
| 198 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
| 199 | DRM_ERROR("Unable to initialize FIFO.\n"); | 200 | DRM_ERROR("Unable to initialize FIFO.\n"); |
| @@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
| 206 | static void vmw_release_device(struct vmw_private *dev_priv) | 207 | static void vmw_release_device(struct vmw_private *dev_priv) |
| 207 | { | 208 | { |
| 208 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
| 209 | vmw_kms_restore_vga(dev_priv); | ||
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | ||
| 213 | { | ||
| 214 | int ret = 0; | ||
| 215 | |||
| 216 | mutex_lock(&dev_priv->release_mutex); | ||
| 217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | ||
| 218 | ret = vmw_request_device(dev_priv); | ||
| 219 | if (unlikely(ret != 0)) | ||
| 220 | --dev_priv->num_3d_resources; | ||
| 221 | } | ||
| 222 | mutex_unlock(&dev_priv->release_mutex); | ||
| 223 | return ret; | ||
| 224 | } | ||
| 225 | |||
| 226 | |||
| 227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) | ||
| 228 | { | ||
| 229 | int32_t n3d; | ||
| 230 | |||
| 231 | mutex_lock(&dev_priv->release_mutex); | ||
| 232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | ||
| 233 | vmw_release_device(dev_priv); | ||
| 234 | n3d = (int32_t) dev_priv->num_3d_resources; | ||
| 235 | mutex_unlock(&dev_priv->release_mutex); | ||
| 236 | |||
| 237 | BUG_ON(n3d < 0); | ||
| 238 | } | ||
| 212 | 239 | ||
| 213 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
| 214 | { | 241 | { |
| @@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 228 | dev_priv->last_read_sequence = (uint32_t) -100; | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
| 229 | mutex_init(&dev_priv->hw_mutex); | 256 | mutex_init(&dev_priv->hw_mutex); |
| 230 | mutex_init(&dev_priv->cmdbuf_mutex); | 257 | mutex_init(&dev_priv->cmdbuf_mutex); |
| 258 | mutex_init(&dev_priv->release_mutex); | ||
| 231 | rwlock_init(&dev_priv->resource_lock); | 259 | rwlock_init(&dev_priv->resource_lock); |
| 232 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
| 233 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
| @@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 244 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
| 245 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 273 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
| 246 | 274 | ||
| 275 | dev_priv->enable_fb = enable_fbdev; | ||
| 276 | |||
| 247 | mutex_lock(&dev_priv->hw_mutex); | 277 | mutex_lock(&dev_priv->hw_mutex); |
| 248 | 278 | ||
| 249 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 279 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
| @@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 343 | 373 | ||
| 344 | dev->dev_private = dev_priv; | 374 | dev->dev_private = dev_priv; |
| 345 | 375 | ||
| 346 | if (!dev->devname) | ||
| 347 | dev->devname = vmw_devname; | ||
| 348 | |||
| 349 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
| 350 | ret = drm_irq_install(dev); | ||
| 351 | if (unlikely(ret != 0)) { | ||
| 352 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
| 353 | goto out_no_irq; | ||
| 354 | } | ||
| 355 | } | ||
| 356 | |||
| 357 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); | 376 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
| 358 | dev_priv->stealth = (ret != 0); | 377 | dev_priv->stealth = (ret != 0); |
| 359 | if (dev_priv->stealth) { | 378 | if (dev_priv->stealth) { |
| @@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 369 | goto out_no_device; | 388 | goto out_no_device; |
| 370 | } | 389 | } |
| 371 | } | 390 | } |
| 372 | ret = vmw_request_device(dev_priv); | 391 | ret = vmw_kms_init(dev_priv); |
| 373 | if (unlikely(ret != 0)) | 392 | if (unlikely(ret != 0)) |
| 374 | goto out_no_device; | 393 | goto out_no_kms; |
| 375 | vmw_kms_init(dev_priv); | ||
| 376 | vmw_overlay_init(dev_priv); | 394 | vmw_overlay_init(dev_priv); |
| 377 | vmw_fb_init(dev_priv); | 395 | if (dev_priv->enable_fb) { |
| 396 | ret = vmw_3d_resource_inc(dev_priv); | ||
| 397 | if (unlikely(ret != 0)) | ||
| 398 | goto out_no_fifo; | ||
| 399 | vmw_kms_save_vga(dev_priv); | ||
| 400 | vmw_fb_init(dev_priv); | ||
| 401 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
| 402 | "Detected device 3D availability.\n" : | ||
| 403 | "Detected no device 3D availability.\n"); | ||
| 404 | } else { | ||
| 405 | DRM_INFO("Delayed 3D detection since we're not " | ||
| 406 | "running the device in SVGA mode yet.\n"); | ||
| 407 | } | ||
| 408 | |||
| 409 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | ||
| 410 | ret = drm_irq_install(dev); | ||
| 411 | if (unlikely(ret != 0)) { | ||
| 412 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
| 413 | goto out_no_irq; | ||
| 414 | } | ||
| 415 | } | ||
| 378 | 416 | ||
| 379 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 417 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
| 380 | register_pm_notifier(&dev_priv->pm_nb); | 418 | register_pm_notifier(&dev_priv->pm_nb); |
| 381 | 419 | ||
| 382 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
| 383 | |||
| 384 | return 0; | 420 | return 0; |
| 385 | 421 | ||
| 386 | out_no_device: | ||
| 387 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
| 388 | drm_irq_uninstall(dev_priv->dev); | ||
| 389 | if (dev->devname == vmw_devname) | ||
| 390 | dev->devname = NULL; | ||
| 391 | out_no_irq: | 422 | out_no_irq: |
| 423 | if (dev_priv->enable_fb) { | ||
| 424 | vmw_fb_close(dev_priv); | ||
| 425 | vmw_kms_restore_vga(dev_priv); | ||
| 426 | vmw_3d_resource_dec(dev_priv); | ||
| 427 | } | ||
| 428 | out_no_fifo: | ||
| 429 | vmw_overlay_close(dev_priv); | ||
| 430 | vmw_kms_close(dev_priv); | ||
| 431 | out_no_kms: | ||
| 432 | if (dev_priv->stealth) | ||
| 433 | pci_release_region(dev->pdev, 2); | ||
| 434 | else | ||
| 435 | pci_release_regions(dev->pdev); | ||
| 436 | out_no_device: | ||
| 392 | ttm_object_device_release(&dev_priv->tdev); | 437 | ttm_object_device_release(&dev_priv->tdev); |
| 393 | out_err4: | 438 | out_err4: |
| 394 | iounmap(dev_priv->mmio_virt); | 439 | iounmap(dev_priv->mmio_virt); |
| @@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 415 | 460 | ||
| 416 | unregister_pm_notifier(&dev_priv->pm_nb); | 461 | unregister_pm_notifier(&dev_priv->pm_nb); |
| 417 | 462 | ||
| 418 | vmw_fb_close(dev_priv); | 463 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
| 464 | drm_irq_uninstall(dev_priv->dev); | ||
| 465 | if (dev_priv->enable_fb) { | ||
| 466 | vmw_fb_close(dev_priv); | ||
| 467 | vmw_kms_restore_vga(dev_priv); | ||
| 468 | vmw_3d_resource_dec(dev_priv); | ||
| 469 | } | ||
| 419 | vmw_kms_close(dev_priv); | 470 | vmw_kms_close(dev_priv); |
| 420 | vmw_overlay_close(dev_priv); | 471 | vmw_overlay_close(dev_priv); |
| 421 | vmw_release_device(dev_priv); | ||
| 422 | if (dev_priv->stealth) | 472 | if (dev_priv->stealth) |
| 423 | pci_release_region(dev->pdev, 2); | 473 | pci_release_region(dev->pdev, 2); |
| 424 | else | 474 | else |
| 425 | pci_release_regions(dev->pdev); | 475 | pci_release_regions(dev->pdev); |
| 426 | 476 | ||
| 427 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | ||
| 428 | drm_irq_uninstall(dev_priv->dev); | ||
| 429 | if (dev->devname == vmw_devname) | ||
| 430 | dev->devname = NULL; | ||
| 431 | ttm_object_device_release(&dev_priv->tdev); | 477 | ttm_object_device_release(&dev_priv->tdev); |
| 432 | iounmap(dev_priv->mmio_virt); | 478 | iounmap(dev_priv->mmio_virt); |
| 433 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
| @@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
| 500 | struct drm_ioctl_desc *ioctl = | 546 | struct drm_ioctl_desc *ioctl = |
| 501 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 547 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
| 502 | 548 | ||
| 503 | if (unlikely(ioctl->cmd != cmd)) { | 549 | if (unlikely(ioctl->cmd_drv != cmd)) { |
| 504 | DRM_ERROR("Invalid command format, ioctl %d\n", | 550 | DRM_ERROR("Invalid command format, ioctl %d\n", |
| 505 | nr - DRM_COMMAND_BASE); | 551 | nr - DRM_COMMAND_BASE); |
| 506 | return -EINVAL; | 552 | return -EINVAL; |
| @@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, | |||
| 589 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 635 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
| 590 | int ret = 0; | 636 | int ret = 0; |
| 591 | 637 | ||
| 638 | if (!dev_priv->enable_fb) { | ||
| 639 | ret = vmw_3d_resource_inc(dev_priv); | ||
| 640 | if (unlikely(ret != 0)) | ||
| 641 | return ret; | ||
| 642 | vmw_kms_save_vga(dev_priv); | ||
| 643 | mutex_lock(&dev_priv->hw_mutex); | ||
| 644 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | ||
| 645 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 646 | } | ||
| 647 | |||
| 592 | if (active) { | 648 | if (active) { |
| 593 | BUG_ON(active != &dev_priv->fbdev_master); | 649 | BUG_ON(active != &dev_priv->fbdev_master); |
| 594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 650 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
| @@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, | |||
| 617 | return 0; | 673 | return 0; |
| 618 | 674 | ||
| 619 | out_no_active_lock: | 675 | out_no_active_lock: |
| 620 | vmw_release_device(dev_priv); | 676 | if (!dev_priv->enable_fb) { |
| 677 | mutex_lock(&dev_priv->hw_mutex); | ||
| 678 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
| 679 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 680 | vmw_kms_restore_vga(dev_priv); | ||
| 681 | vmw_3d_resource_dec(dev_priv); | ||
| 682 | } | ||
| 621 | return ret; | 683 | return ret; |
| 622 | } | 684 | } |
| 623 | 685 | ||
| @@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, | |||
| 645 | 707 | ||
| 646 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 708 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
| 647 | 709 | ||
| 710 | if (!dev_priv->enable_fb) { | ||
| 711 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
| 712 | if (unlikely(ret != 0)) | ||
| 713 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
| 714 | mutex_lock(&dev_priv->hw_mutex); | ||
| 715 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | ||
| 716 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 717 | vmw_kms_restore_vga(dev_priv); | ||
| 718 | vmw_3d_resource_dec(dev_priv); | ||
| 719 | } | ||
| 720 | |||
| 648 | dev_priv->active_master = &dev_priv->fbdev_master; | 721 | dev_priv->active_master = &dev_priv->fbdev_master; |
| 649 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 722 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
| 650 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 723 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
| 651 | 724 | ||
| 652 | vmw_fb_on(dev_priv); | 725 | if (dev_priv->enable_fb) |
| 726 | vmw_fb_on(dev_priv); | ||
| 653 | } | 727 | } |
| 654 | 728 | ||
| 655 | 729 | ||
| @@ -722,6 +796,7 @@ static struct drm_driver driver = { | |||
| 722 | .irq_postinstall = vmw_irq_postinstall, | 796 | .irq_postinstall = vmw_irq_postinstall, |
| 723 | .irq_uninstall = vmw_irq_uninstall, | 797 | .irq_uninstall = vmw_irq_uninstall, |
| 724 | .irq_handler = vmw_irq_handler, | 798 | .irq_handler = vmw_irq_handler, |
| 799 | .get_vblank_counter = vmw_get_vblank_counter, | ||
| 725 | .reclaim_buffers_locked = NULL, | 800 | .reclaim_buffers_locked = NULL, |
| 726 | .get_map_ofs = drm_core_get_map_ofs, | 801 | .get_map_ofs = drm_core_get_map_ofs, |
| 727 | .get_reg_ofs = drm_core_get_reg_ofs, | 802 | .get_reg_ofs = drm_core_get_reg_ofs, |
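
vmw_3d_resource_inc()/vmw_3d_resource_dec() introduce a mutex-protected reference count so the device FIFO is brought up for the first user and torn down with the last one. The following is a hedged userspace sketch of that first-user/last-user pattern; bring_up() and tear_down() are placeholders for vmw_request_device()/vmw_release_device(), not the driver's actual API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_3d_resources;

static int bring_up(void)   { printf("device requested\n"); return 0; }
static void tear_down(void) { printf("device released\n"); }

/* First caller brings the device up; roll the count back if that fails. */
static int resource_inc(void)
{
	int ret = 0;

	pthread_mutex_lock(&release_mutex);
	if (num_3d_resources++ == 0) {
		ret = bring_up();
		if (ret != 0)
			--num_3d_resources;
	}
	pthread_mutex_unlock(&release_mutex);
	return ret;
}

/* Last caller tears the device down again. */
static void resource_dec(void)
{
	pthread_mutex_lock(&release_mutex);
	if (--num_3d_resources == 0)
		tear_down();
	pthread_mutex_unlock(&release_mutex);
}

int main(void)
{
	if (resource_inc() == 0)
		resource_dec();
	return 0;
}
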
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60bf..58de6393f611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -277,6 +277,7 @@ struct vmw_private { | |||
| 277 | 277 | ||
| 278 | bool stealth; | 278 | bool stealth; |
| 279 | bool is_opened; | 279 | bool is_opened; |
| 280 | bool enable_fb; | ||
| 280 | 281 | ||
| 281 | /** | 282 | /** |
| 282 | * Master management. | 283 | * Master management. |
| @@ -285,6 +286,9 @@ struct vmw_private { | |||
| 285 | struct vmw_master *active_master; | 286 | struct vmw_master *active_master; |
| 286 | struct vmw_master fbdev_master; | 287 | struct vmw_master fbdev_master; |
| 287 | struct notifier_block pm_nb; | 288 | struct notifier_block pm_nb; |
| 289 | |||
| 290 | struct mutex release_mutex; | ||
| 291 | uint32_t num_3d_resources; | ||
| 288 | }; | 292 | }; |
| 289 | 293 | ||
| 290 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 294 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
| @@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, | |||
| 319 | return val; | 323 | return val; |
| 320 | } | 324 | } |
| 321 | 325 | ||
| 326 | int vmw_3d_resource_inc(struct vmw_private *dev_priv); | ||
| 327 | void vmw_3d_resource_dec(struct vmw_private *dev_priv); | ||
| 328 | |||
| 322 | /** | 329 | /** |
| 323 | * GMR utilities - vmwgfx_gmr.c | 330 | * GMR utilities - vmwgfx_gmr.c |
| 324 | */ | 331 | */ |
| @@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
| 511 | unsigned bbp, unsigned depth); | 518 | unsigned bbp, unsigned depth); |
| 512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
| 513 | struct drm_file *file_priv); | 520 | struct drm_file *file_priv); |
| 521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | ||
| 514 | 522 | ||
| 515 | /** | 523 | /** |
| 516 | * Overlay control - vmwgfx_overlay.c | 524 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c15..409e172f4abf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
| 615 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) |
| 616 | goto err_unlock; | 616 | goto err_unlock; |
| 617 | 617 | ||
| 618 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
| 619 | bo->mem.mm_node->start < bo->num_pages) | ||
| 620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
| 621 | false, false); | ||
| 622 | |||
| 618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 623 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
| 619 | 624 | ||
| 620 | /* Could probably bug on */ | 625 | /* Could probably bug on */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea954..0fe31766e4cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 106 | mutex_lock(&dev_priv->hw_mutex); | 106 | mutex_lock(&dev_priv->hw_mutex); |
| 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 107 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
| 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 108 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
| 109 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | ||
| 109 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | 110 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); |
| 110 | 111 | ||
| 111 | min = 4; | 112 | min = 4; |
| @@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 175 | dev_priv->config_done_state); | 176 | dev_priv->config_done_state); |
| 176 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 177 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
| 177 | dev_priv->enable_state); | 178 | dev_priv->enable_state); |
| 179 | vmw_write(dev_priv, SVGA_REG_TRACES, | ||
| 180 | dev_priv->traces_state); | ||
| 178 | 181 | ||
| 179 | mutex_unlock(&dev_priv->hw_mutex); | 182 | mutex_unlock(&dev_priv->hw_mutex); |
| 180 | vmw_fence_queue_takedown(&fifo->fence_queue); | 183 | vmw_fence_queue_takedown(&fifo->fence_queue); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df868..e882ba099f0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
| 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); |
| 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); |
| 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
| 901 | if (i == 0 && vmw_priv->num_displays == 1 && | ||
| 902 | save->width == 0 && save->height == 0) { | ||
| 903 | |||
| 904 | /* | ||
| 905 | * It should be fairly safe to assume that these | ||
| 906 | * values are uninitialized. | ||
| 907 | */ | ||
| 908 | |||
| 909 | save->width = vmw_priv->vga_width - save->pos_x; | ||
| 910 | save->height = vmw_priv->vga_height - save->pos_y; | ||
| 911 | } | ||
| 901 | } | 912 | } |
| 913 | |||
| 902 | return 0; | 914 | return 0; |
| 903 | } | 915 | } |
| 904 | 916 | ||
| @@ -984,3 +996,8 @@ out_unlock: | |||
| 984 | ttm_read_unlock(&vmaster->lock); | 996 | ttm_read_unlock(&vmaster->lock); |
| 985 | return ret; | 997 | return ret; |
| 986 | } | 998 | } |
| 999 | |||
| 1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | ||
| 1001 | { | ||
| 1002 | return 0; | ||
| 1003 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7083b1a24df3..11cb39e3accb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | 27 | ||
| 28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
| 29 | 29 | ||
| 30 | #define VMWGFX_LDU_NUM_DU 8 | ||
| 31 | |||
| 30 | #define vmw_crtc_to_ldu(x) \ | 32 | #define vmw_crtc_to_ldu(x) \ |
| 31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
| 32 | #define vmw_encoder_to_ldu(x) \ | 34 | #define vmw_encoder_to_ldu(x) \ |
| @@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
| 536 | 538 | ||
| 537 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | 539 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) |
| 538 | { | 540 | { |
| 541 | struct drm_device *dev = dev_priv->dev; | ||
| 542 | int i; | ||
| 543 | int ret; | ||
| 544 | |||
| 539 | if (dev_priv->ldu_priv) { | 545 | if (dev_priv->ldu_priv) { |
| 540 | DRM_INFO("ldu system already on\n"); | 546 | DRM_INFO("ldu system already on\n"); |
| 541 | return -EINVAL; | 547 | return -EINVAL; |
| @@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
| 553 | 559 | ||
| 554 | drm_mode_create_dirty_info_property(dev_priv->dev); | 560 | drm_mode_create_dirty_info_property(dev_priv->dev); |
| 555 | 561 | ||
| 556 | vmw_ldu_init(dev_priv, 0); | ||
| 557 | /* for old hardware without multimon only enable one display */ | ||
| 558 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 562 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
| 559 | vmw_ldu_init(dev_priv, 1); | 563 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) |
| 560 | vmw_ldu_init(dev_priv, 2); | 564 | vmw_ldu_init(dev_priv, i); |
| 561 | vmw_ldu_init(dev_priv, 3); | 565 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); |
| 562 | vmw_ldu_init(dev_priv, 4); | 566 | } else { |
| 563 | vmw_ldu_init(dev_priv, 5); | 567 | /* for old hardware without multimon only enable one display */ |
| 564 | vmw_ldu_init(dev_priv, 6); | 568 | vmw_ldu_init(dev_priv, 0); |
| 565 | vmw_ldu_init(dev_priv, 7); | 569 | ret = drm_vblank_init(dev, 1); |
| 566 | } | 570 | } |
| 567 | 571 | ||
| 568 | return 0; | 572 | return ret; |
| 569 | } | 573 | } |
| 570 | 574 | ||
| 571 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | 575 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) |
| 572 | { | 576 | { |
| 577 | struct drm_device *dev = dev_priv->dev; | ||
| 578 | |||
| 579 | drm_vblank_cleanup(dev); | ||
| 573 | if (!dev_priv->ldu_priv) | 580 | if (!dev_priv->ldu_priv) |
| 574 | return -ENOSYS; | 581 | return -ENOSYS; |
| 575 | 582 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5c..c8c40e9979db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 211 | cmd->body.cid = cpu_to_le32(res->id); | 211 | cmd->body.cid = cpu_to_le32(res->id); |
| 212 | 212 | ||
| 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| 214 | vmw_3d_resource_dec(dev_priv); | ||
| 214 | } | 215 | } |
| 215 | 216 | ||
| 216 | static int vmw_context_init(struct vmw_private *dev_priv, | 217 | static int vmw_context_init(struct vmw_private *dev_priv, |
| @@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
| 247 | cmd->body.cid = cpu_to_le32(res->id); | 248 | cmd->body.cid = cpu_to_le32(res->id); |
| 248 | 249 | ||
| 249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 250 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| 251 | (void) vmw_3d_resource_inc(dev_priv); | ||
| 250 | vmw_resource_activate(res, vmw_hw_context_destroy); | 252 | vmw_resource_activate(res, vmw_hw_context_destroy); |
| 251 | return 0; | 253 | return 0; |
| 252 | } | 254 | } |
| @@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
| 406 | cmd->body.sid = cpu_to_le32(res->id); | 408 | cmd->body.sid = cpu_to_le32(res->id); |
| 407 | 409 | ||
| 408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 410 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| 411 | vmw_3d_resource_dec(dev_priv); | ||
| 409 | } | 412 | } |
| 410 | 413 | ||
| 411 | void vmw_surface_res_free(struct vmw_resource *res) | 414 | void vmw_surface_res_free(struct vmw_resource *res) |
| @@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, | |||
| 473 | } | 476 | } |
| 474 | 477 | ||
| 475 | vmw_fifo_commit(dev_priv, submit_size); | 478 | vmw_fifo_commit(dev_priv, submit_size); |
| 479 | (void) vmw_3d_resource_inc(dev_priv); | ||
| 476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 480 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
| 477 | return 0; | 481 | return 0; |
| 478 | } | 482 | } |
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c index 4267a6fdc277..5925bdcd417d 100644 --- a/drivers/hid/hid-cando.c +++ b/drivers/hid/hid-cando.c | |||
| @@ -237,6 +237,8 @@ static const struct hid_device_id cando_devices[] = { | |||
| 237 | USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, | 237 | USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, |
| 238 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, | 238 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, |
| 239 | USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, | 239 | USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, |
| 240 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, | ||
| 241 | USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, | ||
| 240 | { } | 242 | { } |
| 241 | }; | 243 | }; |
| 242 | MODULE_DEVICE_TABLE(hid, cando_devices); | 244 | MODULE_DEVICE_TABLE(hid, cando_devices); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3f7292486024..a0dea3d1296e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1292,6 +1292,7 @@ static const struct hid_device_id hid_blacklist[] = { | |||
| 1292 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | 1292 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, |
| 1293 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, | 1293 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, |
| 1294 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, | 1294 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, |
| 1295 | { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, | ||
| 1295 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 1296 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
| 1296 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, | 1297 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, |
| 1297 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, | 1298 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 765a4f53eb5c..c5ae5f1545bd 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -134,6 +134,7 @@ | |||
| 134 | #define USB_VENDOR_ID_CANDO 0x2087 | 134 | #define USB_VENDOR_ID_CANDO 0x2087 |
| 135 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 | 135 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 |
| 136 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 | 136 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 |
| 137 | #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01 | ||
| 137 | 138 | ||
| 138 | #define USB_VENDOR_ID_CH 0x068e | 139 | #define USB_VENDOR_ID_CH 0x068e |
| 139 | #define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 | 140 | #define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 |
| @@ -503,6 +504,7 @@ | |||
| 503 | 504 | ||
| 504 | #define USB_VENDOR_ID_TURBOX 0x062a | 505 | #define USB_VENDOR_ID_TURBOX 0x062a |
| 505 | #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 | 506 | #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 |
| 507 | #define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100 | ||
| 506 | 508 | ||
| 507 | #define USB_VENDOR_ID_TWINHAN 0x6253 | 509 | #define USB_VENDOR_ID_TWINHAN 0x6253 |
| 508 | #define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100 | 510 | #define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100 |
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 47d70c523d93..a3866b5c0c43 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -109,6 +109,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t | |||
| 109 | int ret = 0; | 109 | int ret = 0; |
| 110 | 110 | ||
| 111 | mutex_lock(&minors_lock); | 111 | mutex_lock(&minors_lock); |
| 112 | |||
| 113 | if (!hidraw_table[minor]) { | ||
| 114 | ret = -ENODEV; | ||
| 115 | goto out; | ||
| 116 | } | ||
| 117 | |||
| 112 | dev = hidraw_table[minor]->hid; | 118 | dev = hidraw_table[minor]->hid; |
| 113 | 119 | ||
| 114 | if (!dev->hid_output_raw_report) { | 120 | if (!dev->hid_output_raw_report) { |
| @@ -244,6 +250,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, | |||
| 244 | 250 | ||
| 245 | mutex_lock(&minors_lock); | 251 | mutex_lock(&minors_lock); |
| 246 | dev = hidraw_table[minor]; | 252 | dev = hidraw_table[minor]; |
| 253 | if (!dev) { | ||
| 254 | ret = -ENODEV; | ||
| 255 | goto out; | ||
| 256 | } | ||
| 247 | 257 | ||
| 248 | switch (cmd) { | 258 | switch (cmd) { |
| 249 | case HIDIOCGRDESCSIZE: | 259 | case HIDIOCGRDESCSIZE: |
| @@ -317,6 +327,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, | |||
| 317 | 327 | ||
| 318 | ret = -ENOTTY; | 328 | ret = -ENOTTY; |
| 319 | } | 329 | } |
| 330 | out: | ||
| 320 | mutex_unlock(&minors_lock); | 331 | mutex_unlock(&minors_lock); |
| 321 | return ret; | 332 | return ret; |
| 322 | } | 333 | } |
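
The hidraw fix takes the minors lock and then verifies that the table slot is still populated before dereferencing it, because the device can disappear between open() and a later write() or ioctl(). A small standalone sketch of the same check-under-lock idea follows; device_table and lookup_device() are illustrative names only.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_MINORS 16

struct device { const char *name; };

static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device *device_table[MAX_MINORS];

/* Return the device's name, or -ENODEV if the slot was cleared by a disconnect. */
static int lookup_device(int minor, const char **name)
{
	int ret = 0;

	pthread_mutex_lock(&minors_lock);
	if (!device_table[minor]) {
		ret = -ENODEV;       /* slot emptied while we were not looking */
		goto out;
	}
	*name = device_table[minor]->name;
out:
	pthread_mutex_unlock(&minors_lock);
	return ret;
}

int main(void)
{
	const char *name = NULL;
	struct device dev = { "example" };

	device_table[3] = &dev;
	printf("minor 3: %d\n", lookup_device(3, &name));   /* 0 */
	printf("minor 4: %d\n", lookup_device(4, &name));   /* -ENODEV */
	return 0;
}
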
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 70da3181c8a0..f0260c699adb 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -36,6 +36,7 @@ static const struct hid_blacklist { | |||
| 36 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, | 36 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, |
| 37 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, | 37 | { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, |
| 38 | { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, | 38 | { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, |
| 39 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT }, | ||
| 39 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, | 40 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, |
| 40 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, | 41 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, |
| 41 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, | 42 | { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841ef44b9..75afb3b0e076 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
| @@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev; | |||
| 111 | /* Super-I/O Function prototypes */ | 111 | /* Super-I/O Function prototypes */ |
| 112 | static inline int superio_inb(int base, int reg); | 112 | static inline int superio_inb(int base, int reg); |
| 113 | static inline int superio_inw(int base, int reg); | 113 | static inline int superio_inw(int base, int reg); |
| 114 | static inline void superio_enter(int base); | 114 | static inline int superio_enter(int base); |
| 115 | static inline void superio_select(int base, int ld); | 115 | static inline void superio_select(int base, int ld); |
| 116 | static inline void superio_exit(int base); | 116 | static inline void superio_exit(int base); |
| 117 | 117 | ||
| @@ -861,11 +861,20 @@ static int superio_inw(int base, int reg) | |||
| 861 | return val; | 861 | return val; |
| 862 | } | 862 | } |
| 863 | 863 | ||
| 864 | static inline void superio_enter(int base) | 864 | static inline int superio_enter(int base) |
| 865 | { | 865 | { |
| 866 | /* Don't step on other drivers' I/O space by accident */ | ||
| 867 | if (!request_muxed_region(base, 2, DRVNAME)) { | ||
| 868 | printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", | ||
| 869 | base); | ||
| 870 | return -EBUSY; | ||
| 871 | } | ||
| 872 | |||
| 866 | /* according to the datasheet the key must be sent twice! */ | 873 | /* according to the datasheet the key must be sent twice! */ |
| 867 | outb(SIO_UNLOCK_KEY, base); | 874 | outb(SIO_UNLOCK_KEY, base); |
| 868 | outb(SIO_UNLOCK_KEY, base); | 875 | outb(SIO_UNLOCK_KEY, base); |
| 876 | |||
| 877 | return 0; | ||
| 869 | } | 878 | } |
| 870 | 879 | ||
| 871 | static inline void superio_select(int base, int ld) | 880 | static inline void superio_select(int base, int ld) |
| @@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld) | |||
| 877 | static inline void superio_exit(int base) | 886 | static inline void superio_exit(int base) |
| 878 | { | 887 | { |
| 879 | outb(SIO_LOCK_KEY, base); | 888 | outb(SIO_LOCK_KEY, base); |
| 889 | release_region(base, 2); | ||
| 880 | } | 890 | } |
| 881 | 891 | ||
| 882 | static inline int fan_from_reg(u16 reg) | 892 | static inline int fan_from_reg(u16 reg) |
| @@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev) | |||
| 2175 | static int __init f71882fg_find(int sioaddr, unsigned short *address, | 2185 | static int __init f71882fg_find(int sioaddr, unsigned short *address, |
| 2176 | struct f71882fg_sio_data *sio_data) | 2186 | struct f71882fg_sio_data *sio_data) |
| 2177 | { | 2187 | { |
| 2178 | int err = -ENODEV; | ||
| 2179 | u16 devid; | 2188 | u16 devid; |
| 2180 | 2189 | int err = superio_enter(sioaddr); | |
| 2181 | /* Don't step on other drivers' I/O space by accident */ | 2190 | if (err) |
| 2182 | if (!request_region(sioaddr, 2, DRVNAME)) { | 2191 | return err; |
| 2183 | printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", | ||
| 2184 | (int)sioaddr); | ||
| 2185 | return -EBUSY; | ||
| 2186 | } | ||
| 2187 | |||
| 2188 | superio_enter(sioaddr); | ||
| 2189 | 2192 | ||
| 2190 | devid = superio_inw(sioaddr, SIO_REG_MANID); | 2193 | devid = superio_inw(sioaddr, SIO_REG_MANID); |
| 2191 | if (devid != SIO_FINTEK_ID) { | 2194 | if (devid != SIO_FINTEK_ID) { |
| 2192 | pr_debug(DRVNAME ": Not a Fintek device\n"); | 2195 | pr_debug(DRVNAME ": Not a Fintek device\n"); |
| 2196 | err = -ENODEV; | ||
| 2193 | goto exit; | 2197 | goto exit; |
| 2194 | } | 2198 | } |
| 2195 | 2199 | ||
| @@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
| 2213 | default: | 2217 | default: |
| 2214 | printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", | 2218 | printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", |
| 2215 | (unsigned int)devid); | 2219 | (unsigned int)devid); |
| 2220 | err = -ENODEV; | ||
| 2216 | goto exit; | 2221 | goto exit; |
| 2217 | } | 2222 | } |
| 2218 | 2223 | ||
| @@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
| 2223 | 2228 | ||
| 2224 | if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { | 2229 | if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { |
| 2225 | printk(KERN_WARNING DRVNAME ": Device not activated\n"); | 2230 | printk(KERN_WARNING DRVNAME ": Device not activated\n"); |
| 2231 | err = -ENODEV; | ||
| 2226 | goto exit; | 2232 | goto exit; |
| 2227 | } | 2233 | } |
| 2228 | 2234 | ||
| 2229 | *address = superio_inw(sioaddr, SIO_REG_ADDR); | 2235 | *address = superio_inw(sioaddr, SIO_REG_ADDR); |
| 2230 | if (*address == 0) { | 2236 | if (*address == 0) { |
| 2231 | printk(KERN_WARNING DRVNAME ": Base address not set\n"); | 2237 | printk(KERN_WARNING DRVNAME ": Base address not set\n"); |
| 2238 | err = -ENODEV; | ||
| 2232 | goto exit; | 2239 | goto exit; |
| 2233 | } | 2240 | } |
| 2234 | *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ | 2241 | *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ |
| @@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
| 2239 | (int)superio_inb(sioaddr, SIO_REG_DEVREV)); | 2246 | (int)superio_inb(sioaddr, SIO_REG_DEVREV)); |
| 2240 | exit: | 2247 | exit: |
| 2241 | superio_exit(sioaddr); | 2248 | superio_exit(sioaddr); |
| 2242 | release_region(sioaddr, 2); | ||
| 2243 | return err; | 2249 | return err; |
| 2244 | } | 2250 | } |
| 2245 | 2251 | ||
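
Moving the region request into superio_enter() pairs the resource claim with the unlock-key write and the release with superio_exit(), so every caller that successfully enters also owns the region and every exit path gives it back. A hedged userspace sketch of that enter/exit pairing is below; claim_region() and release_region_stub() merely stand in for request_muxed_region()/release_region().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool region_busy;

static int claim_region(void)        { if (region_busy) return -EBUSY; region_busy = true; return 0; }
static void release_region_stub(void) { region_busy = false; }

/* Enter: claim the region first, then send the unlock key (here just a message). */
static int superio_enter_sketch(void)
{
	int err = claim_region();

	if (err) {
		fprintf(stderr, "I/O region already in use\n");
		return err;
	}
	printf("unlock key sent twice\n");
	return 0;
}

/* Exit: send the lock key, then give the region back. */
static void superio_exit_sketch(void)
{
	printf("lock key sent\n");
	release_region_stub();
}

int main(void)
{
	if (superio_enter_sketch())
		return 1;
	/* ... probe registers here ... */
	superio_exit_sketch();
	return 0;
}
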
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index f7bd2613cecc..f2de3be35df3 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c | |||
| @@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev, | |||
| 677 | dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", | 677 | dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", |
| 678 | cpm->adap.name); | 678 | cpm->adap.name); |
| 679 | 679 | ||
| 680 | /* | ||
| 681 | * register OF I2C devices | ||
| 682 | */ | ||
| 683 | of_i2c_register_devices(&cpm->adap); | ||
| 684 | |||
| 680 | return 0; | 685 | return 0; |
| 681 | out_shut: | 686 | out_shut: |
| 682 | cpm_i2c_shutdown(cpm); | 687 | cpm_i2c_shutdown(cpm); |
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 2222c87876b9..5795c8398c7c 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c | |||
| @@ -331,21 +331,16 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) | |||
| 331 | INIT_COMPLETION(dev->cmd_complete); | 331 | INIT_COMPLETION(dev->cmd_complete); |
| 332 | dev->cmd_err = 0; | 332 | dev->cmd_err = 0; |
| 333 | 333 | ||
| 334 | /* Take I2C out of reset, configure it as master and set the | 334 | /* Take I2C out of reset and configure it as master */ |
| 335 | * start bit */ | 335 | flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST; |
| 336 | flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT; | ||
| 337 | 336 | ||
| 338 | /* if the slave address is ten bit address, enable XA bit */ | 337 | /* if the slave address is ten bit address, enable XA bit */ |
| 339 | if (msg->flags & I2C_M_TEN) | 338 | if (msg->flags & I2C_M_TEN) |
| 340 | flag |= DAVINCI_I2C_MDR_XA; | 339 | flag |= DAVINCI_I2C_MDR_XA; |
| 341 | if (!(msg->flags & I2C_M_RD)) | 340 | if (!(msg->flags & I2C_M_RD)) |
| 342 | flag |= DAVINCI_I2C_MDR_TRX; | 341 | flag |= DAVINCI_I2C_MDR_TRX; |
| 343 | if (stop) | 342 | if (msg->len == 0) |
| 344 | flag |= DAVINCI_I2C_MDR_STP; | ||
| 345 | if (msg->len == 0) { | ||
| 346 | flag |= DAVINCI_I2C_MDR_RM; | 343 | flag |= DAVINCI_I2C_MDR_RM; |
| 347 | flag &= ~DAVINCI_I2C_MDR_STP; | ||
| 348 | } | ||
| 349 | 344 | ||
| 350 | /* Enable receive or transmit interrupts */ | 345 | /* Enable receive or transmit interrupts */ |
| 351 | w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); | 346 | w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); |
| @@ -357,7 +352,11 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) | |||
| 357 | 352 | ||
| 358 | dev->terminate = 0; | 353 | dev->terminate = 0; |
| 359 | 354 | ||
| 360 | /* write the data into mode register */ | 355 | /* |
| 356 | * Write mode register first as needed for correct behaviour | ||
| 357 | * on OMAP-L138, but don't set STT yet to avoid a race with XRDY | ||
| 358 | * occurring before we have loaded DXR | ||
| 359 | */ | ||
| 361 | davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); | 360 | davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); |
| 362 | 361 | ||
| 363 | /* | 362 | /* |
| @@ -365,12 +364,19 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) | |||
| 365 | * because transmit-data-ready interrupt can come before | 364 | * because transmit-data-ready interrupt can come before |
| 366 | * NACK-interrupt during sending of previous message and | 365 | * NACK-interrupt during sending of previous message and |
| 367 | * ICDXR may have wrong data | 366 | * ICDXR may have wrong data |
| 367 | * It also saves us one interrupt, which is slightly faster | ||
| 368 | */ | 368 | */ |
| 369 | if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { | 369 | if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { |
| 370 | davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); | 370 | davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); |
| 371 | dev->buf_len--; | 371 | dev->buf_len--; |
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | /* Set STT to begin transmit now that DXR is loaded */ ||
| 375 | flag |= DAVINCI_I2C_MDR_STT; | ||
| 376 | if (stop && msg->len != 0) | ||
| 377 | flag |= DAVINCI_I2C_MDR_STP; | ||
| 378 | davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); | ||
| 379 | |||
| 374 | r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, | 380 | r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, |
| 375 | dev->adapter.timeout); | 381 | dev->adapter.timeout); |
| 376 | if (r == 0) { | 382 | if (r == 0) { |
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 43ca32fddde2..89eedf45d30e 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c | |||
| @@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev, | |||
| 761 | dev_info(&ofdev->dev, "using %s mode\n", | 761 | dev_info(&ofdev->dev, "using %s mode\n", |
| 762 | dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); | 762 | dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); |
| 763 | 763 | ||
| 764 | /* Now register all the child nodes */ | ||
| 765 | of_i2c_register_devices(adap); | ||
| 766 | |||
| 764 | return 0; | 767 | return 0; |
| 765 | 768 | ||
| 766 | error_cleanup: | 769 | error_cleanup: |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index d1ff9408dc1f..4c2a62b75b5c 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -159,15 +159,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) | |||
| 159 | 159 | ||
| 160 | static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) | 160 | static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) |
| 161 | { | 161 | { |
| 162 | int result; | 162 | wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); |
| 163 | |||
| 164 | result = wait_event_interruptible_timeout(i2c_imx->queue, | ||
| 165 | i2c_imx->i2csr & I2SR_IIF, HZ / 10); | ||
| 166 | 163 | ||
| 167 | if (unlikely(result < 0)) { | 164 | if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { |
| 168 | dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__); | ||
| 169 | return result; | ||
| 170 | } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { | ||
| 171 | dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); | 165 | dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); |
| 172 | return -ETIMEDOUT; | 166 | return -ETIMEDOUT; |
| 173 | } | 167 | } |
| @@ -295,7 +289,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) | |||
| 295 | i2c_imx->i2csr = temp; | 289 | i2c_imx->i2csr = temp; |
| 296 | temp &= ~I2SR_IIF; | 290 | temp &= ~I2SR_IIF; |
| 297 | writeb(temp, i2c_imx->base + IMX_I2C_I2SR); | 291 | writeb(temp, i2c_imx->base + IMX_I2C_I2SR); |
| 298 | wake_up_interruptible(&i2c_imx->queue); | 292 | wake_up(&i2c_imx->queue); |
| 299 | return IRQ_HANDLED; | 293 | return IRQ_HANDLED; |
| 300 | } | 294 | } |
| 301 | 295 | ||
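
The i2c-imx change drops the interruptible wait (a signal must not abort a transfer mid-byte) and decides success purely by re-checking the interrupt flag after the wait returns. A rough userspace analogue using a condition variable with an absolute deadline is shown below; done_flag and the 100 ms budget are illustrative, and no "interrupt handler" sets the flag in this sketch, so it reports a timeout.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done_flag;          /* would be set by the interrupt handler */

/* Wait up to 100 ms; the verdict comes from the flag itself, not the wait status. */
static int wait_for_completion_sketch(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 100 * 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec += 1;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done_flag) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline) == ETIMEDOUT)
			break;
	}
	pthread_mutex_unlock(&lock);

	return done_flag ? 0 : -ETIMEDOUT;   /* re-check the flag, as the driver now does */
}

int main(void)
{
	printf("result: %d\n", wait_for_completion_sketch());
	return 0;
}
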
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index a1c419a716af..b74e6dc6886c 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c | |||
| @@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op, | |||
| 632 | dev_err(i2c->dev, "failed to add adapter\n"); | 632 | dev_err(i2c->dev, "failed to add adapter\n"); |
| 633 | goto fail_add; | 633 | goto fail_add; |
| 634 | } | 634 | } |
| 635 | of_i2c_register_devices(&i2c->adap); | ||
| 635 | 636 | ||
| 636 | return result; | 637 | return result; |
| 637 | 638 | ||
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index 0e9f85d0a835..56dbe54e8811 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
| @@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c) | |||
| 218 | return result; | 218 | return result; |
| 219 | } else if (result == 0) { | 219 | } else if (result == 0) { |
| 220 | dev_dbg(i2c->dev, "%s: timeout\n", __func__); | 220 | dev_dbg(i2c->dev, "%s: timeout\n", __func__); |
| 221 | result = -ETIMEDOUT; | 221 | return -ETIMEDOUT; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | return 0; | 224 | return 0; |
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index bbd77603a417..29933f87d8fa 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c | |||
| @@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg) | |||
| 71 | 71 | ||
| 72 | static int pca_isa_waitforcompletion(void *pd) | 72 | static int pca_isa_waitforcompletion(void *pd) |
| 73 | { | 73 | { |
| 74 | long ret = ~0; | ||
| 75 | unsigned long timeout; | 74 | unsigned long timeout; |
| 75 | long ret; | ||
| 76 | 76 | ||
| 77 | if (irq > -1) { | 77 | if (irq > -1) { |
| 78 | ret = wait_event_timeout(pca_wait, | 78 | ret = wait_event_timeout(pca_wait, |
| @@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd) | |||
| 81 | } else { | 81 | } else { |
| 82 | /* Do polling */ | 82 | /* Do polling */ |
| 83 | timeout = jiffies + pca_isa_ops.timeout; | 83 | timeout = jiffies + pca_isa_ops.timeout; |
| 84 | while (((pca_isa_readbyte(pd, I2C_PCA_CON) | 84 | do { |
| 85 | & I2C_PCA_CON_SI) == 0) | 85 | ret = time_before(jiffies, timeout); |
| 86 | && (ret = time_before(jiffies, timeout))) | 86 | if (pca_isa_readbyte(pd, I2C_PCA_CON) |
| 87 | & I2C_PCA_CON_SI) | ||
| 88 | break; | ||
| 87 | udelay(100); | 89 | udelay(100); |
| 90 | } while (ret); | ||
| 88 | } | 91 | } |
| 92 | |||
| 89 | return ret > 0; | 93 | return ret > 0; |
| 90 | } | 94 | } |
| 91 | 95 | ||
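
The reworked polling loop samples the status at least once per iteration and records whether the deadline had passed before the final check, so a completion that lands right at the timeout boundary is still reported consistently. A small standalone sketch of the same do/while structure follows; ready(), the 1 ms deadline, and the 100 us pause are illustrative values, not the driver's constants.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool ready(void)
{
	/* Stand-in for reading I2C_PCA_CON and testing the SI bit. */
	static int polls;
	return ++polls >= 3;
}

static bool before(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec < b->tv_sec ||
	       (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

/* Poll until ready() or until ~1 ms has elapsed; returns true on success. */
static bool wait_for_completion_polled(void)
{
	struct timespec now, deadline;
	struct timespec pause = { 0, 100 * 1000 };   /* 100 us between polls */
	bool in_time;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec += 1;
		deadline.tv_nsec -= 1000000000L;
	}

	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
		in_time = before(&now, &deadline);   /* evaluated before the status test */
		if (ready())
			break;
		nanosleep(&pause, NULL);
	} while (in_time);

	return in_time;
}

int main(void)
{
	printf("%s\n", wait_for_completion_polled() ? "completed" : "timed out");
	return 0;
}
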
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index ef5c78487eb7..5f6d7f89e225 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c | |||
| @@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) | |||
| 80 | static int i2c_pca_pf_waitforcompletion(void *pd) | 80 | static int i2c_pca_pf_waitforcompletion(void *pd) |
| 81 | { | 81 | { |
| 82 | struct i2c_pca_pf_data *i2c = pd; | 82 | struct i2c_pca_pf_data *i2c = pd; |
| 83 | long ret = ~0; | ||
| 84 | unsigned long timeout; | 83 | unsigned long timeout; |
| 84 | long ret; | ||
| 85 | 85 | ||
| 86 | if (i2c->irq) { | 86 | if (i2c->irq) { |
| 87 | ret = wait_event_timeout(i2c->wait, | 87 | ret = wait_event_timeout(i2c->wait, |
| @@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd) | |||
| 90 | } else { | 90 | } else { |
| 91 | /* Do polling */ | 91 | /* Do polling */ |
| 92 | timeout = jiffies + i2c->adap.timeout; | 92 | timeout = jiffies + i2c->adap.timeout; |
| 93 | while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) | 93 | do { |
| 94 | & I2C_PCA_CON_SI) == 0) | 94 | ret = time_before(jiffies, timeout); |
| 95 | && (ret = time_before(jiffies, timeout))) | 95 | if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) |
| 96 | & I2C_PCA_CON_SI) | ||
| 97 | break; | ||
| 96 | udelay(100); | 98 | udelay(100); |
| 99 | } while (ret); | ||
| 97 | } | 100 | } |
| 98 | 101 | ||
| 99 | return ret > 0; | 102 | return ret > 0; |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 72902e0bbfa7..bf831bf81587 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
| @@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got) | |||
| 662 | unsigned long sda_delay; | 662 | unsigned long sda_delay; |
| 663 | 663 | ||
| 664 | if (pdata->sda_delay) { | 664 | if (pdata->sda_delay) { |
| 665 | sda_delay = (freq / 1000) * pdata->sda_delay; | 665 | sda_delay = clkin * pdata->sda_delay; |
| 666 | sda_delay /= 1000000; | 666 | sda_delay = DIV_ROUND_UP(sda_delay, 1000000); |
| 667 | sda_delay = DIV_ROUND_UP(sda_delay, 5); | 667 | sda_delay = DIV_ROUND_UP(sda_delay, 5); |
| 668 | if (sda_delay > 3) | 668 | if (sda_delay > 3) |
| 669 | sda_delay = 3; | 669 | sda_delay = 3; |
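
The s3c2410 fix derives the SDA hold delay from the input clock rather than the bus frequency, rounding up at each step before clamping to the 2-bit line-control field. A worked example of the arithmetic is shown below, assuming clkin is in kHz and sda_delay is in nanoseconds as in the driver's surrounding code; the 66 MHz clock and 100 ns hold time are made-up sample values.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clkin = 66000;        /* input clock in kHz (66 MHz), assumed */
	unsigned long sda_delay_ns = 100;   /* board-specified SDA hold time in ns */
	unsigned long sda_delay;

	sda_delay = clkin * sda_delay_ns;               /* 6,600,000 */
	sda_delay = DIV_ROUND_UP(sda_delay, 1000000);   /* 7 input clocks */
	sda_delay = DIV_ROUND_UP(sda_delay, 5);         /* 2: the field counts in 5-clock units */
	if (sda_delay > 3)
		sda_delay = 3;

	printf("SDA delay field value: %lu\n", sda_delay);
	return 0;
}
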
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 6649176de940..bea4c5021d26 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
| 33 | #include <linux/idr.h> | 33 | #include <linux/idr.h> |
| 34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
| 35 | #include <linux/of_i2c.h> | ||
| 36 | #include <linux/of_device.h> | 35 | #include <linux/of_device.h> |
| 37 | #include <linux/completion.h> | 36 | #include <linux/completion.h> |
| 38 | #include <linux/hardirq.h> | 37 | #include <linux/hardirq.h> |
| @@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev) | |||
| 197 | { | 196 | { |
| 198 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 197 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 199 | 198 | ||
| 200 | if (pm_runtime_suspended(dev)) | 199 | if (pm) { |
| 201 | return 0; | 200 | if (pm_runtime_suspended(dev)) |
| 202 | 201 | return 0; | |
| 203 | if (pm) | 202 | else |
| 204 | return pm->suspend ? pm->suspend(dev) : 0; | 203 | return pm->suspend ? pm->suspend(dev) : 0; |
| 204 | } | ||
| 205 | 205 | ||
| 206 | return i2c_legacy_suspend(dev, PMSG_SUSPEND); | 206 | return i2c_legacy_suspend(dev, PMSG_SUSPEND); |
| 207 | } | 207 | } |
| @@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev) | |||
| 216 | else | 216 | else |
| 217 | ret = i2c_legacy_resume(dev); | 217 | ret = i2c_legacy_resume(dev); |
| 218 | 218 | ||
| 219 | if (!ret) { | ||
| 220 | pm_runtime_disable(dev); | ||
| 221 | pm_runtime_set_active(dev); | ||
| 222 | pm_runtime_enable(dev); | ||
| 223 | } | ||
| 224 | |||
| 225 | return ret; | 219 | return ret; |
| 226 | } | 220 | } |
| 227 | 221 | ||
| @@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev) | |||
| 229 | { | 223 | { |
| 230 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 224 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 231 | 225 | ||
| 232 | if (pm_runtime_suspended(dev)) | 226 | if (pm) { |
| 233 | return 0; | 227 | if (pm_runtime_suspended(dev)) |
| 234 | 228 | return 0; | |
| 235 | if (pm) | 229 | else |
| 236 | return pm->freeze ? pm->freeze(dev) : 0; | 230 | return pm->freeze ? pm->freeze(dev) : 0; |
| 231 | } | ||
| 237 | 232 | ||
| 238 | return i2c_legacy_suspend(dev, PMSG_FREEZE); | 233 | return i2c_legacy_suspend(dev, PMSG_FREEZE); |
| 239 | } | 234 | } |
| @@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev) | |||
| 242 | { | 237 | { |
| 243 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 238 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 244 | 239 | ||
| 245 | if (pm_runtime_suspended(dev)) | 240 | if (pm) { |
| 246 | return 0; | 241 | if (pm_runtime_suspended(dev)) |
| 247 | 242 | return 0; | |
| 248 | if (pm) | 243 | else |
| 249 | return pm->thaw ? pm->thaw(dev) : 0; | 244 | return pm->thaw ? pm->thaw(dev) : 0; |
| 245 | } | ||
| 250 | 246 | ||
| 251 | return i2c_legacy_resume(dev); | 247 | return i2c_legacy_resume(dev); |
| 252 | } | 248 | } |
| @@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev) | |||
| 255 | { | 251 | { |
| 256 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 252 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 257 | 253 | ||
| 258 | if (pm_runtime_suspended(dev)) | 254 | if (pm) { |
| 259 | return 0; | 255 | if (pm_runtime_suspended(dev)) |
| 260 | 256 | return 0; | |
| 261 | if (pm) | 257 | else |
| 262 | return pm->poweroff ? pm->poweroff(dev) : 0; | 258 | return pm->poweroff ? pm->poweroff(dev) : 0; |
| 259 | } | ||
| 263 | 260 | ||
| 264 | return i2c_legacy_suspend(dev, PMSG_HIBERNATE); | 261 | return i2c_legacy_suspend(dev, PMSG_HIBERNATE); |
| 265 | } | 262 | } |
| @@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap) | |||
| 876 | if (adap->nr < __i2c_first_dynamic_bus_num) | 873 | if (adap->nr < __i2c_first_dynamic_bus_num) |
| 877 | i2c_scan_static_board_info(adap); | 874 | i2c_scan_static_board_info(adap); |
| 878 | 875 | ||
| 879 | /* Register devices from the device tree */ | ||
| 880 | of_i2c_register_devices(adap); | ||
| 881 | |||
| 882 | /* Notify drivers */ | 876 | /* Notify drivers */ |
| 883 | mutex_lock(&core_lock); | 877 | mutex_lock(&core_lock); |
| 884 | bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter); | 878 | bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter); |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index a10152bb1427..c37ef64d1465 100755..100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -83,7 +83,7 @@ static unsigned int mwait_substates; | |||
| 83 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ | 83 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ |
| 84 | static unsigned int lapic_timer_reliable_states; | 84 | static unsigned int lapic_timer_reliable_states; |
| 85 | 85 | ||
| 86 | static struct cpuidle_device *intel_idle_cpuidle_devices; | 86 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
| 87 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | 87 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); |
| 88 | 88 | ||
| 89 | static struct cpuidle_state *cpuidle_state_table; | 89 | static struct cpuidle_state *cpuidle_state_table; |
| @@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 108 | .name = "NHM-C3", | 108 | .name = "NHM-C3", |
| 109 | .desc = "MWAIT 0x10", | 109 | .desc = "MWAIT 0x10", |
| 110 | .driver_data = (void *) 0x10, | 110 | .driver_data = (void *) 0x10, |
| 111 | .flags = CPUIDLE_FLAG_TIME_VALID, | 111 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 112 | .exit_latency = 20, | 112 | .exit_latency = 20, |
| 113 | .power_usage = 500, | 113 | .power_usage = 500, |
| 114 | .target_residency = 80, | 114 | .target_residency = 80, |
| @@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 117 | .name = "NHM-C6", | 117 | .name = "NHM-C6", |
| 118 | .desc = "MWAIT 0x20", | 118 | .desc = "MWAIT 0x20", |
| 119 | .driver_data = (void *) 0x20, | 119 | .driver_data = (void *) 0x20, |
| 120 | .flags = CPUIDLE_FLAG_TIME_VALID, | 120 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 121 | .exit_latency = 200, | 121 | .exit_latency = 200, |
| 122 | .power_usage = 350, | 122 | .power_usage = 350, |
| 123 | .target_residency = 800, | 123 | .target_residency = 800, |
| @@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 149 | .name = "ATM-C4", | 149 | .name = "ATM-C4", |
| 150 | .desc = "MWAIT 0x30", | 150 | .desc = "MWAIT 0x30", |
| 151 | .driver_data = (void *) 0x30, | 151 | .driver_data = (void *) 0x30, |
| 152 | .flags = CPUIDLE_FLAG_TIME_VALID, | 152 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 153 | .exit_latency = 100, | 153 | .exit_latency = 100, |
| 154 | .power_usage = 250, | 154 | .power_usage = 250, |
| 155 | .target_residency = 400, | 155 | .target_residency = 400, |
| @@ -157,13 +157,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 157 | { /* MWAIT C5 */ }, | 157 | { /* MWAIT C5 */ }, |
| 158 | { /* MWAIT C6 */ | 158 | { /* MWAIT C6 */ |
| 159 | .name = "ATM-C6", | 159 | .name = "ATM-C6", |
| 160 | .desc = "MWAIT 0x40", | 160 | .desc = "MWAIT 0x52", |
| 161 | .driver_data = (void *) 0x40, | 161 | .driver_data = (void *) 0x52, |
| 162 | .flags = CPUIDLE_FLAG_TIME_VALID, | 162 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 163 | .exit_latency = 200, | 163 | .exit_latency = 140, |
| 164 | .power_usage = 150, | 164 | .power_usage = 150, |
| 165 | .target_residency = 800, | 165 | .target_residency = 560, |
| 166 | .enter = NULL }, /* disabled */ | 166 | .enter = &intel_idle }, |
| 167 | }; | 167 | }; |
| 168 | 168 | ||
| 169 | /** | 169 | /** |
| @@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | |||
| 185 | 185 | ||
| 186 | local_irq_disable(); | 186 | local_irq_disable(); |
| 187 | 187 | ||
| 188 | /* | ||
| 189 | * If the state flag indicates that the TLB will be flushed or if this | ||
| 190 | * is the deepest c-state supported, do a voluntary leave mm to avoid | ||
| 191 | * costly and mostly unnecessary wakeups for flushing the user TLBs | ||
| 192 | * associated with the active mm. | ||
| 193 | */ | ||
| 194 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED || | ||
| 195 | (&dev->states[dev->state_count - 1] == state)) | ||
| 196 | leave_mm(cpu); | ||
| 197 | |||
| 188 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 198 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
| 189 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 199 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
| 190 | 200 | ||
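Besides the C-state table updates and the leave_mm() call added above, the intel_idle hunk annotates the cpuidle device pointer as __percpu, which lets sparse verify that it is only dereferenced through the per-CPU accessors. A small, generic sketch of how such a pointer is allocated and used follows; the struct and variable names are illustrative, not the driver's.

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct my_state {
            int counter;
    };

    static struct my_state __percpu *states;

    static int __init my_setup(void)
    {
            int cpu;

            states = alloc_percpu(struct my_state);   /* one instance per CPU */
            if (!states)
                    return -ENOMEM;

            for_each_possible_cpu(cpu)
                    per_cpu_ptr(states, cpu)->counter = 0;   /* per-CPU access */

            return 0;
    }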
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index c908c5f83645..9ddafc30f432 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
| @@ -669,6 +669,9 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, | |||
| 669 | 669 | ||
| 670 | if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { | 670 | if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { |
| 671 | 671 | ||
| 672 | if (!dev->absinfo) | ||
| 673 | return -EINVAL; | ||
| 674 | |||
| 672 | t = _IOC_NR(cmd) & ABS_MAX; | 675 | t = _IOC_NR(cmd) & ABS_MAX; |
| 673 | abs = dev->absinfo[t]; | 676 | abs = dev->absinfo[t]; |
| 674 | 677 | ||
| @@ -680,10 +683,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, | |||
| 680 | } | 683 | } |
| 681 | } | 684 | } |
| 682 | 685 | ||
| 683 | if (_IOC_DIR(cmd) == _IOC_READ) { | 686 | if (_IOC_DIR(cmd) == _IOC_WRITE) { |
| 684 | 687 | ||
| 685 | if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { | 688 | if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { |
| 686 | 689 | ||
| 690 | if (!dev->absinfo) | ||
| 691 | return -EINVAL; | ||
| 692 | |||
| 687 | t = _IOC_NR(cmd) & ABS_MAX; | 693 | t = _IOC_NR(cmd) & ABS_MAX; |
| 688 | 694 | ||
| 689 | if (copy_from_user(&abs, p, min_t(size_t, | 695 | if (copy_from_user(&abs, p, min_t(size_t, |
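Two things happen in the evdev hunks above: the EVIOCGABS/EVIOCSABS handlers now bail out with -EINVAL when dev->absinfo was never allocated, and the EVIOCSABS branch is matched under _IOC_WRITE instead of _IOC_READ, since a "set" ioctl encodes a user-to-kernel copy in its direction bits. A minimal sketch of decoding those bits, using made-up command names:

    #include <linux/ioctl.h>

    struct my_args {
            int value;
    };

    /* Hypothetical commands, only to show the encoded direction. */
    #define MYIOCG _IOR('M', 0x01, struct my_args)   /* kernel -> user  */
    #define MYIOCS _IOW('M', 0x01, struct my_args)   /* user   -> kernel */

    static int my_direction(unsigned int cmd)
    {
            if (_IOC_DIR(cmd) == _IOC_READ)
                    return 0;        /* "get" style, e.g. MYIOCG */
            if (_IOC_DIR(cmd) == _IOC_WRITE)
                    return 1;        /* "set" style, e.g. MYIOCS */
            return -1;
    }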
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index d85bd8a7967d..22239e988498 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
| @@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, | |||
| 483 | 483 | ||
| 484 | memcpy(joydev->abspam, abspam, len); | 484 | memcpy(joydev->abspam, abspam, len); |
| 485 | 485 | ||
| 486 | for (i = 0; i < joydev->nabs; i++) | ||
| 487 | joydev->absmap[joydev->abspam[i]] = i; | ||
| 488 | |||
| 486 | out: | 489 | out: |
| 487 | kfree(abspam); | 490 | kfree(abspam); |
| 488 | return retval; | 491 | return retval; |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 0d4266a533a5..360698553eb5 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
| @@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu | |||
| 404 | retval = uinput_validate_absbits(dev); | 404 | retval = uinput_validate_absbits(dev); |
| 405 | if (retval < 0) | 405 | if (retval < 0) |
| 406 | goto exit; | 406 | goto exit; |
| 407 | if (test_bit(ABS_MT_SLOT, dev->absbit)) { | ||
| 408 | int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; | ||
| 409 | input_mt_create_slots(dev, nslot); | ||
| 410 | input_set_events_per_packet(dev, 6 * nslot); | ||
| 411 | } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { | ||
| 412 | input_set_events_per_packet(dev, 60); | ||
| 413 | } | ||
| 407 | } | 414 | } |
| 408 | 415 | ||
| 409 | udev->state = UIST_SETUP_COMPLETE; | 416 | udev->state = UIST_SETUP_COMPLETE; |
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 42ba3691d908..b35876ee6908 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
| @@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb) | |||
| 103 | static int wacom_open(struct input_dev *dev) | 103 | static int wacom_open(struct input_dev *dev) |
| 104 | { | 104 | { |
| 105 | struct wacom *wacom = input_get_drvdata(dev); | 105 | struct wacom *wacom = input_get_drvdata(dev); |
| 106 | int retval = 0; | ||
| 106 | 107 | ||
| 107 | mutex_lock(&wacom->lock); | 108 | if (usb_autopm_get_interface(wacom->intf) < 0) |
| 108 | |||
| 109 | wacom->irq->dev = wacom->usbdev; | ||
| 110 | |||
| 111 | if (usb_autopm_get_interface(wacom->intf) < 0) { | ||
| 112 | mutex_unlock(&wacom->lock); | ||
| 113 | return -EIO; | 109 | return -EIO; |
| 114 | } | 110 | |
| 111 | mutex_lock(&wacom->lock); | ||
| 115 | 112 | ||
| 116 | if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { | 113 | if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { |
| 117 | usb_autopm_put_interface(wacom->intf); | 114 | retval = -EIO; |
| 118 | mutex_unlock(&wacom->lock); | 115 | goto out; |
| 119 | return -EIO; | ||
| 120 | } | 116 | } |
| 121 | 117 | ||
| 122 | wacom->open = true; | 118 | wacom->open = true; |
| 123 | wacom->intf->needs_remote_wakeup = 1; | 119 | wacom->intf->needs_remote_wakeup = 1; |
| 124 | 120 | ||
| 121 | out: | ||
| 125 | mutex_unlock(&wacom->lock); | 122 | mutex_unlock(&wacom->lock); |
| 126 | return 0; | 123 | if (retval) |
| 124 | usb_autopm_put_interface(wacom->intf); | ||
| 125 | return retval; | ||
| 127 | } | 126 | } |
| 128 | 127 | ||
| 129 | static void wacom_close(struct input_dev *dev) | 128 | static void wacom_close(struct input_dev *dev) |
| @@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev) | |||
| 135 | wacom->open = false; | 134 | wacom->open = false; |
| 136 | wacom->intf->needs_remote_wakeup = 0; | 135 | wacom->intf->needs_remote_wakeup = 0; |
| 137 | mutex_unlock(&wacom->lock); | 136 | mutex_unlock(&wacom->lock); |
| 137 | |||
| 138 | usb_autopm_put_interface(wacom->intf); | ||
| 138 | } | 139 | } |
| 139 | 140 | ||
| 140 | static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc, | 141 | static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc, |
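The reworked wacom_open() above takes the USB autosuspend reference before grabbing the device mutex and releases it again on the failure path, while wacom_close() drops the reference taken at open time. A condensed sketch of that get/put pairing follows; struct my_dev and my_start_io() are stand-ins, not wacom symbols.

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/usb.h>

    struct my_dev {
            struct usb_interface *intf;
            struct mutex lock;
    };

    static int my_start_io(struct my_dev *d)
    {
            return 0;        /* placeholder for URB submission */
    }

    static int my_open(struct my_dev *d)
    {
            int retval = 0;

            if (usb_autopm_get_interface(d->intf) < 0)   /* resume + hold ref */
                    return -EIO;

            mutex_lock(&d->lock);
            if (my_start_io(d))
                    retval = -EIO;
            mutex_unlock(&d->lock);

            if (retval)
                    usb_autopm_put_interface(d->intf);   /* drop ref on failure */
            return retval;
    }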
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 6e29badb969e..47fd7a041c52 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom) | |||
| 442 | /* general pen packet */ | 442 | /* general pen packet */ |
| 443 | if ((data[1] & 0xb8) == 0xa0) { | 443 | if ((data[1] & 0xb8) == 0xa0) { |
| 444 | t = (data[6] << 2) | ((data[7] >> 6) & 3); | 444 | t = (data[6] << 2) | ((data[7] >> 6) & 3); |
| 445 | if (features->type >= INTUOS4S && features->type <= INTUOS4L) | 445 | if ((features->type >= INTUOS4S && features->type <= INTUOS4L) || |
| 446 | features->type == WACOM_21UX2) { | ||
| 446 | t = (t << 1) | (data[1] & 1); | 447 | t = (t << 1) | (data[1] & 1); |
| 448 | } | ||
| 447 | input_report_abs(input, ABS_PRESSURE, t); | 449 | input_report_abs(input, ABS_PRESSURE, t); |
| 448 | input_report_abs(input, ABS_TILT_X, | 450 | input_report_abs(input, ABS_TILT_X, |
| 449 | ((data[7] << 1) & 0x7e) | (data[8] >> 7)); | 451 | ((data[7] << 1) & 0x7e) | (data[8] >> 7)); |
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c index 485be8b1e1b3..f0225bc0f267 100644 --- a/drivers/isdn/sc/interrupt.c +++ b/drivers/isdn/sc/interrupt.c | |||
| @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) | |||
| 112 | } | 112 | } |
| 113 | else if(callid>=0x0000 && callid<=0x7FFF) | 113 | else if(callid>=0x0000 && callid<=0x7FFF) |
| 114 | { | 114 | { |
| 115 | int len; | ||
| 116 | |||
| 115 | pr_debug("%s: Got Incoming Call\n", | 117 | pr_debug("%s: Got Incoming Call\n", |
| 116 | sc_adapter[card]->devicename); | 118 | sc_adapter[card]->devicename); |
| 117 | strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); | 119 | len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), |
| 118 | strcpy(setup.eazmsn, | 120 | sizeof(setup.phone)); |
| 119 | sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); | 121 | if (len >= sizeof(setup.phone)) |
| 122 | continue; | ||
| 123 | len = strlcpy(setup.eazmsn, | ||
| 124 | sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, | ||
| 125 | sizeof(setup.eazmsn)); | ||
| 126 | if (len >= sizeof(setup.eazmsn)) | ||
| 127 | continue; | ||
| 120 | setup.si1 = 7; | 128 | setup.si1 = 7; |
| 121 | setup.si2 = 0; | 129 | setup.si2 = 0; |
| 122 | setup.plan = 0; | 130 | setup.plan = 0; |
| @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) | |||
| 176 | * Handle a GetMyNumber Rsp | 184 | * Handle a GetMyNumber Rsp |
| 177 | */ | 185 | */ |
| 178 | if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ | 186 | if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ |
| 179 | strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); | 187 | strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, |
| 188 | rcvmsg.msg_data.byte_array, | ||
| 189 | sizeof(rcvmsg.msg_data.byte_array)); | ||
| 180 | continue; | 190 | continue; |
| 181 | } | 191 | } |
| 182 | 192 | ||
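The ISDN hunk above replaces unbounded strcpy() calls with strlcpy(), which always NUL-terminates and returns the length of the source string, so a return value greater than or equal to the destination size signals truncation. A minimal sketch of that check, independent of the sc_adapter structures:

    #include <linux/errno.h>
    #include <linux/string.h>

    static int copy_number(char *dst, size_t dst_size, const char *src)
    {
            size_t len = strlcpy(dst, src, dst_size);

            /* strlcpy() returns strlen(src); >= dst_size means it was cut off. */
            if (len >= dst_size)
                    return -EINVAL;
            return 0;
    }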
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed4900ade93a..e4fb58db5454 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
| @@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
| 1000 | page = bitmap->sb_page; | 1000 | page = bitmap->sb_page; |
| 1001 | offset = sizeof(bitmap_super_t); | 1001 | offset = sizeof(bitmap_super_t); |
| 1002 | if (!file) | 1002 | if (!file) |
| 1003 | read_sb_page(bitmap->mddev, | 1003 | page = read_sb_page( |
| 1004 | bitmap->mddev->bitmap_info.offset, | 1004 | bitmap->mddev, |
| 1005 | page, | 1005 | bitmap->mddev->bitmap_info.offset, |
| 1006 | index, count); | 1006 | page, |
| 1007 | index, count); | ||
| 1007 | } else if (file) { | 1008 | } else if (file) { |
| 1008 | page = read_page(file, index, bitmap, count); | 1009 | page = read_page(file, index, bitmap, count); |
| 1009 | offset = 0; | 1010 | offset = 0; |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ad83a4dcadc3..0b830bbe1d8b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -1839,7 +1839,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 1839 | 1839 | ||
| 1840 | /* take from bio_init */ | 1840 | /* take from bio_init */ |
| 1841 | bio->bi_next = NULL; | 1841 | bio->bi_next = NULL; |
| 1842 | bio->bi_flags &= ~(BIO_POOL_MASK-1); | ||
| 1842 | bio->bi_flags |= 1 << BIO_UPTODATE; | 1843 | bio->bi_flags |= 1 << BIO_UPTODATE; |
| 1844 | bio->bi_comp_cpu = -1; | ||
| 1843 | bio->bi_rw = READ; | 1845 | bio->bi_rw = READ; |
| 1844 | bio->bi_vcnt = 0; | 1846 | bio->bi_vcnt = 0; |
| 1845 | bio->bi_idx = 0; | 1847 | bio->bi_idx = 0; |
| @@ -1912,7 +1914,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
| 1912 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | 1914 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) |
| 1913 | break; | 1915 | break; |
| 1914 | BUG_ON(sync_blocks < (PAGE_SIZE>>9)); | 1916 | BUG_ON(sync_blocks < (PAGE_SIZE>>9)); |
| 1915 | if (len > (sync_blocks<<9)) | 1917 | if ((len >> 9) > sync_blocks) |
| 1916 | len = sync_blocks<<9; | 1918 | len = sync_blocks<<9; |
| 1917 | } | 1919 | } |
| 1918 | 1920 | ||
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 7e82a9df726b..7961d59f5cac 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c | |||
| @@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie) | |||
| 319 | * a keyup event might follow immediately after the keydown. | 319 | * a keyup event might follow immediately after the keydown. |
| 320 | */ | 320 | */ |
| 321 | spin_lock_irqsave(&ir->keylock, flags); | 321 | spin_lock_irqsave(&ir->keylock, flags); |
| 322 | if (time_is_after_eq_jiffies(ir->keyup_jiffies)) | 322 | if (time_is_before_eq_jiffies(ir->keyup_jiffies)) |
| 323 | ir_keyup(ir); | 323 | ir_keyup(ir); |
| 324 | spin_unlock_irqrestore(&ir->keylock, flags); | 324 | spin_unlock_irqrestore(&ir->keylock, flags); |
| 325 | } | 325 | } |
| @@ -510,6 +510,13 @@ int __ir_input_register(struct input_dev *input_dev, | |||
| 510 | (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ? | 510 | (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ? |
| 511 | " in raw mode" : ""); | 511 | " in raw mode" : ""); |
| 512 | 512 | ||
| 513 | /* | ||
| 514 | * Default delay of 250ms is too short for some protocols, especially | ||
| 515 | * since the timeout is currently set to 250ms. Increase it to 500ms, | ||
| 516 | * to avoid wrong repetition of the keycodes. | ||
| 517 | */ | ||
| 518 | input_dev->rep[REP_DELAY] = 500; | ||
| 519 | |||
| 513 | return 0; | 520 | return 0; |
| 514 | 521 | ||
| 515 | out_event: | 522 | out_event: |
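In the ir-keytable hunk above, the keyup timer now reports the key release only once ir->keyup_jiffies lies at or before the current time; time_is_before_eq_jiffies() is the wrap-safe way of asking whether a jiffies deadline has passed. An illustrative helper, where 'deadline' is assumed to have been set earlier (for example to jiffies + HZ / 4):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    static bool deadline_expired(unsigned long deadline)
    {
            /* True once 'deadline' is at or before the current jiffies value. */
            return time_is_before_eq_jiffies(deadline);
    }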
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c index 77b5946413c0..e63f757d5d72 100644 --- a/drivers/media/IR/ir-lirc-codec.c +++ b/drivers/media/IR/ir-lirc-codec.c | |||
| @@ -267,7 +267,7 @@ static int ir_lirc_register(struct input_dev *input_dev) | |||
| 267 | features |= LIRC_CAN_SET_SEND_CARRIER; | 267 | features |= LIRC_CAN_SET_SEND_CARRIER; |
| 268 | 268 | ||
| 269 | if (ir_dev->props->s_tx_duty_cycle) | 269 | if (ir_dev->props->s_tx_duty_cycle) |
| 270 | features |= LIRC_CAN_SET_REC_DUTY_CYCLE; | 270 | features |= LIRC_CAN_SET_SEND_DUTY_CYCLE; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | if (ir_dev->props->s_rx_carrier_range) | 273 | if (ir_dev->props->s_rx_carrier_range) |
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c index 43094e7eccfa..8e0e1b1f8c87 100644 --- a/drivers/media/IR/ir-raw-event.c +++ b/drivers/media/IR/ir-raw-event.c | |||
| @@ -279,9 +279,11 @@ int ir_raw_event_register(struct input_dev *input_dev) | |||
| 279 | "rc%u", (unsigned int)ir->devno); | 279 | "rc%u", (unsigned int)ir->devno); |
| 280 | 280 | ||
| 281 | if (IS_ERR(ir->raw->thread)) { | 281 | if (IS_ERR(ir->raw->thread)) { |
| 282 | int ret = PTR_ERR(ir->raw->thread); | ||
| 283 | |||
| 282 | kfree(ir->raw); | 284 | kfree(ir->raw); |
| 283 | ir->raw = NULL; | 285 | ir->raw = NULL; |
| 284 | return PTR_ERR(ir->raw->thread); | 286 | return ret; |
| 285 | } | 287 | } |
| 286 | 288 | ||
| 287 | mutex_lock(&ir_raw_handler_lock); | 289 | mutex_lock(&ir_raw_handler_lock); |
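The ir-raw-event fix above saves PTR_ERR(ir->raw->thread) into a local before kfree(ir->raw); evaluating it afterwards would read freed memory. The general save-the-errno-before-free pattern, with an illustrative structure:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct holder {
            void *thread;        /* may hold an ERR_PTR value */
    };

    static int start_or_cleanup(struct holder *h)
    {
            if (IS_ERR(h->thread)) {
                    int ret = PTR_ERR(h->thread);   /* capture before freeing */

                    kfree(h);
                    return ret;                     /* h is gone; use the copy */
            }
            return 0;
    }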
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c index 96dafc425c8e..46d42467f9b4 100644 --- a/drivers/media/IR/ir-sysfs.c +++ b/drivers/media/IR/ir-sysfs.c | |||
| @@ -67,13 +67,14 @@ static ssize_t show_protocols(struct device *d, | |||
| 67 | char *tmp = buf; | 67 | char *tmp = buf; |
| 68 | int i; | 68 | int i; |
| 69 | 69 | ||
| 70 | if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { | 70 | if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { |
| 71 | enabled = ir_dev->rc_tab.ir_type; | 71 | enabled = ir_dev->rc_tab.ir_type; |
| 72 | allowed = ir_dev->props->allowed_protos; | 72 | allowed = ir_dev->props->allowed_protos; |
| 73 | } else { | 73 | } else if (ir_dev->raw) { |
| 74 | enabled = ir_dev->raw->enabled_protocols; | 74 | enabled = ir_dev->raw->enabled_protocols; |
| 75 | allowed = ir_raw_get_allowed_protocols(); | 75 | allowed = ir_raw_get_allowed_protocols(); |
| 76 | } | 76 | } else |
| 77 | return sprintf(tmp, "[builtin]\n"); | ||
| 77 | 78 | ||
| 78 | IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", | 79 | IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", |
| 79 | (long long)allowed, | 80 | (long long)allowed, |
| @@ -121,10 +122,14 @@ static ssize_t store_protocols(struct device *d, | |||
| 121 | int rc, i, count = 0; | 122 | int rc, i, count = 0; |
| 122 | unsigned long flags; | 123 | unsigned long flags; |
| 123 | 124 | ||
| 124 | if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) | 125 | if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) |
| 125 | type = ir_dev->rc_tab.ir_type; | 126 | type = ir_dev->rc_tab.ir_type; |
| 126 | else | 127 | else if (ir_dev->raw) |
| 127 | type = ir_dev->raw->enabled_protocols; | 128 | type = ir_dev->raw->enabled_protocols; |
| 129 | else { | ||
| 130 | IR_dprintk(1, "Protocol switching not supported\n"); | ||
| 131 | return -EINVAL; | ||
| 132 | } | ||
| 128 | 133 | ||
| 129 | while ((tmp = strsep((char **) &data, " \n")) != NULL) { | 134 | while ((tmp = strsep((char **) &data, " \n")) != NULL) { |
| 130 | if (!*tmp) | 135 | if (!*tmp) |
| @@ -185,7 +190,7 @@ static ssize_t store_protocols(struct device *d, | |||
| 185 | } | 190 | } |
| 186 | } | 191 | } |
| 187 | 192 | ||
| 188 | if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { | 193 | if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { |
| 189 | spin_lock_irqsave(&ir_dev->rc_tab.lock, flags); | 194 | spin_lock_irqsave(&ir_dev->rc_tab.lock, flags); |
| 190 | ir_dev->rc_tab.ir_type = type; | 195 | ir_dev->rc_tab.ir_type = type; |
| 191 | spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags); | 196 | spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags); |
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c index 64264f7f838f..39557ad401b6 100644 --- a/drivers/media/IR/keymaps/rc-rc6-mce.c +++ b/drivers/media/IR/keymaps/rc-rc6-mce.c | |||
| @@ -19,6 +19,7 @@ static struct ir_scancode rc6_mce[] = { | |||
| 19 | 19 | ||
| 20 | { 0x800f0416, KEY_PLAY }, | 20 | { 0x800f0416, KEY_PLAY }, |
| 21 | { 0x800f0418, KEY_PAUSE }, | 21 | { 0x800f0418, KEY_PAUSE }, |
| 22 | { 0x800f046e, KEY_PLAYPAUSE }, | ||
| 22 | { 0x800f0419, KEY_STOP }, | 23 | { 0x800f0419, KEY_STOP }, |
| 23 | { 0x800f0417, KEY_RECORD }, | 24 | { 0x800f0417, KEY_RECORD }, |
| 24 | 25 | ||
| @@ -37,6 +38,8 @@ static struct ir_scancode rc6_mce[] = { | |||
| 37 | { 0x800f0411, KEY_VOLUMEDOWN }, | 38 | { 0x800f0411, KEY_VOLUMEDOWN }, |
| 38 | { 0x800f0412, KEY_CHANNELUP }, | 39 | { 0x800f0412, KEY_CHANNELUP }, |
| 39 | { 0x800f0413, KEY_CHANNELDOWN }, | 40 | { 0x800f0413, KEY_CHANNELDOWN }, |
| 41 | { 0x800f043a, KEY_BRIGHTNESSUP }, | ||
| 42 | { 0x800f0480, KEY_BRIGHTNESSDOWN }, | ||
| 40 | 43 | ||
| 41 | { 0x800f0401, KEY_NUMERIC_1 }, | 44 | { 0x800f0401, KEY_NUMERIC_1 }, |
| 42 | { 0x800f0402, KEY_NUMERIC_2 }, | 45 | { 0x800f0402, KEY_NUMERIC_2 }, |
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c index ac6bb2c01a48..bc620e10ef77 100644 --- a/drivers/media/IR/mceusb.c +++ b/drivers/media/IR/mceusb.c | |||
| @@ -120,6 +120,10 @@ static struct usb_device_id mceusb_dev_table[] = { | |||
| 120 | { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, | 120 | { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, |
| 121 | /* Philips eHome Infrared Transceiver */ | 121 | /* Philips eHome Infrared Transceiver */ |
| 122 | { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, | 122 | { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, |
| 123 | /* Philips/Spinel plus IR transceiver for ASUS */ | ||
| 124 | { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, | ||
| 125 | /* Philips/Spinel plus IR transceiver for ASUS */ | ||
| 126 | { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, | ||
| 123 | /* Realtek MCE IR Receiver */ | 127 | /* Realtek MCE IR Receiver */ |
| 124 | { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, | 128 | { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, |
| 125 | /* SMK/Toshiba G83C0004D410 */ | 129 | /* SMK/Toshiba G83C0004D410 */ |
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c index fe818348b8a3..48397f103d32 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c | |||
| @@ -673,9 +673,6 @@ static int dib0700_probe(struct usb_interface *intf, | |||
| 673 | else | 673 | else |
| 674 | dev->props.rc.core.bulk_mode = false; | 674 | dev->props.rc.core.bulk_mode = false; |
| 675 | 675 | ||
| 676 | /* Need a higher delay, to avoid wrong repeat */ | ||
| 677 | dev->rc_input_dev->rep[REP_DELAY] = 500; | ||
| 678 | |||
| 679 | dib0700_rc_setup(dev); | 676 | dib0700_rc_setup(dev); |
| 680 | 677 | ||
| 681 | return 0; | 678 | return 0; |
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index f634d2e784b2..e06acd1fecb6 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c | |||
| @@ -940,6 +940,58 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) | |||
| 940 | return adap->fe == NULL ? -ENODEV : 0; | 940 | return adap->fe == NULL ? -ENODEV : 0; |
| 941 | } | 941 | } |
| 942 | 942 | ||
| 943 | /* STK7770P */ | ||
| 944 | static struct dib7000p_config dib7770p_dib7000p_config = { | ||
| 945 | .output_mpeg2_in_188_bytes = 1, | ||
| 946 | |||
| 947 | .agc_config_count = 1, | ||
| 948 | .agc = &dib7070_agc_config, | ||
| 949 | .bw = &dib7070_bw_config_12_mhz, | ||
| 950 | .tuner_is_baseband = 1, | ||
| 951 | .spur_protect = 1, | ||
| 952 | |||
| 953 | .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, | ||
| 954 | .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, | ||
| 955 | .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, | ||
| 956 | |||
| 957 | .hostbus_diversity = 1, | ||
| 958 | .enable_current_mirror = 1, | ||
| 959 | .disable_sample_and_hold = 0, | ||
| 960 | }; | ||
| 961 | |||
| 962 | static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) | ||
| 963 | { | ||
| 964 | struct usb_device_descriptor *p = &adap->dev->udev->descriptor; | ||
| 965 | if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && | ||
| 966 | p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) | ||
| 967 | dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); | ||
| 968 | else | ||
| 969 | dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); | ||
| 970 | msleep(10); | ||
| 971 | dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); | ||
| 972 | dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); | ||
| 973 | dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); | ||
| 974 | dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); | ||
| 975 | |||
| 976 | dib0700_ctrl_clock(adap->dev, 72, 1); | ||
| 977 | |||
| 978 | msleep(10); | ||
| 979 | dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); | ||
| 980 | msleep(10); | ||
| 981 | dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); | ||
| 982 | |||
| 983 | if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, | ||
| 984 | &dib7770p_dib7000p_config) != 0) { | ||
| 985 | err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", | ||
| 986 | __func__); | ||
| 987 | return -ENODEV; | ||
| 988 | } | ||
| 989 | |||
| 990 | adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, | ||
| 991 | &dib7770p_dib7000p_config); | ||
| 992 | return adap->fe == NULL ? -ENODEV : 0; | ||
| 993 | } | ||
| 994 | |||
| 943 | /* DIB807x generic */ | 995 | /* DIB807x generic */ |
| 944 | static struct dibx000_agc_config dib807x_agc_config[2] = { | 996 | static struct dibx000_agc_config dib807x_agc_config[2] = { |
| 945 | { | 997 | { |
| @@ -1781,7 +1833,7 @@ struct usb_device_id dib0700_usb_id_table[] = { | |||
| 1781 | /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, | 1833 | /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, |
| 1782 | { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, | 1834 | { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, |
| 1783 | { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, | 1835 | { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, |
| 1784 | { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) }, | 1836 | { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) }, |
| 1785 | { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, | 1837 | { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, |
| 1786 | /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, | 1838 | /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, |
| 1787 | { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, | 1839 | { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, |
| @@ -2406,7 +2458,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { | |||
| 2406 | .pid_filter_count = 32, | 2458 | .pid_filter_count = 32, |
| 2407 | .pid_filter = stk70x0p_pid_filter, | 2459 | .pid_filter = stk70x0p_pid_filter, |
| 2408 | .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, | 2460 | .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, |
| 2409 | .frontend_attach = stk7070p_frontend_attach, | 2461 | .frontend_attach = stk7770p_frontend_attach, |
| 2410 | .tuner_attach = dib7770p_tuner_attach, | 2462 | .tuner_attach = dib7770p_tuner_attach, |
| 2411 | 2463 | ||
| 2412 | DIB0700_DEFAULT_STREAMING_CONFIG(0x02), | 2464 | DIB0700_DEFAULT_STREAMING_CONFIG(0x02), |
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c index 6b22ec64ab0c..f896337b4535 100644 --- a/drivers/media/dvb/dvb-usb/opera1.c +++ b/drivers/media/dvb/dvb-usb/opera1.c | |||
| @@ -483,9 +483,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev, | |||
| 483 | } | 483 | } |
| 484 | } | 484 | } |
| 485 | kfree(p); | 485 | kfree(p); |
| 486 | if (fw) { | 486 | release_firmware(fw); |
| 487 | release_firmware(fw); | ||
| 488 | } | ||
| 489 | return ret; | 487 | return ret; |
| 490 | } | 488 | } |
| 491 | 489 | ||
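The opera1 cleanup above relies on release_firmware() tolerating a NULL argument, so the surrounding if (fw) test is redundant and error paths can release unconditionally. A trivial sketch:

    #include <linux/firmware.h>

    static void put_fw(const struct firmware *fw)
    {
            release_firmware(fw);   /* no-op when fw is NULL */
    }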
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c index 2e28b973dfd3..3aed0d433921 100644 --- a/drivers/media/dvb/frontends/dib7000p.c +++ b/drivers/media/dvb/frontends/dib7000p.c | |||
| @@ -260,6 +260,9 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad | |||
| 260 | 260 | ||
| 261 | // dprintk( "908: %x, 909: %x\n", reg_908, reg_909); | 261 | // dprintk( "908: %x, 909: %x\n", reg_908, reg_909); |
| 262 | 262 | ||
| 263 | reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4; | ||
| 264 | reg_908 |= (state->cfg.enable_current_mirror & 1) << 7; | ||
| 265 | |||
| 263 | dib7000p_write_word(state, 908, reg_908); | 266 | dib7000p_write_word(state, 908, reg_908); |
| 264 | dib7000p_write_word(state, 909, reg_909); | 267 | dib7000p_write_word(state, 909, reg_909); |
| 265 | } | 268 | } |
| @@ -778,7 +781,10 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte | |||
| 778 | default: | 781 | default: |
| 779 | case GUARD_INTERVAL_1_32: value *= 1; break; | 782 | case GUARD_INTERVAL_1_32: value *= 1; break; |
| 780 | } | 783 | } |
| 781 | state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO | 784 | if (state->cfg.diversity_delay == 0) |
| 785 | state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo | ||
| 786 | else | ||
| 787 | state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo | ||
| 782 | 788 | ||
| 783 | /* deactive the possibility of diversity reception if extended interleaver */ | 789 | /* deactive the possibility of diversity reception if extended interleaver */ |
| 784 | state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K; | 790 | state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K; |
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h index 805dd13a97ee..da17345bf5bd 100644 --- a/drivers/media/dvb/frontends/dib7000p.h +++ b/drivers/media/dvb/frontends/dib7000p.h | |||
| @@ -33,6 +33,11 @@ struct dib7000p_config { | |||
| 33 | int (*agc_control) (struct dvb_frontend *, u8 before); | 33 | int (*agc_control) (struct dvb_frontend *, u8 before); |
| 34 | 34 | ||
| 35 | u8 output_mode; | 35 | u8 output_mode; |
| 36 | u8 disable_sample_and_hold : 1; | ||
| 37 | |||
| 38 | u8 enable_current_mirror : 1; | ||
| 39 | u8 diversity_delay; | ||
| 40 | |||
| 36 | }; | 41 | }; |
| 37 | 42 | ||
| 38 | #define DEFAULT_DIB7000P_I2C_ADDRESS 18 | 43 | #define DEFAULT_DIB7000P_I2C_ADDRESS 18 |
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index d93468cd3a85..ff3b0fa901b3 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c | |||
| @@ -1098,33 +1098,26 @@ EXPORT_SYMBOL_GPL(smscore_onresponse); | |||
| 1098 | * | 1098 | * |
| 1099 | * @return pointer to descriptor on success, NULL on error. | 1099 | * @return pointer to descriptor on success, NULL on error. |
| 1100 | */ | 1100 | */ |
| 1101 | struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) | 1101 | |
| 1102 | struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev) | ||
| 1102 | { | 1103 | { |
| 1103 | struct smscore_buffer_t *cb = NULL; | 1104 | struct smscore_buffer_t *cb = NULL; |
| 1104 | unsigned long flags; | 1105 | unsigned long flags; |
| 1105 | 1106 | ||
| 1106 | DEFINE_WAIT(wait); | ||
| 1107 | |||
| 1108 | spin_lock_irqsave(&coredev->bufferslock, flags); | 1107 | spin_lock_irqsave(&coredev->bufferslock, flags); |
| 1109 | 1108 | if (!list_empty(&coredev->buffers)) { | |
| 1110 | /* This function must return a valid buffer, since the buffer list is | 1109 | cb = (struct smscore_buffer_t *) coredev->buffers.next; |
| 1111 | * finite, we check that there is an available buffer, if not, we wait | 1110 | list_del(&cb->entry); |
| 1112 | * until such buffer become available. | ||
| 1113 | */ | ||
| 1114 | |||
| 1115 | prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE); | ||
| 1116 | if (list_empty(&coredev->buffers)) { | ||
| 1117 | spin_unlock_irqrestore(&coredev->bufferslock, flags); | ||
| 1118 | schedule(); | ||
| 1119 | spin_lock_irqsave(&coredev->bufferslock, flags); | ||
| 1120 | } | 1111 | } |
| 1112 | spin_unlock_irqrestore(&coredev->bufferslock, flags); | ||
| 1113 | return cb; | ||
| 1114 | } | ||
| 1121 | 1115 | ||
| 1122 | finish_wait(&coredev->buffer_mng_waitq, &wait); | 1116 | struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) |
| 1123 | 1117 | { | |
| 1124 | cb = (struct smscore_buffer_t *) coredev->buffers.next; | 1118 | struct smscore_buffer_t *cb = NULL; |
| 1125 | list_del(&cb->entry); | ||
| 1126 | 1119 | ||
| 1127 | spin_unlock_irqrestore(&coredev->bufferslock, flags); | 1120 | wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev))); |
| 1128 | 1121 | ||
| 1129 | return cb; | 1122 | return cb; |
| 1130 | } | 1123 | } |
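The smscoreapi rework above replaces the open-coded prepare_to_wait()/schedule()/finish_wait() sequence with wait_event() plus a helper that pops one buffer from the spinlock-protected list and returns NULL when the list is empty; wait_event() re-evaluates that condition on every wakeup. A condensed sketch of the pattern with placeholder names (struct buf, struct pool) rather than the driver's types:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct buf {
            struct list_head entry;
    };

    struct pool {
            struct list_head buffers;
            spinlock_t lock;
            wait_queue_head_t waitq;     /* producer calls wake_up(&waitq) */
    };

    static struct buf *try_get(struct pool *p)
    {
            struct buf *b = NULL;
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            if (!list_empty(&p->buffers)) {
                    b = list_first_entry(&p->buffers, struct buf, entry);
                    list_del(&b->entry);
            }
            spin_unlock_irqrestore(&p->lock, flags);
            return b;
    }

    static struct buf *get_blocking(struct pool *p)
    {
            struct buf *b;

            /* Sleeps until try_get() yields a buffer; rechecked on each wakeup. */
            wait_event(p->waitq, (b = try_get(p)));
            return b;
    }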
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 67a4ec8768a6..4ce541a5eb47 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c | |||
| @@ -395,7 +395,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client, | |||
| 395 | radio->registers[POWERCFG] = POWERCFG_ENABLE; | 395 | radio->registers[POWERCFG] = POWERCFG_ENABLE; |
| 396 | if (si470x_set_register(radio, POWERCFG) < 0) { | 396 | if (si470x_set_register(radio, POWERCFG) < 0) { |
| 397 | retval = -EIO; | 397 | retval = -EIO; |
| 398 | goto err_all; | 398 | goto err_video; |
| 399 | } | 399 | } |
| 400 | msleep(110); | 400 | msleep(110); |
| 401 | 401 | ||
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile index 755dd0ce65ff..6f2b57384488 100644 --- a/drivers/media/video/cx231xx/Makefile +++ b/drivers/media/video/cx231xx/Makefile | |||
| @@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video | |||
| 11 | EXTRA_CFLAGS += -Idrivers/media/common/tuners | 11 | EXTRA_CFLAGS += -Idrivers/media/common/tuners |
| 12 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core | 12 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core |
| 13 | EXTRA_CFLAGS += -Idrivers/media/dvb/frontends | 13 | EXTRA_CFLAGS += -Idrivers/media/dvb/frontends |
| 14 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb | ||
| 14 | 15 | ||
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 6bdc0ef18119..f2a4900014bc 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <media/v4l2-chip-ident.h> | 32 | #include <media/v4l2-chip-ident.h> |
| 33 | 33 | ||
| 34 | #include <media/cx25840.h> | 34 | #include <media/cx25840.h> |
| 35 | #include "dvb-usb-ids.h" | ||
| 35 | #include "xc5000.h" | 36 | #include "xc5000.h" |
| 36 | 37 | ||
| 37 | #include "cx231xx.h" | 38 | #include "cx231xx.h" |
| @@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = { | |||
| 175 | .driver_info = CX231XX_BOARD_CNXT_RDE_250}, | 176 | .driver_info = CX231XX_BOARD_CNXT_RDE_250}, |
| 176 | {USB_DEVICE(0x0572, 0x58A1), | 177 | {USB_DEVICE(0x0572, 0x58A1), |
| 177 | .driver_info = CX231XX_BOARD_CNXT_RDU_250}, | 178 | .driver_info = CX231XX_BOARD_CNXT_RDU_250}, |
| 179 | {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff), | ||
| 180 | .driver_info = CX231XX_BOARD_UNKNOWN}, | ||
| 178 | {}, | 181 | {}, |
| 179 | }; | 182 | }; |
| 180 | 183 | ||
| @@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev) | |||
| 226 | dev->board.name, dev->model); | 229 | dev->board.name, dev->model); |
| 227 | 230 | ||
| 228 | /* set the direction for GPIO pins */ | 231 | /* set the direction for GPIO pins */ |
| 229 | cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); | 232 | if (dev->board.tuner_gpio) { |
| 230 | cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); | 233 | cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); |
| 231 | cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); | 234 | cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); |
| 235 | cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); | ||
| 232 | 236 | ||
| 233 | /* request some modules if any required */ | 237 | /* request some modules if any required */ |
| 234 | 238 | ||
| 235 | /* reset the Tuner */ | 239 | /* reset the Tuner */ |
| 236 | cx231xx_gpio_set(dev, dev->board.tuner_gpio); | 240 | cx231xx_gpio_set(dev, dev->board.tuner_gpio); |
| 241 | } | ||
| 237 | 242 | ||
| 238 | /* set the mode to Analog mode initially */ | 243 | /* set the mode to Analog mode initially */ |
| 239 | cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); | 244 | cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); |
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 86ca8c2359dd..f5a3e74c3c7c 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c | |||
| @@ -1996,7 +1996,7 @@ static int cx25840_probe(struct i2c_client *client, | |||
| 1996 | 1996 | ||
| 1997 | state->volume = v4l2_ctrl_new_std(&state->hdl, | 1997 | state->volume = v4l2_ctrl_new_std(&state->hdl, |
| 1998 | &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME, | 1998 | &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME, |
| 1999 | 0, 65335, 65535 / 100, default_volume); | 1999 | 0, 65535, 65535 / 100, default_volume); |
| 2000 | state->mute = v4l2_ctrl_new_std(&state->hdl, | 2000 | state->mute = v4l2_ctrl_new_std(&state->hdl, |
| 2001 | &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE, | 2001 | &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE, |
| 2002 | 0, 1, 1, 0); | 2002 | 0, 1, 1, 0); |
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig index 99dbae117591..0fa85cbefbb1 100644 --- a/drivers/media/video/cx88/Kconfig +++ b/drivers/media/video/cx88/Kconfig | |||
| @@ -17,7 +17,7 @@ config VIDEO_CX88 | |||
| 17 | 17 | ||
| 18 | config VIDEO_CX88_ALSA | 18 | config VIDEO_CX88_ALSA |
| 19 | tristate "Conexant 2388x DMA audio support" | 19 | tristate "Conexant 2388x DMA audio support" |
| 20 | depends on VIDEO_CX88 && SND && EXPERIMENTAL | 20 | depends on VIDEO_CX88 && SND |
| 21 | select SND_PCM | 21 | select SND_PCM |
| 22 | ---help--- | 22 | ---help--- |
| 23 | This is a video4linux driver for direct (DMA) audio on | 23 | This is a video4linux driver for direct (DMA) audio on |
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index b9846106913e..78abc1c1f9d5 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c | |||
| @@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, | |||
| 223 | usb_rcvintpipe(dev, ep->bEndpointAddress), | 223 | usb_rcvintpipe(dev, ep->bEndpointAddress), |
| 224 | buffer, buffer_len, | 224 | buffer, buffer_len, |
| 225 | int_irq, (void *)gspca_dev, interval); | 225 | int_irq, (void *)gspca_dev, interval); |
| 226 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
| 226 | gspca_dev->int_urb = urb; | 227 | gspca_dev->int_urb = urb; |
| 227 | ret = usb_submit_urb(urb, GFP_KERNEL); | 228 | ret = usb_submit_urb(urb, GFP_KERNEL); |
| 228 | if (ret < 0) { | 229 | if (ret < 0) { |
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c index 83a718f0f3f9..9052d5702556 100644 --- a/drivers/media/video/gspca/sn9c20x.c +++ b/drivers/media/video/gspca/sn9c20x.c | |||
| @@ -2357,8 +2357,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, | |||
| 2357 | (data[33] << 10); | 2357 | (data[33] << 10); |
| 2358 | avg_lum >>= 9; | 2358 | avg_lum >>= 9; |
| 2359 | atomic_set(&sd->avg_lum, avg_lum); | 2359 | atomic_set(&sd->avg_lum, avg_lum); |
| 2360 | gspca_frame_add(gspca_dev, LAST_PACKET, | 2360 | gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); |
| 2361 | data, len); | ||
| 2362 | return; | 2361 | return; |
| 2363 | } | 2362 | } |
| 2364 | if (gspca_dev->last_packet_type == LAST_PACKET) { | 2363 | if (gspca_dev->last_packet_type == LAST_PACKET) { |
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c index be03a712731c..f0316d02f09f 100644 --- a/drivers/media/video/ivtv/ivtvfb.c +++ b/drivers/media/video/ivtv/ivtvfb.c | |||
| @@ -466,6 +466,8 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar | |||
| 466 | struct fb_vblank vblank; | 466 | struct fb_vblank vblank; |
| 467 | u32 trace; | 467 | u32 trace; |
| 468 | 468 | ||
| 469 | memset(&vblank, 0, sizeof(struct fb_vblank)); | ||
| 470 | |||
| 469 | vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | | 471 | vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | |
| 470 | FB_VBLANK_HAVE_VSYNC; | 472 | FB_VBLANK_HAVE_VSYNC; |
| 471 | trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; | 473 | trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; |
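The ivtvfb change above zeroes the struct fb_vblank before filling in its fields; the structure is later copied out to userspace, and any padding or unset members would otherwise leak kernel stack contents. The general zero-before-copy pattern, with an illustrative reply structure:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct reply {
            u32 flags;
            u32 count;
            /* possible padding / reserved fields */
    };

    static long fill_reply(void __user *arg)
    {
            struct reply r;

            memset(&r, 0, sizeof(r));    /* no uninitialised bytes reach userspace */
            r.flags = 1;
            r.count = 42;

            return copy_to_user(arg, &r, sizeof(r)) ? -EFAULT : 0;
    }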
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index 4525335f9bd4..a7210d981388 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c | |||
| @@ -239,7 +239,7 @@ static int device_process(struct m2mtest_ctx *ctx, | |||
| 239 | return -EFAULT; | 239 | return -EFAULT; |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | if (in_buf->vb.size < out_buf->vb.size) { | 242 | if (in_buf->vb.size > out_buf->vb.size) { |
| 243 | v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); | 243 | v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); |
| 244 | return -EINVAL; | 244 | return -EINVAL; |
| 245 | } | 245 | } |
| @@ -1014,6 +1014,7 @@ static int m2mtest_remove(struct platform_device *pdev) | |||
| 1014 | v4l2_m2m_release(dev->m2m_dev); | 1014 | v4l2_m2m_release(dev->m2m_dev); |
| 1015 | del_timer_sync(&dev->timer); | 1015 | del_timer_sync(&dev->timer); |
| 1016 | video_unregister_device(dev->vfd); | 1016 | video_unregister_device(dev->vfd); |
| 1017 | video_device_release(dev->vfd); | ||
| 1017 | v4l2_device_unregister(&dev->v4l2_dev); | 1018 | v4l2_device_unregister(&dev->v4l2_dev); |
| 1018 | kfree(dev); | 1019 | kfree(dev); |
| 1019 | 1020 | ||
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 758a4db27d65..c71af4e0e517 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c | |||
| @@ -447,6 +447,9 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) | |||
| 447 | dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", | 447 | dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", |
| 448 | __func__, rect.left, rect.top, rect.width, rect.height); | 448 | __func__, rect.left, rect.top, rect.width, rect.height); |
| 449 | 449 | ||
| 450 | if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
| 451 | return -EINVAL; | ||
| 452 | |||
| 450 | ret = mt9m111_make_rect(client, &rect); | 453 | ret = mt9m111_make_rect(client, &rect); |
| 451 | if (!ret) | 454 | if (!ret) |
| 452 | mt9m111->rect = rect; | 455 | mt9m111->rect = rect; |
| @@ -466,12 +469,14 @@ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) | |||
| 466 | 469 | ||
| 467 | static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) | 470 | static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) |
| 468 | { | 471 | { |
| 472 | if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) | ||
| 473 | return -EINVAL; | ||
| 474 | |||
| 469 | a->bounds.left = MT9M111_MIN_DARK_COLS; | 475 | a->bounds.left = MT9M111_MIN_DARK_COLS; |
| 470 | a->bounds.top = MT9M111_MIN_DARK_ROWS; | 476 | a->bounds.top = MT9M111_MIN_DARK_ROWS; |
| 471 | a->bounds.width = MT9M111_MAX_WIDTH; | 477 | a->bounds.width = MT9M111_MAX_WIDTH; |
| 472 | a->bounds.height = MT9M111_MAX_HEIGHT; | 478 | a->bounds.height = MT9M111_MAX_HEIGHT; |
| 473 | a->defrect = a->bounds; | 479 | a->defrect = a->bounds; |
| 474 | a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; | ||
| 475 | a->pixelaspect.numerator = 1; | 480 | a->pixelaspect.numerator = 1; |
| 476 | a->pixelaspect.denominator = 1; | 481 | a->pixelaspect.denominator = 1; |
| 477 | 482 | ||
| @@ -487,6 +492,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd, | |||
| 487 | mf->width = mt9m111->rect.width; | 492 | mf->width = mt9m111->rect.width; |
| 488 | mf->height = mt9m111->rect.height; | 493 | mf->height = mt9m111->rect.height; |
| 489 | mf->code = mt9m111->fmt->code; | 494 | mf->code = mt9m111->fmt->code; |
| 495 | mf->colorspace = mt9m111->fmt->colorspace; | ||
| 490 | mf->field = V4L2_FIELD_NONE; | 496 | mf->field = V4L2_FIELD_NONE; |
| 491 | 497 | ||
| 492 | return 0; | 498 | return 0; |
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index e7cd23cd6394..b48473c7896b 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c | |||
| @@ -402,9 +402,6 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, | |||
| 402 | if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) | 402 | if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) |
| 403 | return -EINVAL; | 403 | return -EINVAL; |
| 404 | break; | 404 | break; |
| 405 | case 0: | ||
| 406 | /* No format change, only geometry */ | ||
| 407 | break; | ||
| 408 | default: | 405 | default: |
| 409 | return -EINVAL; | 406 | return -EINVAL; |
| 410 | } | 407 | } |
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 66ff174151b5..b6ea67221d1d 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c | |||
| @@ -378,6 +378,9 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb, | |||
| 378 | 378 | ||
| 379 | spin_lock_irqsave(&pcdev->lock, flags); | 379 | spin_lock_irqsave(&pcdev->lock, flags); |
| 380 | 380 | ||
| 381 | if (*fb_active == NULL) | ||
| 382 | goto out; | ||
| 383 | |||
| 381 | vb = &(*fb_active)->vb; | 384 | vb = &(*fb_active)->vb; |
| 382 | dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, | 385 | dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, |
| 383 | vb, vb->baddr, vb->bsize); | 386 | vb, vb->baddr, vb->bsize); |
| @@ -402,6 +405,7 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb, | |||
| 402 | 405 | ||
| 403 | *fb_active = buf; | 406 | *fb_active = buf; |
| 404 | 407 | ||
| 408 | out: | ||
| 405 | spin_unlock_irqrestore(&pcdev->lock, flags); | 409 | spin_unlock_irqrestore(&pcdev->lock, flags); |
| 406 | } | 410 | } |
| 407 | 411 | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c index 1b992b847198..55ea914c7fcd 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c +++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c | |||
| @@ -513,7 +513,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, | |||
| 513 | if (ret >= 0) { | 513 | if (ret >= 0) { |
| 514 | ret = pvr2_ctrl_range_check(cptr,*valptr); | 514 | ret = pvr2_ctrl_range_check(cptr,*valptr); |
| 515 | } | 515 | } |
| 516 | if (maskptr) *maskptr = ~0; | 516 | *maskptr = ~0; |
| 517 | } else if (cptr->info->type == pvr2_ctl_bool) { | 517 | } else if (cptr->info->type == pvr2_ctl_bool) { |
| 518 | ret = parse_token(ptr,len,valptr,boolNames, | 518 | ret = parse_token(ptr,len,valptr,boolNames, |
| 519 | ARRAY_SIZE(boolNames)); | 519 | ARRAY_SIZE(boolNames)); |
| @@ -522,7 +522,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, | |||
| 522 | } else if (ret == 0) { | 522 | } else if (ret == 0) { |
| 523 | *valptr = (*valptr & 1) ? !0 : 0; | 523 | *valptr = (*valptr & 1) ? !0 : 0; |
| 524 | } | 524 | } |
| 525 | if (maskptr) *maskptr = 1; | 525 | *maskptr = 1; |
| 526 | } else if (cptr->info->type == pvr2_ctl_enum) { | 526 | } else if (cptr->info->type == pvr2_ctl_enum) { |
| 527 | ret = parse_token( | 527 | ret = parse_token( |
| 528 | ptr,len,valptr, | 528 | ptr,len,valptr, |
| @@ -531,7 +531,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, | |||
| 531 | if (ret >= 0) { | 531 | if (ret >= 0) { |
| 532 | ret = pvr2_ctrl_range_check(cptr,*valptr); | 532 | ret = pvr2_ctrl_range_check(cptr,*valptr); |
| 533 | } | 533 | } |
| 534 | if (maskptr) *maskptr = ~0; | 534 | *maskptr = ~0; |
| 535 | } else if (cptr->info->type == pvr2_ctl_bitmask) { | 535 | } else if (cptr->info->type == pvr2_ctl_bitmask) { |
| 536 | ret = parse_tlist( | 536 | ret = parse_tlist( |
| 537 | ptr,len,maskptr,valptr, | 537 | ptr,len,maskptr,valptr, |
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c index b151c7be8a50..6961c55baf9b 100644 --- a/drivers/media/video/s5p-fimc/fimc-core.c +++ b/drivers/media/video/s5p-fimc/fimc-core.c | |||
| @@ -393,6 +393,37 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx) | |||
| 393 | dbg("ctx->out_order_1p= %d", ctx->out_order_1p); | 393 | dbg("ctx->out_order_1p= %d", ctx->out_order_1p); |
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f) | ||
| 397 | { | ||
| 398 | struct samsung_fimc_variant *variant = ctx->fimc_dev->variant; | ||
| 399 | |||
| 400 | f->dma_offset.y_h = f->offs_h; | ||
| 401 | if (!variant->pix_hoff) | ||
| 402 | f->dma_offset.y_h *= (f->fmt->depth >> 3); | ||
| 403 | |||
| 404 | f->dma_offset.y_v = f->offs_v; | ||
| 405 | |||
| 406 | f->dma_offset.cb_h = f->offs_h; | ||
| 407 | f->dma_offset.cb_v = f->offs_v; | ||
| 408 | |||
| 409 | f->dma_offset.cr_h = f->offs_h; | ||
| 410 | f->dma_offset.cr_v = f->offs_v; | ||
| 411 | |||
| 412 | if (!variant->pix_hoff) { | ||
| 413 | if (f->fmt->planes_cnt == 3) { | ||
| 414 | f->dma_offset.cb_h >>= 1; | ||
| 415 | f->dma_offset.cr_h >>= 1; | ||
| 416 | } | ||
| 417 | if (f->fmt->color == S5P_FIMC_YCBCR420) { | ||
| 418 | f->dma_offset.cb_v >>= 1; | ||
| 419 | f->dma_offset.cr_v >>= 1; | ||
| 420 | } | ||
| 421 | } | ||
| 422 | |||
| 423 | dbg("in_offset: color= %d, y_h= %d, y_v= %d", | ||
| 424 | f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v); | ||
| 425 | } | ||
| 426 | |||
| 396 | /** | 427 | /** |
| 397 | * fimc_prepare_config - check dimensions, operation and color mode | 428 | * fimc_prepare_config - check dimensions, operation and color mode |
| 398 | * and pre-calculate offset and the scaling coefficients. | 429 | * and pre-calculate offset and the scaling coefficients. |
| @@ -406,7 +437,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags) | |||
| 406 | { | 437 | { |
| 407 | struct fimc_frame *s_frame, *d_frame; | 438 | struct fimc_frame *s_frame, *d_frame; |
| 408 | struct fimc_vid_buffer *buf = NULL; | 439 | struct fimc_vid_buffer *buf = NULL; |
| 409 | struct samsung_fimc_variant *variant = ctx->fimc_dev->variant; | ||
| 410 | int ret = 0; | 440 | int ret = 0; |
| 411 | 441 | ||
| 412 | s_frame = &ctx->s_frame; | 442 | s_frame = &ctx->s_frame; |
| @@ -419,61 +449,16 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags) | |||
| 419 | swap(d_frame->width, d_frame->height); | 449 | swap(d_frame->width, d_frame->height); |
| 420 | } | 450 | } |
| 421 | 451 | ||
| 422 | /* Prepare the output offset ratios for scaler. */ | 452 | /* Prepare the DMA offset ratios for scaler. */ |
| 423 | d_frame->dma_offset.y_h = d_frame->offs_h; | 453 | fimc_prepare_dma_offset(ctx, &ctx->s_frame); |
| 424 | if (!variant->pix_hoff) | 454 | fimc_prepare_dma_offset(ctx, &ctx->d_frame); |
| 425 | d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3); | ||
| 426 | |||
| 427 | d_frame->dma_offset.y_v = d_frame->offs_v; | ||
| 428 | |||
| 429 | d_frame->dma_offset.cb_h = d_frame->offs_h; | ||
| 430 | d_frame->dma_offset.cb_v = d_frame->offs_v; | ||
| 431 | |||
| 432 | d_frame->dma_offset.cr_h = d_frame->offs_h; | ||
| 433 | d_frame->dma_offset.cr_v = d_frame->offs_v; | ||
| 434 | 455 | ||
| 435 | if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) { | ||
| 436 | d_frame->dma_offset.cb_h >>= 1; | ||
| 437 | d_frame->dma_offset.cb_v >>= 1; | ||
| 438 | d_frame->dma_offset.cr_h >>= 1; | ||
| 439 | d_frame->dma_offset.cr_v >>= 1; | ||
| 440 | } | ||
| 441 | |||
| 442 | dbg("out offset: color= %d, y_h= %d, y_v= %d", | ||
| 443 | d_frame->fmt->color, | ||
| 444 | d_frame->dma_offset.y_h, d_frame->dma_offset.y_v); | ||
| 445 | |||
| 446 | /* Prepare the input offset ratios for scaler. */ | ||
| 447 | s_frame->dma_offset.y_h = s_frame->offs_h; | ||
| 448 | if (!variant->pix_hoff) | ||
| 449 | s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3); | ||
| 450 | s_frame->dma_offset.y_v = s_frame->offs_v; | ||
| 451 | |||
| 452 | s_frame->dma_offset.cb_h = s_frame->offs_h; | ||
| 453 | s_frame->dma_offset.cb_v = s_frame->offs_v; | ||
| 454 | |||
| 455 | s_frame->dma_offset.cr_h = s_frame->offs_h; | ||
| 456 | s_frame->dma_offset.cr_v = s_frame->offs_v; | ||
| 457 | |||
| 458 | if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) { | ||
| 459 | s_frame->dma_offset.cb_h >>= 1; | ||
| 460 | s_frame->dma_offset.cb_v >>= 1; | ||
| 461 | s_frame->dma_offset.cr_h >>= 1; | ||
| 462 | s_frame->dma_offset.cr_v >>= 1; | ||
| 463 | } | ||
| 464 | |||
| 465 | dbg("in offset: color= %d, y_h= %d, y_v= %d", | ||
| 466 | s_frame->fmt->color, s_frame->dma_offset.y_h, | ||
| 467 | s_frame->dma_offset.y_v); | ||
| 468 | |||
| 469 | fimc_set_yuv_order(ctx); | ||
| 470 | |||
| 471 | /* Check against the scaler ratio. */ | ||
| 472 | if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) || | 456 | if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) || |
| 473 | s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) { | 457 | s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) { |
| 474 | err("out of scaler range"); | 458 | err("out of scaler range"); |
| 475 | return -EINVAL; | 459 | return -EINVAL; |
| 476 | } | 460 | } |
| 461 | fimc_set_yuv_order(ctx); | ||
| 477 | } | 462 | } |
| 478 | 463 | ||
| 479 | /* Input DMA mode is not allowed when the scaler is disabled. */ | 464 | /* Input DMA mode is not allowed when the scaler is disabled. */ |
| @@ -822,7 +807,8 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 822 | } else { | 807 | } else { |
| 823 | v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, | 808 | v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, |
| 824 | "Wrong buffer/video queue type (%d)\n", f->type); | 809 | "Wrong buffer/video queue type (%d)\n", f->type); |
| 825 | return -EINVAL; | 810 | ret = -EINVAL; |
| 811 | goto s_fmt_out; | ||
| 826 | } | 812 | } |
| 827 | 813 | ||
| 828 | pix = &f->fmt.pix; | 814 | pix = &f->fmt.pix; |
| @@ -1414,8 +1400,10 @@ static int fimc_probe(struct platform_device *pdev) | |||
| 1414 | } | 1400 | } |
| 1415 | 1401 | ||
| 1416 | fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev)); | 1402 | fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev)); |
| 1417 | if (!fimc->work_queue) | 1403 | if (!fimc->work_queue) { |
| 1404 | ret = -ENOMEM; | ||
| 1418 | goto err_irq; | 1405 | goto err_irq; |
| 1406 | } | ||
| 1419 | 1407 | ||
| 1420 | ret = fimc_register_m2m_device(fimc); | 1408 | ret = fimc_register_m2m_device(fimc); |
| 1421 | if (ret) | 1409 | if (ret) |
| @@ -1492,6 +1480,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = { | |||
| 1492 | }; | 1480 | }; |
| 1493 | 1481 | ||
| 1494 | static struct samsung_fimc_variant fimc01_variant_s5pv210 = { | 1482 | static struct samsung_fimc_variant fimc01_variant_s5pv210 = { |
| 1483 | .pix_hoff = 1, | ||
| 1495 | .has_inp_rot = 1, | 1484 | .has_inp_rot = 1, |
| 1496 | .has_out_rot = 1, | 1485 | .has_out_rot = 1, |
| 1497 | .min_inp_pixsize = 16, | 1486 | .min_inp_pixsize = 16, |
| @@ -1506,6 +1495,7 @@ static struct samsung_fimc_variant fimc01_variant_s5pv210 = { | |||
| 1506 | }; | 1495 | }; |
| 1507 | 1496 | ||
| 1508 | static struct samsung_fimc_variant fimc2_variant_s5pv210 = { | 1497 | static struct samsung_fimc_variant fimc2_variant_s5pv210 = { |
| 1498 | .pix_hoff = 1, | ||
| 1509 | .min_inp_pixsize = 16, | 1499 | .min_inp_pixsize = 16, |
| 1510 | .min_out_pixsize = 32, | 1500 | .min_out_pixsize = 32, |
| 1511 | 1501 | ||
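The fimc hunk above moves fimc_set_yuv_order() after the scaler-ratio check and sets a new pix_hoff flag on the S5PV210 variants: on hardware without pixel-granularity horizontal offsets, the Cb/Cr DMA offsets of a 3-plane YCbCr 4:2:0 buffer are halved relative to the luma offsets, since the chroma planes are subsampled by two in each direction. A minimal userspace sketch of that computation (hypothetical field names, not the driver structs):

    #include <stdio.h>

    struct dma_offset { int y_h, y_v, cb_h, cb_v, cr_h, cr_v; };

    /* Luma gets the configured (h, v) offset; for 3-plane formats without
     * pixel-based horizontal offsets, chroma gets half of it. */
    static void set_offsets(struct dma_offset *off, int offs_h, int offs_v,
                            int pix_hoff, int planes_cnt)
    {
            off->y_h = off->cb_h = off->cr_h = offs_h;
            off->y_v = off->cb_v = off->cr_v = offs_v;

            if (!pix_hoff && planes_cnt == 3) {
                    off->cb_h >>= 1;
                    off->cb_v >>= 1;
                    off->cr_h >>= 1;
                    off->cr_v >>= 1;
            }
    }

    int main(void)
    {
            struct dma_offset off;

            set_offsets(&off, 32, 16, 0, 3);
            printf("y: %d,%d  cb/cr: %d,%d\n", off.y_h, off.y_v, off.cb_h, off.cb_v);
            return 0;
    }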
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index ec697fcd406e..bb8d83d8ddaf 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c | |||
| @@ -4323,13 +4323,13 @@ struct saa7134_board saa7134_boards[] = { | |||
| 4323 | }, | 4323 | }, |
| 4324 | [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { | 4324 | [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { |
| 4325 | /* Beholder Intl. Ltd. 2008 */ | 4325 | /* Beholder Intl. Ltd. 2008 */ |
| 4326 | /*Dmitry Belimov <d.belimov@gmail.com> */ | 4326 | /* Dmitry Belimov <d.belimov@gmail.com> */ |
| 4327 | .name = "Beholder BeholdTV Columbus TVFM", | 4327 | .name = "Beholder BeholdTV Columbus TV/FM", |
| 4328 | .audio_clock = 0x00187de7, | 4328 | .audio_clock = 0x00187de7, |
| 4329 | .tuner_type = TUNER_ALPS_TSBE5_PAL, | 4329 | .tuner_type = TUNER_ALPS_TSBE5_PAL, |
| 4330 | .radio_type = UNSET, | 4330 | .radio_type = TUNER_TEA5767, |
| 4331 | .tuner_addr = ADDR_UNSET, | 4331 | .tuner_addr = 0xc2 >> 1, |
| 4332 | .radio_addr = ADDR_UNSET, | 4332 | .radio_addr = 0xc0 >> 1, |
| 4333 | .tda9887_conf = TDA9887_PRESENT, | 4333 | .tda9887_conf = TDA9887_PRESENT, |
| 4334 | .gpiomask = 0x000A8004, | 4334 | .gpiomask = 0x000A8004, |
| 4335 | .inputs = {{ | 4335 | .inputs = {{ |
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c index 5713f3a4b76c..ddd25d32723d 100644 --- a/drivers/media/video/saa7164/saa7164-buffer.c +++ b/drivers/media/video/saa7164/saa7164-buffer.c | |||
| @@ -136,10 +136,11 @@ ret: | |||
| 136 | int saa7164_buffer_dealloc(struct saa7164_tsport *port, | 136 | int saa7164_buffer_dealloc(struct saa7164_tsport *port, |
| 137 | struct saa7164_buffer *buf) | 137 | struct saa7164_buffer *buf) |
| 138 | { | 138 | { |
| 139 | struct saa7164_dev *dev = port->dev; | 139 | struct saa7164_dev *dev; |
| 140 | 140 | ||
| 141 | if ((buf == 0) || (port == 0)) | 141 | if (!buf || !port) |
| 142 | return SAA_ERR_BAD_PARAMETER; | 142 | return SAA_ERR_BAD_PARAMETER; |
| 143 | dev = port->dev; | ||
| 143 | 144 | ||
| 144 | dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf); | 145 | dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf); |
| 145 | 146 | ||
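The saa7164_buffer_dealloc() fix above defers the port->dev dereference until after the NULL checks; the old code read port->dev in the initializer before validating port at all. A standalone sketch of the check-before-dereference pattern (hypothetical types, not the saa7164 structures):

    #include <stddef.h>
    #include <stdio.h>

    struct port   { const char *name; };
    struct buffer { int len; };

    /* Validate the arguments first, only then derive locals from them. */
    static int buffer_dealloc(struct port *port, struct buffer *buf)
    {
            const char *name;

            if (!buf || !port)
                    return -1;            /* bad parameter */
            name = port->name;            /* safe: port is non-NULL here */

            printf("freeing %d bytes on %s\n", buf->len, name);
            return 0;
    }

    int main(void)
    {
            struct port p   = { "port0" };
            struct buffer b = { 4096 };

            buffer_dealloc(&p, &b);       /* normal call */
            buffer_dealloc(NULL, &b);     /* rejected without a crash */
            return 0;
    }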
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 8bdd940f32e6..2ac85d8984f0 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
| @@ -486,6 +486,12 @@ static int uvc_parse_format(struct uvc_device *dev, | |||
| 486 | max(frame->dwFrameInterval[0], | 486 | max(frame->dwFrameInterval[0], |
| 487 | frame->dwDefaultFrameInterval)); | 487 | frame->dwDefaultFrameInterval)); |
| 488 | 488 | ||
| 489 | if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) { | ||
| 490 | frame->bFrameIntervalType = 1; | ||
| 491 | frame->dwFrameInterval[0] = | ||
| 492 | frame->dwDefaultFrameInterval; | ||
| 493 | } | ||
| 494 | |||
| 489 | uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", | 495 | uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", |
| 490 | frame->wWidth, frame->wHeight, | 496 | frame->wWidth, frame->wHeight, |
| 491 | 10000000/frame->dwDefaultFrameInterval, | 497 | 10000000/frame->dwDefaultFrameInterval, |
| @@ -2026,6 +2032,15 @@ static struct usb_device_id uvc_ids[] = { | |||
| 2026 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | 2032 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, |
| 2027 | .bInterfaceSubClass = 1, | 2033 | .bInterfaceSubClass = 1, |
| 2028 | .bInterfaceProtocol = 0 }, | 2034 | .bInterfaceProtocol = 0 }, |
| 2035 | /* Chicony CNF7129 (Asus EEE 100HE) */ | ||
| 2036 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | ||
| 2037 | | USB_DEVICE_ID_MATCH_INT_INFO, | ||
| 2038 | .idVendor = 0x04f2, | ||
| 2039 | .idProduct = 0xb071, | ||
| 2040 | .bInterfaceClass = USB_CLASS_VIDEO, | ||
| 2041 | .bInterfaceSubClass = 1, | ||
| 2042 | .bInterfaceProtocol = 0, | ||
| 2043 | .driver_info = UVC_QUIRK_RESTRICT_FRAME_RATE }, | ||
| 2029 | /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ | 2044 | /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ |
| 2030 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | 2045 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| 2031 | | USB_DEVICE_ID_MATCH_INT_INFO, | 2046 | | USB_DEVICE_ID_MATCH_INT_INFO, |
| @@ -2091,6 +2106,15 @@ static struct usb_device_id uvc_ids[] = { | |||
| 2091 | .bInterfaceProtocol = 0, | 2106 | .bInterfaceProtocol = 0, |
| 2092 | .driver_info = UVC_QUIRK_PROBE_MINMAX | 2107 | .driver_info = UVC_QUIRK_PROBE_MINMAX |
| 2093 | | UVC_QUIRK_PROBE_DEF }, | 2108 | | UVC_QUIRK_PROBE_DEF }, |
| 2109 | /* IMC Networks (Medion Akoya) */ | ||
| 2110 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | ||
| 2111 | | USB_DEVICE_ID_MATCH_INT_INFO, | ||
| 2112 | .idVendor = 0x13d3, | ||
| 2113 | .idProduct = 0x5103, | ||
| 2114 | .bInterfaceClass = USB_CLASS_VIDEO, | ||
| 2115 | .bInterfaceSubClass = 1, | ||
| 2116 | .bInterfaceProtocol = 0, | ||
| 2117 | .driver_info = UVC_QUIRK_STREAM_NO_FID }, | ||
| 2094 | /* Syntek (HP Spartan) */ | 2118 | /* Syntek (HP Spartan) */ |
| 2095 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | 2119 | { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| 2096 | | USB_DEVICE_ID_MATCH_INT_INFO, | 2120 | | USB_DEVICE_ID_MATCH_INT_INFO, |
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index bdacf3beabf5..892e0e51916c 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
| @@ -182,6 +182,7 @@ struct uvc_xu_control { | |||
| 182 | #define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 | 182 | #define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 |
| 183 | #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 | 183 | #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 |
| 184 | #define UVC_QUIRK_PROBE_DEF 0x00000100 | 184 | #define UVC_QUIRK_PROBE_DEF 0x00000100 |
| 185 | #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 | ||
| 185 | 186 | ||
| 186 | /* Format flags */ | 187 | /* Format flags */ |
| 187 | #define UVC_FMT_FLAG_COMPRESSED 0x00000001 | 188 | #define UVC_FMT_FLAG_COMPRESSED 0x00000001 |
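The two new uvc_ids entries above hang per-device quirk bits off driver_info (the new UVC_QUIRK_RESTRICT_FRAME_RATE for the Chicony CNF7129 and the existing UVC_QUIRK_STREAM_NO_FID for the IMC Networks camera), and uvc_parse_format() tests the bitmask while parsing descriptors. A compilable sketch of the lookup-and-test pattern (simplified table, not the real usb_device_id matching):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK_STREAM_NO_FID       0x00000010u
    #define QUIRK_RESTRICT_FRAME_RATE 0x00000200u   /* mirrors the new flag */

    struct device_id { uint16_t vendor, product; uint32_t quirks; };

    static const struct device_id ids[] = {
            { 0x04f2, 0xb071, QUIRK_RESTRICT_FRAME_RATE },
            { 0x13d3, 0x5103, QUIRK_STREAM_NO_FID },
    };

    static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
    {
            for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    if (ids[i].vendor == vendor && ids[i].product == product)
                            return ids[i].quirks;
            return 0;
    }

    int main(void)
    {
            uint32_t quirks = lookup_quirks(0x04f2, 0xb071);

            if (quirks & QUIRK_RESTRICT_FRAME_RATE)
                    printf("forcing the default frame interval only\n");
            return 0;
    }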
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 073f01390cdd..86294ed35c9b 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c | |||
| @@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u | |||
| 193 | struct video_code32 { | 193 | struct video_code32 { |
| 194 | char loadwhat[16]; /* name or tag of file being passed */ | 194 | char loadwhat[16]; /* name or tag of file being passed */ |
| 195 | compat_int_t datasize; | 195 | compat_int_t datasize; |
| 196 | unsigned char *data; | 196 | compat_uptr_t data; |
| 197 | }; | 197 | }; |
| 198 | 198 | ||
| 199 | static int get_microcode32(struct video_code *kp, struct video_code32 __user *up) | 199 | static struct video_code __user *get_microcode32(struct video_code32 *kp) |
| 200 | { | 200 | { |
| 201 | if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) || | 201 | struct video_code __user *up; |
| 202 | copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) || | 202 | |
| 203 | get_user(kp->datasize, &up->datasize) || | 203 | up = compat_alloc_user_space(sizeof(*up)); |
| 204 | copy_from_user(kp->data, up->data, up->datasize)) | 204 | |
| 205 | return -EFAULT; | 205 | /* |
| 206 | return 0; | 206 | * NOTE! We don't actually care if these fail. If the |
| 207 | * user address is invalid, the native ioctl will do | ||
| 208 | * the error handling for us | ||
| 209 | */ | ||
| 210 | (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat)); | ||
| 211 | (void) put_user(kp->datasize, &up->datasize); | ||
| 212 | (void) put_user(compat_ptr(kp->data), &up->data); | ||
| 213 | return up; | ||
| 207 | } | 214 | } |
| 208 | 215 | ||
| 209 | #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32) | 216 | #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32) |
| @@ -739,7 +746,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
| 739 | struct video_tuner vt; | 746 | struct video_tuner vt; |
| 740 | struct video_buffer vb; | 747 | struct video_buffer vb; |
| 741 | struct video_window vw; | 748 | struct video_window vw; |
| 742 | struct video_code vc; | 749 | struct video_code32 vc; |
| 743 | struct video_audio va; | 750 | struct video_audio va; |
| 744 | #endif | 751 | #endif |
| 745 | struct v4l2_format v2f; | 752 | struct v4l2_format v2f; |
| @@ -818,8 +825,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
| 818 | break; | 825 | break; |
| 819 | 826 | ||
| 820 | case VIDIOCSMICROCODE: | 827 | case VIDIOCSMICROCODE: |
| 821 | err = get_microcode32(&karg.vc, up); | 828 | /* Copy the 32-bit "video_code32" to kernel space */ |
| 822 | compatible_arg = 0; | 829 | if (copy_from_user(&karg.vc, up, sizeof(karg.vc))) |
| 830 | return -EFAULT; | ||
| 831 | /* Convert the 32-bit version to a 64-bit version in user space */ | ||
| 832 | up = get_microcode32(&karg.vc); | ||
| 823 | break; | 833 | break; |
| 824 | 834 | ||
| 825 | case VIDIOCSFREQ: | 835 | case VIDIOCSFREQ: |
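The VIDIOCSMICROCODE rework above stores the 32-bit user pointer as a compat_uptr_t, copies the whole struct video_code32 into the kernel, and then uses compat_alloc_user_space() to rebuild a native struct video_code in user space, so the regular ioctl path does the copying and the error handling. The underlying issue is purely structure layout; a runnable userspace illustration of why the two layouts differ (the compat_* helpers themselves are kernel-only APIs):

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit userland layout: the pointer field is a 4-byte value. */
    struct video_code32 {
            char     loadwhat[16];
            int32_t  datasize;
            uint32_t data;               /* compat_uptr_t: 32-bit user address */
    };

    /* Native 64-bit layout: the pointer field is 8 bytes (plus padding). */
    struct video_code {
            char           loadwhat[16];
            int32_t        datasize;
            unsigned char *data;
    };

    int main(void)
    {
            printf("compat struct: %zu bytes, native struct: %zu bytes\n",
                   sizeof(struct video_code32), sizeof(struct video_code));
            /* Because sizes and field offsets differ, a compat handler must
             * translate the layout before invoking the native ioctl. */
            return 0;
    }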
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c index 372b87efcd05..6ff9e4bac3ea 100644 --- a/drivers/media/video/videobuf-dma-contig.c +++ b/drivers/media/video/videobuf-dma-contig.c | |||
| @@ -393,8 +393,10 @@ void videobuf_dma_contig_free(struct videobuf_queue *q, | |||
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | /* read() method */ | 395 | /* read() method */ |
| 396 | dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); | 396 | if (mem->vaddr) { |
| 397 | mem->vaddr = NULL; | 397 | dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); |
| 398 | mem->vaddr = NULL; | ||
| 399 | } | ||
| 398 | } | 400 | } |
| 399 | EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); | 401 | EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); |
| 400 | 402 | ||
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 06f9a9c2a39a..2ad0bc252b0e 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c | |||
| @@ -94,7 +94,7 @@ err: | |||
| 94 | * must free the memory. | 94 | * must free the memory. |
| 95 | */ | 95 | */ |
| 96 | static struct scatterlist *videobuf_pages_to_sg(struct page **pages, | 96 | static struct scatterlist *videobuf_pages_to_sg(struct page **pages, |
| 97 | int nr_pages, int offset) | 97 | int nr_pages, int offset, size_t size) |
| 98 | { | 98 | { |
| 99 | struct scatterlist *sglist; | 99 | struct scatterlist *sglist; |
| 100 | int i; | 100 | int i; |
| @@ -110,12 +110,14 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages, | |||
| 110 | /* DMA to highmem pages might not work */ | 110 | /* DMA to highmem pages might not work */ |
| 111 | goto highmem; | 111 | goto highmem; |
| 112 | sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); | 112 | sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); |
| 113 | size -= PAGE_SIZE - offset; | ||
| 113 | for (i = 1; i < nr_pages; i++) { | 114 | for (i = 1; i < nr_pages; i++) { |
| 114 | if (NULL == pages[i]) | 115 | if (NULL == pages[i]) |
| 115 | goto nopage; | 116 | goto nopage; |
| 116 | if (PageHighMem(pages[i])) | 117 | if (PageHighMem(pages[i])) |
| 117 | goto highmem; | 118 | goto highmem; |
| 118 | sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); | 119 | sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0); |
| 120 | size -= min(PAGE_SIZE, size); | ||
| 119 | } | 121 | } |
| 120 | return sglist; | 122 | return sglist; |
| 121 | 123 | ||
| @@ -170,7 +172,8 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, | |||
| 170 | 172 | ||
| 171 | first = (data & PAGE_MASK) >> PAGE_SHIFT; | 173 | first = (data & PAGE_MASK) >> PAGE_SHIFT; |
| 172 | last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; | 174 | last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; |
| 173 | dma->offset = data & ~PAGE_MASK; | 175 | dma->offset = data & ~PAGE_MASK; |
| 176 | dma->size = size; | ||
| 174 | dma->nr_pages = last-first+1; | 177 | dma->nr_pages = last-first+1; |
| 175 | dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); | 178 | dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); |
| 176 | if (NULL == dma->pages) | 179 | if (NULL == dma->pages) |
| @@ -252,7 +255,7 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma) | |||
| 252 | 255 | ||
| 253 | if (dma->pages) { | 256 | if (dma->pages) { |
| 254 | dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, | 257 | dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, |
| 255 | dma->offset); | 258 | dma->offset, dma->size); |
| 256 | } | 259 | } |
| 257 | if (dma->vaddr) { | 260 | if (dma->vaddr) { |
| 258 | dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, | 261 | dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, |
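The videobuf-dma-sg change above threads the total buffer size into videobuf_pages_to_sg() so every scatterlist entry is capped at the bytes that actually remain, instead of always claiming a full page; dma->size is recorded at init time for exactly that purpose. A runnable sketch of the per-entry length computation with a first-page offset:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Print the length of each scatter-gather entry for a buffer that starts
     * 'offset' bytes into its first page and is 'size' bytes long. */
    static void show_sg_lengths(unsigned long offset, unsigned long size,
                                int nr_pages)
    {
            unsigned long len = PAGE_SIZE - offset;

            if (len > size)
                    len = size;
            printf("entry 0: %lu bytes (page offset %lu)\n", len, offset);
            size -= len;

            for (int i = 1; i < nr_pages && size; i++) {
                    len = size < PAGE_SIZE ? size : PAGE_SIZE;
                    printf("entry %d: %lu bytes\n", i, len);
                    size -= len;
            }
    }

    int main(void)
    {
            /* A 10000-byte buffer starting 100 bytes into its first page
             * spans three pages: 3996 + 4096 + 1908 bytes. */
            show_sg_lengths(100, 10000, 3);
            return 0;
    }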
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 04028a9ee082..428377a5a6f5 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c | |||
| @@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq) | |||
| 429 | irq_tsc = cache_tsc; | 429 | irq_tsc = cache_tsc; |
| 430 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { | 430 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { |
| 431 | irq_data = &max8925_irqs[i]; | 431 | irq_data = &max8925_irqs[i]; |
| 432 | /* 1 -- disable, 0 -- enable */ | ||
| 432 | switch (irq_data->mask_reg) { | 433 | switch (irq_data->mask_reg) { |
| 433 | case MAX8925_CHG_IRQ1_MASK: | 434 | case MAX8925_CHG_IRQ1_MASK: |
| 434 | irq_chg[0] &= irq_data->enable; | 435 | irq_chg[0] &= ~irq_data->enable; |
| 435 | break; | 436 | break; |
| 436 | case MAX8925_CHG_IRQ2_MASK: | 437 | case MAX8925_CHG_IRQ2_MASK: |
| 437 | irq_chg[1] &= irq_data->enable; | 438 | irq_chg[1] &= ~irq_data->enable; |
| 438 | break; | 439 | break; |
| 439 | case MAX8925_ON_OFF_IRQ1_MASK: | 440 | case MAX8925_ON_OFF_IRQ1_MASK: |
| 440 | irq_on[0] &= irq_data->enable; | 441 | irq_on[0] &= ~irq_data->enable; |
| 441 | break; | 442 | break; |
| 442 | case MAX8925_ON_OFF_IRQ2_MASK: | 443 | case MAX8925_ON_OFF_IRQ2_MASK: |
| 443 | irq_on[1] &= irq_data->enable; | 444 | irq_on[1] &= ~irq_data->enable; |
| 444 | break; | 445 | break; |
| 445 | case MAX8925_RTC_IRQ_MASK: | 446 | case MAX8925_RTC_IRQ_MASK: |
| 446 | irq_rtc &= irq_data->enable; | 447 | irq_rtc &= ~irq_data->enable; |
| 447 | break; | 448 | break; |
| 448 | case MAX8925_TSC_IRQ_MASK: | 449 | case MAX8925_TSC_IRQ_MASK: |
| 449 | irq_tsc &= irq_data->enable; | 450 | irq_tsc &= ~irq_data->enable; |
| 450 | break; | 451 | break; |
| 451 | default: | 452 | default: |
| 452 | dev_err(chip->dev, "wrong IRQ\n"); | 453 | dev_err(chip->dev, "wrong IRQ\n"); |
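The max8925 fix above follows directly from the polarity spelled out in the added comment (1 masks an interrupt, 0 enables it): to enable a source the driver must AND the mask register with the complement of that source's enable bit; ANDing with the bit itself, as the old code did, kept the wanted source masked and unmasked everything else. A two-line demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t mask       = 0xff;   /* every source masked (1 = disable) */
            uint8_t enable_bit = 0x04;   /* the one interrupt we want */

            uint8_t wrong = mask & enable_bit;    /* 0x04: ours stays masked,
                                                     all others get enabled */
            uint8_t right = mask & ~enable_bit;   /* 0xfb: only ours enabled */

            printf("wrong: 0x%02x  right: 0x%02x\n", wrong, right);
            return 0;
    }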
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 7dabe4dbd373..294183b6260b 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c | |||
| @@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type) | |||
| 394 | 394 | ||
| 395 | irq = irq - wm831x->irq_base; | 395 | irq = irq - wm831x->irq_base; |
| 396 | 396 | ||
| 397 | if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) | 397 | if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { |
| 398 | return -EINVAL; | 398 | /* Ignore internal-only IRQs */ |
| 399 | if (irq >= 0 && irq < WM831X_NUM_IRQS) | ||
| 400 | return 0; | ||
| 401 | else | ||
| 402 | return -EINVAL; | ||
| 403 | } | ||
| 399 | 404 | ||
| 400 | switch (type) { | 405 | switch (type) { |
| 401 | case IRQ_TYPE_EDGE_BOTH: | 406 | case IRQ_TYPE_EDGE_BOTH: |
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c index 714c6b487313..d5f3a3fd2319 100644 --- a/drivers/misc/bh1780gli.c +++ b/drivers/misc/bh1780gli.c | |||
| @@ -190,7 +190,6 @@ static int __devexit bh1780_remove(struct i2c_client *client) | |||
| 190 | 190 | ||
| 191 | ddata = i2c_get_clientdata(client); | 191 | ddata = i2c_get_clientdata(client); |
| 192 | sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); | 192 | sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); |
| 193 | i2c_set_clientdata(client, NULL); | ||
| 194 | kfree(ddata); | 193 | kfree(ddata); |
| 195 | 194 | ||
| 196 | return 0; | 195 | return 0; |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5db49b124ffa..09eee6df0653 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
| @@ -1631,6 +1631,19 @@ int mmc_suspend_host(struct mmc_host *host) | |||
| 1631 | if (host->bus_ops && !host->bus_dead) { | 1631 | if (host->bus_ops && !host->bus_dead) { |
| 1632 | if (host->bus_ops->suspend) | 1632 | if (host->bus_ops->suspend) |
| 1633 | err = host->bus_ops->suspend(host); | 1633 | err = host->bus_ops->suspend(host); |
| 1634 | if (err == -ENOSYS || !host->bus_ops->resume) { | ||
| 1635 | /* | ||
| 1636 | * We simply "remove" the card in this case. | ||
| 1637 | * It will be redetected on resume. | ||
| 1638 | */ | ||
| 1639 | if (host->bus_ops->remove) | ||
| 1640 | host->bus_ops->remove(host); | ||
| 1641 | mmc_claim_host(host); | ||
| 1642 | mmc_detach_bus(host); | ||
| 1643 | mmc_release_host(host); | ||
| 1644 | host->pm_flags = 0; | ||
| 1645 | err = 0; | ||
| 1646 | } | ||
| 1634 | } | 1647 | } |
| 1635 | mmc_bus_put(host); | 1648 | mmc_bus_put(host); |
| 1636 | 1649 | ||
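The mmc_suspend_host() change above adds a fallback: if the bus suspend handler reports -ENOSYS, or the bus has no resume handler at all, the core simply removes the card so it is redetected from scratch on resume instead of coming back in an undefined state. A compact sketch of that decision (hypothetical ops table, not the mmc core types; -38 stands in for -ENOSYS):

    #include <stdio.h>

    struct bus_ops {
            int  (*suspend)(void);
            int  (*resume)(void);
            void (*remove)(void);
    };

    static int  suspend_unsupported(void) { return -38; /* -ENOSYS */ }
    static void card_remove(void) { printf("card removed; redetected on resume\n"); }

    static int host_suspend(const struct bus_ops *ops)
    {
            int err = 0;

            if (ops->suspend)
                    err = ops->suspend();
            if (err == -38 || !ops->resume) {
                    if (ops->remove)
                            ops->remove();   /* fall back to remove/redetect */
                    err = 0;
            }
            return err;
    }

    int main(void)
    {
            struct bus_ops ops = { suspend_unsupported, NULL, card_remove };

            return host_suspend(&ops);
    }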
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index b2828e84d243..214b03afdd48 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
| 31 | #include <linux/err.h> | 31 | #include <linux/err.h> |
| 32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
| 33 | #include <linux/irq.h> | ||
| 34 | #include <linux/completion.h> | ||
| 33 | 35 | ||
| 34 | #include <asm/mach/flash.h> | 36 | #include <asm/mach/flash.h> |
| 35 | #include <mach/mxc_nand.h> | 37 | #include <mach/mxc_nand.h> |
| @@ -151,7 +153,7 @@ struct mxc_nand_host { | |||
| 151 | int irq; | 153 | int irq; |
| 152 | int eccsize; | 154 | int eccsize; |
| 153 | 155 | ||
| 154 | wait_queue_head_t irq_waitq; | 156 | struct completion op_completion; |
| 155 | 157 | ||
| 156 | uint8_t *data_buf; | 158 | uint8_t *data_buf; |
| 157 | unsigned int buf_start; | 159 | unsigned int buf_start; |
| @@ -164,6 +166,7 @@ struct mxc_nand_host { | |||
| 164 | void (*send_read_id)(struct mxc_nand_host *); | 166 | void (*send_read_id)(struct mxc_nand_host *); |
| 165 | uint16_t (*get_dev_status)(struct mxc_nand_host *); | 167 | uint16_t (*get_dev_status)(struct mxc_nand_host *); |
| 166 | int (*check_int)(struct mxc_nand_host *); | 168 | int (*check_int)(struct mxc_nand_host *); |
| 169 | void (*irq_control)(struct mxc_nand_host *, int); | ||
| 167 | }; | 170 | }; |
| 168 | 171 | ||
| 169 | /* OOB placement block for use with hardware ecc generation */ | 172 | /* OOB placement block for use with hardware ecc generation */ |
| @@ -216,9 +219,12 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) | |||
| 216 | { | 219 | { |
| 217 | struct mxc_nand_host *host = dev_id; | 220 | struct mxc_nand_host *host = dev_id; |
| 218 | 221 | ||
| 219 | disable_irq_nosync(irq); | 222 | if (!host->check_int(host)) |
| 223 | return IRQ_NONE; | ||
| 220 | 224 | ||
| 221 | wake_up(&host->irq_waitq); | 225 | host->irq_control(host, 0); |
| 226 | |||
| 227 | complete(&host->op_completion); | ||
| 222 | 228 | ||
| 223 | return IRQ_HANDLED; | 229 | return IRQ_HANDLED; |
| 224 | } | 230 | } |
| @@ -245,11 +251,54 @@ static int check_int_v1_v2(struct mxc_nand_host *host) | |||
| 245 | if (!(tmp & NFC_V1_V2_CONFIG2_INT)) | 251 | if (!(tmp & NFC_V1_V2_CONFIG2_INT)) |
| 246 | return 0; | 252 | return 0; |
| 247 | 253 | ||
| 248 | writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); | 254 | if (!cpu_is_mx21()) |
| 255 | writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); | ||
| 249 | 256 | ||
| 250 | return 1; | 257 | return 1; |
| 251 | } | 258 | } |
| 252 | 259 | ||
| 260 | /* | ||
| 261 | * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit | ||
| 262 | * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the | ||
| 263 | * driver can enable/disable the irq line rather than simply masking the | ||
| 264 | * interrupts. | ||
| 265 | */ | ||
| 266 | static void irq_control_mx21(struct mxc_nand_host *host, int activate) | ||
| 267 | { | ||
| 268 | if (activate) | ||
| 269 | enable_irq(host->irq); | ||
| 270 | else | ||
| 271 | disable_irq_nosync(host->irq); | ||
| 272 | } | ||
| 273 | |||
| 274 | static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) | ||
| 275 | { | ||
| 276 | uint16_t tmp; | ||
| 277 | |||
| 278 | tmp = readw(NFC_V1_V2_CONFIG1); | ||
| 279 | |||
| 280 | if (activate) | ||
| 281 | tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; | ||
| 282 | else | ||
| 283 | tmp |= NFC_V1_V2_CONFIG1_INT_MSK; | ||
| 284 | |||
| 285 | writew(tmp, NFC_V1_V2_CONFIG1); | ||
| 286 | } | ||
| 287 | |||
| 288 | static void irq_control_v3(struct mxc_nand_host *host, int activate) | ||
| 289 | { | ||
| 290 | uint32_t tmp; | ||
| 291 | |||
| 292 | tmp = readl(NFC_V3_CONFIG2); | ||
| 293 | |||
| 294 | if (activate) | ||
| 295 | tmp &= ~NFC_V3_CONFIG2_INT_MSK; | ||
| 296 | else | ||
| 297 | tmp |= NFC_V3_CONFIG2_INT_MSK; | ||
| 298 | |||
| 299 | writel(tmp, NFC_V3_CONFIG2); | ||
| 300 | } | ||
| 301 | |||
| 253 | /* This function polls the NANDFC to wait for the basic operation to | 302 | /* This function polls the NANDFC to wait for the basic operation to |
| 254 | * complete by checking the INT bit of config2 register. | 303 | * complete by checking the INT bit of config2 register. |
| 255 | */ | 304 | */ |
| @@ -259,10 +308,9 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq) | |||
| 259 | 308 | ||
| 260 | if (useirq) { | 309 | if (useirq) { |
| 261 | if (!host->check_int(host)) { | 310 | if (!host->check_int(host)) { |
| 262 | 311 | INIT_COMPLETION(host->op_completion); | |
| 263 | enable_irq(host->irq); | 312 | host->irq_control(host, 1); |
| 264 | 313 | wait_for_completion(&host->op_completion); | |
| 265 | wait_event(host->irq_waitq, host->check_int(host)); | ||
| 266 | } | 314 | } |
| 267 | } else { | 315 | } else { |
| 268 | while (max_retries-- > 0) { | 316 | while (max_retries-- > 0) { |
| @@ -799,6 +847,7 @@ static void preset_v3(struct mtd_info *mtd) | |||
| 799 | NFC_V3_CONFIG2_2CMD_PHASES | | 847 | NFC_V3_CONFIG2_2CMD_PHASES | |
| 800 | NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) | | 848 | NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) | |
| 801 | NFC_V3_CONFIG2_ST_CMD(0x70) | | 849 | NFC_V3_CONFIG2_ST_CMD(0x70) | |
| 850 | NFC_V3_CONFIG2_INT_MSK | | ||
| 802 | NFC_V3_CONFIG2_NUM_ADDR_PHASE0; | 851 | NFC_V3_CONFIG2_NUM_ADDR_PHASE0; |
| 803 | 852 | ||
| 804 | if (chip->ecc.mode == NAND_ECC_HW) | 853 | if (chip->ecc.mode == NAND_ECC_HW) |
| @@ -1024,6 +1073,10 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1024 | host->send_read_id = send_read_id_v1_v2; | 1073 | host->send_read_id = send_read_id_v1_v2; |
| 1025 | host->get_dev_status = get_dev_status_v1_v2; | 1074 | host->get_dev_status = get_dev_status_v1_v2; |
| 1026 | host->check_int = check_int_v1_v2; | 1075 | host->check_int = check_int_v1_v2; |
| 1076 | if (cpu_is_mx21()) | ||
| 1077 | host->irq_control = irq_control_mx21; | ||
| 1078 | else | ||
| 1079 | host->irq_control = irq_control_v1_v2; | ||
| 1027 | } | 1080 | } |
| 1028 | 1081 | ||
| 1029 | if (nfc_is_v21()) { | 1082 | if (nfc_is_v21()) { |
| @@ -1062,6 +1115,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1062 | host->send_read_id = send_read_id_v3; | 1115 | host->send_read_id = send_read_id_v3; |
| 1063 | host->check_int = check_int_v3; | 1116 | host->check_int = check_int_v3; |
| 1064 | host->get_dev_status = get_dev_status_v3; | 1117 | host->get_dev_status = get_dev_status_v3; |
| 1118 | host->irq_control = irq_control_v3; | ||
| 1065 | oob_smallpage = &nandv2_hw_eccoob_smallpage; | 1119 | oob_smallpage = &nandv2_hw_eccoob_smallpage; |
| 1066 | oob_largepage = &nandv2_hw_eccoob_largepage; | 1120 | oob_largepage = &nandv2_hw_eccoob_largepage; |
| 1067 | } else | 1121 | } else |
| @@ -1093,14 +1147,34 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
| 1093 | this->options |= NAND_USE_FLASH_BBT; | 1147 | this->options |= NAND_USE_FLASH_BBT; |
| 1094 | } | 1148 | } |
| 1095 | 1149 | ||
| 1096 | init_waitqueue_head(&host->irq_waitq); | 1150 | init_completion(&host->op_completion); |
| 1097 | 1151 | ||
| 1098 | host->irq = platform_get_irq(pdev, 0); | 1152 | host->irq = platform_get_irq(pdev, 0); |
| 1099 | 1153 | ||
| 1154 | /* | ||
| 1155 | * mask the interrupt. For i.MX21 explicitly call | ||
| 1156 | * irq_control_v1_v2 to use the mask bit. We can't call | ||
| 1157 | * disable_irq_nosync() for an interrupt we do not own yet. | ||
| 1158 | */ | ||
| 1159 | if (cpu_is_mx21()) | ||
| 1160 | irq_control_v1_v2(host, 0); | ||
| 1161 | else | ||
| 1162 | host->irq_control(host, 0); | ||
| 1163 | |||
| 1100 | err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); | 1164 | err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); |
| 1101 | if (err) | 1165 | if (err) |
| 1102 | goto eirq; | 1166 | goto eirq; |
| 1103 | 1167 | ||
| 1168 | host->irq_control(host, 0); | ||
| 1169 | |||
| 1170 | /* | ||
| 1171 | * Now that the interrupt is disabled make sure the interrupt | ||
| 1172 | * mask bit is cleared on i.MX21. Otherwise we can't read | ||
| 1173 | * the interrupt status bit on this machine. | ||
| 1174 | */ | ||
| 1175 | if (cpu_is_mx21()) | ||
| 1176 | irq_control_v1_v2(host, 1); | ||
| 1177 | |||
| 1104 | /* first scan to find the device and get the page size */ | 1178 | /* first scan to find the device and get the page size */ |
| 1105 | if (nand_scan_ident(mtd, 1, NULL)) { | 1179 | if (nand_scan_ident(mtd, 1, NULL)) { |
| 1106 | err = -ENXIO; | 1180 | err = -ENXIO; |
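The mxc_nand rework above swaps the open-coded wait queue for a struct completion and routes all interrupt masking through a per-SoC irq_control() hook, because the i.MX21 cannot report CONFIG2:INT while CONFIG1:INT_MSK is set, so that chip has to gate the IRQ line itself. A rough userspace analogue of the completion handshake, built on pthreads rather than the kernel primitives (the semantics are only approximate):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            int             done;
    };

    static struct completion op = { PTHREAD_MUTEX_INITIALIZER,
                                    PTHREAD_COND_INITIALIZER, 0 };

    static void complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = 1;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->cond, &c->lock);
            c->done = 0;                  /* re-arm, like INIT_COMPLETION() */
            pthread_mutex_unlock(&c->lock);
    }

    static void *fake_irq(void *arg)
    {
            (void)arg;
            complete(&op);                /* the controller signals "done" */
            return NULL;
    }

    int main(void)
    {
            pthread_t irq;

            pthread_create(&irq, NULL, fake_irq, NULL);
            wait_for_completion(&op);     /* wait_op_done() in the driver */
            pthread_join(irq, NULL);
            printf("operation completed\n");
            return 0;
    }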
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 133d51528f8d..513e0a76a4a7 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
| @@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
| 413 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); | 413 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); |
| 414 | } while (prefetch_status); | 414 | } while (prefetch_status); |
| 415 | /* disable and stop the PFPW engine */ | 415 | /* disable and stop the PFPW engine */ |
| 416 | gpmc_prefetch_reset(); | 416 | gpmc_prefetch_reset(info->gpmc_cs); |
| 417 | 417 | ||
| 418 | dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); | 418 | dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); |
| 419 | return 0; | 419 | return 0; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2cc81a54cbf3..5db667c0b371 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -2428,7 +2428,7 @@ config UGETH_TX_ON_DEMAND | |||
| 2428 | 2428 | ||
| 2429 | config MV643XX_ETH | 2429 | config MV643XX_ETH |
| 2430 | tristate "Marvell Discovery (643XX) and Orion ethernet support" | 2430 | tristate "Marvell Discovery (643XX) and Orion ethernet support" |
| 2431 | depends on MV64X60 || PPC32 || PLAT_ORION | 2431 | depends on (MV64X60 || PPC32 || PLAT_ORION) && INET |
| 2432 | select INET_LRO | 2432 | select INET_LRO |
| 2433 | select PHYLIB | 2433 | select PHYLIB |
| 2434 | help | 2434 | help |
| @@ -2803,7 +2803,7 @@ config NIU | |||
| 2803 | 2803 | ||
| 2804 | config PASEMI_MAC | 2804 | config PASEMI_MAC |
| 2805 | tristate "PA Semi 1/10Gbit MAC" | 2805 | tristate "PA Semi 1/10Gbit MAC" |
| 2806 | depends on PPC_PASEMI && PCI | 2806 | depends on PPC_PASEMI && PCI && INET |
| 2807 | select PHYLIB | 2807 | select PHYLIB |
| 2808 | select INET_LRO | 2808 | select INET_LRO |
| 2809 | help | 2809 | help |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 1e620e287ae0..efeffdf9e5fa 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
| @@ -2170,8 +2170,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev, | |||
| 2170 | dev->irq = sdev->irq; | 2170 | dev->irq = sdev->irq; |
| 2171 | SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); | 2171 | SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); |
| 2172 | 2172 | ||
| 2173 | netif_carrier_off(dev); | ||
| 2174 | |||
| 2175 | err = ssb_bus_powerup(sdev->bus, 0); | 2173 | err = ssb_bus_powerup(sdev->bus, 0); |
| 2176 | if (err) { | 2174 | if (err) { |
| 2177 | dev_err(sdev->dev, | 2175 | dev_err(sdev->dev, |
| @@ -2213,6 +2211,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev, | |||
| 2213 | goto err_out_powerdown; | 2211 | goto err_out_powerdown; |
| 2214 | } | 2212 | } |
| 2215 | 2213 | ||
| 2214 | netif_carrier_off(dev); | ||
| 2215 | |||
| 2216 | ssb_set_drvdata(sdev, dev); | 2216 | ssb_set_drvdata(sdev, dev); |
| 2217 | 2217 | ||
| 2218 | /* Chip reset provides power to the b44 MAC & PCI cores, which | 2218 | /* Chip reset provides power to the b44 MAC & PCI cores, which |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3b16f62d5606..e953c6ad6e6d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -5164,6 +5164,15 @@ int bond_create(struct net *net, const char *name) | |||
| 5164 | res = dev_alloc_name(bond_dev, "bond%d"); | 5164 | res = dev_alloc_name(bond_dev, "bond%d"); |
| 5165 | if (res < 0) | 5165 | if (res < 0) |
| 5166 | goto out; | 5166 | goto out; |
| 5167 | } else { | ||
| 5168 | /* | ||
| 5169 | * If we're given a name to register | ||
| 5170 | * we need to ensure that its not already | ||
| 5171 | * registered | ||
| 5172 | */ | ||
| 5173 | res = -EEXIST; | ||
| 5174 | if (__dev_get_by_name(net, name) != NULL) | ||
| 5175 | goto out; | ||
| 5167 | } | 5176 | } |
| 5168 | 5177 | ||
| 5169 | res = register_netdevice(bond_dev); | 5178 | res = register_netdevice(bond_dev); |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index a333b42111b8..6372610ed240 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
| @@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev, | |||
| 533 | int length = cqe->num_bytes_transfered - 4; /*remove CRC */ | 533 | int length = cqe->num_bytes_transfered - 4; /*remove CRC */ |
| 534 | 534 | ||
| 535 | skb_put(skb, length); | 535 | skb_put(skb, length); |
| 536 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 537 | skb->protocol = eth_type_trans(skb, dev); | 536 | skb->protocol = eth_type_trans(skb, dev); |
| 537 | |||
| 538 | /* The packet was not an IPV4 packet so a complemented checksum was | ||
| 539 | calculated. The value is found in the Internet Checksum field. */ | ||
| 540 | if (cqe->status & EHEA_CQE_BLIND_CKSUM) { | ||
| 541 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
| 542 | skb->csum = csum_unfold(~cqe->inet_checksum_value); | ||
| 543 | } else | ||
| 544 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 538 | } | 545 | } |
| 539 | 546 | ||
| 540 | static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, | 547 | static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, |
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index f608a6c54af5..38104734a3be 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h | |||
| @@ -150,6 +150,7 @@ struct ehea_rwqe { | |||
| 150 | #define EHEA_CQE_TYPE_RQ 0x60 | 150 | #define EHEA_CQE_TYPE_RQ 0x60 |
| 151 | #define EHEA_CQE_STAT_ERR_MASK 0x700F | 151 | #define EHEA_CQE_STAT_ERR_MASK 0x700F |
| 152 | #define EHEA_CQE_STAT_FAT_ERR_MASK 0xF | 152 | #define EHEA_CQE_STAT_FAT_ERR_MASK 0xF |
| 153 | #define EHEA_CQE_BLIND_CKSUM 0x8000 | ||
| 153 | #define EHEA_CQE_STAT_ERR_TCP 0x4000 | 154 | #define EHEA_CQE_STAT_ERR_TCP 0x4000 |
| 154 | #define EHEA_CQE_STAT_ERR_IP 0x2000 | 155 | #define EHEA_CQE_STAT_ERR_IP 0x2000 |
| 155 | #define EHEA_CQE_STAT_ERR_CRC 0x1000 | 156 | #define EHEA_CQE_STAT_ERR_CRC 0x1000 |
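The ehea change above reports CHECKSUM_COMPLETE for receive completions flagged EHEA_CQE_BLIND_CKSUM: the adapter summed a non-IPv4 frame with the ordinary ones'-complement algorithm, and the driver hands the complemented value back to the stack through csum_unfold(). A small runnable reminder of how an Internet checksum is folded and complemented:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit ones'-complement sum down to 16 bits and complement it. */
    static uint16_t csum_fold(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* Sum the 16-bit words of a toy header. */
            const uint16_t words[] = { 0x4500, 0x0034, 0x1c46, 0x4000 };
            uint32_t sum = 0;

            for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
                    sum += words[i];

            uint16_t checksum = csum_fold(sum);      /* value a sender stores */
            uint16_t reported = (uint16_t)~checksum; /* un-complemented sum, as
                                                        hardware reports it; the
                                                        driver complements it back */

            printf("checksum 0x%04x, reported sum 0x%04x\n", checksum, reported);
            return 0;
    }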
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 768b840aeb6b..cce32d43175f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
| @@ -678,24 +678,37 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
| 678 | { | 678 | { |
| 679 | struct fec_enet_private *fep = netdev_priv(dev); | 679 | struct fec_enet_private *fep = netdev_priv(dev); |
| 680 | struct phy_device *phy_dev = NULL; | 680 | struct phy_device *phy_dev = NULL; |
| 681 | int ret; | 681 | char mdio_bus_id[MII_BUS_ID_SIZE]; |
| 682 | char phy_name[MII_BUS_ID_SIZE + 3]; | ||
| 683 | int phy_id; | ||
| 682 | 684 | ||
| 683 | fep->phy_dev = NULL; | 685 | fep->phy_dev = NULL; |
| 684 | 686 | ||
| 685 | /* find the first phy */ | 687 | /* check for attached phy */ |
| 686 | phy_dev = phy_find_first(fep->mii_bus); | 688 | for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { |
| 687 | if (!phy_dev) { | 689 | if ((fep->mii_bus->phy_mask & (1 << phy_id))) |
| 688 | printk(KERN_ERR "%s: no PHY found\n", dev->name); | 690 | continue; |
| 689 | return -ENODEV; | 691 | if (fep->mii_bus->phy_map[phy_id] == NULL) |
| 692 | continue; | ||
| 693 | if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) | ||
| 694 | continue; | ||
| 695 | strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); | ||
| 696 | break; | ||
| 690 | } | 697 | } |
| 691 | 698 | ||
| 692 | /* attach the mac to the phy */ | 699 | if (phy_id >= PHY_MAX_ADDR) { |
| 693 | ret = phy_connect_direct(dev, phy_dev, | 700 | printk(KERN_INFO "%s: no PHY, assuming direct connection " |
| 694 | &fec_enet_adjust_link, 0, | 701 | "to switch\n", dev->name); |
| 695 | PHY_INTERFACE_MODE_MII); | 702 | strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); |
| 696 | if (ret) { | 703 | phy_id = 0; |
| 697 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 704 | } |
| 698 | return ret; | 705 | |
| 706 | snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); | ||
| 707 | phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, | ||
| 708 | PHY_INTERFACE_MODE_MII); | ||
| 709 | if (IS_ERR(phy_dev)) { | ||
| 710 | printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); | ||
| 711 | return PTR_ERR(phy_dev); | ||
| 699 | } | 712 | } |
| 700 | 713 | ||
| 701 | /* mask with MAC supported features */ | 714 | /* mask with MAC supported features */ |
| @@ -738,7 +751,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
| 738 | fep->mii_bus->read = fec_enet_mdio_read; | 751 | fep->mii_bus->read = fec_enet_mdio_read; |
| 739 | fep->mii_bus->write = fec_enet_mdio_write; | 752 | fep->mii_bus->write = fec_enet_mdio_write; |
| 740 | fep->mii_bus->reset = fec_enet_mdio_reset; | 753 | fep->mii_bus->reset = fec_enet_mdio_reset; |
| 741 | snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); | 754 | snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1); |
| 742 | fep->mii_bus->priv = fep; | 755 | fep->mii_bus->priv = fep; |
| 743 | fep->mii_bus->parent = &pdev->dev; | 756 | fep->mii_bus->parent = &pdev->dev; |
| 744 | 757 | ||
| @@ -1311,6 +1324,9 @@ fec_probe(struct platform_device *pdev) | |||
| 1311 | if (ret) | 1324 | if (ret) |
| 1312 | goto failed_mii_init; | 1325 | goto failed_mii_init; |
| 1313 | 1326 | ||
| 1327 | /* Carrier starts down, phylib will bring it up */ | ||
| 1328 | netif_carrier_off(ndev); | ||
| 1329 | |||
| 1314 | ret = register_netdev(ndev); | 1330 | ret = register_netdev(ndev); |
| 1315 | if (ret) | 1331 | if (ret) |
| 1316 | goto failed_register; | 1332 | goto failed_register; |
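The fec_enet_mii_probe() change above walks the MDIO bus by hand: it skips addresses that are masked off, slots that were never populated, and PHYs that report a zero ID, and if nothing usable is found it assumes a direct MAC-to-switch connection on address 0. A standalone sketch of that scan loop (simplified types, not the phylib structures):

    #include <stdio.h>

    #define PHY_MAX_ADDR 32

    struct phy { unsigned int phy_id; };

    struct mii_bus {
            unsigned int phy_mask;              /* one bit per address to skip */
            struct phy  *phy_map[PHY_MAX_ADDR];
    };

    static int find_phy(const struct mii_bus *bus)
    {
            for (int addr = 0; addr < PHY_MAX_ADDR; addr++) {
                    if (bus->phy_mask & (1u << addr))
                            continue;           /* address masked off */
                    if (!bus->phy_map[addr])
                            continue;           /* nothing probed here */
                    if (bus->phy_map[addr]->phy_id == 0)
                            continue;           /* bogus ID */
                    return addr;
            }
            return -1;                          /* assume a direct switch link */
    }

    int main(void)
    {
            static struct phy lan_phy = { 0x0007c0f1 };
            struct mii_bus bus = { .phy_mask = 0x1 };

            bus.phy_map[1] = &lan_phy;
            printf("PHY found at address %d\n", find_phy(&bus));
            return 0;
    }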
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index a0da4a17b025..992db2fa136e 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
| @@ -1212,7 +1212,8 @@ static void rtl8169_update_counters(struct net_device *dev) | |||
| 1212 | if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) | 1212 | if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) |
| 1213 | return; | 1213 | return; |
| 1214 | 1214 | ||
| 1215 | counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); | 1215 | counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters), |
| 1216 | &paddr, GFP_KERNEL); | ||
| 1216 | if (!counters) | 1217 | if (!counters) |
| 1217 | return; | 1218 | return; |
| 1218 | 1219 | ||
| @@ -1233,7 +1234,8 @@ static void rtl8169_update_counters(struct net_device *dev) | |||
| 1233 | RTL_W32(CounterAddrLow, 0); | 1234 | RTL_W32(CounterAddrLow, 0); |
| 1234 | RTL_W32(CounterAddrHigh, 0); | 1235 | RTL_W32(CounterAddrHigh, 0); |
| 1235 | 1236 | ||
| 1236 | pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); | 1237 | dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters, |
| 1238 | paddr); | ||
| 1237 | } | 1239 | } |
| 1238 | 1240 | ||
| 1239 | static void rtl8169_get_ethtool_stats(struct net_device *dev, | 1241 | static void rtl8169_get_ethtool_stats(struct net_device *dev, |
| @@ -3292,15 +3294,15 @@ static int rtl8169_open(struct net_device *dev) | |||
| 3292 | 3294 | ||
| 3293 | /* | 3295 | /* |
| 3294 | * Rx and Tx descriptors need 256-byte alignment. | 3296 | * Rx and Tx descriptors need 256-byte alignment. |
| 3295 | * pci_alloc_consistent provides more. | 3297 | * dma_alloc_coherent provides more. |
| 3296 | */ | 3298 | */ |
| 3297 | tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, | 3299 | tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, |
| 3298 | &tp->TxPhyAddr); | 3300 | &tp->TxPhyAddr, GFP_KERNEL); |
| 3299 | if (!tp->TxDescArray) | 3301 | if (!tp->TxDescArray) |
| 3300 | goto err_pm_runtime_put; | 3302 | goto err_pm_runtime_put; |
| 3301 | 3303 | ||
| 3302 | tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, | 3304 | tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, |
| 3303 | &tp->RxPhyAddr); | 3305 | &tp->RxPhyAddr, GFP_KERNEL); |
| 3304 | if (!tp->RxDescArray) | 3306 | if (!tp->RxDescArray) |
| 3305 | goto err_free_tx_0; | 3307 | goto err_free_tx_0; |
| 3306 | 3308 | ||
| @@ -3334,12 +3336,12 @@ out: | |||
| 3334 | err_release_ring_2: | 3336 | err_release_ring_2: |
| 3335 | rtl8169_rx_clear(tp); | 3337 | rtl8169_rx_clear(tp); |
| 3336 | err_free_rx_1: | 3338 | err_free_rx_1: |
| 3337 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, | 3339 | dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, |
| 3338 | tp->RxPhyAddr); | 3340 | tp->RxPhyAddr); |
| 3339 | tp->RxDescArray = NULL; | 3341 | tp->RxDescArray = NULL; |
| 3340 | err_free_tx_0: | 3342 | err_free_tx_0: |
| 3341 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, | 3343 | dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, |
| 3342 | tp->TxPhyAddr); | 3344 | tp->TxPhyAddr); |
| 3343 | tp->TxDescArray = NULL; | 3345 | tp->TxDescArray = NULL; |
| 3344 | err_pm_runtime_put: | 3346 | err_pm_runtime_put: |
| 3345 | pm_runtime_put_noidle(&pdev->dev); | 3347 | pm_runtime_put_noidle(&pdev->dev); |
| @@ -3975,7 +3977,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp, | |||
| 3975 | { | 3977 | { |
| 3976 | struct pci_dev *pdev = tp->pci_dev; | 3978 | struct pci_dev *pdev = tp->pci_dev; |
| 3977 | 3979 | ||
| 3978 | pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, | 3980 | dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, |
| 3979 | PCI_DMA_FROMDEVICE); | 3981 | PCI_DMA_FROMDEVICE); |
| 3980 | dev_kfree_skb(*sk_buff); | 3982 | dev_kfree_skb(*sk_buff); |
| 3981 | *sk_buff = NULL; | 3983 | *sk_buff = NULL; |
| @@ -4000,7 +4002,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, | |||
| 4000 | static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, | 4002 | static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, |
| 4001 | struct net_device *dev, | 4003 | struct net_device *dev, |
| 4002 | struct RxDesc *desc, int rx_buf_sz, | 4004 | struct RxDesc *desc, int rx_buf_sz, |
| 4003 | unsigned int align) | 4005 | unsigned int align, gfp_t gfp) |
| 4004 | { | 4006 | { |
| 4005 | struct sk_buff *skb; | 4007 | struct sk_buff *skb; |
| 4006 | dma_addr_t mapping; | 4008 | dma_addr_t mapping; |
| @@ -4008,13 +4010,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, | |||
| 4008 | 4010 | ||
| 4009 | pad = align ? align : NET_IP_ALIGN; | 4011 | pad = align ? align : NET_IP_ALIGN; |
| 4010 | 4012 | ||
| 4011 | skb = netdev_alloc_skb(dev, rx_buf_sz + pad); | 4013 | skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp); |
| 4012 | if (!skb) | 4014 | if (!skb) |
| 4013 | goto err_out; | 4015 | goto err_out; |
| 4014 | 4016 | ||
| 4015 | skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); | 4017 | skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); |
| 4016 | 4018 | ||
| 4017 | mapping = pci_map_single(pdev, skb->data, rx_buf_sz, | 4019 | mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz, |
| 4018 | PCI_DMA_FROMDEVICE); | 4020 | PCI_DMA_FROMDEVICE); |
| 4019 | 4021 | ||
| 4020 | rtl8169_map_to_asic(desc, mapping, rx_buf_sz); | 4022 | rtl8169_map_to_asic(desc, mapping, rx_buf_sz); |
| @@ -4039,7 +4041,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) | |||
| 4039 | } | 4041 | } |
| 4040 | 4042 | ||
| 4041 | static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, | 4043 | static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, |
| 4042 | u32 start, u32 end) | 4044 | u32 start, u32 end, gfp_t gfp) |
| 4043 | { | 4045 | { |
| 4044 | u32 cur; | 4046 | u32 cur; |
| 4045 | 4047 | ||
| @@ -4054,7 +4056,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, | |||
| 4054 | 4056 | ||
| 4055 | skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, | 4057 | skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, |
| 4056 | tp->RxDescArray + i, | 4058 | tp->RxDescArray + i, |
| 4057 | tp->rx_buf_sz, tp->align); | 4059 | tp->rx_buf_sz, tp->align, gfp); |
| 4058 | if (!skb) | 4060 | if (!skb) |
| 4059 | break; | 4061 | break; |
| 4060 | 4062 | ||
| @@ -4082,7 +4084,7 @@ static int rtl8169_init_ring(struct net_device *dev) | |||
| 4082 | memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); | 4084 | memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); |
| 4083 | memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); | 4085 | memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); |
| 4084 | 4086 | ||
| 4085 | if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) | 4087 | if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) |
| 4086 | goto err_out; | 4088 | goto err_out; |
| 4087 | 4089 | ||
| 4088 | rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); | 4090 | rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); |
| @@ -4099,7 +4101,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb, | |||
| 4099 | { | 4101 | { |
| 4100 | unsigned int len = tx_skb->len; | 4102 | unsigned int len = tx_skb->len; |
| 4101 | 4103 | ||
| 4102 | pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); | 4104 | dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len, |
| 4105 | PCI_DMA_TODEVICE); | ||
| 4103 | desc->opts1 = 0x00; | 4106 | desc->opts1 = 0x00; |
| 4104 | desc->opts2 = 0x00; | 4107 | desc->opts2 = 0x00; |
| 4105 | desc->addr = 0x00; | 4108 | desc->addr = 0x00; |
| @@ -4243,7 +4246,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, | |||
| 4243 | txd = tp->TxDescArray + entry; | 4246 | txd = tp->TxDescArray + entry; |
| 4244 | len = frag->size; | 4247 | len = frag->size; |
| 4245 | addr = ((void *) page_address(frag->page)) + frag->page_offset; | 4248 | addr = ((void *) page_address(frag->page)) + frag->page_offset; |
| 4246 | mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); | 4249 | mapping = dma_map_single(&tp->pci_dev->dev, addr, len, |
| 4250 | PCI_DMA_TODEVICE); | ||
| 4247 | 4251 | ||
| 4248 | /* anti gcc 2.95.3 bugware (sic) */ | 4252 | /* anti gcc 2.95.3 bugware (sic) */ |
| 4249 | status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); | 4253 | status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); |
| @@ -4313,7 +4317,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
| 4313 | tp->tx_skb[entry].skb = skb; | 4317 | tp->tx_skb[entry].skb = skb; |
| 4314 | } | 4318 | } |
| 4315 | 4319 | ||
| 4316 | mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); | 4320 | mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len, |
| 4321 | PCI_DMA_TODEVICE); | ||
| 4317 | 4322 | ||
| 4318 | tp->tx_skb[entry].len = len; | 4323 | tp->tx_skb[entry].len = len; |
| 4319 | txd->addr = cpu_to_le64(mapping); | 4324 | txd->addr = cpu_to_le64(mapping); |
| @@ -4477,8 +4482,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, | |||
| 4477 | if (!skb) | 4482 | if (!skb) |
| 4478 | goto out; | 4483 | goto out; |
| 4479 | 4484 | ||
| 4480 | pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, | 4485 | dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size, |
| 4481 | PCI_DMA_FROMDEVICE); | 4486 | PCI_DMA_FROMDEVICE); |
| 4482 | skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); | 4487 | skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); |
| 4483 | *sk_buff = skb; | 4488 | *sk_buff = skb; |
| 4484 | done = true; | 4489 | done = true; |
| @@ -4549,11 +4554,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
| 4549 | rtl8169_rx_csum(skb, desc); | 4554 | rtl8169_rx_csum(skb, desc); |
| 4550 | 4555 | ||
| 4551 | if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { | 4556 | if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { |
| 4552 | pci_dma_sync_single_for_device(pdev, addr, | 4557 | dma_sync_single_for_device(&pdev->dev, addr, |
| 4553 | pkt_size, PCI_DMA_FROMDEVICE); | 4558 | pkt_size, PCI_DMA_FROMDEVICE); |
| 4554 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 4559 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); |
| 4555 | } else { | 4560 | } else { |
| 4556 | pci_unmap_single(pdev, addr, tp->rx_buf_sz, | 4561 | dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz, |
| 4557 | PCI_DMA_FROMDEVICE); | 4562 | PCI_DMA_FROMDEVICE); |
| 4558 | tp->Rx_skbuff[entry] = NULL; | 4563 | tp->Rx_skbuff[entry] = NULL; |
| 4559 | } | 4564 | } |
| @@ -4583,7 +4588,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
| 4583 | count = cur_rx - tp->cur_rx; | 4588 | count = cur_rx - tp->cur_rx; |
| 4584 | tp->cur_rx = cur_rx; | 4589 | tp->cur_rx = cur_rx; |
| 4585 | 4590 | ||
| 4586 | delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); | 4591 | delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC); |
| 4587 | if (!delta && count) | 4592 | if (!delta && count) |
| 4588 | netif_info(tp, intr, dev, "no Rx buffer allocated\n"); | 4593 | netif_info(tp, intr, dev, "no Rx buffer allocated\n"); |
| 4589 | tp->dirty_rx += delta; | 4594 | tp->dirty_rx += delta; |
| @@ -4769,10 +4774,10 @@ static int rtl8169_close(struct net_device *dev) | |||
| 4769 | 4774 | ||
| 4770 | free_irq(dev->irq, dev); | 4775 | free_irq(dev->irq, dev); |
| 4771 | 4776 | ||
| 4772 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, | 4777 | dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, |
| 4773 | tp->RxPhyAddr); | 4778 | tp->RxPhyAddr); |
| 4774 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, | 4779 | dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, |
| 4775 | tp->TxPhyAddr); | 4780 | tp->TxPhyAddr); |
| 4776 | tp->TxDescArray = NULL; | 4781 | tp->TxDescArray = NULL; |
| 4777 | tp->RxDescArray = NULL; | 4782 | tp->RxDescArray = NULL; |
| 4778 | 4783 | ||
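The r8169 hunks above convert the legacy pci_* DMA wrappers to the generic DMA API, passing &pdev->dev explicitly and, for allocations, a GFP flag that matches the calling context (GFP_KERNEL in the open() path, GFP_ATOMIC in the interrupt-driven rx refill). Roughly, the correspondence used here is:

    pci_alloc_consistent(pdev, size, &handle)      ->  dma_alloc_coherent(&pdev->dev, size, &handle, gfp)
    pci_free_consistent(pdev, size, vaddr, handle) ->  dma_free_coherent(&pdev->dev, size, vaddr, handle)
    pci_map_single(pdev, ptr, size, dir)           ->  dma_map_single(&pdev->dev, ptr, size, dir)
    pci_unmap_single(pdev, addr, size, dir)        ->  dma_unmap_single(&pdev->dev, addr, size, dir)
    pci_dma_sync_single_for_cpu(pdev, ...)         ->  dma_sync_single_for_cpu(&pdev->dev, ...)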
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 40e5c46e7571..465ae7e84507 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/seq_file.h> | 43 | #include <linux/seq_file.h> |
| 44 | #include <linux/mii.h> | 44 | #include <linux/mii.h> |
| 45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
| 46 | #include <linux/dmi.h> | ||
| 46 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
| 47 | 48 | ||
| 48 | #include "skge.h" | 49 | #include "skge.h" |
| @@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev) | |||
| 3868 | netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); | 3869 | netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); |
| 3869 | } | 3870 | } |
| 3870 | 3871 | ||
| 3872 | static int only_32bit_dma; | ||
| 3873 | |||
| 3871 | static int __devinit skge_probe(struct pci_dev *pdev, | 3874 | static int __devinit skge_probe(struct pci_dev *pdev, |
| 3872 | const struct pci_device_id *ent) | 3875 | const struct pci_device_id *ent) |
| 3873 | { | 3876 | { |
| @@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
| 3889 | 3892 | ||
| 3890 | pci_set_master(pdev); | 3893 | pci_set_master(pdev); |
| 3891 | 3894 | ||
| 3892 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 3895 | if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
| 3893 | using_dac = 1; | 3896 | using_dac = 1; |
| 3894 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 3897 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 3895 | } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | 3898 | } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { |
| @@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = { | |||
| 4147 | .shutdown = skge_shutdown, | 4150 | .shutdown = skge_shutdown, |
| 4148 | }; | 4151 | }; |
| 4149 | 4152 | ||
| 4153 | static struct dmi_system_id skge_32bit_dma_boards[] = { | ||
| 4154 | { | ||
| 4155 | .ident = "Gigabyte nForce boards", | ||
| 4156 | .matches = { | ||
| 4157 | DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), | ||
| 4158 | DMI_MATCH(DMI_BOARD_NAME, "nForce"), | ||
| 4159 | }, | ||
| 4160 | }, | ||
| 4161 | {} | ||
| 4162 | }; | ||
| 4163 | |||
| 4150 | static int __init skge_init_module(void) | 4164 | static int __init skge_init_module(void) |
| 4151 | { | 4165 | { |
| 4166 | if (dmi_check_system(skge_32bit_dma_boards)) | ||
| 4167 | only_32bit_dma = 1; | ||
| 4152 | skge_debug_init(); | 4168 | skge_debug_init(); |
| 4153 | return pci_register_driver(&skge_driver); | 4169 | return pci_register_driver(&skge_driver); |
| 4154 | } | 4170 | } |
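The skge change above consults a DMI board table at module init and forces a 32-bit DMA mask on Gigabyte nForce boards before the PCI driver registers. A runnable sketch of the substring match behind such a table (a stand-in for dmi_check_system(), not its real implementation):

    #include <stdio.h>
    #include <string.h>

    struct dmi_match { const char *vendor_substr, *board_substr; };

    static const struct dmi_match broken_boards[] = {
            { "Gigabyte Technology Co", "nForce" },
    };

    /* Return 1 when the running board matches any blacklist entry. */
    static int board_needs_32bit_dma(const char *vendor, const char *board)
    {
            for (size_t i = 0;
                 i < sizeof(broken_boards) / sizeof(broken_boards[0]); i++)
                    if (strstr(vendor, broken_boards[i].vendor_substr) &&
                        strstr(board, broken_boards[i].board_substr))
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("only_32bit_dma = %d\n",
                   board_needs_32bit_dma("Gigabyte Technology Co., Ltd.",
                                         "nForce4-DualCore"));
            return 0;
    }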
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index bc3af78a869f..1ec4b9e0239a 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
| @@ -4666,7 +4666,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
| 4666 | desc_idx, *post_ptr); | 4666 | desc_idx, *post_ptr); |
| 4667 | drop_it_no_recycle: | 4667 | drop_it_no_recycle: |
| 4668 | /* Other statistics kept track of by card. */ | 4668 | /* Other statistics kept track of by card. */ |
| 4669 | tp->net_stats.rx_dropped++; | 4669 | tp->rx_dropped++; |
| 4670 | goto next_pkt; | 4670 | goto next_pkt; |
| 4671 | } | 4671 | } |
| 4672 | 4672 | ||
| @@ -4726,7 +4726,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
| 4726 | if (len > (tp->dev->mtu + ETH_HLEN) && | 4726 | if (len > (tp->dev->mtu + ETH_HLEN) && |
| 4727 | skb->protocol != htons(ETH_P_8021Q)) { | 4727 | skb->protocol != htons(ETH_P_8021Q)) { |
| 4728 | dev_kfree_skb(skb); | 4728 | dev_kfree_skb(skb); |
| 4729 | goto next_pkt; | 4729 | goto drop_it_no_recycle; |
| 4730 | } | 4730 | } |
| 4731 | 4731 | ||
| 4732 | if (desc->type_flags & RXD_FLAG_VLAN && | 4732 | if (desc->type_flags & RXD_FLAG_VLAN && |
| @@ -9240,6 +9240,8 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, | |||
| 9240 | stats->rx_missed_errors = old_stats->rx_missed_errors + | 9240 | stats->rx_missed_errors = old_stats->rx_missed_errors + |
| 9241 | get_stat64(&hw_stats->rx_discards); | 9241 | get_stat64(&hw_stats->rx_discards); |
| 9242 | 9242 | ||
| 9243 | stats->rx_dropped = tp->rx_dropped; | ||
| 9244 | |||
| 9243 | return stats; | 9245 | return stats; |
| 9244 | } | 9246 | } |
| 9245 | 9247 | ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 4937bd190964..be7ff138a7f9 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
| @@ -2759,7 +2759,7 @@ struct tg3 { | |||
| 2759 | 2759 | ||
| 2760 | 2760 | ||
| 2761 | /* begin "everything else" cacheline(s) section */ | 2761 | /* begin "everything else" cacheline(s) section */ |
| 2762 | struct rtnl_link_stats64 net_stats; | 2762 | unsigned long rx_dropped; |
| 2763 | struct rtnl_link_stats64 net_stats_prev; | 2763 | struct rtnl_link_stats64 net_stats_prev; |
| 2764 | struct tg3_ethtool_stats estats; | 2764 | struct tg3_ethtool_stats estats; |
| 2765 | struct tg3_ethtool_stats estats_prev; | 2765 | struct tg3_ethtool_stats estats_prev; |
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 8cc9e319f435..1737d1488b35 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c | |||
| @@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) | |||
| 1244 | int i, result; | 1244 | int i, result; |
| 1245 | struct device *dev = i2400m_dev(i2400m); | 1245 | struct device *dev = i2400m_dev(i2400m); |
| 1246 | const struct i2400m_msg_hdr *msg_hdr; | 1246 | const struct i2400m_msg_hdr *msg_hdr; |
| 1247 | size_t pl_itr, pl_size, skb_len; | 1247 | size_t pl_itr, pl_size; |
| 1248 | unsigned long flags; | 1248 | unsigned long flags; |
| 1249 | unsigned num_pls, single_last; | 1249 | unsigned num_pls, single_last, skb_len; |
| 1250 | 1250 | ||
| 1251 | skb_len = skb->len; | 1251 | skb_len = skb->len; |
| 1252 | d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n", | 1252 | d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n", |
| 1253 | i2400m, skb, skb_len); | 1253 | i2400m, skb, skb_len); |
| 1254 | result = -EIO; | 1254 | result = -EIO; |
| 1255 | msg_hdr = (void *) skb->data; | 1255 | msg_hdr = (void *) skb->data; |
| 1256 | result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len); | 1256 | result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len); |
| 1257 | if (result < 0) | 1257 | if (result < 0) |
| 1258 | goto error_msg_hdr_check; | 1258 | goto error_msg_hdr_check; |
| 1259 | result = -EIO; | 1259 | result = -EIO; |
| @@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) | |||
| 1261 | pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ | 1261 | pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ |
| 1262 | num_pls * sizeof(msg_hdr->pld[0]); | 1262 | num_pls * sizeof(msg_hdr->pld[0]); |
| 1263 | pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); | 1263 | pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); |
| 1264 | if (pl_itr > skb->len) { /* got all the payload descriptors? */ | 1264 | if (pl_itr > skb_len) { /* got all the payload descriptors? */ |
| 1265 | dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " | 1265 | dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " |
| 1266 | "%u payload descriptors (%zu each, total %zu)\n", | 1266 | "%u payload descriptors (%zu each, total %zu)\n", |
| 1267 | skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); | 1267 | skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); |
| 1268 | goto error_pl_descr_short; | 1268 | goto error_pl_descr_short; |
| 1269 | } | 1269 | } |
| 1270 | /* Walk each payload--check we really got it */ | 1270 | /* Walk each payload--check we really got it */ |
| @@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) | |||
| 1272 | /* work around old gcc warnings */ | 1272 | /* work around old gcc warnings */ |
| 1273 | pl_size = i2400m_pld_size(&msg_hdr->pld[i]); | 1273 | pl_size = i2400m_pld_size(&msg_hdr->pld[i]); |
| 1274 | result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], | 1274 | result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], |
| 1275 | pl_itr, skb->len); | 1275 | pl_itr, skb_len); |
| 1276 | if (result < 0) | 1276 | if (result < 0) |
| 1277 | goto error_pl_descr_check; | 1277 | goto error_pl_descr_check; |
| 1278 | single_last = num_pls == 1 || i == num_pls - 1; | 1278 | single_last = num_pls == 1 || i == num_pls - 1; |
| @@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) | |||
| 1290 | if (i < i2400m->rx_pl_min) | 1290 | if (i < i2400m->rx_pl_min) |
| 1291 | i2400m->rx_pl_min = i; | 1291 | i2400m->rx_pl_min = i; |
| 1292 | i2400m->rx_num++; | 1292 | i2400m->rx_num++; |
| 1293 | i2400m->rx_size_acc += skb->len; | 1293 | i2400m->rx_size_acc += skb_len; |
| 1294 | if (skb->len < i2400m->rx_size_min) | 1294 | if (skb_len < i2400m->rx_size_min) |
| 1295 | i2400m->rx_size_min = skb->len; | 1295 | i2400m->rx_size_min = skb_len; |
| 1296 | if (skb->len > i2400m->rx_size_max) | 1296 | if (skb_len > i2400m->rx_size_max) |
| 1297 | i2400m->rx_size_max = skb->len; | 1297 | i2400m->rx_size_max = skb_len; |
| 1298 | spin_unlock_irqrestore(&i2400m->rx_lock, flags); | 1298 | spin_unlock_irqrestore(&i2400m->rx_lock, flags); |
| 1299 | error_pl_descr_check: | 1299 | error_pl_descr_check: |
| 1300 | error_pl_descr_short: | 1300 | error_pl_descr_short: |
| 1301 | error_msg_hdr_check: | 1301 | error_msg_hdr_check: |
| 1302 | d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n", | 1302 | d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n", |
| 1303 | i2400m, skb, skb_len, result); | 1303 | i2400m, skb, skb_len, result); |
| 1304 | return result; | 1304 | return result; |
| 1305 | } | 1305 | } |
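
A note on the i2400m hunk above: the message length is read from skb->len once, up front, and every later use (the header check, the payload-descriptor walk, the RX statistics and the final debug print) works on that cached copy, so nothing depends on skb->len after the payload handling has had its way with the skb; the local also becomes unsigned so it matches the %u debug format. A minimal userspace sketch of the same "cache the length before handing the buffer off" pattern, with hypothetical buffer/consume_buffer stand-ins for the skb and the payload processing:

  #include <stdio.h>
  #include <stdlib.h>

  struct buffer {
      unsigned len;
      unsigned char *data;
  };

  /* Stand-in for the payload walk: it may consume/free the buffer. */
  static void consume_buffer(struct buffer *b)
  {
      free(b->data);
      free(b);
  }

  static void rx(struct buffer *b)
  {
      unsigned len = b->len;      /* cache before the buffer is consumed */

      consume_buffer(b);          /* b must not be touched after this */

      /* safe: uses the cached copy, not b->len */
      printf("received %u bytes\n", len);
  }

  int main(void)
  {
      struct buffer *b = malloc(sizeof(*b));

      if (!b)
          return 1;
      b->len = 1500;
      b->data = malloc(b->len);
      if (!b->data) {
          free(b);
          return 1;
      }
      rx(b);
      return 0;
  }
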
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index cc648b6ae31c..a3d95cca8f0c 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c | |||
| @@ -543,7 +543,7 @@ static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah) | |||
| 543 | if (conf_is_ht40(conf)) | 543 | if (conf_is_ht40(conf)) |
| 544 | return clockrate * 2; | 544 | return clockrate * 2; |
| 545 | 545 | ||
| 546 | return clockrate * 2; | 546 | return clockrate; |
| 547 | } | 547 | } |
| 548 | 548 | ||
| 549 | static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) | 549 | static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 9dd9e64c2b0b..8fd00a6e5120 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
| @@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | |||
| 1411 | clear_bit(STATUS_SCAN_HW, &priv->status); | 1411 | clear_bit(STATUS_SCAN_HW, &priv->status); |
| 1412 | clear_bit(STATUS_SCANNING, &priv->status); | 1412 | clear_bit(STATUS_SCANNING, &priv->status); |
| 1413 | /* inform mac80211 scan aborted */ | 1413 | /* inform mac80211 scan aborted */ |
| 1414 | queue_work(priv->workqueue, &priv->scan_completed); | 1414 | queue_work(priv->workqueue, &priv->abort_scan); |
| 1415 | } | 1415 | } |
| 1416 | 1416 | ||
| 1417 | int iwlagn_manage_ibss_station(struct iwl_priv *priv, | 1417 | int iwlagn_manage_ibss_station(struct iwl_priv *priv, |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 59a308b02f95..d31661c1ce77 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
| @@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | |||
| 3018 | clear_bit(STATUS_SCANNING, &priv->status); | 3018 | clear_bit(STATUS_SCANNING, &priv->status); |
| 3019 | 3019 | ||
| 3020 | /* inform mac80211 scan aborted */ | 3020 | /* inform mac80211 scan aborted */ |
| 3021 | queue_work(priv->workqueue, &priv->scan_completed); | 3021 | queue_work(priv->workqueue, &priv->abort_scan); |
| 3022 | } | 3022 | } |
| 3023 | 3023 | ||
| 3024 | static void iwl3945_bg_restart(struct work_struct *data) | 3024 | static void iwl3945_bg_restart(struct work_struct *data) |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 89ed181cd90c..857ae01734a6 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d | |||
| 163 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); | 163 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); |
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear | ||
| 167 | * for some HT machines to use C4 w/o hanging. | ||
| 168 | */ | ||
| 169 | static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) | ||
| 170 | { | ||
| 171 | u32 pmbase; | ||
| 172 | u16 pm1a; | ||
| 173 | |||
| 174 | pci_read_config_dword(dev, 0x40, &pmbase); | ||
| 175 | pmbase = pmbase & 0xff80; | ||
| 176 | pm1a = inw(pmbase); | ||
| 177 | |||
| 178 | if (pm1a & 0x10) { | ||
| 179 | dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); | ||
| 180 | outw(0x10, pmbase); | ||
| 181 | } | ||
| 182 | } | ||
| 183 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); | ||
| 184 | |||
| 185 | /* | ||
| 166 | * Chipsets where PCI->PCI transfers vanish or hang | 186 | * Chipsets where PCI->PCI transfers vanish or hang |
| 167 | */ | 187 | */ |
| 168 | static void __devinit quirk_nopcipci(struct pci_dev *dev) | 188 | static void __devinit quirk_nopcipci(struct pci_dev *dev) |
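
The TigerPoint quirk added above reads the ACPI PM I/O base out of the LPC bridge's config space (offset 0x40), looks at the BM_STS bit in PM1a_STS, and clears it by writing the same bit back: ACPI status bits are write-one-to-clear, and a stuck BM_STS keeps the affected machines out of C4. A small userspace sketch of the write-one-to-clear handshake, with a fake register variable standing in for the inw()/outw() accesses:

  #include <stdint.h>
  #include <stdio.h>

  #define PM1A_STS_BM_STS 0x10    /* bus-master status, write-1-to-clear */

  /* Fake PM1a_STS register; the driver uses inw()/outw() on pmbase. */
  static uint16_t fake_pm1a_sts = PM1A_STS_BM_STS;

  static uint16_t read_pm1a_sts(void)
  {
      return fake_pm1a_sts;
  }

  static void write_pm1a_sts(uint16_t val)
  {
      fake_pm1a_sts &= (uint16_t)~val;    /* writing 1 clears the bit */
  }

  int main(void)
  {
      uint16_t sts = read_pm1a_sts();

      if (sts & PM1A_STS_BM_STS) {
          /* acknowledge/clear by writing the same bit back */
          write_pm1a_sts(PM1A_STS_BM_STS);
          printf("BM_STS was set, now 0x%04x\n", read_pm1a_sts());
      } else {
          printf("BM_STS already clear\n");
      }
      return 0;
  }
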
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 9024480a8228..c44a5e8b8b82 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
| @@ -51,7 +51,6 @@ | |||
| 51 | * TODO: | 51 | * TODO: |
| 52 | * - handle CPU hotplug | 52 | * - handle CPU hotplug |
| 53 | * - provide turbo enable/disable api | 53 | * - provide turbo enable/disable api |
| 54 | * - make sure we can write turbo enable/disable reg based on MISC_EN | ||
| 55 | * | 54 | * |
| 56 | * Related documents: | 55 | * Related documents: |
| 57 | * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 | 56 | * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 |
| @@ -230,7 +229,7 @@ | |||
| 230 | #define THM_TC2 0xac | 229 | #define THM_TC2 0xac |
| 231 | #define THM_DTV 0xb0 | 230 | #define THM_DTV 0xb0 |
| 232 | #define THM_ITV 0xd8 | 231 | #define THM_ITV 0xd8 |
| 233 | #define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */ | 232 | #define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */ |
| 234 | #define ITV_ME_SEQNO_SHIFT (16) | 233 | #define ITV_ME_SEQNO_SHIFT (16) |
| 235 | #define ITV_MCH_TEMP_MASK 0x0000ff00 | 234 | #define ITV_MCH_TEMP_MASK 0x0000ff00 |
| 236 | #define ITV_MCH_TEMP_SHIFT (8) | 235 | #define ITV_MCH_TEMP_SHIFT (8) |
| @@ -325,6 +324,7 @@ struct ips_driver { | |||
| 325 | bool gpu_preferred; | 324 | bool gpu_preferred; |
| 326 | bool poll_turbo_status; | 325 | bool poll_turbo_status; |
| 327 | bool second_cpu; | 326 | bool second_cpu; |
| 327 | bool turbo_toggle_allowed; | ||
| 328 | struct ips_mcp_limits *limits; | 328 | struct ips_mcp_limits *limits; |
| 329 | 329 | ||
| 330 | /* Optional MCH interfaces for if i915 is in use */ | 330 | /* Optional MCH interfaces for if i915 is in use */ |
| @@ -415,7 +415,7 @@ static void ips_cpu_lower(struct ips_driver *ips) | |||
| 415 | new_limit = cur_limit - 8; /* 1W decrease */ | 415 | new_limit = cur_limit - 8; /* 1W decrease */ |
| 416 | 416 | ||
| 417 | /* Clamp to SKU TDP limit */ | 417 | /* Clamp to SKU TDP limit */ |
| 418 | if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK)) | 418 | if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK)) |
| 419 | new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; | 419 | new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; |
| 420 | 420 | ||
| 421 | thm_writew(THM_MPCPC, (new_limit * 10) / 8); | 421 | thm_writew(THM_MPCPC, (new_limit * 10) / 8); |
| @@ -461,7 +461,8 @@ static void ips_enable_cpu_turbo(struct ips_driver *ips) | |||
| 461 | if (ips->__cpu_turbo_on) | 461 | if (ips->__cpu_turbo_on) |
| 462 | return; | 462 | return; |
| 463 | 463 | ||
| 464 | on_each_cpu(do_enable_cpu_turbo, ips, 1); | 464 | if (ips->turbo_toggle_allowed) |
| 465 | on_each_cpu(do_enable_cpu_turbo, ips, 1); | ||
| 465 | 466 | ||
| 466 | ips->__cpu_turbo_on = true; | 467 | ips->__cpu_turbo_on = true; |
| 467 | } | 468 | } |
| @@ -498,7 +499,8 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips) | |||
| 498 | if (!ips->__cpu_turbo_on) | 499 | if (!ips->__cpu_turbo_on) |
| 499 | return; | 500 | return; |
| 500 | 501 | ||
| 501 | on_each_cpu(do_disable_cpu_turbo, ips, 1); | 502 | if (ips->turbo_toggle_allowed) |
| 503 | on_each_cpu(do_disable_cpu_turbo, ips, 1); | ||
| 502 | 504 | ||
| 503 | ips->__cpu_turbo_on = false; | 505 | ips->__cpu_turbo_on = false; |
| 504 | } | 506 | } |
| @@ -598,17 +600,29 @@ static bool mcp_exceeded(struct ips_driver *ips) | |||
| 598 | { | 600 | { |
| 599 | unsigned long flags; | 601 | unsigned long flags; |
| 600 | bool ret = false; | 602 | bool ret = false; |
| 603 | u32 temp_limit; | ||
| 604 | u32 avg_power; | ||
| 605 | const char *msg = "MCP limit exceeded: "; | ||
| 601 | 606 | ||
| 602 | spin_lock_irqsave(&ips->turbo_status_lock, flags); | 607 | spin_lock_irqsave(&ips->turbo_status_lock, flags); |
| 603 | if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100)) | 608 | |
| 604 | ret = true; | 609 | temp_limit = ips->mcp_temp_limit * 100; |
| 605 | if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit) | 610 | if (ips->mcp_avg_temp > temp_limit) { |
| 611 | dev_info(&ips->dev->dev, | ||
| 612 | "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp, | ||
| 613 | temp_limit); | ||
| 606 | ret = true; | 614 | ret = true; |
| 607 | spin_unlock_irqrestore(&ips->turbo_status_lock, flags); | 615 | } |
| 608 | 616 | ||
| 609 | if (ret) | 617 | avg_power = ips->cpu_avg_power + ips->mch_avg_power; |
| 618 | if (avg_power > ips->mcp_power_limit) { | ||
| 610 | dev_info(&ips->dev->dev, | 619 | dev_info(&ips->dev->dev, |
| 611 | "MCP power or thermal limit exceeded\n"); | 620 | "%sAvg power %u, limit %u\n", msg, avg_power, |
| 621 | ips->mcp_power_limit); | ||
| 622 | ret = true; | ||
| 623 | } | ||
| 624 | |||
| 625 | spin_unlock_irqrestore(&ips->turbo_status_lock, flags); | ||
| 612 | 626 | ||
| 613 | return ret; | 627 | return ret; |
| 614 | } | 628 | } |
| @@ -663,6 +677,27 @@ static bool mch_exceeded(struct ips_driver *ips) | |||
| 663 | } | 677 | } |
| 664 | 678 | ||
| 665 | /** | 679 | /** |
| 680 | * verify_limits - verify BIOS provided limits | ||
| 681 | * @ips: IPS structure | ||
| 682 | * | ||
| 683 | * BIOS can optionally provide non-default limits for power and temp. Check | ||
| 684 | * them here and use the defaults if the BIOS values are not provided or | ||
| 685 | * are otherwise unusable. | ||
| 686 | */ | ||
| 687 | static void verify_limits(struct ips_driver *ips) | ||
| 688 | { | ||
| 689 | if (ips->mcp_power_limit < ips->limits->mcp_power_limit || | ||
| 690 | ips->mcp_power_limit > 35000) | ||
| 691 | ips->mcp_power_limit = ips->limits->mcp_power_limit; | ||
| 692 | |||
| 693 | if (ips->mcp_temp_limit < ips->limits->core_temp_limit || | ||
| 694 | ips->mcp_temp_limit < ips->limits->mch_temp_limit || | ||
| 695 | ips->mcp_temp_limit > 150) | ||
| 696 | ips->mcp_temp_limit = min(ips->limits->core_temp_limit, | ||
| 697 | ips->limits->mch_temp_limit); | ||
| 698 | } | ||
| 699 | |||
| 700 | /** | ||
| 666 | * update_turbo_limits - get various limits & settings from regs | 701 | * update_turbo_limits - get various limits & settings from regs |
| 667 | * @ips: IPS driver struct | 702 | * @ips: IPS driver struct |
| 668 | * | 703 | * |
| @@ -680,12 +715,21 @@ static void update_turbo_limits(struct ips_driver *ips) | |||
| 680 | u32 hts = thm_readl(THM_HTS); | 715 | u32 hts = thm_readl(THM_HTS); |
| 681 | 716 | ||
| 682 | ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); | 717 | ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); |
| 683 | ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); | 718 | /* |
| 719 | * Disable turbo for now, until we can figure out why the power figures | ||
| 720 | * are wrong | ||
| 721 | */ | ||
| 722 | ips->cpu_turbo_enabled = false; | ||
| 723 | |||
| 724 | if (ips->gpu_busy) | ||
| 725 | ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); | ||
| 726 | |||
| 684 | ips->core_power_limit = thm_readw(THM_MPCPC); | 727 | ips->core_power_limit = thm_readw(THM_MPCPC); |
| 685 | ips->mch_power_limit = thm_readw(THM_MMGPC); | 728 | ips->mch_power_limit = thm_readw(THM_MMGPC); |
| 686 | ips->mcp_temp_limit = thm_readw(THM_PTL); | 729 | ips->mcp_temp_limit = thm_readw(THM_PTL); |
| 687 | ips->mcp_power_limit = thm_readw(THM_MPPC); | 730 | ips->mcp_power_limit = thm_readw(THM_MPPC); |
| 688 | 731 | ||
| 732 | verify_limits(ips); | ||
| 689 | /* Ignore BIOS CPU vs GPU pref */ | 733 | /* Ignore BIOS CPU vs GPU pref */ |
| 690 | } | 734 | } |
| 691 | 735 | ||
| @@ -858,7 +902,7 @@ static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period) | |||
| 858 | ret = (ret * 1000) / 65535; | 902 | ret = (ret * 1000) / 65535; |
| 859 | *last = val; | 903 | *last = val; |
| 860 | 904 | ||
| 861 | return ret; | 905 | return 0; |
| 862 | } | 906 | } |
| 863 | 907 | ||
| 864 | static const u16 temp_decay_factor = 2; | 908 | static const u16 temp_decay_factor = 2; |
| @@ -940,7 +984,6 @@ static int ips_monitor(void *data) | |||
| 940 | kfree(mch_samples); | 984 | kfree(mch_samples); |
| 941 | kfree(cpu_samples); | 985 | kfree(cpu_samples); |
| 942 | kfree(mchp_samples); | 986 | kfree(mchp_samples); |
| 943 | kthread_stop(ips->adjust); | ||
| 944 | return -ENOMEM; | 987 | return -ENOMEM; |
| 945 | } | 988 | } |
| 946 | 989 | ||
| @@ -948,7 +991,7 @@ static int ips_monitor(void *data) | |||
| 948 | ITV_ME_SEQNO_SHIFT; | 991 | ITV_ME_SEQNO_SHIFT; |
| 949 | seqno_timestamp = get_jiffies_64(); | 992 | seqno_timestamp = get_jiffies_64(); |
| 950 | 993 | ||
| 951 | old_cpu_power = thm_readl(THM_CEC) / 65535; | 994 | old_cpu_power = thm_readl(THM_CEC); |
| 952 | schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); | 995 | schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); |
| 953 | 996 | ||
| 954 | /* Collect an initial average */ | 997 | /* Collect an initial average */ |
| @@ -1150,11 +1193,18 @@ static irqreturn_t ips_irq_handler(int irq, void *arg) | |||
| 1150 | STS_GPL_SHIFT; | 1193 | STS_GPL_SHIFT; |
| 1151 | /* ignore EC CPU vs GPU pref */ | 1194 | /* ignore EC CPU vs GPU pref */ |
| 1152 | ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); | 1195 | ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); |
| 1153 | ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); | 1196 | /* |
| 1197 | * Disable turbo for now, until we can figure | ||
| 1198 | * out why the power figures are wrong | ||
| 1199 | */ | ||
| 1200 | ips->cpu_turbo_enabled = false; | ||
| 1201 | if (ips->gpu_busy) | ||
| 1202 | ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); | ||
| 1154 | ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> | 1203 | ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> |
| 1155 | STS_PTL_SHIFT; | 1204 | STS_PTL_SHIFT; |
| 1156 | ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> | 1205 | ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> |
| 1157 | STS_PPL_SHIFT; | 1206 | STS_PPL_SHIFT; |
| 1207 | verify_limits(ips); | ||
| 1158 | spin_unlock(&ips->turbo_status_lock); | 1208 | spin_unlock(&ips->turbo_status_lock); |
| 1159 | 1209 | ||
| 1160 | thm_writeb(THM_SEC, SEC_ACK); | 1210 | thm_writeb(THM_SEC, SEC_ACK); |
| @@ -1333,8 +1383,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) | |||
| 1333 | * turbo manually or we'll get an illegal MSR access, even though | 1383 | * turbo manually or we'll get an illegal MSR access, even though |
| 1334 | * turbo will still be available. | 1384 | * turbo will still be available. |
| 1335 | */ | 1385 | */ |
| 1336 | if (!(misc_en & IA32_MISC_TURBO_EN)) | 1386 | if (misc_en & IA32_MISC_TURBO_EN) |
| 1337 | ; /* add turbo MSR write allowed flag if necessary */ | 1387 | ips->turbo_toggle_allowed = true; |
| 1388 | else | ||
| 1389 | ips->turbo_toggle_allowed = false; | ||
| 1338 | 1390 | ||
| 1339 | if (strstr(boot_cpu_data.x86_model_id, "CPU M")) | 1391 | if (strstr(boot_cpu_data.x86_model_id, "CPU M")) |
| 1340 | limits = &ips_sv_limits; | 1392 | limits = &ips_sv_limits; |
| @@ -1351,9 +1403,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) | |||
| 1351 | tdp = turbo_power & TURBO_TDP_MASK; | 1403 | tdp = turbo_power & TURBO_TDP_MASK; |
| 1352 | 1404 | ||
| 1353 | /* Sanity check TDP against CPU */ | 1405 | /* Sanity check TDP against CPU */ |
| 1354 | if (limits->mcp_power_limit != (tdp / 8) * 1000) { | 1406 | if (limits->core_power_limit != (tdp / 8) * 1000) { |
| 1355 | dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n", | 1407 | dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n", |
| 1356 | tdp / 8, limits->mcp_power_limit / 1000); | 1408 | tdp / 8, limits->core_power_limit / 1000); |
| 1409 | limits->core_power_limit = (tdp / 8) * 1000; | ||
| 1357 | } | 1410 | } |
| 1358 | 1411 | ||
| 1359 | out: | 1412 | out: |
| @@ -1390,7 +1443,7 @@ static bool ips_get_i915_syms(struct ips_driver *ips) | |||
| 1390 | return true; | 1443 | return true; |
| 1391 | 1444 | ||
| 1392 | out_put_busy: | 1445 | out_put_busy: |
| 1393 | symbol_put(i915_gpu_turbo_disable); | 1446 | symbol_put(i915_gpu_busy); |
| 1394 | out_put_lower: | 1447 | out_put_lower: |
| 1395 | symbol_put(i915_gpu_lower); | 1448 | symbol_put(i915_gpu_lower); |
| 1396 | out_put_raise: | 1449 | out_put_raise: |
| @@ -1532,22 +1585,27 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1532 | /* Save turbo limits & ratios */ | 1585 | /* Save turbo limits & ratios */ |
| 1533 | rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); | 1586 | rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); |
| 1534 | 1587 | ||
| 1535 | ips_enable_cpu_turbo(ips); | 1588 | ips_disable_cpu_turbo(ips); |
| 1536 | ips->cpu_turbo_enabled = true; | 1589 | ips->cpu_turbo_enabled = false; |
| 1537 | 1590 | ||
| 1538 | /* Set up the work queue and monitor/adjust threads */ | 1591 | /* Create thermal adjust thread */ |
| 1539 | ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); | 1592 | ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); |
| 1540 | if (IS_ERR(ips->monitor)) { | 1593 | if (IS_ERR(ips->adjust)) { |
| 1541 | dev_err(&dev->dev, | 1594 | dev_err(&dev->dev, |
| 1542 | "failed to create thermal monitor thread, aborting\n"); | 1595 | "failed to create thermal adjust thread, aborting\n"); |
| 1543 | ret = -ENOMEM; | 1596 | ret = -ENOMEM; |
| 1544 | goto error_free_irq; | 1597 | goto error_free_irq; |
| 1598 | |||
| 1545 | } | 1599 | } |
| 1546 | 1600 | ||
| 1547 | ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); | 1601 | /* |
| 1548 | if (IS_ERR(ips->adjust)) { | 1602 | * Set up the work queue and monitor thread. The monitor thread |
| 1603 | * will wake up ips_adjust thread. | ||
| 1604 | */ | ||
| 1605 | ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); | ||
| 1606 | if (IS_ERR(ips->monitor)) { | ||
| 1549 | dev_err(&dev->dev, | 1607 | dev_err(&dev->dev, |
| 1550 | "failed to create thermal adjust thread, aborting\n"); | 1608 | "failed to create thermal monitor thread, aborting\n"); |
| 1551 | ret = -ENOMEM; | 1609 | ret = -ENOMEM; |
| 1552 | goto error_thread_cleanup; | 1610 | goto error_thread_cleanup; |
| 1553 | } | 1611 | } |
| @@ -1566,7 +1624,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1566 | return ret; | 1624 | return ret; |
| 1567 | 1625 | ||
| 1568 | error_thread_cleanup: | 1626 | error_thread_cleanup: |
| 1569 | kthread_stop(ips->monitor); | 1627 | kthread_stop(ips->adjust); |
| 1570 | error_free_irq: | 1628 | error_free_irq: |
| 1571 | free_irq(ips->dev->irq, ips); | 1629 | free_irq(ips->dev->irq, ips); |
| 1572 | error_unmap: | 1630 | error_unmap: |
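
verify_limits(), added in the intel_ips hunk above, treats the BIOS-provided MCP power and temperature limits as untrusted input: values below the platform defaults or above a hard cap (35000 in the driver's power units, 150 degrees C) are replaced with the defaults, and the check is repeated whenever the limits are re-read from the thermal registers or refreshed from the interrupt handler. A standalone sketch of the same clamping, with a made-up struct limits playing the role of ips_mcp_limits:

  #include <stdio.h>

  /* Hypothetical platform defaults, mirroring the ips_mcp_limits idea. */
  struct limits {
      unsigned int mcp_power_limit;   /* driver power units */
      unsigned int core_temp_limit;   /* degrees C */
      unsigned int mch_temp_limit;    /* degrees C */
  };

  static unsigned int min_u(unsigned int a, unsigned int b)
  {
      return a < b ? a : b;
  }

  /* Fall back to defaults when firmware values are missing or implausible. */
  static void verify_limits(unsigned int *power, unsigned int *temp,
                            const struct limits *def)
  {
      if (*power < def->mcp_power_limit || *power > 35000)
          *power = def->mcp_power_limit;

      if (*temp < def->core_temp_limit ||
          *temp < def->mch_temp_limit ||
          *temp > 150)
          *temp = min_u(def->core_temp_limit, def->mch_temp_limit);
  }

  int main(void)
  {
      struct limits def = { 25000, 95, 90 };
      unsigned int power = 0, temp = 200;     /* bogus firmware values */

      verify_limits(&power, &temp, &def);
      printf("power=%u temp=%u\n", power, temp);  /* 25000, 90 */
      return 0;
  }
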
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index df1fb53c09d2..a4be41614eeb 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c | |||
| @@ -256,7 +256,6 @@ static int __devexit ad5398_remove(struct i2c_client *client) | |||
| 256 | 256 | ||
| 257 | regulator_unregister(chip->rdev); | 257 | regulator_unregister(chip->rdev); |
| 258 | kfree(chip); | 258 | kfree(chip); |
| 259 | i2c_set_clientdata(client, NULL); | ||
| 260 | 259 | ||
| 261 | return 0; | 260 | return 0; |
| 262 | } | 261 | } |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 422a709d271d..cc8b337b9119 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev) | |||
| 700 | constraints->min_uA != constraints->max_uA) { | 700 | constraints->min_uA != constraints->max_uA) { |
| 701 | ret = _regulator_get_current_limit(rdev); | 701 | ret = _regulator_get_current_limit(rdev); |
| 702 | if (ret > 0) | 702 | if (ret > 0) |
| 703 | count += sprintf(buf + count, "at %d uA ", ret / 1000); | 703 | count += sprintf(buf + count, "at %d mA ", ret / 1000); |
| 704 | } | 704 | } |
| 705 | 705 | ||
| 706 | if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) | 706 | if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) |
| @@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
| 2302 | dev_set_name(&rdev->dev, "regulator.%d", | 2302 | dev_set_name(&rdev->dev, "regulator.%d", |
| 2303 | atomic_inc_return(®ulator_no) - 1); | 2303 | atomic_inc_return(®ulator_no) - 1); |
| 2304 | ret = device_register(&rdev->dev); | 2304 | ret = device_register(&rdev->dev); |
| 2305 | if (ret != 0) | 2305 | if (ret != 0) { |
| 2306 | put_device(&rdev->dev); | ||
| 2306 | goto clean; | 2307 | goto clean; |
| 2308 | } | ||
| 2307 | 2309 | ||
| 2308 | dev_set_drvdata(&rdev->dev, rdev); | 2310 | dev_set_drvdata(&rdev->dev, rdev); |
| 2309 | 2311 | ||
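
The regulator_register() change follows the driver-core rule that once device_register() has been called the struct device owns a reference, so a failed registration must be unwound with put_device() (which ends up invoking the release path) rather than by freeing or simply leaking the object. A simplified refcounting sketch of that error path; device_initialize/device_register/put_device here are toy stand-ins, not the kernel API:

  #include <stdio.h>
  #include <stdlib.h>

  /* Minimal stand-in for a refcounted device. */
  struct device {
      int refcount;
      const char *name;
  };

  static void device_initialize(struct device *dev)
  {
      dev->refcount = 1;          /* the caller now holds one reference */
  }

  static void put_device(struct device *dev)
  {
      if (--dev->refcount == 0) {
          printf("releasing %s\n", dev->name);
          free(dev);
      }
  }

  /* Toy register call: initializes the device, then may fail to add it. */
  static int device_register(struct device *dev, int fail_add)
  {
      device_initialize(dev);
      if (fail_add)
          return -1;              /* reference from initialize still held */
      return 0;
  }

  int main(void)
  {
      struct device *dev = calloc(1, sizeof(*dev));

      if (!dev)
          return 1;
      dev->name = "regulator.0";
      if (device_register(dev, 1) != 0) {
          put_device(dev);        /* as in the fix: drop the reference */
          return 1;
      }
      put_device(dev);
      return 0;
  }
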
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index d61ecb885a8c..b8cc6389a541 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c | |||
| @@ -191,8 +191,6 @@ static int __devexit isl6271a_remove(struct i2c_client *i2c) | |||
| 191 | struct isl_pmic *pmic = i2c_get_clientdata(i2c); | 191 | struct isl_pmic *pmic = i2c_get_clientdata(i2c); |
| 192 | int i; | 192 | int i; |
| 193 | 193 | ||
| 194 | i2c_set_clientdata(i2c, NULL); | ||
| 195 | |||
| 196 | for (i = 0; i < 3; i++) | 194 | for (i = 0; i < 3; i++) |
| 197 | regulator_unregister(pmic->rdev[i]); | 195 | regulator_unregister(pmic->rdev[i]); |
| 198 | 196 | ||
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 4520ace3f7e7..6b60a9c0366b 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
| @@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
| 330 | /* set external clock frequency */ | 330 | /* set external clock frequency */ |
| 331 | info->extclk_freq = pdata->extclk_freq; | 331 | info->extclk_freq = pdata->extclk_freq; |
| 332 | max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, | 332 | max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, |
| 333 | info->extclk_freq); | 333 | info->extclk_freq << 6); |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | if (pdata->ramp_timing) { | 336 | if (pdata->ramp_timing) { |
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 9daed8db83d3..9de8516e3531 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c | |||
| @@ -268,7 +268,6 @@ out_irq: | |||
| 268 | free_irq(client->irq, client); | 268 | free_irq(client->irq, client); |
| 269 | 269 | ||
| 270 | out_free: | 270 | out_free: |
| 271 | i2c_set_clientdata(client, NULL); | ||
| 272 | kfree(ds3232); | 271 | kfree(ds3232); |
| 273 | return ret; | 272 | return ret; |
| 274 | } | 273 | } |
| @@ -287,7 +286,6 @@ static int __devexit ds3232_remove(struct i2c_client *client) | |||
| 287 | } | 286 | } |
| 288 | 287 | ||
| 289 | rtc_device_unregister(ds3232->rtc); | 288 | rtc_device_unregister(ds3232->rtc); |
| 290 | i2c_set_clientdata(client, NULL); | ||
| 291 | kfree(ds3232); | 289 | kfree(ds3232); |
| 292 | return 0; | 290 | return 0; |
| 293 | } | 291 | } |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ad0ed212db4a..348fba0a8976 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, | |||
| 1046 | 1046 | ||
| 1047 | /* If the user actually wanted this page, we can skip the rest */ | 1047 | /* If the user actually wanted this page, we can skip the rest */ |
| 1048 | if (page == 0) | 1048 | if (page == 0) |
| 1049 | return -EINVAL; | 1049 | return 0; |
| 1050 | 1050 | ||
| 1051 | for (i = 0; i < min((int)buf[3], buf_len - 4); i++) | 1051 | for (i = 0; i < min((int)buf[3], buf_len - 4); i++) |
| 1052 | if (buf[i + 4] == page) | 1052 | if (buf[i + 4] == page) |
| 1053 | goto found; | 1053 | goto found; |
| 1054 | 1054 | ||
| 1055 | if (i < buf[3] && i > buf_len) | 1055 | if (i < buf[3] && i >= buf_len - 4) |
| 1056 | /* ran off the end of the buffer, give us benefit of doubt */ | 1056 | /* ran off the end of the buffer, give us benefit of doubt */ |
| 1057 | goto found; | 1057 | goto found; |
| 1058 | /* The device claims it doesn't support the requested page */ | 1058 | /* The device claims it doesn't support the requested page */ |
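
In scsi_get_vpd_page() above, the scan walks the supported-pages VPD list (buf[3] entries starting at buf[4]) but is also bounded by the caller's buffer, so the old escape clause (i > buf_len) could never fire and a truncated buffer made the page look unsupported. The fix checks i >= buf_len - 4, i.e. "we ran out of buffer before we ran out of list", and gives the page the benefit of the doubt; asking for page 0 now also returns success, since page 0 (the supported-pages list) is exactly what was just fetched. A simplified, self-contained version of the corrected bounds logic:

  #include <stdio.h>

  /*
   * buf[3] is the length of the device's supported-page list, the list
   * starts at buf[4], and buf_len is how much of it we actually read.
   */
  static int vpd_page_supported(const unsigned char *buf, int buf_len,
                                unsigned char page)
  {
      int i;

      for (i = 0; i < buf[3] && i < buf_len - 4; i++)
          if (buf[i + 4] == page)
              return 1;                   /* found in the list */

      /* ran out of buffer before the end of the list: assume supported */
      if (i < buf[3] && i >= buf_len - 4)
          return 1;

      return 0;                           /* device really doesn't list it */
  }

  int main(void)
  {
      /* list claims 6 entries but only 4 fit in this 8-byte buffer */
      unsigned char buf[8] = { 0, 0, 0, 6, 0x00, 0x80, 0x83, 0xb0 };

      printf("0x83: %d\n", vpd_page_supported(buf, (int)sizeof(buf), 0x83));
      printf("0xb1: %d\n", vpd_page_supported(buf, (int)sizeof(buf), 0xb1));
      return 0;
  }
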
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index 93de907b1208..800c54602339 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c | |||
| @@ -2044,6 +2044,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2044 | if (!port) { | 2044 | if (!port) { |
| 2045 | printk(KERN_WARNING | 2045 | printk(KERN_WARNING |
| 2046 | "IOC3 serial memory not available for port\n"); | 2046 | "IOC3 serial memory not available for port\n"); |
| 2047 | ret = -ENOMEM; | ||
| 2047 | goto out4; | 2048 | goto out4; |
| 2048 | } | 2049 | } |
| 2049 | spin_lock_init(&port->ip_lock); | 2050 | spin_lock_init(&port->ip_lock); |
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c index 324c385a653d..5dff45c76d32 100644 --- a/drivers/serial/mfd.c +++ b/drivers/serial/mfd.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
| 29 | #include <linux/sysrq.h> | 29 | #include <linux/sysrq.h> |
| 30 | #include <linux/slab.h> | ||
| 30 | #include <linux/serial_reg.h> | 31 | #include <linux/serial_reg.h> |
| 31 | #include <linux/circ_buf.h> | 32 | #include <linux/circ_buf.h> |
| 32 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c index f6ad1ecbff79..51c15f58e01e 100644 --- a/drivers/serial/mrst_max3110.c +++ b/drivers/serial/mrst_max3110.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | 29 | ||
| 30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
| 32 | #include <linux/irq.h> | ||
| 32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
| 33 | #include <linux/console.h> | 34 | #include <linux/console.h> |
| 34 | #include <linux/sysrq.h> | 35 | #include <linux/sysrq.h> |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 0bcf4c1601a2..b5a78a1f4421 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/cache.h> | 24 | #include <linux/cache.h> |
| 25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
| 26 | #include <linux/of_device.h> | ||
| 26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 27 | #include <linux/mod_devicetable.h> | 28 | #include <linux/mod_devicetable.h> |
| 28 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
| @@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) | |||
| 86 | const struct spi_device *spi = to_spi_device(dev); | 87 | const struct spi_device *spi = to_spi_device(dev); |
| 87 | const struct spi_driver *sdrv = to_spi_driver(drv); | 88 | const struct spi_driver *sdrv = to_spi_driver(drv); |
| 88 | 89 | ||
| 90 | /* Attempt an OF style match */ | ||
| 91 | if (of_driver_match_device(dev, drv)) | ||
| 92 | return 1; | ||
| 93 | |||
| 89 | if (sdrv->id_table) | 94 | if (sdrv->id_table) |
| 90 | return !!spi_match_id(sdrv->id_table, spi); | 95 | return !!spi_match_id(sdrv->id_table, spi); |
| 91 | 96 | ||
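
The spi.c hunk teaches the SPI bus match routine to attempt an OF (device tree) style match before consulting the driver's id table, so devices created from the device tree can bind without a duplicate entry in the id table. A simplified sketch of that "OF first, then id table" ordering; the structures and tables below are hypothetical, not the kernel's:

  #include <stdio.h>
  #include <string.h>

  struct spi_device {
      const char *modalias;       /* name used by the id-table world */
      const char *of_compatible;  /* device tree compatible, may be NULL */
  };

  struct spi_driver {
      const char *const *id_table;        /* NULL-terminated */
      const char *const *of_match_table;  /* NULL-terminated, may be NULL */
  };

  static int match_in(const char *const *table, const char *name)
  {
      if (!table || !name)
          return 0;
      for (; *table; table++)
          if (strcmp(*table, name) == 0)
              return 1;
      return 0;
  }

  /* Mirrors the new ordering: try the OF match, then fall back to ids. */
  static int spi_match_device(const struct spi_device *spi,
                              const struct spi_driver *sdrv)
  {
      if (match_in(sdrv->of_match_table, spi->of_compatible))
          return 1;
      return match_in(sdrv->id_table, spi->modalias);
  }

  int main(void)
  {
      static const char *const ids[] = { "max3110", NULL };
      static const char *const of_ids[] = { "maxim,max3110", NULL };
      struct spi_driver drv = { ids, of_ids };
      struct spi_device dev = { "unknown", "maxim,max3110" };

      printf("match: %d\n", spi_match_device(&dev, &drv));   /* 1, via OF */
      return 0;
  }
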
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index e24a63498acb..63e51b011d50 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c | |||
| @@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) | |||
| 350 | spi_gpio->bitbang.master = spi_master_get(master); | 350 | spi_gpio->bitbang.master = spi_master_get(master); |
| 351 | spi_gpio->bitbang.chipselect = spi_gpio_chipselect; | 351 | spi_gpio->bitbang.chipselect = spi_gpio_chipselect; |
| 352 | 352 | ||
| 353 | if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { | 353 | if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { |
| 354 | spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; | 354 | spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; |
| 355 | spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; | 355 | spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; |
| 356 | spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; | 356 | spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; |
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index d31b57f7baaf..1dd86b835cd8 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c | |||
| @@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | |||
| 408 | 408 | ||
| 409 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; | 409 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; |
| 410 | 410 | ||
| 411 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | 411 | if (mspi->rx_dma == mspi->dma_dummy_rx) |
| 412 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); | ||
| 413 | else | ||
| 414 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | ||
| 412 | out_be16(&rx_bd->cbd_datlen, 0); | 415 | out_be16(&rx_bd->cbd_datlen, 0); |
| 413 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); | 416 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); |
| 414 | 417 | ||
| 415 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | 418 | if (mspi->tx_dma == mspi->dma_dummy_tx) |
| 419 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); | ||
| 420 | else | ||
| 421 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | ||
| 416 | out_be16(&tx_bd->cbd_datlen, xfer_len); | 422 | out_be16(&tx_bd->cbd_datlen, xfer_len); |
| 417 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | | 423 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | |
| 418 | BD_SC_LAST); | 424 | BD_SC_LAST); |
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig index c725356cc346..de7ebb99d8f6 100644 --- a/drivers/staging/tm6000/Kconfig +++ b/drivers/staging/tm6000/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config VIDEO_TM6000 | 1 | config VIDEO_TM6000 |
| 2 | tristate "TV Master TM5600/6000/6010 driver" | 2 | tristate "TV Master TM5600/6000/6010 driver" |
| 3 | depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL | 3 | depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL |
| 4 | select VIDEO_TUNER | 4 | select VIDEO_TUNER |
| 5 | select MEDIA_TUNER_XC2028 | 5 | select MEDIA_TUNER_XC2028 |
| 6 | select MEDIA_TUNER_XC5000 | 6 | select MEDIA_TUNER_XC5000 |
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c index 32f7a0af6938..54f7667cc706 100644 --- a/drivers/staging/tm6000/tm6000-input.c +++ b/drivers/staging/tm6000/tm6000-input.c | |||
| @@ -46,7 +46,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable"); | |||
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | struct tm6000_ir_poll_result { | 48 | struct tm6000_ir_poll_result { |
| 49 | u8 rc_data[4]; | 49 | u16 rc_data; |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | struct tm6000_IR { | 52 | struct tm6000_IR { |
| @@ -60,9 +60,9 @@ struct tm6000_IR { | |||
| 60 | int polling; | 60 | int polling; |
| 61 | struct delayed_work work; | 61 | struct delayed_work work; |
| 62 | u8 wait:1; | 62 | u8 wait:1; |
| 63 | u8 key:1; | ||
| 63 | struct urb *int_urb; | 64 | struct urb *int_urb; |
| 64 | u8 *urb_data; | 65 | u8 *urb_data; |
| 65 | u8 key:1; | ||
| 66 | 66 | ||
| 67 | int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *); | 67 | int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *); |
| 68 | 68 | ||
| @@ -122,13 +122,14 @@ static void tm6000_ir_urb_received(struct urb *urb) | |||
| 122 | 122 | ||
| 123 | if (urb->status != 0) | 123 | if (urb->status != 0) |
| 124 | printk(KERN_INFO "not ready\n"); | 124 | printk(KERN_INFO "not ready\n"); |
| 125 | else if (urb->actual_length > 0) | 125 | else if (urb->actual_length > 0) { |
| 126 | memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length); | 126 | memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length); |
| 127 | 127 | ||
| 128 | dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], | 128 | dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], |
| 129 | ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); | 129 | ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); |
| 130 | 130 | ||
| 131 | ir->key = 1; | 131 | ir->key = 1; |
| 132 | } | ||
| 132 | 133 | ||
| 133 | rc = usb_submit_urb(urb, GFP_ATOMIC); | 134 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
| 134 | } | 135 | } |
| @@ -140,30 +141,47 @@ static int default_polling_getkey(struct tm6000_IR *ir, | |||
| 140 | int rc; | 141 | int rc; |
| 141 | u8 buf[2]; | 142 | u8 buf[2]; |
| 142 | 143 | ||
| 143 | if (ir->wait && !&dev->int_in) { | 144 | if (ir->wait && !&dev->int_in) |
| 144 | poll_result->rc_data[0] = 0xff; | ||
| 145 | return 0; | 145 | return 0; |
| 146 | } | ||
| 147 | 146 | ||
| 148 | if (&dev->int_in) { | 147 | if (&dev->int_in) { |
| 149 | poll_result->rc_data[0] = ir->urb_data[0]; | 148 | if (ir->ir.ir_type == IR_TYPE_RC5) |
| 150 | poll_result->rc_data[1] = ir->urb_data[1]; | 149 | poll_result->rc_data = ir->urb_data[0]; |
| 150 | else | ||
| 151 | poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8; | ||
| 151 | } else { | 152 | } else { |
| 152 | tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0); | 153 | tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0); |
| 153 | msleep(10); | 154 | msleep(10); |
| 154 | tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1); | 155 | tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1); |
| 155 | msleep(10); | 156 | msleep(10); |
| 156 | 157 | ||
| 157 | rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | | 158 | if (ir->ir.ir_type == IR_TYPE_RC5) { |
| 158 | USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1); | 159 | rc = tm6000_read_write_usb(dev, USB_DIR_IN | |
| 160 | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | ||
| 161 | REQ_02_GET_IR_CODE, 0, 0, buf, 1); | ||
| 159 | 162 | ||
| 160 | msleep(10); | 163 | msleep(10); |
| 161 | 164 | ||
| 162 | dprintk("read data=%02x\n", buf[0]); | 165 | dprintk("read data=%02x\n", buf[0]); |
| 163 | if (rc < 0) | 166 | if (rc < 0) |
| 164 | return rc; | 167 | return rc; |
| 165 | 168 | ||
| 166 | poll_result->rc_data[0] = buf[0]; | 169 | poll_result->rc_data = buf[0]; |
| 170 | } else { | ||
| 171 | rc = tm6000_read_write_usb(dev, USB_DIR_IN | | ||
| 172 | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | ||
| 173 | REQ_02_GET_IR_CODE, 0, 0, buf, 2); | ||
| 174 | |||
| 175 | msleep(10); | ||
| 176 | |||
| 177 | dprintk("read data=%04x\n", buf[0] | buf[1] << 8); | ||
| 178 | if (rc < 0) | ||
| 179 | return rc; | ||
| 180 | |||
| 181 | poll_result->rc_data = buf[0] | buf[1] << 8; | ||
| 182 | } | ||
| 183 | if ((poll_result->rc_data & 0x00ff) != 0xff) | ||
| 184 | ir->key = 1; | ||
| 167 | } | 185 | } |
| 168 | return 0; | 186 | return 0; |
| 169 | } | 187 | } |
| @@ -180,12 +198,11 @@ static void tm6000_ir_handle_key(struct tm6000_IR *ir) | |||
| 180 | return; | 198 | return; |
| 181 | } | 199 | } |
| 182 | 200 | ||
| 183 | dprintk("ir->get_key result data=%02x %02x\n", | 201 | dprintk("ir->get_key result data=%04x\n", poll_result.rc_data); |
| 184 | poll_result.rc_data[0], poll_result.rc_data[1]); | ||
| 185 | 202 | ||
| 186 | if (poll_result.rc_data[0] != 0xff && ir->key == 1) { | 203 | if (ir->key) { |
| 187 | ir_input_keydown(ir->input->input_dev, &ir->ir, | 204 | ir_input_keydown(ir->input->input_dev, &ir->ir, |
| 188 | poll_result.rc_data[0] | poll_result.rc_data[1] << 8); | 205 | (u32)poll_result.rc_data); |
| 189 | 206 | ||
| 190 | ir_input_nokey(ir->input->input_dev, &ir->ir); | 207 | ir_input_nokey(ir->input->input_dev, &ir->ir); |
| 191 | ir->key = 0; | 208 | ir->key = 0; |
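
The tm6000 IR rework above replaces the u8 rc_data[4] array with a single u16 scancode: RC-5 remotes report one byte, other protocols report two bytes combined little-endian (buf[0] | buf[1] << 8), and a key press is only signalled when the low byte is not 0xff. A tiny sketch of that scancode assembly (IR_TYPE_OTHER is a made-up tag for the non-RC-5 case):

  #include <stdint.h>
  #include <stdio.h>

  enum ir_type { IR_TYPE_RC5, IR_TYPE_OTHER };

  /* RC-5: single byte; everything else: two bytes, little-endian. */
  static uint16_t ir_scancode(enum ir_type type, const uint8_t buf[2])
  {
      if (type == IR_TYPE_RC5)
          return buf[0];
      return (uint16_t)(buf[0] | (buf[1] << 8));
  }

  int main(void)
  {
      uint8_t buf[2] = { 0x1e, 0x40 };

      printf("rc5:   0x%04x\n", ir_scancode(IR_TYPE_RC5, buf));    /* 0x001e */
      printf("other: 0x%04x\n", ir_scancode(IR_TYPE_OTHER, buf));  /* 0x401e */
      return 0;
  }
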
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 29bac5118877..d409495876f1 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
| @@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb) | |||
| 755 | { | 755 | { |
| 756 | int ret = 0; | 756 | int ret = 0; |
| 757 | 757 | ||
| 758 | blocking_notifier_chain_register(&xenstore_chain, nb); | 758 | if (xenstored_ready > 0) |
| 759 | ret = nb->notifier_call(nb, 0, NULL); | ||
| 760 | else | ||
| 761 | blocking_notifier_chain_register(&xenstore_chain, nb); | ||
| 759 | 762 | ||
| 760 | return ret; | 763 | return ret; |
| 761 | } | 764 | } |
| @@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); | |||
| 769 | 772 | ||
| 770 | void xenbus_probe(struct work_struct *unused) | 773 | void xenbus_probe(struct work_struct *unused) |
| 771 | { | 774 | { |
| 772 | BUG_ON((xenstored_ready <= 0)); | 775 | xenstored_ready = 1; |
| 773 | 776 | ||
| 774 | /* Enumerate devices in xenstore and watch for changes. */ | 777 | /* Enumerate devices in xenstore and watch for changes. */ |
| 775 | xenbus_probe_devices(&xenbus_frontend); | 778 | xenbus_probe_devices(&xenbus_frontend); |
| @@ -835,8 +838,8 @@ static int __init xenbus_init(void) | |||
| 835 | xen_store_evtchn = xen_start_info->store_evtchn; | 838 | xen_store_evtchn = xen_start_info->store_evtchn; |
| 836 | xen_store_mfn = xen_start_info->store_mfn; | 839 | xen_store_mfn = xen_start_info->store_mfn; |
| 837 | xen_store_interface = mfn_to_virt(xen_store_mfn); | 840 | xen_store_interface = mfn_to_virt(xen_store_mfn); |
| 841 | xenstored_ready = 1; | ||
| 838 | } | 842 | } |
| 839 | xenstored_ready = 1; | ||
| 840 | } | 843 | } |
| 841 | 844 | ||
| 842 | /* Initialize the interface to xenstore. */ | 845 | /* Initialize the interface to xenstore. */ |
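
register_xenstore_notifier() above now runs the notifier immediately when xenstored is already up, instead of adding it to a chain whose "ready" event has already fired and would therefore never call it; xenbus_probe() sets xenstored_ready itself so both paths stay consistent. A small sketch of that "run now or queue for later" registration pattern; the notifier plumbing here is a toy, not the kernel's blocking_notifier API:

  #include <stdio.h>

  typedef int (*notifier_fn)(void *data);

  #define MAX_NOTIFIERS 8
  static notifier_fn chain[MAX_NOTIFIERS];
  static int chain_len;
  static int xenstored_ready;             /* set once the store is usable */

  /* If the event already happened, call back now; otherwise queue it. */
  static int register_notifier(notifier_fn fn)
  {
      if (xenstored_ready > 0)
          return fn(NULL);

      if (chain_len < MAX_NOTIFIERS)
          chain[chain_len++] = fn;
      return 0;
  }

  static void store_is_ready(void)
  {
      xenstored_ready = 1;
      for (int i = 0; i < chain_len; i++)
          chain[i](NULL);
  }

  static int hello(void *data)
  {
      (void)data;
      printf("xenstore ready, notifier ran\n");
      return 0;
  }

  int main(void)
  {
      register_notifier(hello);   /* queued: store not ready yet */
      store_is_ready();           /* runs the queued notifier */
      register_notifier(hello);   /* runs immediately: already ready */
      return 0;
  }
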
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index f96eff04e11a..a6395bdb26ae 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
| @@ -134,10 +134,6 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
| 134 | if (!dump_write(file, dump_start, dump_size)) | 134 | if (!dump_write(file, dump_start, dump_size)) |
| 135 | goto end_coredump; | 135 | goto end_coredump; |
| 136 | } | 136 | } |
| 137 | /* Finally dump the task struct. Not be used by gdb, but could be useful */ | ||
| 138 | set_fs(KERNEL_DS); | ||
| 139 | if (!dump_write(file, current, sizeof(*current))) | ||
| 140 | goto end_coredump; | ||
| 141 | end_coredump: | 137 | end_coredump: |
| 142 | set_fs(fs); | 138 | set_fs(fs); |
| 143 | return has_dumped; | 139 | return has_dumped; |
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index 0fcd2640c23f..9eb134ea6eb2 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig | |||
| @@ -1,9 +1,11 @@ | |||
| 1 | config CEPH_FS | 1 | config CEPH_FS |
| 2 | tristate "Ceph distributed file system (EXPERIMENTAL)" | 2 | tristate "Ceph distributed file system (EXPERIMENTAL)" |
| 3 | depends on INET && EXPERIMENTAL | 3 | depends on INET && EXPERIMENTAL |
| 4 | select CEPH_LIB | ||
| 4 | select LIBCRC32C | 5 | select LIBCRC32C |
| 5 | select CRYPTO_AES | 6 | select CRYPTO_AES |
| 6 | select CRYPTO | 7 | select CRYPTO |
| 8 | default n | ||
| 7 | help | 9 | help |
| 8 | Choose Y or M here to include support for mounting the | 10 | Choose Y or M here to include support for mounting the |
| 9 | experimental Ceph distributed file system. Ceph is an extremely | 11 | experimental Ceph distributed file system. Ceph is an extremely |
| @@ -14,15 +16,3 @@ config CEPH_FS | |||
| 14 | 16 | ||
| 15 | If unsure, say N. | 17 | If unsure, say N. |
| 16 | 18 | ||
| 17 | config CEPH_FS_PRETTYDEBUG | ||
| 18 | bool "Include file:line in ceph debug output" | ||
| 19 | depends on CEPH_FS | ||
| 20 | default n | ||
| 21 | help | ||
| 22 | If you say Y here, debug output will include a filename and | ||
| 23 | line to aid debugging. This icnreases kernel size and slows | ||
| 24 | execution slightly when debug call sites are enabled (e.g., | ||
| 25 | via CONFIG_DYNAMIC_DEBUG). | ||
| 26 | |||
| 27 | If unsure, say N. | ||
| 28 | |||
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 278e1172600d..9e6c4f2e8ff1 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile | |||
| @@ -8,15 +8,8 @@ obj-$(CONFIG_CEPH_FS) += ceph.o | |||
| 8 | 8 | ||
| 9 | ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ | 9 | ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ |
| 10 | export.o caps.o snap.o xattr.o \ | 10 | export.o caps.o snap.o xattr.o \ |
| 11 | messenger.o msgpool.o buffer.o pagelist.o \ | 11 | mds_client.o mdsmap.o strings.o ceph_frag.o \ |
| 12 | mds_client.o mdsmap.o \ | 12 | debugfs.o |
| 13 | mon_client.o \ | ||
| 14 | osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ | ||
| 15 | debugfs.o \ | ||
| 16 | auth.o auth_none.o \ | ||
| 17 | crypto.o armor.o \ | ||
| 18 | auth_x.o \ | ||
| 19 | ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o | ||
| 20 | 13 | ||
| 21 | else | 14 | else |
| 22 | #Otherwise we were called directly from the command | 15 | #Otherwise we were called directly from the command |
diff --git a/fs/ceph/README b/fs/ceph/README deleted file mode 100644 index 18352fab37c0..000000000000 --- a/fs/ceph/README +++ /dev/null | |||
| @@ -1,20 +0,0 @@ | |||
| 1 | # | ||
| 2 | # The following files are shared by (and manually synchronized | ||
| 3 | # between) the Ceph userland and kernel client. | ||
| 4 | # | ||
| 5 | # userland kernel | ||
| 6 | src/include/ceph_fs.h fs/ceph/ceph_fs.h | ||
| 7 | src/include/ceph_fs.cc fs/ceph/ceph_fs.c | ||
| 8 | src/include/msgr.h fs/ceph/msgr.h | ||
| 9 | src/include/rados.h fs/ceph/rados.h | ||
| 10 | src/include/ceph_strings.cc fs/ceph/ceph_strings.c | ||
| 11 | src/include/ceph_frag.h fs/ceph/ceph_frag.h | ||
| 12 | src/include/ceph_frag.cc fs/ceph/ceph_frag.c | ||
| 13 | src/include/ceph_hash.h fs/ceph/ceph_hash.h | ||
| 14 | src/include/ceph_hash.cc fs/ceph/ceph_hash.c | ||
| 15 | src/crush/crush.c fs/ceph/crush/crush.c | ||
| 16 | src/crush/crush.h fs/ceph/crush/crush.h | ||
| 17 | src/crush/mapper.c fs/ceph/crush/mapper.c | ||
| 18 | src/crush/mapper.h fs/ceph/crush/mapper.h | ||
| 19 | src/crush/hash.h fs/ceph/crush/hash.h | ||
| 20 | src/crush/hash.c fs/ceph/crush/hash.c | ||
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index efbc604001c8..51bcc5ce3230 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/backing-dev.h> | 3 | #include <linux/backing-dev.h> |
| 4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
| @@ -10,7 +10,8 @@ | |||
| 10 | #include <linux/task_io_accounting_ops.h> | 10 | #include <linux/task_io_accounting_ops.h> |
| 11 | 11 | ||
| 12 | #include "super.h" | 12 | #include "super.h" |
| 13 | #include "osd_client.h" | 13 | #include "mds_client.h" |
| 14 | #include <linux/ceph/osd_client.h> | ||
| 14 | 15 | ||
| 15 | /* | 16 | /* |
| 16 | * Ceph address space ops. | 17 | * Ceph address space ops. |
| @@ -193,7 +194,8 @@ static int readpage_nounlock(struct file *filp, struct page *page) | |||
| 193 | { | 194 | { |
| 194 | struct inode *inode = filp->f_dentry->d_inode; | 195 | struct inode *inode = filp->f_dentry->d_inode; |
| 195 | struct ceph_inode_info *ci = ceph_inode(inode); | 196 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 196 | struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; | 197 | struct ceph_osd_client *osdc = |
| 198 | &ceph_inode_to_client(inode)->client->osdc; | ||
| 197 | int err = 0; | 199 | int err = 0; |
| 198 | u64 len = PAGE_CACHE_SIZE; | 200 | u64 len = PAGE_CACHE_SIZE; |
| 199 | 201 | ||
| @@ -265,7 +267,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping, | |||
| 265 | { | 267 | { |
| 266 | struct inode *inode = file->f_dentry->d_inode; | 268 | struct inode *inode = file->f_dentry->d_inode; |
| 267 | struct ceph_inode_info *ci = ceph_inode(inode); | 269 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 268 | struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; | 270 | struct ceph_osd_client *osdc = |
| 271 | &ceph_inode_to_client(inode)->client->osdc; | ||
| 269 | int rc = 0; | 272 | int rc = 0; |
| 270 | struct page **pages; | 273 | struct page **pages; |
| 271 | loff_t offset; | 274 | loff_t offset; |
| @@ -365,7 +368,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 365 | { | 368 | { |
| 366 | struct inode *inode; | 369 | struct inode *inode; |
| 367 | struct ceph_inode_info *ci; | 370 | struct ceph_inode_info *ci; |
| 368 | struct ceph_client *client; | 371 | struct ceph_fs_client *fsc; |
| 369 | struct ceph_osd_client *osdc; | 372 | struct ceph_osd_client *osdc; |
| 370 | loff_t page_off = page->index << PAGE_CACHE_SHIFT; | 373 | loff_t page_off = page->index << PAGE_CACHE_SHIFT; |
| 371 | int len = PAGE_CACHE_SIZE; | 374 | int len = PAGE_CACHE_SIZE; |
| @@ -383,8 +386,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 383 | } | 386 | } |
| 384 | inode = page->mapping->host; | 387 | inode = page->mapping->host; |
| 385 | ci = ceph_inode(inode); | 388 | ci = ceph_inode(inode); |
| 386 | client = ceph_inode_to_client(inode); | 389 | fsc = ceph_inode_to_client(inode); |
| 387 | osdc = &client->osdc; | 390 | osdc = &fsc->client->osdc; |
| 388 | 391 | ||
| 389 | /* verify this is a writeable snap context */ | 392 | /* verify this is a writeable snap context */ |
| 390 | snapc = (void *)page->private; | 393 | snapc = (void *)page->private; |
| @@ -414,10 +417,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 414 | dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", | 417 | dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", |
| 415 | inode, page, page->index, page_off, len, snapc); | 418 | inode, page, page->index, page_off, len, snapc); |
| 416 | 419 | ||
| 417 | writeback_stat = atomic_long_inc_return(&client->writeback_count); | 420 | writeback_stat = atomic_long_inc_return(&fsc->writeback_count); |
| 418 | if (writeback_stat > | 421 | if (writeback_stat > |
| 419 | CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) | 422 | CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) |
| 420 | set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC); | 423 | set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); |
| 421 | 424 | ||
| 422 | set_page_writeback(page); | 425 | set_page_writeback(page); |
| 423 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), | 426 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), |
| @@ -496,7 +499,7 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
| 496 | struct address_space *mapping = inode->i_mapping; | 499 | struct address_space *mapping = inode->i_mapping; |
| 497 | __s32 rc = -EIO; | 500 | __s32 rc = -EIO; |
| 498 | u64 bytes = 0; | 501 | u64 bytes = 0; |
| 499 | struct ceph_client *client = ceph_inode_to_client(inode); | 502 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
| 500 | long writeback_stat; | 503 | long writeback_stat; |
| 501 | unsigned issued = ceph_caps_issued(ci); | 504 | unsigned issued = ceph_caps_issued(ci); |
| 502 | 505 | ||
| @@ -529,10 +532,10 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
| 529 | WARN_ON(!PageUptodate(page)); | 532 | WARN_ON(!PageUptodate(page)); |
| 530 | 533 | ||
| 531 | writeback_stat = | 534 | writeback_stat = |
| 532 | atomic_long_dec_return(&client->writeback_count); | 535 | atomic_long_dec_return(&fsc->writeback_count); |
| 533 | if (writeback_stat < | 536 | if (writeback_stat < |
| 534 | CONGESTION_OFF_THRESH(client->mount_args->congestion_kb)) | 537 | CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) |
| 535 | clear_bdi_congested(&client->backing_dev_info, | 538 | clear_bdi_congested(&fsc->backing_dev_info, |
| 536 | BLK_RW_ASYNC); | 539 | BLK_RW_ASYNC); |
| 537 | 540 | ||
| 538 | ceph_put_snap_context((void *)page->private); | 541 | ceph_put_snap_context((void *)page->private); |
| @@ -569,13 +572,13 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
| 569 | * mempool. we avoid the mempool if we can because req->r_num_pages | 572 | * mempool. we avoid the mempool if we can because req->r_num_pages |
| 570 | * may be less than the maximum write size. | 573 | * may be less than the maximum write size. |
| 571 | */ | 574 | */ |
| 572 | static void alloc_page_vec(struct ceph_client *client, | 575 | static void alloc_page_vec(struct ceph_fs_client *fsc, |
| 573 | struct ceph_osd_request *req) | 576 | struct ceph_osd_request *req) |
| 574 | { | 577 | { |
| 575 | req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, | 578 | req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, |
| 576 | GFP_NOFS); | 579 | GFP_NOFS); |
| 577 | if (!req->r_pages) { | 580 | if (!req->r_pages) { |
| 578 | req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS); | 581 | req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); |
| 579 | req->r_pages_from_pool = 1; | 582 | req->r_pages_from_pool = 1; |
| 580 | WARN_ON(!req->r_pages); | 583 | WARN_ON(!req->r_pages); |
| 581 | } | 584 | } |
| @@ -590,7 +593,7 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
| 590 | struct inode *inode = mapping->host; | 593 | struct inode *inode = mapping->host; |
| 591 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 594 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
| 592 | struct ceph_inode_info *ci = ceph_inode(inode); | 595 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 593 | struct ceph_client *client; | 596 | struct ceph_fs_client *fsc; |
| 594 | pgoff_t index, start, end; | 597 | pgoff_t index, start, end; |
| 595 | int range_whole = 0; | 598 | int range_whole = 0; |
| 596 | int should_loop = 1; | 599 | int should_loop = 1; |
| @@ -617,13 +620,13 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
| 617 | wbc->sync_mode == WB_SYNC_NONE ? "NONE" : | 620 | wbc->sync_mode == WB_SYNC_NONE ? "NONE" : |
| 618 | (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); | 621 | (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); |
| 619 | 622 | ||
| 620 | client = ceph_inode_to_client(inode); | 623 | fsc = ceph_inode_to_client(inode); |
| 621 | if (client->mount_state == CEPH_MOUNT_SHUTDOWN) { | 624 | if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { |
| 622 | pr_warning("writepage_start %p on forced umount\n", inode); | 625 | pr_warning("writepage_start %p on forced umount\n", inode); |
| 623 | return -EIO; /* we're in a forced umount, don't write! */ | 626 | return -EIO; /* we're in a forced umount, don't write! */ |
| 624 | } | 627 | } |
| 625 | if (client->mount_args->wsize && client->mount_args->wsize < wsize) | 628 | if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) |
| 626 | wsize = client->mount_args->wsize; | 629 | wsize = fsc->mount_options->wsize; |
| 627 | if (wsize < PAGE_CACHE_SIZE) | 630 | if (wsize < PAGE_CACHE_SIZE) |
| 628 | wsize = PAGE_CACHE_SIZE; | 631 | wsize = PAGE_CACHE_SIZE; |
| 629 | max_pages_ever = wsize >> PAGE_CACHE_SHIFT; | 632 | max_pages_ever = wsize >> PAGE_CACHE_SHIFT; |
| @@ -769,7 +772,7 @@ get_more_pages: | |||
| 769 | offset = (unsigned long long)page->index | 772 | offset = (unsigned long long)page->index |
| 770 | << PAGE_CACHE_SHIFT; | 773 | << PAGE_CACHE_SHIFT; |
| 771 | len = wsize; | 774 | len = wsize; |
| 772 | req = ceph_osdc_new_request(&client->osdc, | 775 | req = ceph_osdc_new_request(&fsc->client->osdc, |
| 773 | &ci->i_layout, | 776 | &ci->i_layout, |
| 774 | ceph_vino(inode), | 777 | ceph_vino(inode), |
| 775 | offset, &len, | 778 | offset, &len, |
| @@ -782,7 +785,7 @@ get_more_pages: | |||
| 782 | &inode->i_mtime, true, 1); | 785 | &inode->i_mtime, true, 1); |
| 783 | max_pages = req->r_num_pages; | 786 | max_pages = req->r_num_pages; |
| 784 | 787 | ||
| 785 | alloc_page_vec(client, req); | 788 | alloc_page_vec(fsc, req); |
| 786 | req->r_callback = writepages_finish; | 789 | req->r_callback = writepages_finish; |
| 787 | req->r_inode = inode; | 790 | req->r_inode = inode; |
| 788 | } | 791 | } |
| @@ -794,10 +797,10 @@ get_more_pages: | |||
| 794 | inode, page, page->index); | 797 | inode, page, page->index); |
| 795 | 798 | ||
| 796 | writeback_stat = | 799 | writeback_stat = |
| 797 | atomic_long_inc_return(&client->writeback_count); | 800 | atomic_long_inc_return(&fsc->writeback_count); |
| 798 | if (writeback_stat > CONGESTION_ON_THRESH( | 801 | if (writeback_stat > CONGESTION_ON_THRESH( |
| 799 | client->mount_args->congestion_kb)) { | 802 | fsc->mount_options->congestion_kb)) { |
| 800 | set_bdi_congested(&client->backing_dev_info, | 803 | set_bdi_congested(&fsc->backing_dev_info, |
| 801 | BLK_RW_ASYNC); | 804 | BLK_RW_ASYNC); |
| 802 | } | 805 | } |
| 803 | 806 | ||
| @@ -846,7 +849,7 @@ get_more_pages: | |||
| 846 | op->payload_len = cpu_to_le32(len); | 849 | op->payload_len = cpu_to_le32(len); |
| 847 | req->r_request->hdr.data_len = cpu_to_le32(len); | 850 | req->r_request->hdr.data_len = cpu_to_le32(len); |
| 848 | 851 | ||
| 849 | ceph_osdc_start_request(&client->osdc, req, true); | 852 | ceph_osdc_start_request(&fsc->client->osdc, req, true); |
| 850 | req = NULL; | 853 | req = NULL; |
| 851 | 854 | ||
| 852 | /* continue? */ | 855 | /* continue? */ |
| @@ -915,7 +918,7 @@ static int ceph_update_writeable_page(struct file *file, | |||
| 915 | { | 918 | { |
| 916 | struct inode *inode = file->f_dentry->d_inode; | 919 | struct inode *inode = file->f_dentry->d_inode; |
| 917 | struct ceph_inode_info *ci = ceph_inode(inode); | 920 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 918 | struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; | 921 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; |
| 919 | loff_t page_off = pos & PAGE_CACHE_MASK; | 922 | loff_t page_off = pos & PAGE_CACHE_MASK; |
| 920 | int pos_in_page = pos & ~PAGE_CACHE_MASK; | 923 | int pos_in_page = pos & ~PAGE_CACHE_MASK; |
| 921 | int end_in_page = pos_in_page + len; | 924 | int end_in_page = pos_in_page + len; |
| @@ -1053,8 +1056,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, | |||
| 1053 | struct page *page, void *fsdata) | 1056 | struct page *page, void *fsdata) |
| 1054 | { | 1057 | { |
| 1055 | struct inode *inode = file->f_dentry->d_inode; | 1058 | struct inode *inode = file->f_dentry->d_inode; |
| 1056 | struct ceph_client *client = ceph_inode_to_client(inode); | 1059 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
| 1057 | struct ceph_mds_client *mdsc = &client->mdsc; | 1060 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 1058 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); | 1061 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); |
| 1059 | int check_cap = 0; | 1062 | int check_cap = 0; |
| 1060 | 1063 | ||
| @@ -1123,7 +1126,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1123 | { | 1126 | { |
| 1124 | struct inode *inode = vma->vm_file->f_dentry->d_inode; | 1127 | struct inode *inode = vma->vm_file->f_dentry->d_inode; |
| 1125 | struct page *page = vmf->page; | 1128 | struct page *page = vmf->page; |
| 1126 | struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; | 1129 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; |
| 1127 | loff_t off = page->index << PAGE_CACHE_SHIFT; | 1130 | loff_t off = page->index << PAGE_CACHE_SHIFT; |
| 1128 | loff_t size, len; | 1131 | loff_t size, len; |
| 1129 | int ret; | 1132 | int ret; |
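A note on the fs/ceph/addr.c hunks above: the per-client writeback page count and congestion threshold simply move from ceph_client to the new ceph_fs_client. The sketch below restates that accounting in isolation; the SKETCH_CONGESTION_ON_THRESH() conversion (congestion_kb in kilobytes to a page count) is an assumption for illustration, not the real macro from fs/ceph/super.h.

    /* Minimal sketch of the writeback congestion accounting shown above. */
    #include "super.h"              /* struct ceph_fs_client (assumed location) */

    /* Assumed conversion: kilobytes of dirty data -> number of pages. */
    #define SKETCH_CONGESTION_ON_THRESH(kb)  ((kb) >> (PAGE_SHIFT - 10))

    static void sketch_account_writeback_page(struct ceph_fs_client *fsc)
    {
            long count = atomic_long_inc_return(&fsc->writeback_count);

            /* Too many ceph pages under writeback: mark this bdi congested
             * so the VM throttles further async writeback against it. */
            if (count > SKETCH_CONGESTION_ON_THRESH(
                            fsc->mount_options->congestion_kb))
                    set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
    }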
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 73c153092f72..98ab13e2b71d 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/fs.h> | 3 | #include <linux/fs.h> |
| 4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| @@ -9,8 +9,9 @@ | |||
| 9 | #include <linux/writeback.h> | 9 | #include <linux/writeback.h> |
| 10 | 10 | ||
| 11 | #include "super.h" | 11 | #include "super.h" |
| 12 | #include "decode.h" | 12 | #include "mds_client.h" |
| 13 | #include "messenger.h" | 13 | #include <linux/ceph/decode.h> |
| 14 | #include <linux/ceph/messenger.h> | ||
| 14 | 15 | ||
| 15 | /* | 16 | /* |
| 16 | * Capability management | 17 | * Capability management |
| @@ -287,11 +288,11 @@ void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) | |||
| 287 | spin_unlock(&mdsc->caps_list_lock); | 288 | spin_unlock(&mdsc->caps_list_lock); |
| 288 | } | 289 | } |
| 289 | 290 | ||
| 290 | void ceph_reservation_status(struct ceph_client *client, | 291 | void ceph_reservation_status(struct ceph_fs_client *fsc, |
| 291 | int *total, int *avail, int *used, int *reserved, | 292 | int *total, int *avail, int *used, int *reserved, |
| 292 | int *min) | 293 | int *min) |
| 293 | { | 294 | { |
| 294 | struct ceph_mds_client *mdsc = &client->mdsc; | 295 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 295 | 296 | ||
| 296 | if (total) | 297 | if (total) |
| 297 | *total = mdsc->caps_total_count; | 298 | *total = mdsc->caps_total_count; |
| @@ -399,7 +400,7 @@ static void __insert_cap_node(struct ceph_inode_info *ci, | |||
| 399 | static void __cap_set_timeouts(struct ceph_mds_client *mdsc, | 400 | static void __cap_set_timeouts(struct ceph_mds_client *mdsc, |
| 400 | struct ceph_inode_info *ci) | 401 | struct ceph_inode_info *ci) |
| 401 | { | 402 | { |
| 402 | struct ceph_mount_args *ma = mdsc->client->mount_args; | 403 | struct ceph_mount_options *ma = mdsc->fsc->mount_options; |
| 403 | 404 | ||
| 404 | ci->i_hold_caps_min = round_jiffies(jiffies + | 405 | ci->i_hold_caps_min = round_jiffies(jiffies + |
| 405 | ma->caps_wanted_delay_min * HZ); | 406 | ma->caps_wanted_delay_min * HZ); |
| @@ -515,7 +516,7 @@ int ceph_add_cap(struct inode *inode, | |||
| 515 | unsigned seq, unsigned mseq, u64 realmino, int flags, | 516 | unsigned seq, unsigned mseq, u64 realmino, int flags, |
| 516 | struct ceph_cap_reservation *caps_reservation) | 517 | struct ceph_cap_reservation *caps_reservation) |
| 517 | { | 518 | { |
| 518 | struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; | 519 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; |
| 519 | struct ceph_inode_info *ci = ceph_inode(inode); | 520 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 520 | struct ceph_cap *new_cap = NULL; | 521 | struct ceph_cap *new_cap = NULL; |
| 521 | struct ceph_cap *cap; | 522 | struct ceph_cap *cap; |
| @@ -873,7 +874,7 @@ void __ceph_remove_cap(struct ceph_cap *cap) | |||
| 873 | struct ceph_mds_session *session = cap->session; | 874 | struct ceph_mds_session *session = cap->session; |
| 874 | struct ceph_inode_info *ci = cap->ci; | 875 | struct ceph_inode_info *ci = cap->ci; |
| 875 | struct ceph_mds_client *mdsc = | 876 | struct ceph_mds_client *mdsc = |
| 876 | &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; | 877 | ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; |
| 877 | int removed = 0; | 878 | int removed = 0; |
| 878 | 879 | ||
| 879 | dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); | 880 | dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); |
| @@ -1210,7 +1211,7 @@ void __ceph_flush_snaps(struct ceph_inode_info *ci, | |||
| 1210 | int mds; | 1211 | int mds; |
| 1211 | struct ceph_cap_snap *capsnap; | 1212 | struct ceph_cap_snap *capsnap; |
| 1212 | u32 mseq; | 1213 | u32 mseq; |
| 1213 | struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; | 1214 | struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; |
| 1214 | struct ceph_mds_session *session = NULL; /* if session != NULL, we hold | 1215 | struct ceph_mds_session *session = NULL; /* if session != NULL, we hold |
| 1215 | session->s_mutex */ | 1216 | session->s_mutex */ |
| 1216 | u64 next_follows = 0; /* keep track of how far we've gotten through the | 1217 | u64 next_follows = 0; /* keep track of how far we've gotten through the |
| @@ -1336,7 +1337,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) | |||
| 1336 | void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) | 1337 | void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) |
| 1337 | { | 1338 | { |
| 1338 | struct ceph_mds_client *mdsc = | 1339 | struct ceph_mds_client *mdsc = |
| 1339 | &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; | 1340 | ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; |
| 1340 | struct inode *inode = &ci->vfs_inode; | 1341 | struct inode *inode = &ci->vfs_inode; |
| 1341 | int was = ci->i_dirty_caps; | 1342 | int was = ci->i_dirty_caps; |
| 1342 | int dirty = 0; | 1343 | int dirty = 0; |
| @@ -1378,7 +1379,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) | |||
| 1378 | static int __mark_caps_flushing(struct inode *inode, | 1379 | static int __mark_caps_flushing(struct inode *inode, |
| 1379 | struct ceph_mds_session *session) | 1380 | struct ceph_mds_session *session) |
| 1380 | { | 1381 | { |
| 1381 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; | 1382 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; |
| 1382 | struct ceph_inode_info *ci = ceph_inode(inode); | 1383 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 1383 | int flushing; | 1384 | int flushing; |
| 1384 | 1385 | ||
| @@ -1416,17 +1417,6 @@ static int __mark_caps_flushing(struct inode *inode, | |||
| 1416 | /* | 1417 | /* |
| 1417 | * try to invalidate mapping pages without blocking. | 1418 | * try to invalidate mapping pages without blocking. |
| 1418 | */ | 1419 | */ |
| 1419 | static int mapping_is_empty(struct address_space *mapping) | ||
| 1420 | { | ||
| 1421 | struct page *page = find_get_page(mapping, 0); | ||
| 1422 | |||
| 1423 | if (!page) | ||
| 1424 | return 1; | ||
| 1425 | |||
| 1426 | put_page(page); | ||
| 1427 | return 0; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | static int try_nonblocking_invalidate(struct inode *inode) | 1420 | static int try_nonblocking_invalidate(struct inode *inode) |
| 1431 | { | 1421 | { |
| 1432 | struct ceph_inode_info *ci = ceph_inode(inode); | 1422 | struct ceph_inode_info *ci = ceph_inode(inode); |
| @@ -1436,7 +1426,7 @@ static int try_nonblocking_invalidate(struct inode *inode) | |||
| 1436 | invalidate_mapping_pages(&inode->i_data, 0, -1); | 1426 | invalidate_mapping_pages(&inode->i_data, 0, -1); |
| 1437 | spin_lock(&inode->i_lock); | 1427 | spin_lock(&inode->i_lock); |
| 1438 | 1428 | ||
| 1439 | if (mapping_is_empty(&inode->i_data) && | 1429 | if (inode->i_data.nrpages == 0 && |
| 1440 | invalidating_gen == ci->i_rdcache_gen) { | 1430 | invalidating_gen == ci->i_rdcache_gen) { |
| 1441 | /* success. */ | 1431 | /* success. */ |
| 1442 | dout("try_nonblocking_invalidate %p success\n", inode); | 1432 | dout("try_nonblocking_invalidate %p success\n", inode); |
| @@ -1462,8 +1452,8 @@ static int try_nonblocking_invalidate(struct inode *inode) | |||
| 1462 | void ceph_check_caps(struct ceph_inode_info *ci, int flags, | 1452 | void ceph_check_caps(struct ceph_inode_info *ci, int flags, |
| 1463 | struct ceph_mds_session *session) | 1453 | struct ceph_mds_session *session) |
| 1464 | { | 1454 | { |
| 1465 | struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); | 1455 | struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); |
| 1466 | struct ceph_mds_client *mdsc = &client->mdsc; | 1456 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 1467 | struct inode *inode = &ci->vfs_inode; | 1457 | struct inode *inode = &ci->vfs_inode; |
| 1468 | struct ceph_cap *cap; | 1458 | struct ceph_cap *cap; |
| 1469 | int file_wanted, used; | 1459 | int file_wanted, used; |
| @@ -1533,7 +1523,7 @@ retry_locked: | |||
| 1533 | */ | 1523 | */ |
| 1534 | if ((!is_delayed || mdsc->stopping) && | 1524 | if ((!is_delayed || mdsc->stopping) && |
| 1535 | ci->i_wrbuffer_ref == 0 && /* no dirty pages... */ | 1525 | ci->i_wrbuffer_ref == 0 && /* no dirty pages... */ |
| 1536 | ci->i_rdcache_gen && /* may have cached pages */ | 1526 | inode->i_data.nrpages && /* have cached pages */ |
| 1537 | (file_wanted == 0 || /* no open files */ | 1527 | (file_wanted == 0 || /* no open files */ |
| 1538 | (revoking & (CEPH_CAP_FILE_CACHE| | 1528 | (revoking & (CEPH_CAP_FILE_CACHE| |
| 1539 | CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */ | 1529 | CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */ |
| @@ -1706,7 +1696,7 @@ ack: | |||
| 1706 | static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, | 1696 | static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, |
| 1707 | unsigned *flush_tid) | 1697 | unsigned *flush_tid) |
| 1708 | { | 1698 | { |
| 1709 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; | 1699 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; |
| 1710 | struct ceph_inode_info *ci = ceph_inode(inode); | 1700 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 1711 | int unlock_session = session ? 0 : 1; | 1701 | int unlock_session = session ? 0 : 1; |
| 1712 | int flushing = 0; | 1702 | int flushing = 0; |
| @@ -1872,7 +1862,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 1872 | caps_are_flushed(inode, flush_tid)); | 1862 | caps_are_flushed(inode, flush_tid)); |
| 1873 | } else { | 1863 | } else { |
| 1874 | struct ceph_mds_client *mdsc = | 1864 | struct ceph_mds_client *mdsc = |
| 1875 | &ceph_sb_to_client(inode->i_sb)->mdsc; | 1865 | ceph_sb_to_client(inode->i_sb)->mdsc; |
| 1876 | 1866 | ||
| 1877 | spin_lock(&inode->i_lock); | 1867 | spin_lock(&inode->i_lock); |
| 1878 | if (__ceph_caps_dirty(ci)) | 1868 | if (__ceph_caps_dirty(ci)) |
| @@ -2283,7 +2273,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
| 2283 | { | 2273 | { |
| 2284 | struct ceph_inode_info *ci = ceph_inode(inode); | 2274 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 2285 | int mds = session->s_mds; | 2275 | int mds = session->s_mds; |
| 2286 | int seq = le32_to_cpu(grant->seq); | 2276 | unsigned seq = le32_to_cpu(grant->seq); |
| 2277 | unsigned issue_seq = le32_to_cpu(grant->issue_seq); | ||
| 2287 | int newcaps = le32_to_cpu(grant->caps); | 2278 | int newcaps = le32_to_cpu(grant->caps); |
| 2288 | int issued, implemented, used, wanted, dirty; | 2279 | int issued, implemented, used, wanted, dirty; |
| 2289 | u64 size = le64_to_cpu(grant->size); | 2280 | u64 size = le64_to_cpu(grant->size); |
| @@ -2295,8 +2286,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
| 2295 | int revoked_rdcache = 0; | 2286 | int revoked_rdcache = 0; |
| 2296 | int queue_invalidate = 0; | 2287 | int queue_invalidate = 0; |
| 2297 | 2288 | ||
| 2298 | dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", | 2289 | dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n", |
| 2299 | inode, cap, mds, seq, ceph_cap_string(newcaps)); | 2290 | inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps)); |
| 2300 | dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, | 2291 | dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, |
| 2301 | inode->i_size); | 2292 | inode->i_size); |
| 2302 | 2293 | ||
| @@ -2392,6 +2383,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
| 2392 | } | 2383 | } |
| 2393 | 2384 | ||
| 2394 | cap->seq = seq; | 2385 | cap->seq = seq; |
| 2386 | cap->issue_seq = issue_seq; | ||
| 2395 | 2387 | ||
| 2396 | /* file layout may have changed */ | 2388 | /* file layout may have changed */ |
| 2397 | ci->i_layout = grant->layout; | 2389 | ci->i_layout = grant->layout; |
| @@ -2463,7 +2455,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, | |||
| 2463 | __releases(inode->i_lock) | 2455 | __releases(inode->i_lock) |
| 2464 | { | 2456 | { |
| 2465 | struct ceph_inode_info *ci = ceph_inode(inode); | 2457 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 2466 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; | 2458 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; |
| 2467 | unsigned seq = le32_to_cpu(m->seq); | 2459 | unsigned seq = le32_to_cpu(m->seq); |
| 2468 | int dirty = le32_to_cpu(m->dirty); | 2460 | int dirty = le32_to_cpu(m->dirty); |
| 2469 | int cleaned = 0; | 2461 | int cleaned = 0; |
| @@ -2711,7 +2703,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
| 2711 | struct ceph_msg *msg) | 2703 | struct ceph_msg *msg) |
| 2712 | { | 2704 | { |
| 2713 | struct ceph_mds_client *mdsc = session->s_mdsc; | 2705 | struct ceph_mds_client *mdsc = session->s_mdsc; |
| 2714 | struct super_block *sb = mdsc->client->sb; | 2706 | struct super_block *sb = mdsc->fsc->sb; |
| 2715 | struct inode *inode; | 2707 | struct inode *inode; |
| 2716 | struct ceph_cap *cap; | 2708 | struct ceph_cap *cap; |
| 2717 | struct ceph_mds_caps *h; | 2709 | struct ceph_mds_caps *h; |
| @@ -2774,15 +2766,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
| 2774 | if (op == CEPH_CAP_OP_IMPORT) | 2766 | if (op == CEPH_CAP_OP_IMPORT) |
| 2775 | __queue_cap_release(session, vino.ino, cap_id, | 2767 | __queue_cap_release(session, vino.ino, cap_id, |
| 2776 | mseq, seq); | 2768 | mseq, seq); |
| 2777 | 2769 | goto flush_cap_releases; | |
| 2778 | /* | ||
| 2779 | * send any full release message to try to move things | ||
| 2780 | * along for the mds (who clearly thinks we still have this | ||
| 2781 | * cap). | ||
| 2782 | */ | ||
| 2783 | ceph_add_cap_releases(mdsc, session); | ||
| 2784 | ceph_send_cap_releases(mdsc, session); | ||
| 2785 | goto done; | ||
| 2786 | } | 2770 | } |
| 2787 | 2771 | ||
| 2788 | /* these will work even if we don't have a cap yet */ | 2772 | /* these will work even if we don't have a cap yet */ |
| @@ -2810,7 +2794,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
| 2810 | dout(" no cap on %p ino %llx.%llx from mds%d\n", | 2794 | dout(" no cap on %p ino %llx.%llx from mds%d\n", |
| 2811 | inode, ceph_ino(inode), ceph_snap(inode), mds); | 2795 | inode, ceph_ino(inode), ceph_snap(inode), mds); |
| 2812 | spin_unlock(&inode->i_lock); | 2796 | spin_unlock(&inode->i_lock); |
| 2813 | goto done; | 2797 | goto flush_cap_releases; |
| 2814 | } | 2798 | } |
| 2815 | 2799 | ||
| 2816 | /* note that each of these drops i_lock for us */ | 2800 | /* note that each of these drops i_lock for us */ |
| @@ -2834,6 +2818,17 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
| 2834 | ceph_cap_op_name(op)); | 2818 | ceph_cap_op_name(op)); |
| 2835 | } | 2819 | } |
| 2836 | 2820 | ||
| 2821 | goto done; | ||
| 2822 | |||
| 2823 | flush_cap_releases: | ||
| 2824 | /* | ||
| 2825 | * send any full release message to try to move things | ||
| 2826 | * along for the mds (who clearly thinks we still have this | ||
| 2827 | * cap). | ||
| 2828 | */ | ||
| 2829 | ceph_add_cap_releases(mdsc, session); | ||
| 2830 | ceph_send_cap_releases(mdsc, session); | ||
| 2831 | |||
| 2837 | done: | 2832 | done: |
| 2838 | mutex_unlock(&session->s_mutex); | 2833 | mutex_unlock(&session->s_mutex); |
| 2839 | done_unlocked: | 2834 | done_unlocked: |
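The repeated "&ceph_sb_to_client(...)->mdsc" to "ceph_sb_to_client(...)->mdsc" changes in caps.c follow from the MDS client no longer being embedded in the client structure but allocated separately and reached through a pointer. The two layouts below only illustrate that difference; they are not copies of the real headers.

    /* Illustrative only: the shape of the change, not the real definitions. */
    struct ceph_mds_client_sketch { int dummy; };

    struct old_client_sketch {
            struct ceph_mds_client_sketch mdsc;   /* embedded: callers took &client->mdsc */
    };

    struct new_fs_client_sketch {
            struct ceph_mds_client_sketch *mdsc;  /* allocated at mount: callers use fsc->mdsc */
    };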
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c index ab6cf35c4091..bdce8b1fbd06 100644 --- a/fs/ceph/ceph_frag.c +++ b/fs/ceph/ceph_frag.c | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Ceph 'frag' type | 2 | * Ceph 'frag' type |
| 3 | */ | 3 | */ |
| 4 | #include "types.h" | 4 | #include <linux/module.h> |
| 5 | #include <linux/ceph/types.h> | ||
| 5 | 6 | ||
| 6 | int ceph_frag_compare(__u32 a, __u32 b) | 7 | int ceph_frag_compare(__u32 a, __u32 b) |
| 7 | { | 8 | { |
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 6fd8b20a8611..7ae1b3d55b58 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
| 4 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
| @@ -7,143 +7,49 @@ | |||
| 7 | #include <linux/debugfs.h> | 7 | #include <linux/debugfs.h> |
| 8 | #include <linux/seq_file.h> | 8 | #include <linux/seq_file.h> |
| 9 | 9 | ||
| 10 | #include <linux/ceph/libceph.h> | ||
| 11 | #include <linux/ceph/mon_client.h> | ||
| 12 | #include <linux/ceph/auth.h> | ||
| 13 | #include <linux/ceph/debugfs.h> | ||
| 14 | |||
| 10 | #include "super.h" | 15 | #include "super.h" |
| 11 | #include "mds_client.h" | ||
| 12 | #include "mon_client.h" | ||
| 13 | #include "auth.h" | ||
| 14 | 16 | ||
| 15 | #ifdef CONFIG_DEBUG_FS | 17 | #ifdef CONFIG_DEBUG_FS |
| 16 | 18 | ||
| 17 | /* | 19 | #include "mds_client.h" |
| 18 | * Implement /sys/kernel/debug/ceph fun | ||
| 19 | * | ||
| 20 | * /sys/kernel/debug/ceph/client* - an instance of the ceph client | ||
| 21 | * .../osdmap - current osdmap | ||
| 22 | * .../mdsmap - current mdsmap | ||
| 23 | * .../monmap - current monmap | ||
| 24 | * .../osdc - active osd requests | ||
| 25 | * .../mdsc - active mds requests | ||
| 26 | * .../monc - mon client state | ||
| 27 | * .../dentry_lru - dump contents of dentry lru | ||
| 28 | * .../caps - expose cap (reservation) stats | ||
| 29 | * .../bdi - symlink to ../../bdi/something | ||
| 30 | */ | ||
| 31 | |||
| 32 | static struct dentry *ceph_debugfs_dir; | ||
| 33 | |||
| 34 | static int monmap_show(struct seq_file *s, void *p) | ||
| 35 | { | ||
| 36 | int i; | ||
| 37 | struct ceph_client *client = s->private; | ||
| 38 | |||
| 39 | if (client->monc.monmap == NULL) | ||
| 40 | return 0; | ||
| 41 | |||
| 42 | seq_printf(s, "epoch %d\n", client->monc.monmap->epoch); | ||
| 43 | for (i = 0; i < client->monc.monmap->num_mon; i++) { | ||
| 44 | struct ceph_entity_inst *inst = | ||
| 45 | &client->monc.monmap->mon_inst[i]; | ||
| 46 | |||
| 47 | seq_printf(s, "\t%s%lld\t%s\n", | ||
| 48 | ENTITY_NAME(inst->name), | ||
| 49 | pr_addr(&inst->addr.in_addr)); | ||
| 50 | } | ||
| 51 | return 0; | ||
| 52 | } | ||
| 53 | 20 | ||
| 54 | static int mdsmap_show(struct seq_file *s, void *p) | 21 | static int mdsmap_show(struct seq_file *s, void *p) |
| 55 | { | 22 | { |
| 56 | int i; | 23 | int i; |
| 57 | struct ceph_client *client = s->private; | 24 | struct ceph_fs_client *fsc = s->private; |
| 58 | 25 | ||
| 59 | if (client->mdsc.mdsmap == NULL) | 26 | if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL) |
| 60 | return 0; | 27 | return 0; |
| 61 | seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch); | 28 | seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch); |
| 62 | seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root); | 29 | seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root); |
| 63 | seq_printf(s, "session_timeout %d\n", | 30 | seq_printf(s, "session_timeout %d\n", |
| 64 | client->mdsc.mdsmap->m_session_timeout); | 31 | fsc->mdsc->mdsmap->m_session_timeout); |
| 65 | seq_printf(s, "session_autoclose %d\n", | 32 | seq_printf(s, "session_autoclose %d\n", |
| 66 | client->mdsc.mdsmap->m_session_autoclose); | 33 | fsc->mdsc->mdsmap->m_session_autoclose); |
| 67 | for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) { | 34 | for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) { |
| 68 | struct ceph_entity_addr *addr = | 35 | struct ceph_entity_addr *addr = |
| 69 | &client->mdsc.mdsmap->m_info[i].addr; | 36 | &fsc->mdsc->mdsmap->m_info[i].addr; |
| 70 | int state = client->mdsc.mdsmap->m_info[i].state; | 37 | int state = fsc->mdsc->mdsmap->m_info[i].state; |
| 71 | 38 | ||
| 72 | seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr), | 39 | seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, |
| 40 | ceph_pr_addr(&addr->in_addr), | ||
| 73 | ceph_mds_state_name(state)); | 41 | ceph_mds_state_name(state)); |
| 74 | } | 42 | } |
| 75 | return 0; | 43 | return 0; |
| 76 | } | 44 | } |
| 77 | 45 | ||
| 78 | static int osdmap_show(struct seq_file *s, void *p) | 46 | /* |
| 79 | { | 47 | * mdsc debugfs |
| 80 | int i; | 48 | */ |
| 81 | struct ceph_client *client = s->private; | ||
| 82 | struct rb_node *n; | ||
| 83 | |||
| 84 | if (client->osdc.osdmap == NULL) | ||
| 85 | return 0; | ||
| 86 | seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch); | ||
| 87 | seq_printf(s, "flags%s%s\n", | ||
| 88 | (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ? | ||
| 89 | " NEARFULL" : "", | ||
| 90 | (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? | ||
| 91 | " FULL" : ""); | ||
| 92 | for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { | ||
| 93 | struct ceph_pg_pool_info *pool = | ||
| 94 | rb_entry(n, struct ceph_pg_pool_info, node); | ||
| 95 | seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", | ||
| 96 | pool->id, pool->v.pg_num, pool->pg_num_mask, | ||
| 97 | pool->v.lpg_num, pool->lpg_num_mask); | ||
| 98 | } | ||
| 99 | for (i = 0; i < client->osdc.osdmap->max_osd; i++) { | ||
| 100 | struct ceph_entity_addr *addr = | ||
| 101 | &client->osdc.osdmap->osd_addr[i]; | ||
| 102 | int state = client->osdc.osdmap->osd_state[i]; | ||
| 103 | char sb[64]; | ||
| 104 | |||
| 105 | seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n", | ||
| 106 | i, pr_addr(&addr->in_addr), | ||
| 107 | ((client->osdc.osdmap->osd_weight[i]*100) >> 16), | ||
| 108 | ceph_osdmap_state_str(sb, sizeof(sb), state)); | ||
| 109 | } | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static int monc_show(struct seq_file *s, void *p) | ||
| 114 | { | ||
| 115 | struct ceph_client *client = s->private; | ||
| 116 | struct ceph_mon_generic_request *req; | ||
| 117 | struct ceph_mon_client *monc = &client->monc; | ||
| 118 | struct rb_node *rp; | ||
| 119 | |||
| 120 | mutex_lock(&monc->mutex); | ||
| 121 | |||
| 122 | if (monc->have_mdsmap) | ||
| 123 | seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); | ||
| 124 | if (monc->have_osdmap) | ||
| 125 | seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); | ||
| 126 | if (monc->want_next_osdmap) | ||
| 127 | seq_printf(s, "want next osdmap\n"); | ||
| 128 | |||
| 129 | for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { | ||
| 130 | __u16 op; | ||
| 131 | req = rb_entry(rp, struct ceph_mon_generic_request, node); | ||
| 132 | op = le16_to_cpu(req->request->hdr.type); | ||
| 133 | if (op == CEPH_MSG_STATFS) | ||
| 134 | seq_printf(s, "%lld statfs\n", req->tid); | ||
| 135 | else | ||
| 136 | seq_printf(s, "%lld unknown\n", req->tid); | ||
| 137 | } | ||
| 138 | |||
| 139 | mutex_unlock(&monc->mutex); | ||
| 140 | return 0; | ||
| 141 | } | ||
| 142 | |||
| 143 | static int mdsc_show(struct seq_file *s, void *p) | 49 | static int mdsc_show(struct seq_file *s, void *p) |
| 144 | { | 50 | { |
| 145 | struct ceph_client *client = s->private; | 51 | struct ceph_fs_client *fsc = s->private; |
| 146 | struct ceph_mds_client *mdsc = &client->mdsc; | 52 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 147 | struct ceph_mds_request *req; | 53 | struct ceph_mds_request *req; |
| 148 | struct rb_node *rp; | 54 | struct rb_node *rp; |
| 149 | int pathlen; | 55 | int pathlen; |
| @@ -214,61 +120,12 @@ static int mdsc_show(struct seq_file *s, void *p) | |||
| 214 | return 0; | 120 | return 0; |
| 215 | } | 121 | } |
| 216 | 122 | ||
| 217 | static int osdc_show(struct seq_file *s, void *pp) | ||
| 218 | { | ||
| 219 | struct ceph_client *client = s->private; | ||
| 220 | struct ceph_osd_client *osdc = &client->osdc; | ||
| 221 | struct rb_node *p; | ||
| 222 | |||
| 223 | mutex_lock(&osdc->request_mutex); | ||
| 224 | for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { | ||
| 225 | struct ceph_osd_request *req; | ||
| 226 | struct ceph_osd_request_head *head; | ||
| 227 | struct ceph_osd_op *op; | ||
| 228 | int num_ops; | ||
| 229 | int opcode, olen; | ||
| 230 | int i; | ||
| 231 | |||
| 232 | req = rb_entry(p, struct ceph_osd_request, r_node); | ||
| 233 | |||
| 234 | seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, | ||
| 235 | req->r_osd ? req->r_osd->o_osd : -1, | ||
| 236 | le32_to_cpu(req->r_pgid.pool), | ||
| 237 | le16_to_cpu(req->r_pgid.ps)); | ||
| 238 | |||
| 239 | head = req->r_request->front.iov_base; | ||
| 240 | op = (void *)(head + 1); | ||
| 241 | |||
| 242 | num_ops = le16_to_cpu(head->num_ops); | ||
| 243 | olen = le32_to_cpu(head->object_len); | ||
| 244 | seq_printf(s, "%.*s", olen, | ||
| 245 | (const char *)(head->ops + num_ops)); | ||
| 246 | |||
| 247 | if (req->r_reassert_version.epoch) | ||
| 248 | seq_printf(s, "\t%u'%llu", | ||
| 249 | (unsigned)le32_to_cpu(req->r_reassert_version.epoch), | ||
| 250 | le64_to_cpu(req->r_reassert_version.version)); | ||
| 251 | else | ||
| 252 | seq_printf(s, "\t"); | ||
| 253 | |||
| 254 | for (i = 0; i < num_ops; i++) { | ||
| 255 | opcode = le16_to_cpu(op->op); | ||
| 256 | seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); | ||
| 257 | op++; | ||
| 258 | } | ||
| 259 | |||
| 260 | seq_printf(s, "\n"); | ||
| 261 | } | ||
| 262 | mutex_unlock(&osdc->request_mutex); | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | static int caps_show(struct seq_file *s, void *p) | 123 | static int caps_show(struct seq_file *s, void *p) |
| 267 | { | 124 | { |
| 268 | struct ceph_client *client = s->private; | 125 | struct ceph_fs_client *fsc = s->private; |
| 269 | int total, avail, used, reserved, min; | 126 | int total, avail, used, reserved, min; |
| 270 | 127 | ||
| 271 | ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); | 128 | ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min); |
| 272 | seq_printf(s, "total\t\t%d\n" | 129 | seq_printf(s, "total\t\t%d\n" |
| 273 | "avail\t\t%d\n" | 130 | "avail\t\t%d\n" |
| 274 | "used\t\t%d\n" | 131 | "used\t\t%d\n" |
| @@ -280,8 +137,8 @@ static int caps_show(struct seq_file *s, void *p) | |||
| 280 | 137 | ||
| 281 | static int dentry_lru_show(struct seq_file *s, void *ptr) | 138 | static int dentry_lru_show(struct seq_file *s, void *ptr) |
| 282 | { | 139 | { |
| 283 | struct ceph_client *client = s->private; | 140 | struct ceph_fs_client *fsc = s->private; |
| 284 | struct ceph_mds_client *mdsc = &client->mdsc; | 141 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 285 | struct ceph_dentry_info *di; | 142 | struct ceph_dentry_info *di; |
| 286 | 143 | ||
| 287 | spin_lock(&mdsc->dentry_lru_lock); | 144 | spin_lock(&mdsc->dentry_lru_lock); |
| @@ -295,199 +152,124 @@ static int dentry_lru_show(struct seq_file *s, void *ptr) | |||
| 295 | return 0; | 152 | return 0; |
| 296 | } | 153 | } |
| 297 | 154 | ||
| 298 | #define DEFINE_SHOW_FUNC(name) \ | 155 | CEPH_DEFINE_SHOW_FUNC(mdsmap_show) |
| 299 | static int name##_open(struct inode *inode, struct file *file) \ | 156 | CEPH_DEFINE_SHOW_FUNC(mdsc_show) |
| 300 | { \ | 157 | CEPH_DEFINE_SHOW_FUNC(caps_show) |
| 301 | struct seq_file *sf; \ | 158 | CEPH_DEFINE_SHOW_FUNC(dentry_lru_show) |
| 302 | int ret; \ | 159 | |
| 303 | \ | ||
| 304 | ret = single_open(file, name, NULL); \ | ||
| 305 | sf = file->private_data; \ | ||
| 306 | sf->private = inode->i_private; \ | ||
| 307 | return ret; \ | ||
| 308 | } \ | ||
| 309 | \ | ||
| 310 | static const struct file_operations name##_fops = { \ | ||
| 311 | .open = name##_open, \ | ||
| 312 | .read = seq_read, \ | ||
| 313 | .llseek = seq_lseek, \ | ||
| 314 | .release = single_release, \ | ||
| 315 | }; | ||
| 316 | |||
| 317 | DEFINE_SHOW_FUNC(monmap_show) | ||
| 318 | DEFINE_SHOW_FUNC(mdsmap_show) | ||
| 319 | DEFINE_SHOW_FUNC(osdmap_show) | ||
| 320 | DEFINE_SHOW_FUNC(monc_show) | ||
| 321 | DEFINE_SHOW_FUNC(mdsc_show) | ||
| 322 | DEFINE_SHOW_FUNC(osdc_show) | ||
| 323 | DEFINE_SHOW_FUNC(dentry_lru_show) | ||
| 324 | DEFINE_SHOW_FUNC(caps_show) | ||
| 325 | 160 | ||
| 161 | /* | ||
| 162 | * debugfs | ||
| 163 | */ | ||
| 326 | static int congestion_kb_set(void *data, u64 val) | 164 | static int congestion_kb_set(void *data, u64 val) |
| 327 | { | 165 | { |
| 328 | struct ceph_client *client = (struct ceph_client *)data; | 166 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)data; |
| 329 | |||
| 330 | if (client) | ||
| 331 | client->mount_args->congestion_kb = (int)val; | ||
| 332 | 167 | ||
| 168 | fsc->mount_options->congestion_kb = (int)val; | ||
| 333 | return 0; | 169 | return 0; |
| 334 | } | 170 | } |
| 335 | 171 | ||
| 336 | static int congestion_kb_get(void *data, u64 *val) | 172 | static int congestion_kb_get(void *data, u64 *val) |
| 337 | { | 173 | { |
| 338 | struct ceph_client *client = (struct ceph_client *)data; | 174 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)data; |
| 339 | |||
| 340 | if (client) | ||
| 341 | *val = (u64)client->mount_args->congestion_kb; | ||
| 342 | 175 | ||
| 176 | *val = (u64)fsc->mount_options->congestion_kb; | ||
| 343 | return 0; | 177 | return 0; |
| 344 | } | 178 | } |
| 345 | 179 | ||
| 346 | |||
| 347 | DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get, | 180 | DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get, |
| 348 | congestion_kb_set, "%llu\n"); | 181 | congestion_kb_set, "%llu\n"); |
| 349 | 182 | ||
| 350 | int __init ceph_debugfs_init(void) | ||
| 351 | { | ||
| 352 | ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); | ||
| 353 | if (!ceph_debugfs_dir) | ||
| 354 | return -ENOMEM; | ||
| 355 | return 0; | ||
| 356 | } | ||
| 357 | 183 | ||
| 358 | void ceph_debugfs_cleanup(void) | 184 | void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc) |
| 359 | { | 185 | { |
| 360 | debugfs_remove(ceph_debugfs_dir); | 186 | dout("ceph_fs_debugfs_cleanup\n"); |
| 187 | debugfs_remove(fsc->debugfs_bdi); | ||
| 188 | debugfs_remove(fsc->debugfs_congestion_kb); | ||
| 189 | debugfs_remove(fsc->debugfs_mdsmap); | ||
| 190 | debugfs_remove(fsc->debugfs_caps); | ||
| 191 | debugfs_remove(fsc->debugfs_mdsc); | ||
| 192 | debugfs_remove(fsc->debugfs_dentry_lru); | ||
| 361 | } | 193 | } |
| 362 | 194 | ||
| 363 | int ceph_debugfs_client_init(struct ceph_client *client) | 195 | int ceph_fs_debugfs_init(struct ceph_fs_client *fsc) |
| 364 | { | 196 | { |
| 365 | int ret = 0; | 197 | char name[100]; |
| 366 | char name[80]; | 198 | int err = -ENOMEM; |
| 367 | |||
| 368 | snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, | ||
| 369 | client->monc.auth->global_id); | ||
| 370 | 199 | ||
| 371 | client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); | 200 | dout("ceph_fs_debugfs_init\n"); |
| 372 | if (!client->debugfs_dir) | 201 | fsc->debugfs_congestion_kb = |
| 373 | goto out; | 202 | debugfs_create_file("writeback_congestion_kb", |
| 374 | 203 | 0600, | |
| 375 | client->monc.debugfs_file = debugfs_create_file("monc", | 204 | fsc->client->debugfs_dir, |
| 376 | 0600, | 205 | fsc, |
| 377 | client->debugfs_dir, | 206 | &congestion_kb_fops); |
| 378 | client, | 207 | if (!fsc->debugfs_congestion_kb) |
| 379 | &monc_show_fops); | ||
| 380 | if (!client->monc.debugfs_file) | ||
| 381 | goto out; | 208 | goto out; |
| 382 | 209 | ||
| 383 | client->mdsc.debugfs_file = debugfs_create_file("mdsc", | 210 | dout("a\n"); |
| 384 | 0600, | ||
| 385 | client->debugfs_dir, | ||
| 386 | client, | ||
| 387 | &mdsc_show_fops); | ||
| 388 | if (!client->mdsc.debugfs_file) | ||
| 389 | goto out; | ||
| 390 | 211 | ||
| 391 | client->osdc.debugfs_file = debugfs_create_file("osdc", | 212 | snprintf(name, sizeof(name), "../../bdi/%s", |
| 392 | 0600, | 213 | dev_name(fsc->backing_dev_info.dev)); |
| 393 | client->debugfs_dir, | 214 | fsc->debugfs_bdi = |
| 394 | client, | 215 | debugfs_create_symlink("bdi", |
| 395 | &osdc_show_fops); | 216 | fsc->client->debugfs_dir, |
| 396 | if (!client->osdc.debugfs_file) | 217 | name); |
| 218 | if (!fsc->debugfs_bdi) | ||
| 397 | goto out; | 219 | goto out; |
| 398 | 220 | ||
| 399 | client->debugfs_monmap = debugfs_create_file("monmap", | 221 | dout("b\n"); |
| 222 | fsc->debugfs_mdsmap = debugfs_create_file("mdsmap", | ||
| 400 | 0600, | 223 | 0600, |
| 401 | client->debugfs_dir, | 224 | fsc->client->debugfs_dir, |
| 402 | client, | 225 | fsc, |
| 403 | &monmap_show_fops); | ||
| 404 | if (!client->debugfs_monmap) | ||
| 405 | goto out; | ||
| 406 | |||
| 407 | client->debugfs_mdsmap = debugfs_create_file("mdsmap", | ||
| 408 | 0600, | ||
| 409 | client->debugfs_dir, | ||
| 410 | client, | ||
| 411 | &mdsmap_show_fops); | 226 | &mdsmap_show_fops); |
| 412 | if (!client->debugfs_mdsmap) | 227 | if (!fsc->debugfs_mdsmap) |
| 413 | goto out; | ||
| 414 | |||
| 415 | client->debugfs_osdmap = debugfs_create_file("osdmap", | ||
| 416 | 0600, | ||
| 417 | client->debugfs_dir, | ||
| 418 | client, | ||
| 419 | &osdmap_show_fops); | ||
| 420 | if (!client->debugfs_osdmap) | ||
| 421 | goto out; | 228 | goto out; |
| 422 | 229 | ||
| 423 | client->debugfs_dentry_lru = debugfs_create_file("dentry_lru", | 230 | dout("ca\n"); |
| 424 | 0600, | 231 | fsc->debugfs_mdsc = debugfs_create_file("mdsc", |
| 425 | client->debugfs_dir, | 232 | 0600, |
| 426 | client, | 233 | fsc->client->debugfs_dir, |
| 427 | &dentry_lru_show_fops); | 234 | fsc, |
| 428 | if (!client->debugfs_dentry_lru) | 235 | &mdsc_show_fops); |
| 236 | if (!fsc->debugfs_mdsc) | ||
| 429 | goto out; | 237 | goto out; |
| 430 | 238 | ||
| 431 | client->debugfs_caps = debugfs_create_file("caps", | 239 | dout("da\n"); |
| 240 | fsc->debugfs_caps = debugfs_create_file("caps", | ||
| 432 | 0400, | 241 | 0400, |
| 433 | client->debugfs_dir, | 242 | fsc->client->debugfs_dir, |
| 434 | client, | 243 | fsc, |
| 435 | &caps_show_fops); | 244 | &caps_show_fops); |
| 436 | if (!client->debugfs_caps) | 245 | if (!fsc->debugfs_caps) |
| 437 | goto out; | 246 | goto out; |
| 438 | 247 | ||
| 439 | client->debugfs_congestion_kb = | 248 | dout("ea\n"); |
| 440 | debugfs_create_file("writeback_congestion_kb", | 249 | fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru", |
| 441 | 0600, | 250 | 0600, |
| 442 | client->debugfs_dir, | 251 | fsc->client->debugfs_dir, |
| 443 | client, | 252 | fsc, |
| 444 | &congestion_kb_fops); | 253 | &dentry_lru_show_fops); |
| 445 | if (!client->debugfs_congestion_kb) | 254 | if (!fsc->debugfs_dentry_lru) |
| 446 | goto out; | 255 | goto out; |
| 447 | 256 | ||
| 448 | sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev)); | ||
| 449 | client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir, | ||
| 450 | name); | ||
| 451 | |||
| 452 | return 0; | 257 | return 0; |
| 453 | 258 | ||
| 454 | out: | 259 | out: |
| 455 | ceph_debugfs_client_cleanup(client); | 260 | ceph_fs_debugfs_cleanup(fsc); |
| 456 | return ret; | 261 | return err; |
| 457 | } | 262 | } |
| 458 | 263 | ||
| 459 | void ceph_debugfs_client_cleanup(struct ceph_client *client) | ||
| 460 | { | ||
| 461 | debugfs_remove(client->debugfs_bdi); | ||
| 462 | debugfs_remove(client->debugfs_caps); | ||
| 463 | debugfs_remove(client->debugfs_dentry_lru); | ||
| 464 | debugfs_remove(client->debugfs_osdmap); | ||
| 465 | debugfs_remove(client->debugfs_mdsmap); | ||
| 466 | debugfs_remove(client->debugfs_monmap); | ||
| 467 | debugfs_remove(client->osdc.debugfs_file); | ||
| 468 | debugfs_remove(client->mdsc.debugfs_file); | ||
| 469 | debugfs_remove(client->monc.debugfs_file); | ||
| 470 | debugfs_remove(client->debugfs_congestion_kb); | ||
| 471 | debugfs_remove(client->debugfs_dir); | ||
| 472 | } | ||
| 473 | 264 | ||
| 474 | #else /* CONFIG_DEBUG_FS */ | 265 | #else /* CONFIG_DEBUG_FS */ |
| 475 | 266 | ||
| 476 | int __init ceph_debugfs_init(void) | 267 | int ceph_fs_debugfs_init(struct ceph_fs_client *fsc) |
| 477 | { | ||
| 478 | return 0; | ||
| 479 | } | ||
| 480 | |||
| 481 | void ceph_debugfs_cleanup(void) | ||
| 482 | { | ||
| 483 | } | ||
| 484 | |||
| 485 | int ceph_debugfs_client_init(struct ceph_client *client) | ||
| 486 | { | 268 | { |
| 487 | return 0; | 269 | return 0; |
| 488 | } | 270 | } |
| 489 | 271 | ||
| 490 | void ceph_debugfs_client_cleanup(struct ceph_client *client) | 272 | void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc) |
| 491 | { | 273 | { |
| 492 | } | 274 | } |
| 493 | 275 | ||
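For reference, the local DEFINE_SHOW_FUNC() helper removed above wraps a seq_file show function in the usual single_open() boilerplate; the shared CEPH_DEFINE_SHOW_FUNC() that replaces it presumably does the same from common code. A sketch of that expansion, following the removed lines:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    /* A seq_file "show" callback; s->private carries the ceph_fs_client. */
    static int example_show(struct seq_file *s, void *p)
    {
            seq_printf(s, "fs client %p\n", s->private);
            return 0;
    }

    /* What the show-func macro expands to: open with single_open(), then
     * stash the debugfs file's private data where the show callback finds it. */
    static int example_show_open(struct inode *inode, struct file *file)
    {
            struct seq_file *sf;
            int ret;

            ret = single_open(file, example_show, NULL);
            sf = file->private_data;
            sf->private = inode->i_private;
            return ret;
    }

    static const struct file_operations example_show_fops = {
            .open    = example_show_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };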
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index a1986eb52045..e0a2dc6fcafc 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/spinlock.h> | 3 | #include <linux/spinlock.h> |
| 4 | #include <linux/fs_struct.h> | 4 | #include <linux/fs_struct.h> |
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
| 8 | 8 | ||
| 9 | #include "super.h" | 9 | #include "super.h" |
| 10 | #include "mds_client.h" | ||
| 10 | 11 | ||
| 11 | /* | 12 | /* |
| 12 | * Directory operations: readdir, lookup, create, link, unlink, | 13 | * Directory operations: readdir, lookup, create, link, unlink, |
| @@ -94,10 +95,7 @@ static unsigned fpos_off(loff_t p) | |||
| 94 | */ | 95 | */ |
| 95 | static int __dcache_readdir(struct file *filp, | 96 | static int __dcache_readdir(struct file *filp, |
| 96 | void *dirent, filldir_t filldir) | 97 | void *dirent, filldir_t filldir) |
| 97 | __releases(inode->i_lock) | ||
| 98 | __acquires(inode->i_lock) | ||
| 99 | { | 98 | { |
| 100 | struct inode *inode = filp->f_dentry->d_inode; | ||
| 101 | struct ceph_file_info *fi = filp->private_data; | 99 | struct ceph_file_info *fi = filp->private_data; |
| 102 | struct dentry *parent = filp->f_dentry; | 100 | struct dentry *parent = filp->f_dentry; |
| 103 | struct inode *dir = parent->d_inode; | 101 | struct inode *dir = parent->d_inode; |
| @@ -153,7 +151,6 @@ more: | |||
| 153 | 151 | ||
| 154 | atomic_inc(&dentry->d_count); | 152 | atomic_inc(&dentry->d_count); |
| 155 | spin_unlock(&dcache_lock); | 153 | spin_unlock(&dcache_lock); |
| 156 | spin_unlock(&inode->i_lock); | ||
| 157 | 154 | ||
| 158 | dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, | 155 | dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, |
| 159 | dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); | 156 | dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); |
| @@ -171,35 +168,30 @@ more: | |||
| 171 | } else { | 168 | } else { |
| 172 | dput(last); | 169 | dput(last); |
| 173 | } | 170 | } |
| 174 | last = NULL; | ||
| 175 | } | 171 | } |
| 176 | |||
| 177 | spin_lock(&inode->i_lock); | ||
| 178 | spin_lock(&dcache_lock); | ||
| 179 | |||
| 180 | last = dentry; | 172 | last = dentry; |
| 181 | 173 | ||
| 182 | if (err < 0) | 174 | if (err < 0) |
| 183 | goto out_unlock; | 175 | goto out; |
| 184 | 176 | ||
| 185 | p = p->prev; | ||
| 186 | filp->f_pos++; | 177 | filp->f_pos++; |
| 187 | 178 | ||
| 188 | /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ | 179 | /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ |
| 189 | if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE)) | 180 | if (!ceph_i_test(dir, CEPH_I_COMPLETE)) { |
| 190 | goto more; | 181 | dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); |
| 191 | dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); | 182 | err = -EAGAIN; |
| 192 | err = -EAGAIN; | 183 | goto out; |
| 184 | } | ||
| 185 | |||
| 186 | spin_lock(&dcache_lock); | ||
| 187 | p = p->prev; /* advance to next dentry */ | ||
| 188 | goto more; | ||
| 193 | 189 | ||
| 194 | out_unlock: | 190 | out_unlock: |
| 195 | spin_unlock(&dcache_lock); | 191 | spin_unlock(&dcache_lock); |
| 196 | 192 | out: | |
| 197 | if (last) { | 193 | if (last) |
| 198 | spin_unlock(&inode->i_lock); | ||
| 199 | dput(last); | 194 | dput(last); |
| 200 | spin_lock(&inode->i_lock); | ||
| 201 | } | ||
| 202 | |||
| 203 | return err; | 195 | return err; |
| 204 | } | 196 | } |
| 205 | 197 | ||
| @@ -227,15 +219,15 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
| 227 | struct ceph_file_info *fi = filp->private_data; | 219 | struct ceph_file_info *fi = filp->private_data; |
| 228 | struct inode *inode = filp->f_dentry->d_inode; | 220 | struct inode *inode = filp->f_dentry->d_inode; |
| 229 | struct ceph_inode_info *ci = ceph_inode(inode); | 221 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 230 | struct ceph_client *client = ceph_inode_to_client(inode); | 222 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
| 231 | struct ceph_mds_client *mdsc = &client->mdsc; | 223 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 232 | unsigned frag = fpos_frag(filp->f_pos); | 224 | unsigned frag = fpos_frag(filp->f_pos); |
| 233 | int off = fpos_off(filp->f_pos); | 225 | int off = fpos_off(filp->f_pos); |
| 234 | int err; | 226 | int err; |
| 235 | u32 ftype; | 227 | u32 ftype; |
| 236 | struct ceph_mds_reply_info_parsed *rinfo; | 228 | struct ceph_mds_reply_info_parsed *rinfo; |
| 237 | const int max_entries = client->mount_args->max_readdir; | 229 | const int max_entries = fsc->mount_options->max_readdir; |
| 238 | const int max_bytes = client->mount_args->max_readdir_bytes; | 230 | const int max_bytes = fsc->mount_options->max_readdir_bytes; |
| 239 | 231 | ||
| 240 | dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); | 232 | dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); |
| 241 | if (fi->at_end) | 233 | if (fi->at_end) |
| @@ -267,17 +259,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
| 267 | /* can we use the dcache? */ | 259 | /* can we use the dcache? */ |
| 268 | spin_lock(&inode->i_lock); | 260 | spin_lock(&inode->i_lock); |
| 269 | if ((filp->f_pos == 2 || fi->dentry) && | 261 | if ((filp->f_pos == 2 || fi->dentry) && |
| 270 | !ceph_test_opt(client, NOASYNCREADDIR) && | 262 | !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && |
| 271 | ceph_snap(inode) != CEPH_SNAPDIR && | 263 | ceph_snap(inode) != CEPH_SNAPDIR && |
| 272 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && | 264 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && |
| 273 | __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { | 265 | __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { |
| 266 | spin_unlock(&inode->i_lock); | ||
| 274 | err = __dcache_readdir(filp, dirent, filldir); | 267 | err = __dcache_readdir(filp, dirent, filldir); |
| 275 | if (err != -EAGAIN) { | 268 | if (err != -EAGAIN) |
| 276 | spin_unlock(&inode->i_lock); | ||
| 277 | return err; | 269 | return err; |
| 278 | } | 270 | } else { |
| 271 | spin_unlock(&inode->i_lock); | ||
| 279 | } | 272 | } |
| 280 | spin_unlock(&inode->i_lock); | ||
| 281 | if (fi->dentry) { | 273 | if (fi->dentry) { |
| 282 | err = note_last_dentry(fi, fi->dentry->d_name.name, | 274 | err = note_last_dentry(fi, fi->dentry->d_name.name, |
| 283 | fi->dentry->d_name.len); | 275 | fi->dentry->d_name.len); |
| @@ -487,14 +479,13 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin) | |||
| 487 | struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, | 479 | struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, |
| 488 | struct dentry *dentry, int err) | 480 | struct dentry *dentry, int err) |
| 489 | { | 481 | { |
| 490 | struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); | 482 | struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); |
| 491 | struct inode *parent = dentry->d_parent->d_inode; | 483 | struct inode *parent = dentry->d_parent->d_inode; |
| 492 | 484 | ||
| 493 | /* .snap dir? */ | 485 | /* .snap dir? */ |
| 494 | if (err == -ENOENT && | 486 | if (err == -ENOENT && |
| 495 | ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */ | ||
| 496 | strcmp(dentry->d_name.name, | 487 | strcmp(dentry->d_name.name, |
| 497 | client->mount_args->snapdir_name) == 0) { | 488 | fsc->mount_options->snapdir_name) == 0) { |
| 498 | struct inode *inode = ceph_get_snapdir(parent); | 489 | struct inode *inode = ceph_get_snapdir(parent); |
| 499 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", | 490 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", |
| 500 | dentry, dentry->d_name.len, dentry->d_name.name, inode); | 491 | dentry, dentry->d_name.len, dentry->d_name.name, inode); |
| @@ -539,8 +530,8 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry) | |||
| 539 | static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, | 530 | static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, |
| 540 | struct nameidata *nd) | 531 | struct nameidata *nd) |
| 541 | { | 532 | { |
| 542 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 533 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 543 | struct ceph_mds_client *mdsc = &client->mdsc; | 534 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 544 | struct ceph_mds_request *req; | 535 | struct ceph_mds_request *req; |
| 545 | int op; | 536 | int op; |
| 546 | int err; | 537 | int err; |
| @@ -572,7 +563,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, | |||
| 572 | spin_lock(&dir->i_lock); | 563 | spin_lock(&dir->i_lock); |
| 573 | dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); | 564 | dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); |
| 574 | if (strncmp(dentry->d_name.name, | 565 | if (strncmp(dentry->d_name.name, |
| 575 | client->mount_args->snapdir_name, | 566 | fsc->mount_options->snapdir_name, |
| 576 | dentry->d_name.len) && | 567 | dentry->d_name.len) && |
| 577 | !is_root_ceph_dentry(dir, dentry) && | 568 | !is_root_ceph_dentry(dir, dentry) && |
| 578 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && | 569 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && |
| @@ -629,8 +620,8 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) | |||
| 629 | static int ceph_mknod(struct inode *dir, struct dentry *dentry, | 620 | static int ceph_mknod(struct inode *dir, struct dentry *dentry, |
| 630 | int mode, dev_t rdev) | 621 | int mode, dev_t rdev) |
| 631 | { | 622 | { |
| 632 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 623 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 633 | struct ceph_mds_client *mdsc = &client->mdsc; | 624 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 634 | struct ceph_mds_request *req; | 625 | struct ceph_mds_request *req; |
| 635 | int err; | 626 | int err; |
| 636 | 627 | ||
| @@ -685,8 +676,8 @@ static int ceph_create(struct inode *dir, struct dentry *dentry, int mode, | |||
| 685 | static int ceph_symlink(struct inode *dir, struct dentry *dentry, | 676 | static int ceph_symlink(struct inode *dir, struct dentry *dentry, |
| 686 | const char *dest) | 677 | const char *dest) |
| 687 | { | 678 | { |
| 688 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 679 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 689 | struct ceph_mds_client *mdsc = &client->mdsc; | 680 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 690 | struct ceph_mds_request *req; | 681 | struct ceph_mds_request *req; |
| 691 | int err; | 682 | int err; |
| 692 | 683 | ||
| @@ -716,8 +707,8 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, | |||
| 716 | 707 | ||
| 717 | static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 708 | static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
| 718 | { | 709 | { |
| 719 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 710 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 720 | struct ceph_mds_client *mdsc = &client->mdsc; | 711 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 721 | struct ceph_mds_request *req; | 712 | struct ceph_mds_request *req; |
| 722 | int err = -EROFS; | 713 | int err = -EROFS; |
| 723 | int op; | 714 | int op; |
| @@ -758,8 +749,8 @@ out: | |||
| 758 | static int ceph_link(struct dentry *old_dentry, struct inode *dir, | 749 | static int ceph_link(struct dentry *old_dentry, struct inode *dir, |
| 759 | struct dentry *dentry) | 750 | struct dentry *dentry) |
| 760 | { | 751 | { |
| 761 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 752 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 762 | struct ceph_mds_client *mdsc = &client->mdsc; | 753 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 763 | struct ceph_mds_request *req; | 754 | struct ceph_mds_request *req; |
| 764 | int err; | 755 | int err; |
| 765 | 756 | ||
| @@ -813,8 +804,8 @@ static int drop_caps_for_unlink(struct inode *inode) | |||
| 813 | */ | 804 | */ |
| 814 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) | 805 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) |
| 815 | { | 806 | { |
| 816 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 807 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 817 | struct ceph_mds_client *mdsc = &client->mdsc; | 808 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 818 | struct inode *inode = dentry->d_inode; | 809 | struct inode *inode = dentry->d_inode; |
| 819 | struct ceph_mds_request *req; | 810 | struct ceph_mds_request *req; |
| 820 | int err = -EROFS; | 811 | int err = -EROFS; |
| @@ -854,8 +845,8 @@ out: | |||
| 854 | static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, | 845 | static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, |
| 855 | struct inode *new_dir, struct dentry *new_dentry) | 846 | struct inode *new_dir, struct dentry *new_dentry) |
| 856 | { | 847 | { |
| 857 | struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb); | 848 | struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb); |
| 858 | struct ceph_mds_client *mdsc = &client->mdsc; | 849 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 859 | struct ceph_mds_request *req; | 850 | struct ceph_mds_request *req; |
| 860 | int err; | 851 | int err; |
| 861 | 852 | ||
| @@ -1076,7 +1067,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, | |||
| 1076 | struct ceph_inode_info *ci = ceph_inode(inode); | 1067 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 1077 | int left; | 1068 | int left; |
| 1078 | 1069 | ||
| 1079 | if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) | 1070 | if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) |
| 1080 | return -EISDIR; | 1071 | return -EISDIR; |
| 1081 | 1072 | ||
| 1082 | if (!cf->dir_info) { | 1073 | if (!cf->dir_info) { |
| @@ -1177,7 +1168,7 @@ void ceph_dentry_lru_add(struct dentry *dn) | |||
| 1177 | dout("dentry_lru_add %p %p '%.*s'\n", di, dn, | 1168 | dout("dentry_lru_add %p %p '%.*s'\n", di, dn, |
| 1178 | dn->d_name.len, dn->d_name.name); | 1169 | dn->d_name.len, dn->d_name.name); |
| 1179 | if (di) { | 1170 | if (di) { |
| 1180 | mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; | 1171 | mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; |
| 1181 | spin_lock(&mdsc->dentry_lru_lock); | 1172 | spin_lock(&mdsc->dentry_lru_lock); |
| 1182 | list_add_tail(&di->lru, &mdsc->dentry_lru); | 1173 | list_add_tail(&di->lru, &mdsc->dentry_lru); |
| 1183 | mdsc->num_dentry++; | 1174 | mdsc->num_dentry++; |
| @@ -1193,7 +1184,7 @@ void ceph_dentry_lru_touch(struct dentry *dn) | |||
| 1193 | dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn, | 1184 | dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn, |
| 1194 | dn->d_name.len, dn->d_name.name, di->offset); | 1185 | dn->d_name.len, dn->d_name.name, di->offset); |
| 1195 | if (di) { | 1186 | if (di) { |
| 1196 | mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; | 1187 | mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; |
| 1197 | spin_lock(&mdsc->dentry_lru_lock); | 1188 | spin_lock(&mdsc->dentry_lru_lock); |
| 1198 | list_move_tail(&di->lru, &mdsc->dentry_lru); | 1189 | list_move_tail(&di->lru, &mdsc->dentry_lru); |
| 1199 | spin_unlock(&mdsc->dentry_lru_lock); | 1190 | spin_unlock(&mdsc->dentry_lru_lock); |
| @@ -1208,7 +1199,7 @@ void ceph_dentry_lru_del(struct dentry *dn) | |||
| 1208 | dout("dentry_lru_del %p %p '%.*s'\n", di, dn, | 1199 | dout("dentry_lru_del %p %p '%.*s'\n", di, dn, |
| 1209 | dn->d_name.len, dn->d_name.name); | 1200 | dn->d_name.len, dn->d_name.name); |
| 1210 | if (di) { | 1201 | if (di) { |
| 1211 | mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; | 1202 | mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; |
| 1212 | spin_lock(&mdsc->dentry_lru_lock); | 1203 | spin_lock(&mdsc->dentry_lru_lock); |
| 1213 | list_del_init(&di->lru); | 1204 | list_del_init(&di->lru); |
| 1214 | mdsc->num_dentry--; | 1205 | mdsc->num_dentry--; |
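The dir.c changes above stop holding i_lock (and the dcache lock) across filldir while keeping the same fast-path test: readdir is served from the local dcache only when async readdir is enabled, the directory is known complete, and the shared file capability is still held. A condensed sketch of that decision, with helper names taken from the hunk and the MDS fallback elided:

    #include <linux/fs.h>
    #include "super.h"      /* ceph_inode(), ceph_inode_to_client() (assumed) */

    static int sketch_readdir(struct file *filp, void *dirent, filldir_t filldir)
    {
            struct inode *inode = filp->f_dentry->d_inode;
            struct ceph_inode_info *ci = ceph_inode(inode);
            struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
            int err;

            spin_lock(&inode->i_lock);
            if (!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
                (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                    /* drop i_lock before calling back into filldir */
                    spin_unlock(&inode->i_lock);
                    err = __dcache_readdir(filp, dirent, filldir);
                    if (err != -EAGAIN)
                            return err;     /* fully served from the dcache */
            } else {
                    spin_unlock(&inode->i_lock);
            }

            /* -EAGAIN (e.g. I_COMPLETE lost mid-walk): fall back to an MDS
             * readdir request, elided here. */
            return 0;
    }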
diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 4480cb1c63e7..2297d9426992 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c | |||
| @@ -1,10 +1,11 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/exportfs.h> | 3 | #include <linux/exportfs.h> |
| 4 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
| 5 | #include <asm/unaligned.h> | 5 | #include <asm/unaligned.h> |
| 6 | 6 | ||
| 7 | #include "super.h" | 7 | #include "super.h" |
| 8 | #include "mds_client.h" | ||
| 8 | 9 | ||
| 9 | /* | 10 | /* |
| 10 | * NFS export support | 11 | * NFS export support |
| @@ -42,32 +43,37 @@ struct ceph_nfs_confh { | |||
| 42 | static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, | 43 | static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, |
| 43 | int connectable) | 44 | int connectable) |
| 44 | { | 45 | { |
| 46 | int type; | ||
| 45 | struct ceph_nfs_fh *fh = (void *)rawfh; | 47 | struct ceph_nfs_fh *fh = (void *)rawfh; |
| 46 | struct ceph_nfs_confh *cfh = (void *)rawfh; | 48 | struct ceph_nfs_confh *cfh = (void *)rawfh; |
| 47 | struct dentry *parent = dentry->d_parent; | 49 | struct dentry *parent = dentry->d_parent; |
| 48 | struct inode *inode = dentry->d_inode; | 50 | struct inode *inode = dentry->d_inode; |
| 49 | int type; | 51 | int connected_handle_length = sizeof(*cfh)/4; |
| 52 | int handle_length = sizeof(*fh)/4; | ||
| 50 | 53 | ||
| 51 | /* don't re-export snaps */ | 54 | /* don't re-export snaps */ |
| 52 | if (ceph_snap(inode) != CEPH_NOSNAP) | 55 | if (ceph_snap(inode) != CEPH_NOSNAP) |
| 53 | return -EINVAL; | 56 | return -EINVAL; |
| 54 | 57 | ||
| 55 | if (*max_len >= sizeof(*cfh)) { | 58 | if (*max_len >= connected_handle_length) { |
| 56 | dout("encode_fh %p connectable\n", dentry); | 59 | dout("encode_fh %p connectable\n", dentry); |
| 57 | cfh->ino = ceph_ino(dentry->d_inode); | 60 | cfh->ino = ceph_ino(dentry->d_inode); |
| 58 | cfh->parent_ino = ceph_ino(parent->d_inode); | 61 | cfh->parent_ino = ceph_ino(parent->d_inode); |
| 59 | cfh->parent_name_hash = parent->d_name.hash; | 62 | cfh->parent_name_hash = parent->d_name.hash; |
| 60 | *max_len = sizeof(*cfh); | 63 | *max_len = connected_handle_length; |
| 61 | type = 2; | 64 | type = 2; |
| 62 | } else if (*max_len > sizeof(*fh)) { | 65 | } else if (*max_len >= handle_length) { |
| 63 | if (connectable) | 66 | if (connectable) { |
| 64 | return -ENOSPC; | 67 | *max_len = connected_handle_length; |
| 68 | return 255; | ||
| 69 | } | ||
| 65 | dout("encode_fh %p\n", dentry); | 70 | dout("encode_fh %p\n", dentry); |
| 66 | fh->ino = ceph_ino(dentry->d_inode); | 71 | fh->ino = ceph_ino(dentry->d_inode); |
| 67 | *max_len = sizeof(*fh); | 72 | *max_len = handle_length; |
| 68 | type = 1; | 73 | type = 1; |
| 69 | } else { | 74 | } else { |
| 70 | return -ENOSPC; | 75 | *max_len = handle_length; |
| 76 | return 255; | ||
| 71 | } | 77 | } |
| 72 | return type; | 78 | return type; |
| 73 | } | 79 | } |
| @@ -115,7 +121,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, | |||
| 115 | static struct dentry *__cfh_to_dentry(struct super_block *sb, | 121 | static struct dentry *__cfh_to_dentry(struct super_block *sb, |
| 116 | struct ceph_nfs_confh *cfh) | 122 | struct ceph_nfs_confh *cfh) |
| 117 | { | 123 | { |
| 118 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc; | 124 | struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc; |
| 119 | struct inode *inode; | 125 | struct inode *inode; |
| 120 | struct dentry *dentry; | 126 | struct dentry *dentry; |
| 121 | struct ceph_vino vino; | 127 | struct ceph_vino vino; |
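On the export.c change: *max_len for encode_fh is counted in 4-byte words, which is why the new code compares against sizeof(...)/4 and, when the caller's buffer is too small, writes back the required length and returns 255 (the conventional "handle does not fit" value) instead of failing with -ENOSPC. The arithmetic, with field layout inferred from the hunk's context only:

    #include <linux/types.h>

    /* Layouts inferred from the diff context; not verified against export.c. */
    struct sketch_nfs_fh {
            u64 ino;
    } __attribute__((packed));                     /* 8 bytes -> 2 words */

    struct sketch_nfs_confh {
            u64 ino;
            u64 parent_ino;
            u32 parent_name_hash;
    } __attribute__((packed));                     /* 20 bytes -> 5 words */

    static int sketch_fh_words(int connectable)
    {
            int handle_length = sizeof(struct sketch_nfs_fh) / 4;
            int connected_handle_length = sizeof(struct sketch_nfs_confh) / 4;

            /* encode_fh reports the size it needs via *max_len (in words). */
            return connectable ? connected_handle_length : handle_length;
    }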
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8c044a4f0457..e77c28cf3690 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/module.h> | ||
| 3 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 5 | #include <linux/file.h> | 6 | #include <linux/file.h> |
| @@ -38,8 +39,8 @@ | |||
| 38 | static struct ceph_mds_request * | 39 | static struct ceph_mds_request * |
| 39 | prepare_open_request(struct super_block *sb, int flags, int create_mode) | 40 | prepare_open_request(struct super_block *sb, int flags, int create_mode) |
| 40 | { | 41 | { |
| 41 | struct ceph_client *client = ceph_sb_to_client(sb); | 42 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
| 42 | struct ceph_mds_client *mdsc = &client->mdsc; | 43 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 43 | struct ceph_mds_request *req; | 44 | struct ceph_mds_request *req; |
| 44 | int want_auth = USE_ANY_MDS; | 45 | int want_auth = USE_ANY_MDS; |
| 45 | int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; | 46 | int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; |
| @@ -117,8 +118,8 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) | |||
| 117 | int ceph_open(struct inode *inode, struct file *file) | 118 | int ceph_open(struct inode *inode, struct file *file) |
| 118 | { | 119 | { |
| 119 | struct ceph_inode_info *ci = ceph_inode(inode); | 120 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 120 | struct ceph_client *client = ceph_sb_to_client(inode->i_sb); | 121 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); |
| 121 | struct ceph_mds_client *mdsc = &client->mdsc; | 122 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 122 | struct ceph_mds_request *req; | 123 | struct ceph_mds_request *req; |
| 123 | struct ceph_file_info *cf = file->private_data; | 124 | struct ceph_file_info *cf = file->private_data; |
| 124 | struct inode *parent_inode = file->f_dentry->d_parent->d_inode; | 125 | struct inode *parent_inode = file->f_dentry->d_parent->d_inode; |
| @@ -216,8 +217,8 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, | |||
| 216 | struct nameidata *nd, int mode, | 217 | struct nameidata *nd, int mode, |
| 217 | int locked_dir) | 218 | int locked_dir) |
| 218 | { | 219 | { |
| 219 | struct ceph_client *client = ceph_sb_to_client(dir->i_sb); | 220 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
| 220 | struct ceph_mds_client *mdsc = &client->mdsc; | 221 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 221 | struct file *file = nd->intent.open.file; | 222 | struct file *file = nd->intent.open.file; |
| 222 | struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); | 223 | struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); |
| 223 | struct ceph_mds_request *req; | 224 | struct ceph_mds_request *req; |
| @@ -270,163 +271,6 @@ int ceph_release(struct inode *inode, struct file *file) | |||
| 270 | } | 271 | } |
| 271 | 272 | ||
| 272 | /* | 273 | /* |
| 273 | * build a vector of user pages | ||
| 274 | */ | ||
| 275 | static struct page **get_direct_page_vector(const char __user *data, | ||
| 276 | int num_pages, | ||
| 277 | loff_t off, size_t len) | ||
| 278 | { | ||
| 279 | struct page **pages; | ||
| 280 | int rc; | ||
| 281 | |||
| 282 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); | ||
| 283 | if (!pages) | ||
| 284 | return ERR_PTR(-ENOMEM); | ||
| 285 | |||
| 286 | down_read(¤t->mm->mmap_sem); | ||
| 287 | rc = get_user_pages(current, current->mm, (unsigned long)data, | ||
| 288 | num_pages, 0, 0, pages, NULL); | ||
| 289 | up_read(¤t->mm->mmap_sem); | ||
| 290 | if (rc < 0) | ||
| 291 | goto fail; | ||
| 292 | return pages; | ||
| 293 | |||
| 294 | fail: | ||
| 295 | kfree(pages); | ||
| 296 | return ERR_PTR(rc); | ||
| 297 | } | ||
| 298 | |||
| 299 | static void put_page_vector(struct page **pages, int num_pages) | ||
| 300 | { | ||
| 301 | int i; | ||
| 302 | |||
| 303 | for (i = 0; i < num_pages; i++) | ||
| 304 | put_page(pages[i]); | ||
| 305 | kfree(pages); | ||
| 306 | } | ||
| 307 | |||
| 308 | void ceph_release_page_vector(struct page **pages, int num_pages) | ||
| 309 | { | ||
| 310 | int i; | ||
| 311 | |||
| 312 | for (i = 0; i < num_pages; i++) | ||
| 313 | __free_pages(pages[i], 0); | ||
| 314 | kfree(pages); | ||
| 315 | } | ||
| 316 | |||
| 317 | /* | ||
| 318 | * allocate a vector of new pages | ||
| 319 | */ | ||
| 320 | static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags) | ||
| 321 | { | ||
| 322 | struct page **pages; | ||
| 323 | int i; | ||
| 324 | |||
| 325 | pages = kmalloc(sizeof(*pages) * num_pages, flags); | ||
| 326 | if (!pages) | ||
| 327 | return ERR_PTR(-ENOMEM); | ||
| 328 | for (i = 0; i < num_pages; i++) { | ||
| 329 | pages[i] = __page_cache_alloc(flags); | ||
| 330 | if (pages[i] == NULL) { | ||
| 331 | ceph_release_page_vector(pages, i); | ||
| 332 | return ERR_PTR(-ENOMEM); | ||
| 333 | } | ||
| 334 | } | ||
| 335 | return pages; | ||
| 336 | } | ||
| 337 | |||
| 338 | /* | ||
| 339 | * copy user data into a page vector | ||
| 340 | */ | ||
| 341 | static int copy_user_to_page_vector(struct page **pages, | ||
| 342 | const char __user *data, | ||
| 343 | loff_t off, size_t len) | ||
| 344 | { | ||
| 345 | int i = 0; | ||
| 346 | int po = off & ~PAGE_CACHE_MASK; | ||
| 347 | int left = len; | ||
| 348 | int l, bad; | ||
| 349 | |||
| 350 | while (left > 0) { | ||
| 351 | l = min_t(int, PAGE_CACHE_SIZE-po, left); | ||
| 352 | bad = copy_from_user(page_address(pages[i]) + po, data, l); | ||
| 353 | if (bad == l) | ||
| 354 | return -EFAULT; | ||
| 355 | data += l - bad; | ||
| 356 | left -= l - bad; | ||
| 357 | po += l - bad; | ||
| 358 | if (po == PAGE_CACHE_SIZE) { | ||
| 359 | po = 0; | ||
| 360 | i++; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | return len; | ||
| 364 | } | ||
| 365 | |||
| 366 | /* | ||
| 367 | * copy user data from a page vector into a user pointer | ||
| 368 | */ | ||
| 369 | static int copy_page_vector_to_user(struct page **pages, char __user *data, | ||
| 370 | loff_t off, size_t len) | ||
| 371 | { | ||
| 372 | int i = 0; | ||
| 373 | int po = off & ~PAGE_CACHE_MASK; | ||
| 374 | int left = len; | ||
| 375 | int l, bad; | ||
| 376 | |||
| 377 | while (left > 0) { | ||
| 378 | l = min_t(int, left, PAGE_CACHE_SIZE-po); | ||
| 379 | bad = copy_to_user(data, page_address(pages[i]) + po, l); | ||
| 380 | if (bad == l) | ||
| 381 | return -EFAULT; | ||
| 382 | data += l - bad; | ||
| 383 | left -= l - bad; | ||
| 384 | if (po) { | ||
| 385 | po += l - bad; | ||
| 386 | if (po == PAGE_CACHE_SIZE) | ||
| 387 | po = 0; | ||
| 388 | } | ||
| 389 | i++; | ||
| 390 | } | ||
| 391 | return len; | ||
| 392 | } | ||
| 393 | |||
| 394 | /* | ||
| 395 | * Zero an extent within a page vector. Offset is relative to the | ||
| 396 | * start of the first page. | ||
| 397 | */ | ||
| 398 | static void zero_page_vector_range(int off, int len, struct page **pages) | ||
| 399 | { | ||
| 400 | int i = off >> PAGE_CACHE_SHIFT; | ||
| 401 | |||
| 402 | off &= ~PAGE_CACHE_MASK; | ||
| 403 | |||
| 404 | dout("zero_page_vector_page %u~%u\n", off, len); | ||
| 405 | |||
| 406 | /* leading partial page? */ | ||
| 407 | if (off) { | ||
| 408 | int end = min((int)PAGE_CACHE_SIZE, off + len); | ||
| 409 | dout("zeroing %d %p head from %d\n", i, pages[i], | ||
| 410 | (int)off); | ||
| 411 | zero_user_segment(pages[i], off, end); | ||
| 412 | len -= (end - off); | ||
| 413 | i++; | ||
| 414 | } | ||
| 415 | while (len >= PAGE_CACHE_SIZE) { | ||
| 416 | dout("zeroing %d %p len=%d\n", i, pages[i], len); | ||
| 417 | zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); | ||
| 418 | len -= PAGE_CACHE_SIZE; | ||
| 419 | i++; | ||
| 420 | } | ||
| 421 | /* trailing partial page? */ | ||
| 422 | if (len) { | ||
| 423 | dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len); | ||
| 424 | zero_user_segment(pages[i], 0, len); | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | |||
| 429 | /* | ||
| 430 | * Read a range of bytes striped over one or more objects. Iterate over | 274 | * Read a range of bytes striped over one or more objects. Iterate over |
| 431 | * objects we stripe over. (That's not atomic, but good enough for now.) | 275 | * objects we stripe over. (That's not atomic, but good enough for now.) |
| 432 | * | 276 | * |
| @@ -438,7 +282,7 @@ static int striped_read(struct inode *inode, | |||
| 438 | struct page **pages, int num_pages, | 282 | struct page **pages, int num_pages, |
| 439 | int *checkeof) | 283 | int *checkeof) |
| 440 | { | 284 | { |
| 441 | struct ceph_client *client = ceph_inode_to_client(inode); | 285 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
| 442 | struct ceph_inode_info *ci = ceph_inode(inode); | 286 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 443 | u64 pos, this_len; | 287 | u64 pos, this_len; |
| 444 | int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ | 288 | int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ |
| @@ -459,7 +303,7 @@ static int striped_read(struct inode *inode, | |||
| 459 | 303 | ||
| 460 | more: | 304 | more: |
| 461 | this_len = left; | 305 | this_len = left; |
| 462 | ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode), | 306 | ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), |
| 463 | &ci->i_layout, pos, &this_len, | 307 | &ci->i_layout, pos, &this_len, |
| 464 | ci->i_truncate_seq, | 308 | ci->i_truncate_seq, |
| 465 | ci->i_truncate_size, | 309 | ci->i_truncate_size, |
| @@ -477,8 +321,8 @@ more: | |||
| 477 | 321 | ||
| 478 | if (read < pos - off) { | 322 | if (read < pos - off) { |
| 479 | dout(" zero gap %llu to %llu\n", off + read, pos); | 323 | dout(" zero gap %llu to %llu\n", off + read, pos); |
| 480 | zero_page_vector_range(page_off + read, | 324 | ceph_zero_page_vector_range(page_off + read, |
| 481 | pos - off - read, pages); | 325 | pos - off - read, pages); |
| 482 | } | 326 | } |
| 483 | pos += ret; | 327 | pos += ret; |
| 484 | read = pos - off; | 328 | read = pos - off; |
| @@ -495,8 +339,8 @@ more: | |||
| 495 | /* was original extent fully inside i_size? */ | 339 | /* was original extent fully inside i_size? */ |
| 496 | if (pos + left <= inode->i_size) { | 340 | if (pos + left <= inode->i_size) { |
| 497 | dout("zero tail\n"); | 341 | dout("zero tail\n"); |
| 498 | zero_page_vector_range(page_off + read, len - read, | 342 | ceph_zero_page_vector_range(page_off + read, len - read, |
| 499 | pages); | 343 | pages); |
| 500 | read = len; | 344 | read = len; |
| 501 | goto out; | 345 | goto out; |
| 502 | } | 346 | } |
| @@ -531,7 +375,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, | |||
| 531 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); | 375 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); |
| 532 | 376 | ||
| 533 | if (file->f_flags & O_DIRECT) { | 377 | if (file->f_flags & O_DIRECT) { |
| 534 | pages = get_direct_page_vector(data, num_pages, off, len); | 378 | pages = ceph_get_direct_page_vector(data, num_pages, off, len); |
| 535 | 379 | ||
| 536 | /* | 380 | /* |
| 537 | * flush any page cache pages in this range. this | 381 | * flush any page cache pages in this range. this |
| @@ -552,13 +396,13 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, | |||
| 552 | ret = striped_read(inode, off, len, pages, num_pages, checkeof); | 396 | ret = striped_read(inode, off, len, pages, num_pages, checkeof); |
| 553 | 397 | ||
| 554 | if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) | 398 | if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) |
| 555 | ret = copy_page_vector_to_user(pages, data, off, ret); | 399 | ret = ceph_copy_page_vector_to_user(pages, data, off, ret); |
| 556 | if (ret >= 0) | 400 | if (ret >= 0) |
| 557 | *poff = off + ret; | 401 | *poff = off + ret; |
| 558 | 402 | ||
| 559 | done: | 403 | done: |
| 560 | if (file->f_flags & O_DIRECT) | 404 | if (file->f_flags & O_DIRECT) |
| 561 | put_page_vector(pages, num_pages); | 405 | ceph_put_page_vector(pages, num_pages); |
| 562 | else | 406 | else |
| 563 | ceph_release_page_vector(pages, num_pages); | 407 | ceph_release_page_vector(pages, num_pages); |
| 564 | dout("sync_read result %d\n", ret); | 408 | dout("sync_read result %d\n", ret); |
| @@ -594,7 +438,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, | |||
| 594 | { | 438 | { |
| 595 | struct inode *inode = file->f_dentry->d_inode; | 439 | struct inode *inode = file->f_dentry->d_inode; |
| 596 | struct ceph_inode_info *ci = ceph_inode(inode); | 440 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 597 | struct ceph_client *client = ceph_inode_to_client(inode); | 441 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
| 598 | struct ceph_osd_request *req; | 442 | struct ceph_osd_request *req; |
| 599 | struct page **pages; | 443 | struct page **pages; |
| 600 | int num_pages; | 444 | int num_pages; |
| @@ -642,7 +486,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, | |||
| 642 | */ | 486 | */ |
| 643 | more: | 487 | more: |
| 644 | len = left; | 488 | len = left; |
| 645 | req = ceph_osdc_new_request(&client->osdc, &ci->i_layout, | 489 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, |
| 646 | ceph_vino(inode), pos, &len, | 490 | ceph_vino(inode), pos, &len, |
| 647 | CEPH_OSD_OP_WRITE, flags, | 491 | CEPH_OSD_OP_WRITE, flags, |
| 648 | ci->i_snap_realm->cached_context, | 492 | ci->i_snap_realm->cached_context, |
| @@ -655,7 +499,7 @@ more: | |||
| 655 | num_pages = calc_pages_for(pos, len); | 499 | num_pages = calc_pages_for(pos, len); |
| 656 | 500 | ||
| 657 | if (file->f_flags & O_DIRECT) { | 501 | if (file->f_flags & O_DIRECT) { |
| 658 | pages = get_direct_page_vector(data, num_pages, pos, len); | 502 | pages = ceph_get_direct_page_vector(data, num_pages, pos, len); |
| 659 | if (IS_ERR(pages)) { | 503 | if (IS_ERR(pages)) { |
| 660 | ret = PTR_ERR(pages); | 504 | ret = PTR_ERR(pages); |
| 661 | goto out; | 505 | goto out; |
| @@ -673,7 +517,7 @@ more: | |||
| 673 | ret = PTR_ERR(pages); | 517 | ret = PTR_ERR(pages); |
| 674 | goto out; | 518 | goto out; |
| 675 | } | 519 | } |
| 676 | ret = copy_user_to_page_vector(pages, data, pos, len); | 520 | ret = ceph_copy_user_to_page_vector(pages, data, pos, len); |
| 677 | if (ret < 0) { | 521 | if (ret < 0) { |
| 678 | ceph_release_page_vector(pages, num_pages); | 522 | ceph_release_page_vector(pages, num_pages); |
| 679 | goto out; | 523 | goto out; |
| @@ -689,7 +533,7 @@ more: | |||
| 689 | req->r_num_pages = num_pages; | 533 | req->r_num_pages = num_pages; |
| 690 | req->r_inode = inode; | 534 | req->r_inode = inode; |
| 691 | 535 | ||
| 692 | ret = ceph_osdc_start_request(&client->osdc, req, false); | 536 | ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); |
| 693 | if (!ret) { | 537 | if (!ret) { |
| 694 | if (req->r_safe_callback) { | 538 | if (req->r_safe_callback) { |
| 695 | /* | 539 | /* |
| @@ -697,15 +541,15 @@ more: | |||
| 697 | * start_request so that a tid has been assigned. | 541 | * start_request so that a tid has been assigned. |
| 698 | */ | 542 | */ |
| 699 | spin_lock(&ci->i_unsafe_lock); | 543 | spin_lock(&ci->i_unsafe_lock); |
| 700 | list_add(&ci->i_unsafe_writes, &req->r_unsafe_item); | 544 | list_add(&req->r_unsafe_item, &ci->i_unsafe_writes); |
| 701 | spin_unlock(&ci->i_unsafe_lock); | 545 | spin_unlock(&ci->i_unsafe_lock); |
| 702 | ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); | 546 | ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); |
| 703 | } | 547 | } |
| 704 | ret = ceph_osdc_wait_request(&client->osdc, req); | 548 | ret = ceph_osdc_wait_request(&fsc->client->osdc, req); |
| 705 | } | 549 | } |
| 706 | 550 | ||
| 707 | if (file->f_flags & O_DIRECT) | 551 | if (file->f_flags & O_DIRECT) |
| 708 | put_page_vector(pages, num_pages); | 552 | ceph_put_page_vector(pages, num_pages); |
| 709 | else if (file->f_flags & O_SYNC) | 553 | else if (file->f_flags & O_SYNC) |
| 710 | ceph_release_page_vector(pages, num_pages); | 554 | ceph_release_page_vector(pages, num_pages); |
| 711 | 555 | ||
| @@ -814,7 +658,8 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 814 | struct ceph_file_info *fi = file->private_data; | 658 | struct ceph_file_info *fi = file->private_data; |
| 815 | struct inode *inode = file->f_dentry->d_inode; | 659 | struct inode *inode = file->f_dentry->d_inode; |
| 816 | struct ceph_inode_info *ci = ceph_inode(inode); | 660 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 817 | struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; | 661 | struct ceph_osd_client *osdc = |
| 662 | &ceph_sb_to_client(inode->i_sb)->client->osdc; | ||
| 818 | loff_t endoff = pos + iov->iov_len; | 663 | loff_t endoff = pos + iov->iov_len; |
| 819 | int want, got = 0; | 664 | int want, got = 0; |
| 820 | int ret, err; | 665 | int ret, err; |
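The sync read and write paths above size their page vectors with calc_pages_for(pos, len), and the old page-vector helpers move into the shared libceph code as ceph_get_direct_page_vector(), ceph_copy_page_vector_to_user() and friends. A small stand-alone sketch of the page-count arithmetic those paths rely on, with 4 KiB pages assumed; this models the calculation, it is not the kernel helper itself.

```c
/*
 * Sketch of the page-count arithmetic behind calc_pages_for(): the bytes
 * preceding 'off' within its page, plus 'len', rounded up to whole pages.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long pages_for(unsigned long long off, unsigned long long len)
{
	return ((off & ~PAGE_MASK) + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", pages_for(4090, 10));   /* straddles a boundary -> 2 */
	printf("%lu\n", pages_for(0, 8192));    /* exactly two pages    -> 2 */
	printf("%lu\n", pages_for(100, 8192));  /* offset adds a page   -> 3 */
	return 0;
}
```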
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 62377ec37edf..1d6a45b5a04c 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
| 4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
| @@ -13,7 +13,8 @@ | |||
| 13 | #include <linux/pagevec.h> | 13 | #include <linux/pagevec.h> |
| 14 | 14 | ||
| 15 | #include "super.h" | 15 | #include "super.h" |
| 16 | #include "decode.h" | 16 | #include "mds_client.h" |
| 17 | #include <linux/ceph/decode.h> | ||
| 17 | 18 | ||
| 18 | /* | 19 | /* |
| 19 | * Ceph inode operations | 20 | * Ceph inode operations |
| @@ -384,7 +385,7 @@ void ceph_destroy_inode(struct inode *inode) | |||
| 384 | */ | 385 | */ |
| 385 | if (ci->i_snap_realm) { | 386 | if (ci->i_snap_realm) { |
| 386 | struct ceph_mds_client *mdsc = | 387 | struct ceph_mds_client *mdsc = |
| 387 | &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; | 388 | ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; |
| 388 | struct ceph_snap_realm *realm = ci->i_snap_realm; | 389 | struct ceph_snap_realm *realm = ci->i_snap_realm; |
| 389 | 390 | ||
| 390 | dout(" dropping residual ref to snap realm %p\n", realm); | 391 | dout(" dropping residual ref to snap realm %p\n", realm); |
| @@ -685,7 +686,7 @@ static int fill_inode(struct inode *inode, | |||
| 685 | } | 686 | } |
| 686 | 687 | ||
| 687 | /* it may be better to set st_size in getattr instead? */ | 688 | /* it may be better to set st_size in getattr instead? */ |
| 688 | if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) | 689 | if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) |
| 689 | inode->i_size = ci->i_rbytes; | 690 | inode->i_size = ci->i_rbytes; |
| 690 | break; | 691 | break; |
| 691 | default: | 692 | default: |
| @@ -901,7 +902,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 901 | struct inode *in = NULL; | 902 | struct inode *in = NULL; |
| 902 | struct ceph_mds_reply_inode *ininfo; | 903 | struct ceph_mds_reply_inode *ininfo; |
| 903 | struct ceph_vino vino; | 904 | struct ceph_vino vino; |
| 904 | struct ceph_client *client = ceph_sb_to_client(sb); | 905 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
| 905 | int i = 0; | 906 | int i = 0; |
| 906 | int err = 0; | 907 | int err = 0; |
| 907 | 908 | ||
| @@ -965,7 +966,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 965 | */ | 966 | */ |
| 966 | if (rinfo->head->is_dentry && !req->r_aborted && | 967 | if (rinfo->head->is_dentry && !req->r_aborted && |
| 967 | (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, | 968 | (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, |
| 968 | client->mount_args->snapdir_name, | 969 | fsc->mount_options->snapdir_name, |
| 969 | req->r_dentry->d_name.len))) { | 970 | req->r_dentry->d_name.len))) { |
| 970 | /* | 971 | /* |
| 971 | * lookup link rename : null -> possibly existing inode | 972 | * lookup link rename : null -> possibly existing inode |
| @@ -1533,7 +1534,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 1533 | struct inode *parent_inode = dentry->d_parent->d_inode; | 1534 | struct inode *parent_inode = dentry->d_parent->d_inode; |
| 1534 | const unsigned int ia_valid = attr->ia_valid; | 1535 | const unsigned int ia_valid = attr->ia_valid; |
| 1535 | struct ceph_mds_request *req; | 1536 | struct ceph_mds_request *req; |
| 1536 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc; | 1537 | struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; |
| 1537 | int issued; | 1538 | int issued; |
| 1538 | int release = 0, dirtied = 0; | 1539 | int release = 0, dirtied = 0; |
| 1539 | int mask = 0; | 1540 | int mask = 0; |
| @@ -1728,8 +1729,8 @@ out: | |||
| 1728 | */ | 1729 | */ |
| 1729 | int ceph_do_getattr(struct inode *inode, int mask) | 1730 | int ceph_do_getattr(struct inode *inode, int mask) |
| 1730 | { | 1731 | { |
| 1731 | struct ceph_client *client = ceph_sb_to_client(inode->i_sb); | 1732 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); |
| 1732 | struct ceph_mds_client *mdsc = &client->mdsc; | 1733 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 1733 | struct ceph_mds_request *req; | 1734 | struct ceph_mds_request *req; |
| 1734 | int err; | 1735 | int err; |
| 1735 | 1736 | ||
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 76e307d2aba1..8888c9ba68db 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c | |||
| @@ -1,8 +1,10 @@ | |||
| 1 | #include <linux/in.h> | 1 | #include <linux/in.h> |
| 2 | 2 | ||
| 3 | #include "ioctl.h" | ||
| 4 | #include "super.h" | 3 | #include "super.h" |
| 5 | #include "ceph_debug.h" | 4 | #include "mds_client.h" |
| 5 | #include <linux/ceph/ceph_debug.h> | ||
| 6 | |||
| 7 | #include "ioctl.h" | ||
| 6 | 8 | ||
| 7 | 9 | ||
| 8 | /* | 10 | /* |
| @@ -37,7 +39,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg) | |||
| 37 | { | 39 | { |
| 38 | struct inode *inode = file->f_dentry->d_inode; | 40 | struct inode *inode = file->f_dentry->d_inode; |
| 39 | struct inode *parent_inode = file->f_dentry->d_parent->d_inode; | 41 | struct inode *parent_inode = file->f_dentry->d_parent->d_inode; |
| 40 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; | 42 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; |
| 41 | struct ceph_mds_request *req; | 43 | struct ceph_mds_request *req; |
| 42 | struct ceph_ioctl_layout l; | 44 | struct ceph_ioctl_layout l; |
| 43 | int err, i; | 45 | int err, i; |
| @@ -90,6 +92,68 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg) | |||
| 90 | } | 92 | } |
| 91 | 93 | ||
| 92 | /* | 94 | /* |
| 95 | * Set a layout policy on a directory inode. All items created in the | ||
| 96 | * tree rooted at this inode will inherit this layout on creation, | ||
| 97 | * unless a subdirectory sets its own layout policy; the policy does | ||
| 98 | * not apply retroactively to existing items. | ||
| 99 | */ | ||
| 100 | static long ceph_ioctl_set_layout_policy(struct file *file, void __user *arg) | ||
| 101 | { | ||
| 102 | struct inode *inode = file->f_dentry->d_inode; | ||
| 103 | struct ceph_mds_request *req; | ||
| 104 | struct ceph_ioctl_layout l; | ||
| 105 | int err, i; | ||
| 106 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; | ||
| 107 | |||
| 108 | /* copy and validate */ | ||
| 109 | if (copy_from_user(&l, arg, sizeof(l))) | ||
| 110 | return -EFAULT; | ||
| 111 | |||
| 112 | if ((l.object_size & ~PAGE_MASK) || | ||
| 113 | (l.stripe_unit & ~PAGE_MASK) || | ||
| 114 | !l.stripe_unit || | ||
| 115 | (l.object_size && | ||
| 116 | (unsigned)l.object_size % (unsigned)l.stripe_unit)) | ||
| 117 | return -EINVAL; | ||
| 118 | |||
| 119 | /* make sure it's a valid data pool */ | ||
| 120 | if (l.data_pool > 0) { | ||
| 121 | mutex_lock(&mdsc->mutex); | ||
| 122 | err = -EINVAL; | ||
| 123 | for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) | ||
| 124 | if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { | ||
| 125 | err = 0; | ||
| 126 | break; | ||
| 127 | } | ||
| 128 | mutex_unlock(&mdsc->mutex); | ||
| 129 | if (err) | ||
| 130 | return err; | ||
| 131 | } | ||
| 132 | |||
| 133 | req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT, | ||
| 134 | USE_AUTH_MDS); | ||
| 135 | |||
| 136 | if (IS_ERR(req)) | ||
| 137 | return PTR_ERR(req); | ||
| 138 | req->r_inode = igrab(inode); | ||
| 139 | |||
| 140 | req->r_args.setlayout.layout.fl_stripe_unit = | ||
| 141 | cpu_to_le32(l.stripe_unit); | ||
| 142 | req->r_args.setlayout.layout.fl_stripe_count = | ||
| 143 | cpu_to_le32(l.stripe_count); | ||
| 144 | req->r_args.setlayout.layout.fl_object_size = | ||
| 145 | cpu_to_le32(l.object_size); | ||
| 146 | req->r_args.setlayout.layout.fl_pg_pool = | ||
| 147 | cpu_to_le32(l.data_pool); | ||
| 148 | req->r_args.setlayout.layout.fl_pg_preferred = | ||
| 149 | cpu_to_le32(l.preferred_osd); | ||
| 150 | |||
| 151 | err = ceph_mdsc_do_request(mdsc, inode, req); | ||
| 152 | ceph_mdsc_put_request(req); | ||
| 153 | return err; | ||
| 154 | } | ||
| 155 | |||
| 156 | /* | ||
| 93 | * Return object name, size/offset information, and location (OSD | 157 | * Return object name, size/offset information, and location (OSD |
| 94 | * number, network address) for a given file offset. | 158 | * number, network address) for a given file offset. |
| 95 | */ | 159 | */ |
| @@ -98,7 +162,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) | |||
| 98 | struct ceph_ioctl_dataloc dl; | 162 | struct ceph_ioctl_dataloc dl; |
| 99 | struct inode *inode = file->f_dentry->d_inode; | 163 | struct inode *inode = file->f_dentry->d_inode; |
| 100 | struct ceph_inode_info *ci = ceph_inode(inode); | 164 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 101 | struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; | 165 | struct ceph_osd_client *osdc = |
| 166 | &ceph_sb_to_client(inode->i_sb)->client->osdc; | ||
| 102 | u64 len = 1, olen; | 167 | u64 len = 1, olen; |
| 103 | u64 tmp; | 168 | u64 tmp; |
| 104 | struct ceph_object_layout ol; | 169 | struct ceph_object_layout ol; |
| @@ -174,11 +239,15 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 174 | case CEPH_IOC_SET_LAYOUT: | 239 | case CEPH_IOC_SET_LAYOUT: |
| 175 | return ceph_ioctl_set_layout(file, (void __user *)arg); | 240 | return ceph_ioctl_set_layout(file, (void __user *)arg); |
| 176 | 241 | ||
| 242 | case CEPH_IOC_SET_LAYOUT_POLICY: | ||
| 243 | return ceph_ioctl_set_layout_policy(file, (void __user *)arg); | ||
| 244 | |||
| 177 | case CEPH_IOC_GET_DATALOC: | 245 | case CEPH_IOC_GET_DATALOC: |
| 178 | return ceph_ioctl_get_dataloc(file, (void __user *)arg); | 246 | return ceph_ioctl_get_dataloc(file, (void __user *)arg); |
| 179 | 247 | ||
| 180 | case CEPH_IOC_LAZYIO: | 248 | case CEPH_IOC_LAZYIO: |
| 181 | return ceph_ioctl_lazyio(file); | 249 | return ceph_ioctl_lazyio(file); |
| 182 | } | 250 | } |
| 251 | |||
| 183 | return -ENOTTY; | 252 | return -ENOTTY; |
| 184 | } | 253 | } |
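ceph_ioctl_set_layout_policy() above is driven from user space through the new CEPH_IOC_SET_LAYOUT_POLICY command on a directory file descriptor. A hedged sketch of such a caller: the field names come from the hunk above, the mount path, pool id and sizes are made up, and the struct and command definitions must be taken from the installed ioctl.h.

```c
/*
 * User-space sketch: set a default file layout on a ceph directory.
 * Assumes a copy of fs/ceph/ioctl.h is available to user space.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ioctl.h"      /* struct ceph_ioctl_layout, CEPH_IOC_SET_LAYOUT_POLICY */

int main(void)
{
	struct ceph_ioctl_layout l = {
		.stripe_unit  = 4194304,   /* must be page-aligned and non-zero */
		.stripe_count = 1,
		.object_size  = 4194304,   /* must be a multiple of stripe_unit */
		.data_pool    = 0,         /* <= 0 skips the pool validity check */
	};
	int fd = open("/mnt/ceph/some-dir", O_RDONLY | O_DIRECTORY);

	if (fd < 0 || ioctl(fd, CEPH_IOC_SET_LAYOUT_POLICY, &l) < 0)
		perror("set layout policy");
	else
		printf("layout policy set\n");
	if (fd >= 0)
		close(fd);
	return 0;
}
```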
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h index 88451a3b6857..a6ce54e94eb5 100644 --- a/fs/ceph/ioctl.h +++ b/fs/ceph/ioctl.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <linux/ioctl.h> | 4 | #include <linux/ioctl.h> |
| 5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
| 6 | 6 | ||
| 7 | #define CEPH_IOCTL_MAGIC 0x97 | 7 | #define CEPH_IOCTL_MAGIC 0x98 |
| 8 | 8 | ||
| 9 | /* just use u64 to align sanely on all archs */ | 9 | /* just use u64 to align sanely on all archs */ |
| 10 | struct ceph_ioctl_layout { | 10 | struct ceph_ioctl_layout { |
| @@ -17,6 +17,8 @@ struct ceph_ioctl_layout { | |||
| 17 | struct ceph_ioctl_layout) | 17 | struct ceph_ioctl_layout) |
| 18 | #define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \ | 18 | #define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \ |
| 19 | struct ceph_ioctl_layout) | 19 | struct ceph_ioctl_layout) |
| 20 | #define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5, \ | ||
| 21 | struct ceph_ioctl_layout) | ||
| 20 | 22 | ||
| 21 | /* | 23 | /* |
| 22 | * Extract identity, address of the OSD and object storing a given | 24 | * Extract identity, address of the OSD and object storing a given |
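Because _IOW() folds the magic byte into the command value, bumping CEPH_IOCTL_MAGIC from 0x97 to 0x98 changes the numeric value of every ceph ioctl, so binaries built against the old header stop matching. A quick sketch that prints the two encodings; the struct layout below is inferred from the set-layout code above and is an assumption.

```c
/* Sketch: how the command values are built from the standard _IOW() encoding. */
#include <stdio.h>
#include <linux/ioctl.h>

struct ceph_ioctl_layout {
	unsigned long long stripe_unit, stripe_count, object_size,
			   data_pool, preferred_osd;
};

int main(void)
{
	unsigned long old_cmd = _IOW(0x97, 5, struct ceph_ioctl_layout);
	unsigned long new_cmd = _IOW(0x98, 5, struct ceph_ioctl_layout);

	/* the magic byte occupies bits 8..15 of the command word */
	printf("cmd with magic 0x97: 0x%lx, with 0x98: 0x%lx\n", old_cmd, new_cmd);
	return 0;
}
```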
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index ff4e753aae92..40abde93c345 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c | |||
| @@ -1,11 +1,11 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/file.h> | 3 | #include <linux/file.h> |
| 4 | #include <linux/namei.h> | 4 | #include <linux/namei.h> |
| 5 | 5 | ||
| 6 | #include "super.h" | 6 | #include "super.h" |
| 7 | #include "mds_client.h" | 7 | #include "mds_client.h" |
| 8 | #include "pagelist.h" | 8 | #include <linux/ceph/pagelist.h> |
| 9 | 9 | ||
| 10 | /** | 10 | /** |
| 11 | * Implement fcntl and flock locking functions. | 11 | * Implement fcntl and flock locking functions. |
| @@ -16,7 +16,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, | |||
| 16 | { | 16 | { |
| 17 | struct inode *inode = file->f_dentry->d_inode; | 17 | struct inode *inode = file->f_dentry->d_inode; |
| 18 | struct ceph_mds_client *mdsc = | 18 | struct ceph_mds_client *mdsc = |
| 19 | &ceph_sb_to_client(inode->i_sb)->mdsc; | 19 | ceph_sb_to_client(inode->i_sb)->mdsc; |
| 20 | struct ceph_mds_request *req; | 20 | struct ceph_mds_request *req; |
| 21 | int err; | 21 | int err; |
| 22 | 22 | ||
| @@ -181,8 +181,9 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) | |||
| 181 | * Encode the flock and fcntl locks for the given inode into the pagelist. | 181 | * Encode the flock and fcntl locks for the given inode into the pagelist. |
| 182 | * Format is: #fcntl locks, sequential fcntl locks, #flock locks, | 182 | * Format is: #fcntl locks, sequential fcntl locks, #flock locks, |
| 183 | * sequential flock locks. | 183 | * sequential flock locks. |
| 184 | * Must be called with BLK already held, and the lock numbers should have | 184 | * Must be called with lock_flocks() already held. |
| 185 | * been gathered under the same lock holding window. | 185 | * If we encounter more of a specific lock type than expected, |
| 186 | * we return -ENOSPC. | ||
| 186 | */ | 187 | */ |
| 187 | int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | 188 | int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, |
| 188 | int num_fcntl_locks, int num_flock_locks) | 189 | int num_fcntl_locks, int num_flock_locks) |
| @@ -190,6 +191,8 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | |||
| 190 | struct file_lock *lock; | 191 | struct file_lock *lock; |
| 191 | struct ceph_filelock cephlock; | 192 | struct ceph_filelock cephlock; |
| 192 | int err = 0; | 193 | int err = 0; |
| 194 | int seen_fcntl = 0; | ||
| 195 | int seen_flock = 0; | ||
| 193 | 196 | ||
| 194 | dout("encoding %d flock and %d fcntl locks", num_flock_locks, | 197 | dout("encoding %d flock and %d fcntl locks", num_flock_locks, |
| 195 | num_fcntl_locks); | 198 | num_fcntl_locks); |
| @@ -198,6 +201,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | |||
| 198 | goto fail; | 201 | goto fail; |
| 199 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { | 202 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { |
| 200 | if (lock->fl_flags & FL_POSIX) { | 203 | if (lock->fl_flags & FL_POSIX) { |
| 204 | ++seen_fcntl; | ||
| 205 | if (seen_fcntl > num_fcntl_locks) { | ||
| 206 | err = -ENOSPC; | ||
| 207 | goto fail; | ||
| 208 | } | ||
| 201 | err = lock_to_ceph_filelock(lock, &cephlock); | 209 | err = lock_to_ceph_filelock(lock, &cephlock); |
| 202 | if (err) | 210 | if (err) |
| 203 | goto fail; | 211 | goto fail; |
| @@ -213,6 +221,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | |||
| 213 | goto fail; | 221 | goto fail; |
| 214 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { | 222 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { |
| 215 | if (lock->fl_flags & FL_FLOCK) { | 223 | if (lock->fl_flags & FL_FLOCK) { |
| 224 | ++seen_flock; | ||
| 225 | if (seen_flock > num_flock_locks) { | ||
| 226 | err = -ENOSPC; | ||
| 227 | goto fail; | ||
| 228 | } | ||
| 216 | err = lock_to_ceph_filelock(lock, &cephlock); | 229 | err = lock_to_ceph_filelock(lock, &cephlock); |
| 217 | if (err) | 230 | if (err) |
| 218 | goto fail; | 231 | goto fail; |
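The seen_fcntl/seen_flock counters added above protect against the inode's lock lists growing between ceph_count_locks() and ceph_encode_locks(): if more locks of a type show up than were counted, the encoder returns -ENOSPC and the caller (see the mds_client.c hunk below) re-counts and retries. A toy stand-alone sketch of that guard; the list and lock kinds are stand-ins, not the kernel's file_lock.

```c
/* Sketch of the "encode at most what was counted" guard. */
#include <stdio.h>
#include <errno.h>

struct toy_lock { int is_posix; struct toy_lock *next; };

static int encode_locks(struct toy_lock *head, int num_posix_expected)
{
	int seen = 0;

	for (struct toy_lock *l = head; l; l = l->next) {
		if (!l->is_posix)
			continue;
		if (++seen > num_posix_expected)
			return -ENOSPC;   /* list grew since it was counted: retry */
		/* ... emit one lock record here ... */
	}
	return 0;
}

int main(void)
{
	struct toy_lock c = { 1, NULL }, b = { 1, &c }, a = { 0, &b };

	/* counted 1 POSIX lock, but 2 are present -> -ENOSPC */
	printf("%d\n", encode_locks(&a, 1));
	return 0;
}
```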
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fad95f8f2608..3142b15940c2 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -1,17 +1,21 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/fs.h> | ||
| 3 | #include <linux/wait.h> | 4 | #include <linux/wait.h> |
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 5 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
| 7 | #include <linux/debugfs.h> | ||
| 8 | #include <linux/seq_file.h> | ||
| 6 | #include <linux/smp_lock.h> | 9 | #include <linux/smp_lock.h> |
| 7 | 10 | ||
| 8 | #include "mds_client.h" | ||
| 9 | #include "mon_client.h" | ||
| 10 | #include "super.h" | 11 | #include "super.h" |
| 11 | #include "messenger.h" | 12 | #include "mds_client.h" |
| 12 | #include "decode.h" | 13 | |
| 13 | #include "auth.h" | 14 | #include <linux/ceph/messenger.h> |
| 14 | #include "pagelist.h" | 15 | #include <linux/ceph/decode.h> |
| 16 | #include <linux/ceph/pagelist.h> | ||
| 17 | #include <linux/ceph/auth.h> | ||
| 18 | #include <linux/ceph/debugfs.h> | ||
| 15 | 19 | ||
| 16 | /* | 20 | /* |
| 17 | * A cluster of MDS (metadata server) daemons is responsible for | 21 | * A cluster of MDS (metadata server) daemons is responsible for |
| @@ -286,8 +290,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s) | |||
| 286 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); | 290 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); |
| 287 | if (atomic_dec_and_test(&s->s_ref)) { | 291 | if (atomic_dec_and_test(&s->s_ref)) { |
| 288 | if (s->s_authorizer) | 292 | if (s->s_authorizer) |
| 289 | s->s_mdsc->client->monc.auth->ops->destroy_authorizer( | 293 | s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer( |
| 290 | s->s_mdsc->client->monc.auth, s->s_authorizer); | 294 | s->s_mdsc->fsc->client->monc.auth, |
| 295 | s->s_authorizer); | ||
| 291 | kfree(s); | 296 | kfree(s); |
| 292 | } | 297 | } |
| 293 | } | 298 | } |
| @@ -344,7 +349,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |||
| 344 | s->s_seq = 0; | 349 | s->s_seq = 0; |
| 345 | mutex_init(&s->s_mutex); | 350 | mutex_init(&s->s_mutex); |
| 346 | 351 | ||
| 347 | ceph_con_init(mdsc->client->msgr, &s->s_con); | 352 | ceph_con_init(mdsc->fsc->client->msgr, &s->s_con); |
| 348 | s->s_con.private = s; | 353 | s->s_con.private = s; |
| 349 | s->s_con.ops = &mds_con_ops; | 354 | s->s_con.ops = &mds_con_ops; |
| 350 | s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; | 355 | s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; |
| @@ -599,7 +604,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, | |||
| 599 | } else if (req->r_dentry) { | 604 | } else if (req->r_dentry) { |
| 600 | struct inode *dir = req->r_dentry->d_parent->d_inode; | 605 | struct inode *dir = req->r_dentry->d_parent->d_inode; |
| 601 | 606 | ||
| 602 | if (dir->i_sb != mdsc->client->sb) { | 607 | if (dir->i_sb != mdsc->fsc->sb) { |
| 603 | /* not this fs! */ | 608 | /* not this fs! */ |
| 604 | inode = req->r_dentry->d_inode; | 609 | inode = req->r_dentry->d_inode; |
| 605 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { | 610 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { |
| @@ -884,7 +889,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |||
| 884 | __ceph_remove_cap(cap); | 889 | __ceph_remove_cap(cap); |
| 885 | if (!__ceph_is_any_real_caps(ci)) { | 890 | if (!__ceph_is_any_real_caps(ci)) { |
| 886 | struct ceph_mds_client *mdsc = | 891 | struct ceph_mds_client *mdsc = |
| 887 | &ceph_sb_to_client(inode->i_sb)->mdsc; | 892 | ceph_sb_to_client(inode->i_sb)->mdsc; |
| 888 | 893 | ||
| 889 | spin_lock(&mdsc->cap_dirty_lock); | 894 | spin_lock(&mdsc->cap_dirty_lock); |
| 890 | if (!list_empty(&ci->i_dirty_item)) { | 895 | if (!list_empty(&ci->i_dirty_item)) { |
| @@ -1146,7 +1151,7 @@ int ceph_add_cap_releases(struct ceph_mds_client *mdsc, | |||
| 1146 | struct ceph_msg *msg, *partial = NULL; | 1151 | struct ceph_msg *msg, *partial = NULL; |
| 1147 | struct ceph_mds_cap_release *head; | 1152 | struct ceph_mds_cap_release *head; |
| 1148 | int err = -ENOMEM; | 1153 | int err = -ENOMEM; |
| 1149 | int extra = mdsc->client->mount_args->cap_release_safety; | 1154 | int extra = mdsc->fsc->mount_options->cap_release_safety; |
| 1150 | int num; | 1155 | int num; |
| 1151 | 1156 | ||
| 1152 | dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, | 1157 | dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, |
| @@ -2085,7 +2090,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
| 2085 | 2090 | ||
| 2086 | /* insert trace into our cache */ | 2091 | /* insert trace into our cache */ |
| 2087 | mutex_lock(&req->r_fill_mutex); | 2092 | mutex_lock(&req->r_fill_mutex); |
| 2088 | err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); | 2093 | err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); |
| 2089 | if (err == 0) { | 2094 | if (err == 0) { |
| 2090 | if (result == 0 && rinfo->dir_nr) | 2095 | if (result == 0 && rinfo->dir_nr) |
| 2091 | ceph_readdir_prepopulate(req, req->r_session); | 2096 | ceph_readdir_prepopulate(req, req->r_session); |
| @@ -2361,19 +2366,35 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, | |||
| 2361 | 2366 | ||
| 2362 | if (recon_state->flock) { | 2367 | if (recon_state->flock) { |
| 2363 | int num_fcntl_locks, num_flock_locks; | 2368 | int num_fcntl_locks, num_flock_locks; |
| 2364 | 2369 | struct ceph_pagelist_cursor trunc_point; | |
| 2365 | lock_kernel(); | 2370 | |
| 2366 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); | 2371 | ceph_pagelist_set_cursor(pagelist, &trunc_point); |
| 2367 | rec.v2.flock_len = (2*sizeof(u32) + | 2372 | do { |
| 2368 | (num_fcntl_locks+num_flock_locks) * | 2373 | lock_flocks(); |
| 2369 | sizeof(struct ceph_filelock)); | 2374 | ceph_count_locks(inode, &num_fcntl_locks, |
| 2370 | 2375 | &num_flock_locks); | |
| 2371 | err = ceph_pagelist_append(pagelist, &rec, reclen); | 2376 | rec.v2.flock_len = (2*sizeof(u32) + |
| 2372 | if (!err) | 2377 | (num_fcntl_locks+num_flock_locks) * |
| 2373 | err = ceph_encode_locks(inode, pagelist, | 2378 | sizeof(struct ceph_filelock)); |
| 2374 | num_fcntl_locks, | 2379 | unlock_flocks(); |
| 2375 | num_flock_locks); | 2380 | |
| 2376 | unlock_kernel(); | 2381 | /* pre-alloc pagelist */ |
| 2382 | ceph_pagelist_truncate(pagelist, &trunc_point); | ||
| 2383 | err = ceph_pagelist_append(pagelist, &rec, reclen); | ||
| 2384 | if (!err) | ||
| 2385 | err = ceph_pagelist_reserve(pagelist, | ||
| 2386 | rec.v2.flock_len); | ||
| 2387 | |||
| 2388 | /* encode locks */ | ||
| 2389 | if (!err) { | ||
| 2390 | lock_flocks(); | ||
| 2391 | err = ceph_encode_locks(inode, | ||
| 2392 | pagelist, | ||
| 2393 | num_fcntl_locks, | ||
| 2394 | num_flock_locks); | ||
| 2395 | unlock_flocks(); | ||
| 2396 | } | ||
| 2397 | } while (err == -ENOSPC); | ||
| 2377 | } else { | 2398 | } else { |
| 2378 | err = ceph_pagelist_append(pagelist, &rec, reclen); | 2399 | err = ceph_pagelist_append(pagelist, &rec, reclen); |
| 2379 | } | 2400 | } |
| @@ -2613,7 +2634,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
| 2613 | struct ceph_mds_session *session, | 2634 | struct ceph_mds_session *session, |
| 2614 | struct ceph_msg *msg) | 2635 | struct ceph_msg *msg) |
| 2615 | { | 2636 | { |
| 2616 | struct super_block *sb = mdsc->client->sb; | 2637 | struct super_block *sb = mdsc->fsc->sb; |
| 2617 | struct inode *inode; | 2638 | struct inode *inode; |
| 2618 | struct ceph_inode_info *ci; | 2639 | struct ceph_inode_info *ci; |
| 2619 | struct dentry *parent, *dentry; | 2640 | struct dentry *parent, *dentry; |
| @@ -2891,10 +2912,16 @@ static void delayed_work(struct work_struct *work) | |||
| 2891 | schedule_delayed(mdsc); | 2912 | schedule_delayed(mdsc); |
| 2892 | } | 2913 | } |
| 2893 | 2914 | ||
| 2915 | int ceph_mdsc_init(struct ceph_fs_client *fsc) | ||
| 2894 | 2916 | ||
| 2895 | int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) | ||
| 2896 | { | 2917 | { |
| 2897 | mdsc->client = client; | 2918 | struct ceph_mds_client *mdsc; |
| 2919 | |||
| 2920 | mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); | ||
| 2921 | if (!mdsc) | ||
| 2922 | return -ENOMEM; | ||
| 2923 | mdsc->fsc = fsc; | ||
| 2924 | fsc->mdsc = mdsc; | ||
| 2898 | mutex_init(&mdsc->mutex); | 2925 | mutex_init(&mdsc->mutex); |
| 2899 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); | 2926 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); |
| 2900 | if (mdsc->mdsmap == NULL) | 2927 | if (mdsc->mdsmap == NULL) |
| @@ -2927,7 +2954,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) | |||
| 2927 | INIT_LIST_HEAD(&mdsc->dentry_lru); | 2954 | INIT_LIST_HEAD(&mdsc->dentry_lru); |
| 2928 | 2955 | ||
| 2929 | ceph_caps_init(mdsc); | 2956 | ceph_caps_init(mdsc); |
| 2930 | ceph_adjust_min_caps(mdsc, client->min_caps); | 2957 | ceph_adjust_min_caps(mdsc, fsc->min_caps); |
| 2931 | 2958 | ||
| 2932 | return 0; | 2959 | return 0; |
| 2933 | } | 2960 | } |
| @@ -2939,7 +2966,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) | |||
| 2939 | static void wait_requests(struct ceph_mds_client *mdsc) | 2966 | static void wait_requests(struct ceph_mds_client *mdsc) |
| 2940 | { | 2967 | { |
| 2941 | struct ceph_mds_request *req; | 2968 | struct ceph_mds_request *req; |
| 2942 | struct ceph_client *client = mdsc->client; | 2969 | struct ceph_fs_client *fsc = mdsc->fsc; |
| 2943 | 2970 | ||
| 2944 | mutex_lock(&mdsc->mutex); | 2971 | mutex_lock(&mdsc->mutex); |
| 2945 | if (__get_oldest_req(mdsc)) { | 2972 | if (__get_oldest_req(mdsc)) { |
| @@ -2947,7 +2974,7 @@ static void wait_requests(struct ceph_mds_client *mdsc) | |||
| 2947 | 2974 | ||
| 2948 | dout("wait_requests waiting for requests\n"); | 2975 | dout("wait_requests waiting for requests\n"); |
| 2949 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, | 2976 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, |
| 2950 | client->mount_args->mount_timeout * HZ); | 2977 | fsc->client->options->mount_timeout * HZ); |
| 2951 | 2978 | ||
| 2952 | /* tear down remaining requests */ | 2979 | /* tear down remaining requests */ |
| 2953 | mutex_lock(&mdsc->mutex); | 2980 | mutex_lock(&mdsc->mutex); |
| @@ -3030,7 +3057,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) | |||
| 3030 | { | 3057 | { |
| 3031 | u64 want_tid, want_flush; | 3058 | u64 want_tid, want_flush; |
| 3032 | 3059 | ||
| 3033 | if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) | 3060 | if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
| 3034 | return; | 3061 | return; |
| 3035 | 3062 | ||
| 3036 | dout("sync\n"); | 3063 | dout("sync\n"); |
| @@ -3053,7 +3080,7 @@ bool done_closing_sessions(struct ceph_mds_client *mdsc) | |||
| 3053 | { | 3080 | { |
| 3054 | int i, n = 0; | 3081 | int i, n = 0; |
| 3055 | 3082 | ||
| 3056 | if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) | 3083 | if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
| 3057 | return true; | 3084 | return true; |
| 3058 | 3085 | ||
| 3059 | mutex_lock(&mdsc->mutex); | 3086 | mutex_lock(&mdsc->mutex); |
| @@ -3071,8 +3098,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |||
| 3071 | { | 3098 | { |
| 3072 | struct ceph_mds_session *session; | 3099 | struct ceph_mds_session *session; |
| 3073 | int i; | 3100 | int i; |
| 3074 | struct ceph_client *client = mdsc->client; | 3101 | struct ceph_fs_client *fsc = mdsc->fsc; |
| 3075 | unsigned long timeout = client->mount_args->mount_timeout * HZ; | 3102 | unsigned long timeout = fsc->client->options->mount_timeout * HZ; |
| 3076 | 3103 | ||
| 3077 | dout("close_sessions\n"); | 3104 | dout("close_sessions\n"); |
| 3078 | 3105 | ||
| @@ -3119,7 +3146,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |||
| 3119 | dout("stopped\n"); | 3146 | dout("stopped\n"); |
| 3120 | } | 3147 | } |
| 3121 | 3148 | ||
| 3122 | void ceph_mdsc_stop(struct ceph_mds_client *mdsc) | 3149 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
| 3123 | { | 3150 | { |
| 3124 | dout("stop\n"); | 3151 | dout("stop\n"); |
| 3125 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | 3152 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ |
| @@ -3129,6 +3156,15 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc) | |||
| 3129 | ceph_caps_finalize(mdsc); | 3156 | ceph_caps_finalize(mdsc); |
| 3130 | } | 3157 | } |
| 3131 | 3158 | ||
| 3159 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) | ||
| 3160 | { | ||
| 3161 | struct ceph_mds_client *mdsc = fsc->mdsc; | ||
| 3162 | |||
| 3163 | ceph_mdsc_stop(mdsc); | ||
| 3164 | fsc->mdsc = NULL; | ||
| 3165 | kfree(mdsc); | ||
| 3166 | } | ||
| 3167 | |||
| 3132 | 3168 | ||
| 3133 | /* | 3169 | /* |
| 3134 | * handle mds map update. | 3170 | * handle mds map update. |
| @@ -3145,14 +3181,14 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) | |||
| 3145 | 3181 | ||
| 3146 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); | 3182 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); |
| 3147 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | 3183 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); |
| 3148 | if (ceph_check_fsid(mdsc->client, &fsid) < 0) | 3184 | if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
| 3149 | return; | 3185 | return; |
| 3150 | epoch = ceph_decode_32(&p); | 3186 | epoch = ceph_decode_32(&p); |
| 3151 | maplen = ceph_decode_32(&p); | 3187 | maplen = ceph_decode_32(&p); |
| 3152 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); | 3188 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); |
| 3153 | 3189 | ||
| 3154 | /* do we need it? */ | 3190 | /* do we need it? */ |
| 3155 | ceph_monc_got_mdsmap(&mdsc->client->monc, epoch); | 3191 | ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); |
| 3156 | mutex_lock(&mdsc->mutex); | 3192 | mutex_lock(&mdsc->mutex); |
| 3157 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { | 3193 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { |
| 3158 | dout("handle_map epoch %u <= our %u\n", | 3194 | dout("handle_map epoch %u <= our %u\n", |
| @@ -3176,7 +3212,7 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) | |||
| 3176 | } else { | 3212 | } else { |
| 3177 | mdsc->mdsmap = newmap; /* first mds map */ | 3213 | mdsc->mdsmap = newmap; /* first mds map */ |
| 3178 | } | 3214 | } |
| 3179 | mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; | 3215 | mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
| 3180 | 3216 | ||
| 3181 | __wake_requests(mdsc, &mdsc->waiting_for_map); | 3217 | __wake_requests(mdsc, &mdsc->waiting_for_map); |
| 3182 | 3218 | ||
| @@ -3277,7 +3313,7 @@ static int get_authorizer(struct ceph_connection *con, | |||
| 3277 | { | 3313 | { |
| 3278 | struct ceph_mds_session *s = con->private; | 3314 | struct ceph_mds_session *s = con->private; |
| 3279 | struct ceph_mds_client *mdsc = s->s_mdsc; | 3315 | struct ceph_mds_client *mdsc = s->s_mdsc; |
| 3280 | struct ceph_auth_client *ac = mdsc->client->monc.auth; | 3316 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
| 3281 | int ret = 0; | 3317 | int ret = 0; |
| 3282 | 3318 | ||
| 3283 | if (force_new && s->s_authorizer) { | 3319 | if (force_new && s->s_authorizer) { |
| @@ -3311,7 +3347,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) | |||
| 3311 | { | 3347 | { |
| 3312 | struct ceph_mds_session *s = con->private; | 3348 | struct ceph_mds_session *s = con->private; |
| 3313 | struct ceph_mds_client *mdsc = s->s_mdsc; | 3349 | struct ceph_mds_client *mdsc = s->s_mdsc; |
| 3314 | struct ceph_auth_client *ac = mdsc->client->monc.auth; | 3350 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
| 3315 | 3351 | ||
| 3316 | return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); | 3352 | return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); |
| 3317 | } | 3353 | } |
| @@ -3320,12 +3356,12 @@ static int invalidate_authorizer(struct ceph_connection *con) | |||
| 3320 | { | 3356 | { |
| 3321 | struct ceph_mds_session *s = con->private; | 3357 | struct ceph_mds_session *s = con->private; |
| 3322 | struct ceph_mds_client *mdsc = s->s_mdsc; | 3358 | struct ceph_mds_client *mdsc = s->s_mdsc; |
| 3323 | struct ceph_auth_client *ac = mdsc->client->monc.auth; | 3359 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
| 3324 | 3360 | ||
| 3325 | if (ac->ops->invalidate_authorizer) | 3361 | if (ac->ops->invalidate_authorizer) |
| 3326 | ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); | 3362 | ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
| 3327 | 3363 | ||
| 3328 | return ceph_monc_validate_auth(&mdsc->client->monc); | 3364 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
| 3329 | } | 3365 | } |
| 3330 | 3366 | ||
| 3331 | static const struct ceph_connection_operations mds_con_ops = { | 3367 | static const struct ceph_connection_operations mds_con_ops = { |
| @@ -3338,7 +3374,4 @@ static const struct ceph_connection_operations mds_con_ops = { | |||
| 3338 | .peer_reset = peer_reset, | 3374 | .peer_reset = peer_reset, |
| 3339 | }; | 3375 | }; |
| 3340 | 3376 | ||
| 3341 | |||
| 3342 | |||
| 3343 | |||
| 3344 | /* eof */ | 3377 | /* eof */ |
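The encode_caps_cb() change above turns lock encoding into a count, reserve, encode cycle that repeats while ceph_encode_locks() reports -ENOSPC, so a lock list that grows after counting is simply re-counted. A toy model of that retry loop; in the real code the steps are lock_flocks(), ceph_count_locks(), ceph_pagelist_truncate()/ceph_pagelist_reserve() and ceph_encode_locks().

```c
/* Toy model of the count/reserve/encode retry: the "lock list" is an int. */
#include <stdio.h>
#include <errno.h>

static int nlocks = 1;                  /* pretend one lock exists initially */

static int encode(int expected)
{
	if (nlocks > expected)
		return -ENOSPC;         /* more locks than we reserved room for */
	return 0;                       /* all records written */
}

int main(void)
{
	int err, tries = 0;

	do {
		int n = nlocks;         /* count under the lock */
		/* ... truncate the pagelist and reserve room for n records ... */
		if (tries++ == 0)
			nlocks++;       /* simulate a lock appearing mid-way */
		err = encode(n);        /* re-take the lock and encode */
	} while (err == -ENOSPC);

	printf("encoded after %d tries, err=%d\n", tries, err);
	return 0;
}
```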
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index c98267ce6d2a..d66d63c72355 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
| @@ -8,9 +8,9 @@ | |||
| 8 | #include <linux/rbtree.h> | 8 | #include <linux/rbtree.h> |
| 9 | #include <linux/spinlock.h> | 9 | #include <linux/spinlock.h> |
| 10 | 10 | ||
| 11 | #include "types.h" | 11 | #include <linux/ceph/types.h> |
| 12 | #include "messenger.h" | 12 | #include <linux/ceph/messenger.h> |
| 13 | #include "mdsmap.h" | 13 | #include <linux/ceph/mdsmap.h> |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * Some lock dependencies: | 16 | * Some lock dependencies: |
| @@ -26,7 +26,7 @@ | |||
| 26 | * | 26 | * |
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | struct ceph_client; | 29 | struct ceph_fs_client; |
| 30 | struct ceph_cap; | 30 | struct ceph_cap; |
| 31 | 31 | ||
| 32 | /* | 32 | /* |
| @@ -230,7 +230,7 @@ struct ceph_mds_request { | |||
| 230 | * mds client state | 230 | * mds client state |
| 231 | */ | 231 | */ |
| 232 | struct ceph_mds_client { | 232 | struct ceph_mds_client { |
| 233 | struct ceph_client *client; | 233 | struct ceph_fs_client *fsc; |
| 234 | struct mutex mutex; /* all nested structures */ | 234 | struct mutex mutex; /* all nested structures */ |
| 235 | 235 | ||
| 236 | struct ceph_mdsmap *mdsmap; | 236 | struct ceph_mdsmap *mdsmap; |
| @@ -289,11 +289,6 @@ struct ceph_mds_client { | |||
| 289 | int caps_avail_count; /* unused, unreserved */ | 289 | int caps_avail_count; /* unused, unreserved */ |
| 290 | int caps_min_count; /* keep at least this many | 290 | int caps_min_count; /* keep at least this many |
| 291 | (unreserved) */ | 291 | (unreserved) */ |
| 292 | |||
| 293 | #ifdef CONFIG_DEBUG_FS | ||
| 294 | struct dentry *debugfs_file; | ||
| 295 | #endif | ||
| 296 | |||
| 297 | spinlock_t dentry_lru_lock; | 292 | spinlock_t dentry_lru_lock; |
| 298 | struct list_head dentry_lru; | 293 | struct list_head dentry_lru; |
| 299 | int num_dentry; | 294 | int num_dentry; |
| @@ -316,10 +311,9 @@ extern void ceph_put_mds_session(struct ceph_mds_session *s); | |||
| 316 | extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, | 311 | extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, |
| 317 | struct ceph_msg *msg, int mds); | 312 | struct ceph_msg *msg, int mds); |
| 318 | 313 | ||
| 319 | extern int ceph_mdsc_init(struct ceph_mds_client *mdsc, | 314 | extern int ceph_mdsc_init(struct ceph_fs_client *fsc); |
| 320 | struct ceph_client *client); | ||
| 321 | extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); | 315 | extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); |
| 322 | extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc); | 316 | extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc); |
| 323 | 317 | ||
| 324 | extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); | 318 | extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); |
| 325 | 319 | ||
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 040be6d1150b..73b7d44e8a35 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/bug.h> | 3 | #include <linux/bug.h> |
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| @@ -6,9 +6,9 @@ | |||
| 6 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
| 7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
| 8 | 8 | ||
| 9 | #include "mdsmap.h" | 9 | #include <linux/ceph/mdsmap.h> |
| 10 | #include "messenger.h" | 10 | #include <linux/ceph/messenger.h> |
| 11 | #include "decode.h" | 11 | #include <linux/ceph/decode.h> |
| 12 | 12 | ||
| 13 | #include "super.h" | 13 | #include "super.h" |
| 14 | 14 | ||
| @@ -117,7 +117,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) | |||
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n", | 119 | dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n", |
| 120 | i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr), | 120 | i+1, n, global_id, mds, inc, |
| 121 | ceph_pr_addr(&addr.in_addr), | ||
| 121 | ceph_mds_state_name(state)); | 122 | ceph_mds_state_name(state)); |
| 122 | if (mds >= 0 && mds < m->m_max_mds && state > 0) { | 123 | if (mds >= 0 && mds < m->m_max_mds && state > 0) { |
| 123 | m->m_info[mds].global_id = global_id; | 124 | m->m_info[mds].global_id = global_id; |
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c deleted file mode 100644 index 46a368b6dce5..000000000000 --- a/fs/ceph/pagelist.c +++ /dev/null | |||
| @@ -1,63 +0,0 @@ | |||
| 1 | |||
| 2 | #include <linux/gfp.h> | ||
| 3 | #include <linux/pagemap.h> | ||
| 4 | #include <linux/highmem.h> | ||
| 5 | |||
| 6 | #include "pagelist.h" | ||
| 7 | |||
| 8 | static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) | ||
| 9 | { | ||
| 10 | struct page *page = list_entry(pl->head.prev, struct page, | ||
| 11 | lru); | ||
| 12 | kunmap(page); | ||
| 13 | } | ||
| 14 | |||
| 15 | int ceph_pagelist_release(struct ceph_pagelist *pl) | ||
| 16 | { | ||
| 17 | if (pl->mapped_tail) | ||
| 18 | ceph_pagelist_unmap_tail(pl); | ||
| 19 | |||
| 20 | while (!list_empty(&pl->head)) { | ||
| 21 | struct page *page = list_first_entry(&pl->head, struct page, | ||
| 22 | lru); | ||
| 23 | list_del(&page->lru); | ||
| 24 | __free_page(page); | ||
| 25 | } | ||
| 26 | return 0; | ||
| 27 | } | ||
| 28 | |||
| 29 | static int ceph_pagelist_addpage(struct ceph_pagelist *pl) | ||
| 30 | { | ||
| 31 | struct page *page = __page_cache_alloc(GFP_NOFS); | ||
| 32 | if (!page) | ||
| 33 | return -ENOMEM; | ||
| 34 | pl->room += PAGE_SIZE; | ||
| 35 | list_add_tail(&page->lru, &pl->head); | ||
| 36 | if (pl->mapped_tail) | ||
| 37 | ceph_pagelist_unmap_tail(pl); | ||
| 38 | pl->mapped_tail = kmap(page); | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len) | ||
| 43 | { | ||
| 44 | while (pl->room < len) { | ||
| 45 | size_t bit = pl->room; | ||
| 46 | int ret; | ||
| 47 | |||
| 48 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), | ||
| 49 | buf, bit); | ||
| 50 | pl->length += bit; | ||
| 51 | pl->room -= bit; | ||
| 52 | buf += bit; | ||
| 53 | len -= bit; | ||
| 54 | ret = ceph_pagelist_addpage(pl); | ||
| 55 | if (ret) | ||
| 56 | return ret; | ||
| 57 | } | ||
| 58 | |||
| 59 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); | ||
| 60 | pl->length += len; | ||
| 61 | pl->room -= len; | ||
| 62 | return 0; | ||
| 63 | } | ||
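fs/ceph/pagelist.c disappears here because the pagelist implementation moves into the shared net/ceph library; the earlier hunks already switch to <linux/ceph/pagelist.h>. For reference, its append path fills whatever room is left on the mapped tail page before allocating the next page. A toy sketch of that split with made-up sizes; the real code kmaps the tail page and copies into it.

```c
/* Sketch of the pagelist append split across pages. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long room = 1000;      /* space left on the mapped tail page */
	unsigned long len = 5000;       /* bytes to append */
	unsigned long length = 3096;    /* bytes already in the pagelist */
	int pages_added = 0;

	while (room < len) {
		/* copy 'room' bytes at offset (length & (PAGE_SIZE-1)), then grow */
		length += room;
		len -= room;
		room = PAGE_SIZE;       /* a fresh tail page was just added */
		pages_added++;
	}
	length += len;                  /* final copy fits in the current page */
	room -= len;

	printf("added %d page(s), length=%lu, room left=%lu\n",
	       pages_added, length, room);
	return 0;
}
```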
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 190b6c4a6f2b..39c243acd062 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
| @@ -1,10 +1,12 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/sort.h> | 3 | #include <linux/sort.h> |
| 4 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
| 5 | 5 | ||
| 6 | #include "super.h" | 6 | #include "super.h" |
| 7 | #include "decode.h" | 7 | #include "mds_client.h" |
| 8 | |||
| 9 | #include <linux/ceph/decode.h> | ||
| 8 | 10 | ||
| 9 | /* | 11 | /* |
| 10 | * Snapshots in ceph are driven in large part by cooperation from the | 12 | * Snapshots in ceph are driven in large part by cooperation from the |
| @@ -526,7 +528,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | |||
| 526 | struct ceph_cap_snap *capsnap) | 528 | struct ceph_cap_snap *capsnap) |
| 527 | { | 529 | { |
| 528 | struct inode *inode = &ci->vfs_inode; | 530 | struct inode *inode = &ci->vfs_inode; |
| 529 | struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; | 531 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; |
| 530 | 532 | ||
| 531 | BUG_ON(capsnap->writing); | 533 | BUG_ON(capsnap->writing); |
| 532 | capsnap->size = inode->i_size; | 534 | capsnap->size = inode->i_size; |
| @@ -747,7 +749,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
| 747 | struct ceph_mds_session *session, | 749 | struct ceph_mds_session *session, |
| 748 | struct ceph_msg *msg) | 750 | struct ceph_msg *msg) |
| 749 | { | 751 | { |
| 750 | struct super_block *sb = mdsc->client->sb; | 752 | struct super_block *sb = mdsc->fsc->sb; |
| 751 | int mds = session->s_mds; | 753 | int mds = session->s_mds; |
| 752 | u64 split; | 754 | u64 split; |
| 753 | int op; | 755 | int op; |
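
Both snap.c hunks reflect one layout change rather than new logic: the MDS client used to be embedded in the old ceph_client (hence `&...->mdsc`), while the new ceph_fs_client carries it as a separately allocated pointer (`fsc->mdsc`, with `mdsc->fsc` as the back-reference). A toy illustration of why the address-of operator disappears at the call sites (types and fields below are invented for the example, not the ceph structures):

        /* Toy illustration (made-up types, not the ceph structures): an
         * embedded member is addressed with '&', a pointer member is loaded. */
        #include <stdio.h>
        #include <stdlib.h>

        struct mds_state { int epoch; };

        struct old_client {                     /* before: mdsc embedded in the client */
                struct mds_state mdsc;
        };

        struct new_fs_client {                  /* after: mdsc allocated separately */
                struct mds_state *mdsc;
        };

        int main(void)
        {
                struct old_client oldc = { .mdsc = { .epoch = 1 } };
                struct new_fs_client newc = { .mdsc = malloc(sizeof(struct mds_state)) };

                if (!newc.mdsc)
                        return 1;
                newc.mdsc->epoch = 2;

                struct mds_state *a = &oldc.mdsc;       /* old call sites: &client->mdsc */
                struct mds_state *b = newc.mdsc;        /* new call sites: fsc->mdsc */

                printf("%d %d\n", a->epoch, b->epoch);
                free(newc.mdsc);
                return 0;
        }
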
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/strings.c index c6179d3a26a2..cd5097d7c804 100644 --- a/fs/ceph/ceph_strings.c +++ b/fs/ceph/strings.c | |||
| @@ -1,71 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Ceph string constants | 2 | * Ceph fs string constants |
| 3 | */ | 3 | */ |
| 4 | #include "types.h" | 4 | #include <linux/module.h> |
| 5 | #include <linux/ceph/types.h> | ||
| 5 | 6 | ||
| 6 | const char *ceph_entity_type_name(int type) | ||
| 7 | { | ||
| 8 | switch (type) { | ||
| 9 | case CEPH_ENTITY_TYPE_MDS: return "mds"; | ||
| 10 | case CEPH_ENTITY_TYPE_OSD: return "osd"; | ||
| 11 | case CEPH_ENTITY_TYPE_MON: return "mon"; | ||
| 12 | case CEPH_ENTITY_TYPE_CLIENT: return "client"; | ||
| 13 | case CEPH_ENTITY_TYPE_AUTH: return "auth"; | ||
| 14 | default: return "unknown"; | ||
| 15 | } | ||
| 16 | } | ||
| 17 | |||
| 18 | const char *ceph_osd_op_name(int op) | ||
| 19 | { | ||
| 20 | switch (op) { | ||
| 21 | case CEPH_OSD_OP_READ: return "read"; | ||
| 22 | case CEPH_OSD_OP_STAT: return "stat"; | ||
| 23 | |||
| 24 | case CEPH_OSD_OP_MASKTRUNC: return "masktrunc"; | ||
| 25 | |||
| 26 | case CEPH_OSD_OP_WRITE: return "write"; | ||
| 27 | case CEPH_OSD_OP_DELETE: return "delete"; | ||
| 28 | case CEPH_OSD_OP_TRUNCATE: return "truncate"; | ||
| 29 | case CEPH_OSD_OP_ZERO: return "zero"; | ||
| 30 | case CEPH_OSD_OP_WRITEFULL: return "writefull"; | ||
| 31 | case CEPH_OSD_OP_ROLLBACK: return "rollback"; | ||
| 32 | |||
| 33 | case CEPH_OSD_OP_APPEND: return "append"; | ||
| 34 | case CEPH_OSD_OP_STARTSYNC: return "startsync"; | ||
| 35 | case CEPH_OSD_OP_SETTRUNC: return "settrunc"; | ||
| 36 | case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc"; | ||
| 37 | |||
| 38 | case CEPH_OSD_OP_TMAPUP: return "tmapup"; | ||
| 39 | case CEPH_OSD_OP_TMAPGET: return "tmapget"; | ||
| 40 | case CEPH_OSD_OP_TMAPPUT: return "tmapput"; | ||
| 41 | |||
| 42 | case CEPH_OSD_OP_GETXATTR: return "getxattr"; | ||
| 43 | case CEPH_OSD_OP_GETXATTRS: return "getxattrs"; | ||
| 44 | case CEPH_OSD_OP_SETXATTR: return "setxattr"; | ||
| 45 | case CEPH_OSD_OP_SETXATTRS: return "setxattrs"; | ||
| 46 | case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs"; | ||
| 47 | case CEPH_OSD_OP_RMXATTR: return "rmxattr"; | ||
| 48 | case CEPH_OSD_OP_CMPXATTR: return "cmpxattr"; | ||
| 49 | |||
| 50 | case CEPH_OSD_OP_PULL: return "pull"; | ||
| 51 | case CEPH_OSD_OP_PUSH: return "push"; | ||
| 52 | case CEPH_OSD_OP_BALANCEREADS: return "balance-reads"; | ||
| 53 | case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads"; | ||
| 54 | case CEPH_OSD_OP_SCRUB: return "scrub"; | ||
| 55 | |||
| 56 | case CEPH_OSD_OP_WRLOCK: return "wrlock"; | ||
| 57 | case CEPH_OSD_OP_WRUNLOCK: return "wrunlock"; | ||
| 58 | case CEPH_OSD_OP_RDLOCK: return "rdlock"; | ||
| 59 | case CEPH_OSD_OP_RDUNLOCK: return "rdunlock"; | ||
| 60 | case CEPH_OSD_OP_UPLOCK: return "uplock"; | ||
| 61 | case CEPH_OSD_OP_DNLOCK: return "dnlock"; | ||
| 62 | |||
| 63 | case CEPH_OSD_OP_CALL: return "call"; | ||
| 64 | |||
| 65 | case CEPH_OSD_OP_PGLS: return "pgls"; | ||
| 66 | } | ||
| 67 | return "???"; | ||
| 68 | } | ||
| 69 | 7 | ||
| 70 | const char *ceph_mds_state_name(int s) | 8 | const char *ceph_mds_state_name(int s) |
| 71 | { | 9 | { |
| @@ -177,17 +115,3 @@ const char *ceph_snap_op_name(int o) | |||
| 177 | } | 115 | } |
| 178 | return "???"; | 116 | return "???"; |
| 179 | } | 117 | } |
| 180 | |||
| 181 | const char *ceph_pool_op_name(int op) | ||
| 182 | { | ||
| 183 | switch (op) { | ||
| 184 | case POOL_OP_CREATE: return "create"; | ||
| 185 | case POOL_OP_DELETE: return "delete"; | ||
| 186 | case POOL_OP_AUID_CHANGE: return "auid change"; | ||
| 187 | case POOL_OP_CREATE_SNAP: return "create snap"; | ||
| 188 | case POOL_OP_DELETE_SNAP: return "delete snap"; | ||
| 189 | case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap"; | ||
| 190 | case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap"; | ||
| 191 | } | ||
| 192 | return "???"; | ||
| 193 | } | ||
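
After this change fs/ceph/strings.c keeps only the filesystem-facing tables (MDS state, snap op, and friends); the generic entity/OSD/pool name helpers removed here presumably move out with the shared code that the new <linux/ceph/types.h> include points at. All of these helpers follow the same switch-based lookup shape, sketched standalone below (the states are invented; only the pattern, including the "???" fallback, mirrors the kernel tables):

        /* Standalone sketch of the switch-based name-table pattern used by
         * ceph_mds_state_name() and friends; the states below are invented. */
        #include <stdio.h>

        enum { STATE_BOOT = 1, STATE_REPLAY = 2, STATE_ACTIVE = 3 };

        static const char *state_name(int s)
        {
                switch (s) {
                case STATE_BOOT:   return "boot";
                case STATE_REPLAY: return "replay";
                case STATE_ACTIVE: return "active";
                }
                return "???";           /* same fallback the kernel tables use */
        }

        int main(void)
        {
                for (int s = 0; s <= 4; s++)
                        printf("%d -> %s\n", s, state_name(s));
                return 0;
        }
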
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 9922628532b2..d6e0e0421891 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/backing-dev.h> | 4 | #include <linux/backing-dev.h> |
| 5 | #include <linux/ctype.h> | 5 | #include <linux/ctype.h> |
| @@ -15,10 +15,13 @@ | |||
| 15 | #include <linux/statfs.h> | 15 | #include <linux/statfs.h> |
| 16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 17 | 17 | ||
| 18 | #include "decode.h" | ||
| 19 | #include "super.h" | 18 | #include "super.h" |
| 20 | #include "mon_client.h" | 19 | #include "mds_client.h" |
| 21 | #include "auth.h" | 20 | |
| 21 | #include <linux/ceph/decode.h> | ||
| 22 | #include <linux/ceph/mon_client.h> | ||
| 23 | #include <linux/ceph/auth.h> | ||
| 24 | #include <linux/ceph/debugfs.h> | ||
| 22 | 25 | ||
| 23 | /* | 26 | /* |
| 24 | * Ceph superblock operations | 27 | * Ceph superblock operations |
| @@ -26,36 +29,22 @@ | |||
| 26 | * Handle the basics of mounting, unmounting. | 29 | * Handle the basics of mounting, unmounting. |
| 27 | */ | 30 | */ |
| 28 | 31 | ||
| 29 | |||
| 30 | /* | ||
| 31 | * find filename portion of a path (/foo/bar/baz -> baz) | ||
| 32 | */ | ||
| 33 | const char *ceph_file_part(const char *s, int len) | ||
| 34 | { | ||
| 35 | const char *e = s + len; | ||
| 36 | |||
| 37 | while (e != s && *(e-1) != '/') | ||
| 38 | e--; | ||
| 39 | return e; | ||
| 40 | } | ||
| 41 | |||
| 42 | |||
| 43 | /* | 32 | /* |
| 44 | * super ops | 33 | * super ops |
| 45 | */ | 34 | */ |
| 46 | static void ceph_put_super(struct super_block *s) | 35 | static void ceph_put_super(struct super_block *s) |
| 47 | { | 36 | { |
| 48 | struct ceph_client *client = ceph_sb_to_client(s); | 37 | struct ceph_fs_client *fsc = ceph_sb_to_client(s); |
| 49 | 38 | ||
| 50 | dout("put_super\n"); | 39 | dout("put_super\n"); |
| 51 | ceph_mdsc_close_sessions(&client->mdsc); | 40 | ceph_mdsc_close_sessions(fsc->mdsc); |
| 52 | 41 | ||
| 53 | /* | 42 | /* |
| 54 | * ensure we release the bdi before put_anon_super releases | 43 | * ensure we release the bdi before put_anon_super releases |
| 55 | * the device name. | 44 | * the device name. |
| 56 | */ | 45 | */ |
| 57 | if (s->s_bdi == &client->backing_dev_info) { | 46 | if (s->s_bdi == &fsc->backing_dev_info) { |
| 58 | bdi_unregister(&client->backing_dev_info); | 47 | bdi_unregister(&fsc->backing_dev_info); |
| 59 | s->s_bdi = NULL; | 48 | s->s_bdi = NULL; |
| 60 | } | 49 | } |
| 61 | 50 | ||
| @@ -64,14 +53,14 @@ static void ceph_put_super(struct super_block *s) | |||
| 64 | 53 | ||
| 65 | static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) | 54 | static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) |
| 66 | { | 55 | { |
| 67 | struct ceph_client *client = ceph_inode_to_client(dentry->d_inode); | 56 | struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode); |
| 68 | struct ceph_monmap *monmap = client->monc.monmap; | 57 | struct ceph_monmap *monmap = fsc->client->monc.monmap; |
| 69 | struct ceph_statfs st; | 58 | struct ceph_statfs st; |
| 70 | u64 fsid; | 59 | u64 fsid; |
| 71 | int err; | 60 | int err; |
| 72 | 61 | ||
| 73 | dout("statfs\n"); | 62 | dout("statfs\n"); |
| 74 | err = ceph_monc_do_statfs(&client->monc, &st); | 63 | err = ceph_monc_do_statfs(&fsc->client->monc, &st); |
| 75 | if (err < 0) | 64 | if (err < 0) |
| 76 | return err; | 65 | return err; |
| 77 | 66 | ||
| @@ -104,238 +93,28 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 104 | 93 | ||
| 105 | static int ceph_sync_fs(struct super_block *sb, int wait) | 94 | static int ceph_sync_fs(struct super_block *sb, int wait) |
| 106 | { | 95 | { |
| 107 | struct ceph_client *client = ceph_sb_to_client(sb); | 96 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
| 108 | 97 | ||
| 109 | if (!wait) { | 98 | if (!wait) { |
| 110 | dout("sync_fs (non-blocking)\n"); | 99 | dout("sync_fs (non-blocking)\n"); |
| 111 | ceph_flush_dirty_caps(&client->mdsc); | 100 | ceph_flush_dirty_caps(fsc->mdsc); |
| 112 | dout("sync_fs (non-blocking) done\n"); | 101 | dout("sync_fs (non-blocking) done\n"); |
| 113 | return 0; | 102 | return 0; |
| 114 | } | 103 | } |
| 115 | 104 | ||
| 116 | dout("sync_fs (blocking)\n"); | 105 | dout("sync_fs (blocking)\n"); |
| 117 | ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc); | 106 | ceph_osdc_sync(&fsc->client->osdc); |
| 118 | ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc); | 107 | ceph_mdsc_sync(fsc->mdsc); |
| 119 | dout("sync_fs (blocking) done\n"); | 108 | dout("sync_fs (blocking) done\n"); |
| 120 | return 0; | 109 | return 0; |
| 121 | } | 110 | } |
| 122 | 111 | ||
| 123 | static int default_congestion_kb(void) | ||
| 124 | { | ||
| 125 | int congestion_kb; | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Copied from NFS | ||
| 129 | * | ||
| 130 | * congestion size, scale with available memory. | ||
| 131 | * | ||
| 132 | * 64MB: 8192k | ||
| 133 | * 128MB: 11585k | ||
| 134 | * 256MB: 16384k | ||
| 135 | * 512MB: 23170k | ||
| 136 | * 1GB: 32768k | ||
| 137 | * 2GB: 46340k | ||
| 138 | * 4GB: 65536k | ||
| 139 | * 8GB: 92681k | ||
| 140 | * 16GB: 131072k | ||
| 141 | * | ||
| 142 | * This allows larger machines to have larger/more transfers. | ||
| 143 | * Limit the default to 256M | ||
| 144 | */ | ||
| 145 | congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); | ||
| 146 | if (congestion_kb > 256*1024) | ||
| 147 | congestion_kb = 256*1024; | ||
| 148 | |||
| 149 | return congestion_kb; | ||
| 150 | } | ||
| 151 | |||
| 152 | /** | ||
| 153 | * ceph_show_options - Show mount options in /proc/mounts | ||
| 154 | * @m: seq_file to write to | ||
| 155 | * @mnt: mount descriptor | ||
| 156 | */ | ||
| 157 | static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt) | ||
| 158 | { | ||
| 159 | struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb); | ||
| 160 | struct ceph_mount_args *args = client->mount_args; | ||
| 161 | |||
| 162 | if (args->flags & CEPH_OPT_FSID) | ||
| 163 | seq_printf(m, ",fsid=%pU", &args->fsid); | ||
| 164 | if (args->flags & CEPH_OPT_NOSHARE) | ||
| 165 | seq_puts(m, ",noshare"); | ||
| 166 | if (args->flags & CEPH_OPT_DIRSTAT) | ||
| 167 | seq_puts(m, ",dirstat"); | ||
| 168 | if ((args->flags & CEPH_OPT_RBYTES) == 0) | ||
| 169 | seq_puts(m, ",norbytes"); | ||
| 170 | if (args->flags & CEPH_OPT_NOCRC) | ||
| 171 | seq_puts(m, ",nocrc"); | ||
| 172 | if (args->flags & CEPH_OPT_NOASYNCREADDIR) | ||
| 173 | seq_puts(m, ",noasyncreaddir"); | ||
| 174 | |||
| 175 | if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) | ||
| 176 | seq_printf(m, ",mount_timeout=%d", args->mount_timeout); | ||
| 177 | if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) | ||
| 178 | seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl); | ||
| 179 | if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT) | ||
| 180 | seq_printf(m, ",osdtimeout=%d", args->osd_timeout); | ||
| 181 | if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) | ||
| 182 | seq_printf(m, ",osdkeepalivetimeout=%d", | ||
| 183 | args->osd_keepalive_timeout); | ||
| 184 | if (args->wsize) | ||
| 185 | seq_printf(m, ",wsize=%d", args->wsize); | ||
| 186 | if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT) | ||
| 187 | seq_printf(m, ",rsize=%d", args->rsize); | ||
| 188 | if (args->congestion_kb != default_congestion_kb()) | ||
| 189 | seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb); | ||
| 190 | if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT) | ||
| 191 | seq_printf(m, ",caps_wanted_delay_min=%d", | ||
| 192 | args->caps_wanted_delay_min); | ||
| 193 | if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT) | ||
| 194 | seq_printf(m, ",caps_wanted_delay_max=%d", | ||
| 195 | args->caps_wanted_delay_max); | ||
| 196 | if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT) | ||
| 197 | seq_printf(m, ",cap_release_safety=%d", | ||
| 198 | args->cap_release_safety); | ||
| 199 | if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT) | ||
| 200 | seq_printf(m, ",readdir_max_entries=%d", args->max_readdir); | ||
| 201 | if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT) | ||
| 202 | seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes); | ||
| 203 | if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) | ||
| 204 | seq_printf(m, ",snapdirname=%s", args->snapdir_name); | ||
| 205 | if (args->name) | ||
| 206 | seq_printf(m, ",name=%s", args->name); | ||
| 207 | if (args->secret) | ||
| 208 | seq_puts(m, ",secret=<hidden>"); | ||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | /* | ||
| 213 | * caches | ||
| 214 | */ | ||
| 215 | struct kmem_cache *ceph_inode_cachep; | ||
| 216 | struct kmem_cache *ceph_cap_cachep; | ||
| 217 | struct kmem_cache *ceph_dentry_cachep; | ||
| 218 | struct kmem_cache *ceph_file_cachep; | ||
| 219 | |||
| 220 | static void ceph_inode_init_once(void *foo) | ||
| 221 | { | ||
| 222 | struct ceph_inode_info *ci = foo; | ||
| 223 | inode_init_once(&ci->vfs_inode); | ||
| 224 | } | ||
| 225 | |||
| 226 | static int __init init_caches(void) | ||
| 227 | { | ||
| 228 | ceph_inode_cachep = kmem_cache_create("ceph_inode_info", | ||
| 229 | sizeof(struct ceph_inode_info), | ||
| 230 | __alignof__(struct ceph_inode_info), | ||
| 231 | (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), | ||
| 232 | ceph_inode_init_once); | ||
| 233 | if (ceph_inode_cachep == NULL) | ||
| 234 | return -ENOMEM; | ||
| 235 | |||
| 236 | ceph_cap_cachep = KMEM_CACHE(ceph_cap, | ||
| 237 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 238 | if (ceph_cap_cachep == NULL) | ||
| 239 | goto bad_cap; | ||
| 240 | |||
| 241 | ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info, | ||
| 242 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 243 | if (ceph_dentry_cachep == NULL) | ||
| 244 | goto bad_dentry; | ||
| 245 | |||
| 246 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, | ||
| 247 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 248 | if (ceph_file_cachep == NULL) | ||
| 249 | goto bad_file; | ||
| 250 | |||
| 251 | return 0; | ||
| 252 | |||
| 253 | bad_file: | ||
| 254 | kmem_cache_destroy(ceph_dentry_cachep); | ||
| 255 | bad_dentry: | ||
| 256 | kmem_cache_destroy(ceph_cap_cachep); | ||
| 257 | bad_cap: | ||
| 258 | kmem_cache_destroy(ceph_inode_cachep); | ||
| 259 | return -ENOMEM; | ||
| 260 | } | ||
| 261 | |||
| 262 | static void destroy_caches(void) | ||
| 263 | { | ||
| 264 | kmem_cache_destroy(ceph_inode_cachep); | ||
| 265 | kmem_cache_destroy(ceph_cap_cachep); | ||
| 266 | kmem_cache_destroy(ceph_dentry_cachep); | ||
| 267 | kmem_cache_destroy(ceph_file_cachep); | ||
| 268 | } | ||
| 269 | |||
| 270 | |||
| 271 | /* | ||
| 272 | * ceph_umount_begin - initiate forced umount. Tear down the | ||
| 273 | * mount, skipping steps that may hang while waiting for server(s). | ||
| 274 | */ | ||
| 275 | static void ceph_umount_begin(struct super_block *sb) | ||
| 276 | { | ||
| 277 | struct ceph_client *client = ceph_sb_to_client(sb); | ||
| 278 | |||
| 279 | dout("ceph_umount_begin - starting forced umount\n"); | ||
| 280 | if (!client) | ||
| 281 | return; | ||
| 282 | client->mount_state = CEPH_MOUNT_SHUTDOWN; | ||
| 283 | return; | ||
| 284 | } | ||
| 285 | |||
| 286 | static const struct super_operations ceph_super_ops = { | ||
| 287 | .alloc_inode = ceph_alloc_inode, | ||
| 288 | .destroy_inode = ceph_destroy_inode, | ||
| 289 | .write_inode = ceph_write_inode, | ||
| 290 | .sync_fs = ceph_sync_fs, | ||
| 291 | .put_super = ceph_put_super, | ||
| 292 | .show_options = ceph_show_options, | ||
| 293 | .statfs = ceph_statfs, | ||
| 294 | .umount_begin = ceph_umount_begin, | ||
| 295 | }; | ||
| 296 | |||
| 297 | |||
| 298 | const char *ceph_msg_type_name(int type) | ||
| 299 | { | ||
| 300 | switch (type) { | ||
| 301 | case CEPH_MSG_SHUTDOWN: return "shutdown"; | ||
| 302 | case CEPH_MSG_PING: return "ping"; | ||
| 303 | case CEPH_MSG_AUTH: return "auth"; | ||
| 304 | case CEPH_MSG_AUTH_REPLY: return "auth_reply"; | ||
| 305 | case CEPH_MSG_MON_MAP: return "mon_map"; | ||
| 306 | case CEPH_MSG_MON_GET_MAP: return "mon_get_map"; | ||
| 307 | case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe"; | ||
| 308 | case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack"; | ||
| 309 | case CEPH_MSG_STATFS: return "statfs"; | ||
| 310 | case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; | ||
| 311 | case CEPH_MSG_MDS_MAP: return "mds_map"; | ||
| 312 | case CEPH_MSG_CLIENT_SESSION: return "client_session"; | ||
| 313 | case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect"; | ||
| 314 | case CEPH_MSG_CLIENT_REQUEST: return "client_request"; | ||
| 315 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward"; | ||
| 316 | case CEPH_MSG_CLIENT_REPLY: return "client_reply"; | ||
| 317 | case CEPH_MSG_CLIENT_CAPS: return "client_caps"; | ||
| 318 | case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release"; | ||
| 319 | case CEPH_MSG_CLIENT_SNAP: return "client_snap"; | ||
| 320 | case CEPH_MSG_CLIENT_LEASE: return "client_lease"; | ||
| 321 | case CEPH_MSG_OSD_MAP: return "osd_map"; | ||
| 322 | case CEPH_MSG_OSD_OP: return "osd_op"; | ||
| 323 | case CEPH_MSG_OSD_OPREPLY: return "osd_opreply"; | ||
| 324 | default: return "unknown"; | ||
| 325 | } | ||
| 326 | } | ||
| 327 | |||
| 328 | |||
| 329 | /* | 112 | /* |
| 330 | * mount options | 113 | * mount options |
| 331 | */ | 114 | */ |
| 332 | enum { | 115 | enum { |
| 333 | Opt_wsize, | 116 | Opt_wsize, |
| 334 | Opt_rsize, | 117 | Opt_rsize, |
| 335 | Opt_osdtimeout, | ||
| 336 | Opt_osdkeepalivetimeout, | ||
| 337 | Opt_mount_timeout, | ||
| 338 | Opt_osd_idle_ttl, | ||
| 339 | Opt_caps_wanted_delay_min, | 118 | Opt_caps_wanted_delay_min, |
| 340 | Opt_caps_wanted_delay_max, | 119 | Opt_caps_wanted_delay_max, |
| 341 | Opt_cap_release_safety, | 120 | Opt_cap_release_safety, |
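
default_congestion_kb(), dropped from this spot in the hunk above, is still called by the rearranged ceph_show_options() later in the diff, so the function itself evidently survives elsewhere; only its position changes. Its comment documents the scaling rule congestion_kb = (16 * sqrt(totalram_pages)) << (PAGE_SHIFT - 10), capped at 256 MB. A quick userspace check of the table in that comment, assuming 4 KiB pages (PAGE_SHIFT == 12) and linking with -lm; note the kernel's integer square root lands a kilobyte or two below the comment's exact-sqrt figures for the non-power-of-two sizes (11584k vs 11585k, 23168k vs 23170k):

        /* Userspace check of the scaling rule in default_congestion_kb()'s
         * comment: kb = (16 * sqrt(totalram_pages)) << (PAGE_SHIFT - 10),
         * capped at 256 MB.  Assumes 4 KiB pages (PAGE_SHIFT == 12). */
        #include <math.h>
        #include <stdio.h>

        static unsigned long congestion_kb(unsigned long totalram_pages)
        {
                unsigned long kb =
                        (unsigned long)(16 * sqrt((double)totalram_pages)) << (12 - 10);

                if (kb > 256 * 1024)            /* limit the default to 256M */
                        kb = 256 * 1024;
                return kb;
        }

        int main(void)
        {
                static const unsigned long mb[] = { 64, 128, 256, 512, 1024, 16384 };

                for (unsigned i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
                        unsigned long pages = mb[i] * 1024 / 4;  /* 4 KiB pages */
                        printf("%6luMB: %luk\n", mb[i], congestion_kb(pages));
                }
                return 0;
        }

Output: 64MB: 8192k, 128MB: 11584k, 256MB: 16384k, 512MB: 23168k, 1024MB: 32768k, 16384MB: 131072k.
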
| @@ -344,29 +123,19 @@ enum { | |||
| 344 | Opt_congestion_kb, | 123 | Opt_congestion_kb, |
| 345 | Opt_last_int, | 124 | Opt_last_int, |
| 346 | /* int args above */ | 125 | /* int args above */ |
| 347 | Opt_fsid, | ||
| 348 | Opt_snapdirname, | 126 | Opt_snapdirname, |
| 349 | Opt_name, | ||
| 350 | Opt_secret, | ||
| 351 | Opt_last_string, | 127 | Opt_last_string, |
| 352 | /* string args above */ | 128 | /* string args above */ |
| 353 | Opt_ip, | ||
| 354 | Opt_noshare, | ||
| 355 | Opt_dirstat, | 129 | Opt_dirstat, |
| 356 | Opt_nodirstat, | 130 | Opt_nodirstat, |
| 357 | Opt_rbytes, | 131 | Opt_rbytes, |
| 358 | Opt_norbytes, | 132 | Opt_norbytes, |
| 359 | Opt_nocrc, | ||
| 360 | Opt_noasyncreaddir, | 133 | Opt_noasyncreaddir, |
| 361 | }; | 134 | }; |
| 362 | 135 | ||
| 363 | static match_table_t arg_tokens = { | 136 | static match_table_t fsopt_tokens = { |
| 364 | {Opt_wsize, "wsize=%d"}, | 137 | {Opt_wsize, "wsize=%d"}, |
| 365 | {Opt_rsize, "rsize=%d"}, | 138 | {Opt_rsize, "rsize=%d"}, |
| 366 | {Opt_osdtimeout, "osdtimeout=%d"}, | ||
| 367 | {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, | ||
| 368 | {Opt_mount_timeout, "mount_timeout=%d"}, | ||
| 369 | {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, | ||
| 370 | {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, | 139 | {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, |
| 371 | {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, | 140 | {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, |
| 372 | {Opt_cap_release_safety, "cap_release_safety=%d"}, | 141 | {Opt_cap_release_safety, "cap_release_safety=%d"}, |
| @@ -374,403 +143,459 @@ static match_table_t arg_tokens = { | |||
| 374 | {Opt_readdir_max_bytes, "readdir_max_bytes=%d"}, | 143 | {Opt_readdir_max_bytes, "readdir_max_bytes=%d"}, |
| 375 | {Opt_congestion_kb, "write_congestion_kb=%d"}, | 144 | {Opt_congestion_kb, "write_congestion_kb=%d"}, |
| 376 | /* int args above */ | 145 | /* int args above */ |
| 377 | {Opt_fsid, "fsid=%s"}, | ||
| 378 | {Opt_snapdirname, "snapdirname=%s"}, | 146 | {Opt_snapdirname, "snapdirname=%s"}, |
| 379 | {Opt_name, "name=%s"}, | ||
| 380 | {Opt_secret, "secret=%s"}, | ||
| 381 | /* string args above */ | 147 | /* string args above */ |
| 382 | {Opt_ip, "ip=%s"}, | ||
| 383 | {Opt_noshare, "noshare"}, | ||
| 384 | {Opt_dirstat, "dirstat"}, | 148 | {Opt_dirstat, "dirstat"}, |
| 385 | {Opt_nodirstat, "nodirstat"}, | 149 | {Opt_nodirstat, "nodirstat"}, |
| 386 | {Opt_rbytes, "rbytes"}, | 150 | {Opt_rbytes, "rbytes"}, |
| 387 | {Opt_norbytes, "norbytes"}, | 151 | {Opt_norbytes, "norbytes"}, |
| 388 | {Opt_nocrc, "nocrc"}, | ||
| 389 | {Opt_noasyncreaddir, "noasyncreaddir"}, | 152 | {Opt_noasyncreaddir, "noasyncreaddir"}, |
| 390 | {-1, NULL} | 153 | {-1, NULL} |
| 391 | }; | 154 | }; |
| 392 | 155 | ||
| 393 | static int parse_fsid(const char *str, struct ceph_fsid *fsid) | 156 | static int parse_fsopt_token(char *c, void *private) |
| 394 | { | 157 | { |
| 395 | int i = 0; | 158 | struct ceph_mount_options *fsopt = private; |
| 396 | char tmp[3]; | 159 | substring_t argstr[MAX_OPT_ARGS]; |
| 397 | int err = -EINVAL; | 160 | int token, intval, ret; |
| 398 | int d; | 161 | |
| 399 | 162 | token = match_token((char *)c, fsopt_tokens, argstr); | |
| 400 | dout("parse_fsid '%s'\n", str); | 163 | if (token < 0) |
| 401 | tmp[2] = 0; | 164 | return -EINVAL; |
| 402 | while (*str && i < 16) { | 165 | |
| 403 | if (ispunct(*str)) { | 166 | if (token < Opt_last_int) { |
| 404 | str++; | 167 | ret = match_int(&argstr[0], &intval); |
| 405 | continue; | 168 | if (ret < 0) { |
| 169 | pr_err("bad mount option arg (not int) " | ||
| 170 | "at '%s'\n", c); | ||
| 171 | return ret; | ||
| 406 | } | 172 | } |
| 407 | if (!isxdigit(str[0]) || !isxdigit(str[1])) | 173 | dout("got int token %d val %d\n", token, intval); |
| 408 | break; | 174 | } else if (token > Opt_last_int && token < Opt_last_string) { |
| 409 | tmp[0] = str[0]; | 175 | dout("got string token %d val %s\n", token, |
| 410 | tmp[1] = str[1]; | 176 | argstr[0].from); |
| 411 | if (sscanf(tmp, "%x", &d) < 1) | 177 | } else { |
| 412 | break; | 178 | dout("got token %d\n", token); |
| 413 | fsid->fsid[i] = d & 0xff; | ||
| 414 | i++; | ||
| 415 | str += 2; | ||
| 416 | } | 179 | } |
| 417 | 180 | ||
| 418 | if (i == 16) | 181 | switch (token) { |
| 419 | err = 0; | 182 | case Opt_snapdirname: |
| 420 | dout("parse_fsid ret %d got fsid %pU", err, fsid); | 183 | kfree(fsopt->snapdir_name); |
| 421 | return err; | 184 | fsopt->snapdir_name = kstrndup(argstr[0].from, |
| 185 | argstr[0].to-argstr[0].from, | ||
| 186 | GFP_KERNEL); | ||
| 187 | if (!fsopt->snapdir_name) | ||
| 188 | return -ENOMEM; | ||
| 189 | break; | ||
| 190 | |||
| 191 | /* misc */ | ||
| 192 | case Opt_wsize: | ||
| 193 | fsopt->wsize = intval; | ||
| 194 | break; | ||
| 195 | case Opt_rsize: | ||
| 196 | fsopt->rsize = intval; | ||
| 197 | break; | ||
| 198 | case Opt_caps_wanted_delay_min: | ||
| 199 | fsopt->caps_wanted_delay_min = intval; | ||
| 200 | break; | ||
| 201 | case Opt_caps_wanted_delay_max: | ||
| 202 | fsopt->caps_wanted_delay_max = intval; | ||
| 203 | break; | ||
| 204 | case Opt_readdir_max_entries: | ||
| 205 | fsopt->max_readdir = intval; | ||
| 206 | break; | ||
| 207 | case Opt_readdir_max_bytes: | ||
| 208 | fsopt->max_readdir_bytes = intval; | ||
| 209 | break; | ||
| 210 | case Opt_congestion_kb: | ||
| 211 | fsopt->congestion_kb = intval; | ||
| 212 | break; | ||
| 213 | case Opt_dirstat: | ||
| 214 | fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT; | ||
| 215 | break; | ||
| 216 | case Opt_nodirstat: | ||
| 217 | fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT; | ||
| 218 | break; | ||
| 219 | case Opt_rbytes: | ||
| 220 | fsopt->flags |= CEPH_MOUNT_OPT_RBYTES; | ||
| 221 | break; | ||
| 222 | case Opt_norbytes: | ||
| 223 | fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES; | ||
| 224 | break; | ||
| 225 | case Opt_noasyncreaddir: | ||
| 226 | fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR; | ||
| 227 | break; | ||
| 228 | default: | ||
| 229 | BUG_ON(token); | ||
| 230 | } | ||
| 231 | return 0; | ||
| 422 | } | 232 | } |
| 423 | 233 | ||
| 424 | static struct ceph_mount_args *parse_mount_args(int flags, char *options, | 234 | static void destroy_mount_options(struct ceph_mount_options *args) |
| 425 | const char *dev_name, | ||
| 426 | const char **path) | ||
| 427 | { | 235 | { |
| 428 | struct ceph_mount_args *args; | 236 | dout("destroy_mount_options %p\n", args); |
| 429 | const char *c; | 237 | kfree(args->snapdir_name); |
| 430 | int err = -ENOMEM; | 238 | kfree(args); |
| 431 | substring_t argstr[MAX_OPT_ARGS]; | 239 | } |
| 432 | 240 | ||
| 433 | args = kzalloc(sizeof(*args), GFP_KERNEL); | 241 | static int strcmp_null(const char *s1, const char *s2) |
| 434 | if (!args) | 242 | { |
| 435 | return ERR_PTR(-ENOMEM); | 243 | if (!s1 && !s2) |
| 436 | args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr), | 244 | return 0; |
| 437 | GFP_KERNEL); | 245 | if (s1 && !s2) |
| 438 | if (!args->mon_addr) | 246 | return -1; |
| 439 | goto out; | 247 | if (!s1 && s2) |
| 248 | return 1; | ||
| 249 | return strcmp(s1, s2); | ||
| 250 | } | ||
| 440 | 251 | ||
| 441 | dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name); | 252 | static int compare_mount_options(struct ceph_mount_options *new_fsopt, |
| 442 | 253 | struct ceph_options *new_opt, | |
| 443 | /* start with defaults */ | 254 | struct ceph_fs_client *fsc) |
| 444 | args->sb_flags = flags; | 255 | { |
| 445 | args->flags = CEPH_OPT_DEFAULT; | 256 | struct ceph_mount_options *fsopt1 = new_fsopt; |
| 446 | args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; | 257 | struct ceph_mount_options *fsopt2 = fsc->mount_options; |
| 447 | args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; | 258 | int ofs = offsetof(struct ceph_mount_options, snapdir_name); |
| 448 | args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ | 259 | int ret; |
| 449 | args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ | ||
| 450 | args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; | ||
| 451 | args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; | ||
| 452 | args->rsize = CEPH_MOUNT_RSIZE_DEFAULT; | ||
| 453 | args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); | ||
| 454 | args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT; | ||
| 455 | args->max_readdir = CEPH_MAX_READDIR_DEFAULT; | ||
| 456 | args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT; | ||
| 457 | args->congestion_kb = default_congestion_kb(); | ||
| 458 | |||
| 459 | /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ | ||
| 460 | err = -EINVAL; | ||
| 461 | if (!dev_name) | ||
| 462 | goto out; | ||
| 463 | *path = strstr(dev_name, ":/"); | ||
| 464 | if (*path == NULL) { | ||
| 465 | pr_err("device name is missing path (no :/ in %s)\n", | ||
| 466 | dev_name); | ||
| 467 | goto out; | ||
| 468 | } | ||
| 469 | 260 | ||
| 470 | /* get mon ip(s) */ | 261 | ret = memcmp(fsopt1, fsopt2, ofs); |
| 471 | err = ceph_parse_ips(dev_name, *path, args->mon_addr, | 262 | if (ret) |
| 472 | CEPH_MAX_MON, &args->num_mon); | 263 | return ret; |
| 473 | if (err < 0) | 264 | |
| 474 | goto out; | 265 | ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name); |
| 266 | if (ret) | ||
| 267 | return ret; | ||
| 268 | |||
| 269 | return ceph_compare_options(new_opt, fsc->client); | ||
| 270 | } | ||
| 271 | |||
| 272 | static int parse_mount_options(struct ceph_mount_options **pfsopt, | ||
| 273 | struct ceph_options **popt, | ||
| 274 | int flags, char *options, | ||
| 275 | const char *dev_name, | ||
| 276 | const char **path) | ||
| 277 | { | ||
| 278 | struct ceph_mount_options *fsopt; | ||
| 279 | const char *dev_name_end; | ||
| 280 | int err = -ENOMEM; | ||
| 281 | |||
| 282 | fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL); | ||
| 283 | if (!fsopt) | ||
| 284 | return -ENOMEM; | ||
| 285 | |||
| 286 | dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name); | ||
| 287 | |||
| 288 | fsopt->sb_flags = flags; | ||
| 289 | fsopt->flags = CEPH_MOUNT_OPT_DEFAULT; | ||
| 290 | |||
| 291 | fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT; | ||
| 292 | fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); | ||
| 293 | fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT; | ||
| 294 | fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT; | ||
| 295 | fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT; | ||
| 296 | fsopt->congestion_kb = default_congestion_kb(); | ||
| 297 | |||
| 298 | /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ | ||
| 299 | err = -EINVAL; | ||
| 300 | if (!dev_name) | ||
| 301 | goto out; | ||
| 302 | *path = strstr(dev_name, ":/"); | ||
| 303 | if (*path == NULL) { | ||
| 304 | pr_err("device name is missing path (no :/ in %s)\n", | ||
| 305 | dev_name); | ||
| 306 | goto out; | ||
| 307 | } | ||
| 308 | dev_name_end = *path; | ||
| 309 | dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name); | ||
| 475 | 310 | ||
| 476 | /* path on server */ | 311 | /* path on server */ |
| 477 | *path += 2; | 312 | *path += 2; |
| 478 | dout("server path '%s'\n", *path); | 313 | dout("server path '%s'\n", *path); |
| 479 | 314 | ||
| 480 | /* parse mount options */ | 315 | err = ceph_parse_options(popt, options, dev_name, dev_name_end, |
| 481 | while ((c = strsep(&options, ",")) != NULL) { | 316 | parse_fsopt_token, (void *)fsopt); |
| 482 | int token, intval, ret; | 317 | if (err) |
| 483 | if (!*c) | 318 | goto out; |
| 484 | continue; | 319 | |
| 485 | err = -EINVAL; | 320 | /* success */ |
| 486 | token = match_token((char *)c, arg_tokens, argstr); | 321 | *pfsopt = fsopt; |
| 487 | if (token < 0) { | 322 | return 0; |
| 488 | pr_err("bad mount option at '%s'\n", c); | ||
| 489 | goto out; | ||
| 490 | } | ||
| 491 | if (token < Opt_last_int) { | ||
| 492 | ret = match_int(&argstr[0], &intval); | ||
| 493 | if (ret < 0) { | ||
| 494 | pr_err("bad mount option arg (not int) " | ||
| 495 | "at '%s'\n", c); | ||
| 496 | continue; | ||
| 497 | } | ||
| 498 | dout("got int token %d val %d\n", token, intval); | ||
| 499 | } else if (token > Opt_last_int && token < Opt_last_string) { | ||
| 500 | dout("got string token %d val %s\n", token, | ||
| 501 | argstr[0].from); | ||
| 502 | } else { | ||
| 503 | dout("got token %d\n", token); | ||
| 504 | } | ||
| 505 | switch (token) { | ||
| 506 | case Opt_ip: | ||
| 507 | err = ceph_parse_ips(argstr[0].from, | ||
| 508 | argstr[0].to, | ||
| 509 | &args->my_addr, | ||
| 510 | 1, NULL); | ||
| 511 | if (err < 0) | ||
| 512 | goto out; | ||
| 513 | args->flags |= CEPH_OPT_MYIP; | ||
| 514 | break; | ||
| 515 | |||
| 516 | case Opt_fsid: | ||
| 517 | err = parse_fsid(argstr[0].from, &args->fsid); | ||
| 518 | if (err == 0) | ||
| 519 | args->flags |= CEPH_OPT_FSID; | ||
| 520 | break; | ||
| 521 | case Opt_snapdirname: | ||
| 522 | kfree(args->snapdir_name); | ||
| 523 | args->snapdir_name = kstrndup(argstr[0].from, | ||
| 524 | argstr[0].to-argstr[0].from, | ||
| 525 | GFP_KERNEL); | ||
| 526 | break; | ||
| 527 | case Opt_name: | ||
| 528 | args->name = kstrndup(argstr[0].from, | ||
| 529 | argstr[0].to-argstr[0].from, | ||
| 530 | GFP_KERNEL); | ||
| 531 | break; | ||
| 532 | case Opt_secret: | ||
| 533 | args->secret = kstrndup(argstr[0].from, | ||
| 534 | argstr[0].to-argstr[0].from, | ||
| 535 | GFP_KERNEL); | ||
| 536 | break; | ||
| 537 | |||
| 538 | /* misc */ | ||
| 539 | case Opt_wsize: | ||
| 540 | args->wsize = intval; | ||
| 541 | break; | ||
| 542 | case Opt_rsize: | ||
| 543 | args->rsize = intval; | ||
| 544 | break; | ||
| 545 | case Opt_osdtimeout: | ||
| 546 | args->osd_timeout = intval; | ||
| 547 | break; | ||
| 548 | case Opt_osdkeepalivetimeout: | ||
| 549 | args->osd_keepalive_timeout = intval; | ||
| 550 | break; | ||
| 551 | case Opt_osd_idle_ttl: | ||
| 552 | args->osd_idle_ttl = intval; | ||
| 553 | break; | ||
| 554 | case Opt_mount_timeout: | ||
| 555 | args->mount_timeout = intval; | ||
| 556 | break; | ||
| 557 | case Opt_caps_wanted_delay_min: | ||
| 558 | args->caps_wanted_delay_min = intval; | ||
| 559 | break; | ||
| 560 | case Opt_caps_wanted_delay_max: | ||
| 561 | args->caps_wanted_delay_max = intval; | ||
| 562 | break; | ||
| 563 | case Opt_readdir_max_entries: | ||
| 564 | args->max_readdir = intval; | ||
| 565 | break; | ||
| 566 | case Opt_readdir_max_bytes: | ||
| 567 | args->max_readdir_bytes = intval; | ||
| 568 | break; | ||
| 569 | case Opt_congestion_kb: | ||
| 570 | args->congestion_kb = intval; | ||
| 571 | break; | ||
| 572 | |||
| 573 | case Opt_noshare: | ||
| 574 | args->flags |= CEPH_OPT_NOSHARE; | ||
| 575 | break; | ||
| 576 | |||
| 577 | case Opt_dirstat: | ||
| 578 | args->flags |= CEPH_OPT_DIRSTAT; | ||
| 579 | break; | ||
| 580 | case Opt_nodirstat: | ||
| 581 | args->flags &= ~CEPH_OPT_DIRSTAT; | ||
| 582 | break; | ||
| 583 | case Opt_rbytes: | ||
| 584 | args->flags |= CEPH_OPT_RBYTES; | ||
| 585 | break; | ||
| 586 | case Opt_norbytes: | ||
| 587 | args->flags &= ~CEPH_OPT_RBYTES; | ||
| 588 | break; | ||
| 589 | case Opt_nocrc: | ||
| 590 | args->flags |= CEPH_OPT_NOCRC; | ||
| 591 | break; | ||
| 592 | case Opt_noasyncreaddir: | ||
| 593 | args->flags |= CEPH_OPT_NOASYNCREADDIR; | ||
| 594 | break; | ||
| 595 | |||
| 596 | default: | ||
| 597 | BUG_ON(token); | ||
| 598 | } | ||
| 599 | } | ||
| 600 | return args; | ||
| 601 | 323 | ||
| 602 | out: | 324 | out: |
| 603 | kfree(args->mon_addr); | 325 | destroy_mount_options(fsopt); |
| 604 | kfree(args); | 326 | return err; |
| 605 | return ERR_PTR(err); | ||
| 606 | } | 327 | } |
| 607 | 328 | ||
| 608 | static void destroy_mount_args(struct ceph_mount_args *args) | 329 | /** |
| 330 | * ceph_show_options - Show mount options in /proc/mounts | ||
| 331 | * @m: seq_file to write to | ||
| 332 | * @mnt: mount descriptor | ||
| 333 | */ | ||
| 334 | static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt) | ||
| 609 | { | 335 | { |
| 610 | dout("destroy_mount_args %p\n", args); | 336 | struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb); |
| 611 | kfree(args->snapdir_name); | 337 | struct ceph_mount_options *fsopt = fsc->mount_options; |
| 612 | args->snapdir_name = NULL; | 338 | struct ceph_options *opt = fsc->client->options; |
| 613 | kfree(args->name); | 339 | |
| 614 | args->name = NULL; | 340 | if (opt->flags & CEPH_OPT_FSID) |
| 615 | kfree(args->secret); | 341 | seq_printf(m, ",fsid=%pU", &opt->fsid); |
| 616 | args->secret = NULL; | 342 | if (opt->flags & CEPH_OPT_NOSHARE) |
| 617 | kfree(args); | 343 | seq_puts(m, ",noshare"); |
| 344 | if (opt->flags & CEPH_OPT_NOCRC) | ||
| 345 | seq_puts(m, ",nocrc"); | ||
| 346 | |||
| 347 | if (opt->name) | ||
| 348 | seq_printf(m, ",name=%s", opt->name); | ||
| 349 | if (opt->secret) | ||
| 350 | seq_puts(m, ",secret=<hidden>"); | ||
| 351 | |||
| 352 | if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) | ||
| 353 | seq_printf(m, ",mount_timeout=%d", opt->mount_timeout); | ||
| 354 | if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) | ||
| 355 | seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl); | ||
| 356 | if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT) | ||
| 357 | seq_printf(m, ",osdtimeout=%d", opt->osd_timeout); | ||
| 358 | if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) | ||
| 359 | seq_printf(m, ",osdkeepalivetimeout=%d", | ||
| 360 | opt->osd_keepalive_timeout); | ||
| 361 | |||
| 362 | if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT) | ||
| 363 | seq_puts(m, ",dirstat"); | ||
| 364 | if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0) | ||
| 365 | seq_puts(m, ",norbytes"); | ||
| 366 | if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR) | ||
| 367 | seq_puts(m, ",noasyncreaddir"); | ||
| 368 | |||
| 369 | if (fsopt->wsize) | ||
| 370 | seq_printf(m, ",wsize=%d", fsopt->wsize); | ||
| 371 | if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT) | ||
| 372 | seq_printf(m, ",rsize=%d", fsopt->rsize); | ||
| 373 | if (fsopt->congestion_kb != default_congestion_kb()) | ||
| 374 | seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb); | ||
| 375 | if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT) | ||
| 376 | seq_printf(m, ",caps_wanted_delay_min=%d", | ||
| 377 | fsopt->caps_wanted_delay_min); | ||
| 378 | if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT) | ||
| 379 | seq_printf(m, ",caps_wanted_delay_max=%d", | ||
| 380 | fsopt->caps_wanted_delay_max); | ||
| 381 | if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT) | ||
| 382 | seq_printf(m, ",cap_release_safety=%d", | ||
| 383 | fsopt->cap_release_safety); | ||
| 384 | if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT) | ||
| 385 | seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir); | ||
| 386 | if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT) | ||
| 387 | seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes); | ||
| 388 | if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) | ||
| 389 | seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name); | ||
| 390 | return 0; | ||
| 618 | } | 391 | } |
| 619 | 392 | ||
| 620 | /* | 393 | /* |
| 621 | * create a fresh client instance | 394 | * handle any mon messages the standard library doesn't understand. |
| 395 | * return error if we don't either. | ||
| 622 | */ | 396 | */ |
| 623 | static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) | 397 | static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg) |
| 624 | { | 398 | { |
| 625 | struct ceph_client *client; | 399 | struct ceph_fs_client *fsc = client->private; |
| 400 | int type = le16_to_cpu(msg->hdr.type); | ||
| 401 | |||
| 402 | switch (type) { | ||
| 403 | case CEPH_MSG_MDS_MAP: | ||
| 404 | ceph_mdsc_handle_map(fsc->mdsc, msg); | ||
| 405 | return 0; | ||
| 406 | |||
| 407 | default: | ||
| 408 | return -1; | ||
| 409 | } | ||
| 410 | } | ||
| 411 | |||
| 412 | /* | ||
| 413 | * create a new fs client | ||
| 414 | */ | ||
| 415 | struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, | ||
| 416 | struct ceph_options *opt) | ||
| 417 | { | ||
| 418 | struct ceph_fs_client *fsc; | ||
| 626 | int err = -ENOMEM; | 419 | int err = -ENOMEM; |
| 627 | 420 | ||
| 628 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 421 | fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); |
| 629 | if (client == NULL) | 422 | if (!fsc) |
| 630 | return ERR_PTR(-ENOMEM); | 423 | return ERR_PTR(-ENOMEM); |
| 631 | 424 | ||
| 632 | mutex_init(&client->mount_mutex); | 425 | fsc->client = ceph_create_client(opt, fsc); |
| 633 | 426 | if (IS_ERR(fsc->client)) { | |
| 634 | init_waitqueue_head(&client->auth_wq); | 427 | err = PTR_ERR(fsc->client); |
| 428 | goto fail; | ||
| 429 | } | ||
| 430 | fsc->client->extra_mon_dispatch = extra_mon_dispatch; | ||
| 431 | fsc->client->supported_features |= CEPH_FEATURE_FLOCK; | ||
| 432 | fsc->client->monc.want_mdsmap = 1; | ||
| 635 | 433 | ||
| 636 | client->sb = NULL; | 434 | fsc->mount_options = fsopt; |
| 637 | client->mount_state = CEPH_MOUNT_MOUNTING; | ||
| 638 | client->mount_args = args; | ||
| 639 | 435 | ||
| 640 | client->msgr = NULL; | 436 | fsc->sb = NULL; |
| 437 | fsc->mount_state = CEPH_MOUNT_MOUNTING; | ||
| 641 | 438 | ||
| 642 | client->auth_err = 0; | 439 | atomic_long_set(&fsc->writeback_count, 0); |
| 643 | atomic_long_set(&client->writeback_count, 0); | ||
| 644 | 440 | ||
| 645 | err = bdi_init(&client->backing_dev_info); | 441 | err = bdi_init(&fsc->backing_dev_info); |
| 646 | if (err < 0) | 442 | if (err < 0) |
| 647 | goto fail; | 443 | goto fail_client; |
| 648 | 444 | ||
| 649 | err = -ENOMEM; | 445 | err = -ENOMEM; |
| 650 | client->wb_wq = create_workqueue("ceph-writeback"); | 446 | fsc->wb_wq = create_workqueue("ceph-writeback"); |
| 651 | if (client->wb_wq == NULL) | 447 | if (fsc->wb_wq == NULL) |
| 652 | goto fail_bdi; | 448 | goto fail_bdi; |
| 653 | client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); | 449 | fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); |
| 654 | if (client->pg_inv_wq == NULL) | 450 | if (fsc->pg_inv_wq == NULL) |
| 655 | goto fail_wb_wq; | 451 | goto fail_wb_wq; |
| 656 | client->trunc_wq = create_singlethread_workqueue("ceph-trunc"); | 452 | fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc"); |
| 657 | if (client->trunc_wq == NULL) | 453 | if (fsc->trunc_wq == NULL) |
| 658 | goto fail_pg_inv_wq; | 454 | goto fail_pg_inv_wq; |
| 659 | 455 | ||
| 660 | /* set up mempools */ | 456 | /* set up mempools */ |
| 661 | err = -ENOMEM; | 457 | err = -ENOMEM; |
| 662 | client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, | 458 | fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, |
| 663 | client->mount_args->wsize >> PAGE_CACHE_SHIFT); | 459 | fsc->mount_options->wsize >> PAGE_CACHE_SHIFT); |
| 664 | if (!client->wb_pagevec_pool) | 460 | if (!fsc->wb_pagevec_pool) |
| 665 | goto fail_trunc_wq; | 461 | goto fail_trunc_wq; |
| 666 | 462 | ||
| 667 | /* caps */ | 463 | /* caps */ |
| 668 | client->min_caps = args->max_readdir; | 464 | fsc->min_caps = fsopt->max_readdir; |
| 465 | |||
| 466 | return fsc; | ||
| 669 | 467 | ||
| 670 | /* subsystems */ | ||
| 671 | err = ceph_monc_init(&client->monc, client); | ||
| 672 | if (err < 0) | ||
| 673 | goto fail_mempool; | ||
| 674 | err = ceph_osdc_init(&client->osdc, client); | ||
| 675 | if (err < 0) | ||
| 676 | goto fail_monc; | ||
| 677 | err = ceph_mdsc_init(&client->mdsc, client); | ||
| 678 | if (err < 0) | ||
| 679 | goto fail_osdc; | ||
| 680 | return client; | ||
| 681 | |||
| 682 | fail_osdc: | ||
| 683 | ceph_osdc_stop(&client->osdc); | ||
| 684 | fail_monc: | ||
| 685 | ceph_monc_stop(&client->monc); | ||
| 686 | fail_mempool: | ||
| 687 | mempool_destroy(client->wb_pagevec_pool); | ||
| 688 | fail_trunc_wq: | 468 | fail_trunc_wq: |
| 689 | destroy_workqueue(client->trunc_wq); | 469 | destroy_workqueue(fsc->trunc_wq); |
| 690 | fail_pg_inv_wq: | 470 | fail_pg_inv_wq: |
| 691 | destroy_workqueue(client->pg_inv_wq); | 471 | destroy_workqueue(fsc->pg_inv_wq); |
| 692 | fail_wb_wq: | 472 | fail_wb_wq: |
| 693 | destroy_workqueue(client->wb_wq); | 473 | destroy_workqueue(fsc->wb_wq); |
| 694 | fail_bdi: | 474 | fail_bdi: |
| 695 | bdi_destroy(&client->backing_dev_info); | 475 | bdi_destroy(&fsc->backing_dev_info); |
| 476 | fail_client: | ||
| 477 | ceph_destroy_client(fsc->client); | ||
| 696 | fail: | 478 | fail: |
| 697 | kfree(client); | 479 | kfree(fsc); |
| 698 | return ERR_PTR(err); | 480 | return ERR_PTR(err); |
| 699 | } | 481 | } |
| 700 | 482 | ||
| 701 | static void ceph_destroy_client(struct ceph_client *client) | 483 | void destroy_fs_client(struct ceph_fs_client *fsc) |
| 702 | { | 484 | { |
| 703 | dout("destroy_client %p\n", client); | 485 | dout("destroy_fs_client %p\n", fsc); |
| 704 | 486 | ||
| 705 | /* unmount */ | 487 | destroy_workqueue(fsc->wb_wq); |
| 706 | ceph_mdsc_stop(&client->mdsc); | 488 | destroy_workqueue(fsc->pg_inv_wq); |
| 707 | ceph_osdc_stop(&client->osdc); | 489 | destroy_workqueue(fsc->trunc_wq); |
| 708 | 490 | ||
| 709 | /* | 491 | bdi_destroy(&fsc->backing_dev_info); |
| 710 | * make sure mds and osd connections close out before destroying | ||
| 711 | * the auth module, which is needed to free those connections' | ||
| 712 | * ceph_authorizers. | ||
| 713 | */ | ||
| 714 | ceph_msgr_flush(); | ||
| 715 | |||
| 716 | ceph_monc_stop(&client->monc); | ||
| 717 | 492 | ||
| 718 | ceph_debugfs_client_cleanup(client); | 493 | mempool_destroy(fsc->wb_pagevec_pool); |
| 719 | destroy_workqueue(client->wb_wq); | ||
| 720 | destroy_workqueue(client->pg_inv_wq); | ||
| 721 | destroy_workqueue(client->trunc_wq); | ||
| 722 | 494 | ||
| 723 | bdi_destroy(&client->backing_dev_info); | 495 | destroy_mount_options(fsc->mount_options); |
| 724 | 496 | ||
| 725 | if (client->msgr) | 497 | ceph_fs_debugfs_cleanup(fsc); |
| 726 | ceph_messenger_destroy(client->msgr); | ||
| 727 | mempool_destroy(client->wb_pagevec_pool); | ||
| 728 | 498 | ||
| 729 | destroy_mount_args(client->mount_args); | 499 | ceph_destroy_client(fsc->client); |
| 730 | 500 | ||
| 731 | kfree(client); | 501 | kfree(fsc); |
| 732 | dout("destroy_client %p done\n", client); | 502 | dout("destroy_fs_client %p done\n", fsc); |
| 733 | } | 503 | } |
| 734 | 504 | ||
| 735 | /* | 505 | /* |
| 736 | * Initially learn our fsid, or verify an fsid matches. | 506 | * caches |
| 737 | */ | 507 | */ |
| 738 | int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) | 508 | struct kmem_cache *ceph_inode_cachep; |
| 509 | struct kmem_cache *ceph_cap_cachep; | ||
| 510 | struct kmem_cache *ceph_dentry_cachep; | ||
| 511 | struct kmem_cache *ceph_file_cachep; | ||
| 512 | |||
| 513 | static void ceph_inode_init_once(void *foo) | ||
| 739 | { | 514 | { |
| 740 | if (client->have_fsid) { | 515 | struct ceph_inode_info *ci = foo; |
| 741 | if (ceph_fsid_compare(&client->fsid, fsid)) { | 516 | inode_init_once(&ci->vfs_inode); |
| 742 | pr_err("bad fsid, had %pU got %pU", | 517 | } |
| 743 | &client->fsid, fsid); | 518 | |
| 744 | return -1; | 519 | static int __init init_caches(void) |
| 745 | } | 520 | { |
| 746 | } else { | 521 | ceph_inode_cachep = kmem_cache_create("ceph_inode_info", |
| 747 | pr_info("client%lld fsid %pU\n", client->monc.auth->global_id, | 522 | sizeof(struct ceph_inode_info), |
| 748 | fsid); | 523 | __alignof__(struct ceph_inode_info), |
| 749 | memcpy(&client->fsid, fsid, sizeof(*fsid)); | 524 | (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), |
| 750 | ceph_debugfs_client_init(client); | 525 | ceph_inode_init_once); |
| 751 | client->have_fsid = true; | 526 | if (ceph_inode_cachep == NULL) |
| 752 | } | 527 | return -ENOMEM; |
| 528 | |||
| 529 | ceph_cap_cachep = KMEM_CACHE(ceph_cap, | ||
| 530 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 531 | if (ceph_cap_cachep == NULL) | ||
| 532 | goto bad_cap; | ||
| 533 | |||
| 534 | ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info, | ||
| 535 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 536 | if (ceph_dentry_cachep == NULL) | ||
| 537 | goto bad_dentry; | ||
| 538 | |||
| 539 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, | ||
| 540 | SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); | ||
| 541 | if (ceph_file_cachep == NULL) | ||
| 542 | goto bad_file; | ||
| 543 | |||
| 753 | return 0; | 544 | return 0; |
| 545 | |||
| 546 | bad_file: | ||
| 547 | kmem_cache_destroy(ceph_dentry_cachep); | ||
| 548 | bad_dentry: | ||
| 549 | kmem_cache_destroy(ceph_cap_cachep); | ||
| 550 | bad_cap: | ||
| 551 | kmem_cache_destroy(ceph_inode_cachep); | ||
| 552 | return -ENOMEM; | ||
| 754 | } | 553 | } |
| 755 | 554 | ||
| 555 | static void destroy_caches(void) | ||
| 556 | { | ||
| 557 | kmem_cache_destroy(ceph_inode_cachep); | ||
| 558 | kmem_cache_destroy(ceph_cap_cachep); | ||
| 559 | kmem_cache_destroy(ceph_dentry_cachep); | ||
| 560 | kmem_cache_destroy(ceph_file_cachep); | ||
| 561 | } | ||
| 562 | |||
| 563 | |||
| 756 | /* | 564 | /* |
| 757 | * true if we have the mon map (and have thus joined the cluster) | 565 | * ceph_umount_begin - initiate forced umount. Tear down down the |
| 566 | * mount, skipping steps that may hang while waiting for server(s). | ||
| 758 | */ | 567 | */ |
| 759 | static int have_mon_and_osd_map(struct ceph_client *client) | 568 | static void ceph_umount_begin(struct super_block *sb) |
| 760 | { | 569 | { |
| 761 | return client->monc.monmap && client->monc.monmap->epoch && | 570 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
| 762 | client->osdc.osdmap && client->osdc.osdmap->epoch; | 571 | |
| 572 | dout("ceph_umount_begin - starting forced umount\n"); | ||
| 573 | if (!fsc) | ||
| 574 | return; | ||
| 575 | fsc->mount_state = CEPH_MOUNT_SHUTDOWN; | ||
| 576 | return; | ||
| 763 | } | 577 | } |
| 764 | 578 | ||
| 579 | static const struct super_operations ceph_super_ops = { | ||
| 580 | .alloc_inode = ceph_alloc_inode, | ||
| 581 | .destroy_inode = ceph_destroy_inode, | ||
| 582 | .write_inode = ceph_write_inode, | ||
| 583 | .sync_fs = ceph_sync_fs, | ||
| 584 | .put_super = ceph_put_super, | ||
| 585 | .show_options = ceph_show_options, | ||
| 586 | .statfs = ceph_statfs, | ||
| 587 | .umount_begin = ceph_umount_begin, | ||
| 588 | }; | ||
| 589 | |||
| 765 | /* | 590 | /* |
| 766 | * Bootstrap mount by opening the root directory. Note the mount | 591 | * Bootstrap mount by opening the root directory. Note the mount |
| 767 | * @started time from caller, and time out if this takes too long. | 592 | * @started time from caller, and time out if this takes too long. |
| 768 | */ | 593 | */ |
| 769 | static struct dentry *open_root_dentry(struct ceph_client *client, | 594 | static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, |
| 770 | const char *path, | 595 | const char *path, |
| 771 | unsigned long started) | 596 | unsigned long started) |
| 772 | { | 597 | { |
| 773 | struct ceph_mds_client *mdsc = &client->mdsc; | 598 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 774 | struct ceph_mds_request *req = NULL; | 599 | struct ceph_mds_request *req = NULL; |
| 775 | int err; | 600 | int err; |
| 776 | struct dentry *root; | 601 | struct dentry *root; |
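
The structural point of the large hunk above is that option parsing is now split in two: parse_mount_options() fills only the fs-level ceph_mount_options and forwards the raw option string to ceph_parse_options(), passing parse_fsopt_token() as the callback for any token the generic layer does not recognize (the old osdtimeout/fsid/name/secret handling drops out of this file accordingly). A userspace sketch of that delegation shape follows; the token handling, the parse_options() signature, and the option names are stand-ins, not the real shared-library interface:

        /* Userspace sketch of the two-level option parsing introduced here:
         * a "generic" parser consumes the options it knows and forwards the
         * rest to an fs-supplied callback.  Names are stand-ins. */
        #define _DEFAULT_SOURCE
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct fs_opts { int rsize; int dirstat; };

        /* fs-specific callback: handles tokens the generic layer rejected */
        static int parse_fsopt_token(char *tok, void *private)
        {
                struct fs_opts *fsopt = private;

                if (sscanf(tok, "rsize=%d", &fsopt->rsize) == 1)
                        return 0;
                if (strcmp(tok, "dirstat") == 0) {
                        fsopt->dirstat = 1;
                        return 0;
                }
                return -1;                      /* unknown option */
        }

        /* stand-in for the generic parser: consume what it understands,
         * delegate everything else to the callback */
        static int parse_options(char *options, int *mount_timeout,
                                 int (*cb)(char *, void *), void *private)
        {
                char *tok;

                while ((tok = strsep(&options, ",")) != NULL) {
                        if (!*tok)
                                continue;
                        if (sscanf(tok, "mount_timeout=%d", mount_timeout) == 1)
                                continue;       /* generic option */
                        if (cb(tok, private) < 0) {
                                fprintf(stderr, "bad option '%s'\n", tok);
                                return -1;
                        }
                }
                return 0;
        }

        int main(void)
        {
                char opts[] = "rsize=65536,mount_timeout=30,dirstat";
                struct fs_opts fsopt = { 0 };
                int mount_timeout = 60;

                if (parse_options(opts, &mount_timeout, parse_fsopt_token, &fsopt))
                        return 1;
                printf("rsize=%d dirstat=%d mount_timeout=%d\n",
                       fsopt.rsize, fsopt.dirstat, mount_timeout);
                return 0;
        }

With "rsize=65536,mount_timeout=30,dirstat" this prints rsize=65536 dirstat=1 mount_timeout=30: the generic layer keeps mount_timeout for itself and delegates the other two, which is the division of labour the new code sets up between the shared ceph core and fs/ceph.
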
| @@ -784,14 +609,14 @@ static struct dentry *open_root_dentry(struct ceph_client *client, | |||
| 784 | req->r_ino1.ino = CEPH_INO_ROOT; | 609 | req->r_ino1.ino = CEPH_INO_ROOT; |
| 785 | req->r_ino1.snap = CEPH_NOSNAP; | 610 | req->r_ino1.snap = CEPH_NOSNAP; |
| 786 | req->r_started = started; | 611 | req->r_started = started; |
| 787 | req->r_timeout = client->mount_args->mount_timeout * HZ; | 612 | req->r_timeout = fsc->client->options->mount_timeout * HZ; |
| 788 | req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); | 613 | req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); |
| 789 | req->r_num_caps = 2; | 614 | req->r_num_caps = 2; |
| 790 | err = ceph_mdsc_do_request(mdsc, NULL, req); | 615 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
| 791 | if (err == 0) { | 616 | if (err == 0) { |
| 792 | dout("open_root_inode success\n"); | 617 | dout("open_root_inode success\n"); |
| 793 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && | 618 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && |
| 794 | client->sb->s_root == NULL) | 619 | fsc->sb->s_root == NULL) |
| 795 | root = d_alloc_root(req->r_target_inode); | 620 | root = d_alloc_root(req->r_target_inode); |
| 796 | else | 621 | else |
| 797 | root = d_obtain_alias(req->r_target_inode); | 622 | root = d_obtain_alias(req->r_target_inode); |
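
compare_mount_options(), introduced in the big hunk above, also leans on the new struct layout: every ceph_mount_options field up to snapdir_name is a plain integer, so one memcmp() over offsetof(struct ceph_mount_options, snapdir_name) bytes compares them all, and only the string member needs the NULL-tolerant strcmp_null(). A small standalone sketch of that prefix-compare trick (struct and values invented; the kernel side additionally relies on the options struct being kzalloc'd, so padding bytes are zero):

        /* Sketch of the memcmp-up-to-offsetof trick used by the new
         * compare_mount_options(): integers first, strings after the cut-off.
         * The struct here is invented; only the technique matches. */
        #include <stddef.h>
        #include <stdio.h>
        #include <string.h>

        struct opts {
                int wsize;
                int rsize;
                int flags;
                int congestion_kb;
                /* everything from here on needs per-field comparison */
                const char *snapdir_name;
        };

        static int strcmp_null(const char *s1, const char *s2)
        {
                if (!s1 && !s2)
                        return 0;
                if (s1 && !s2)
                        return -1;
                if (!s1 && s2)
                        return 1;
                return strcmp(s1, s2);
        }

        static int compare_opts(const struct opts *a, const struct opts *b)
        {
                size_t ofs = offsetof(struct opts, snapdir_name);
                int ret = memcmp(a, b, ofs);    /* all integer fields at once */

                if (ret)
                        return ret;
                return strcmp_null(a->snapdir_name, b->snapdir_name);
        }

        int main(void)
        {
                struct opts a = { 65536, 524288, 1, 8192, ".snap" };
                struct opts b = { 65536, 524288, 1, 8192, ".snapshot" };

                printf("%d\n", compare_opts(&a, &a) == 0);   /* 1: identical    */
                printf("%d\n", compare_opts(&a, &b) != 0);   /* 1: names differ */
                return 0;
        }
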
| @@ -804,105 +629,86 @@ static struct dentry *open_root_dentry(struct ceph_client *client, | |||
| 804 | return root; | 629 | return root; |
| 805 | } | 630 | } |
| 806 | 631 | ||
| 632 | |||
| 633 | |||
| 634 | |||
| 807 | /* | 635 | /* |
| 808 | * mount: join the ceph cluster, and open root directory. | 636 | * mount: join the ceph cluster, and open root directory. |
| 809 | */ | 637 | */ |
| 810 | static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, | 638 | static int ceph_mount(struct ceph_fs_client *fsc, struct vfsmount *mnt, |
| 811 | const char *path) | 639 | const char *path) |
| 812 | { | 640 | { |
| 813 | struct ceph_entity_addr *myaddr = NULL; | ||
| 814 | int err; | 641 | int err; |
| 815 | unsigned long timeout = client->mount_args->mount_timeout * HZ; | ||
| 816 | unsigned long started = jiffies; /* note the start time */ | 642 | unsigned long started = jiffies; /* note the start time */ |
| 817 | struct dentry *root; | 643 | struct dentry *root; |
| 644 | int first = 0; /* first vfsmount for this super_block */ | ||
| 818 | 645 | ||
| 819 | dout("mount start\n"); | 646 | dout("mount start\n"); |
| 820 | mutex_lock(&client->mount_mutex); | 647 | mutex_lock(&fsc->client->mount_mutex); |
| 821 | |||
| 822 | /* initialize the messenger */ | ||
| 823 | if (client->msgr == NULL) { | ||
| 824 | if (ceph_test_opt(client, MYIP)) | ||
| 825 | myaddr = &client->mount_args->my_addr; | ||
| 826 | client->msgr = ceph_messenger_create(myaddr); | ||
| 827 | if (IS_ERR(client->msgr)) { | ||
| 828 | err = PTR_ERR(client->msgr); | ||
| 829 | client->msgr = NULL; | ||
| 830 | goto out; | ||
| 831 | } | ||
| 832 | client->msgr->nocrc = ceph_test_opt(client, NOCRC); | ||
| 833 | } | ||
| 834 | 648 | ||
| 835 | /* open session, and wait for mon, mds, and osd maps */ | 649 | err = __ceph_open_session(fsc->client, started); |
| 836 | err = ceph_monc_open_session(&client->monc); | ||
| 837 | if (err < 0) | 650 | if (err < 0) |
| 838 | goto out; | 651 | goto out; |
| 839 | 652 | ||
| 840 | while (!have_mon_and_osd_map(client)) { | ||
| 841 | err = -EIO; | ||
| 842 | if (timeout && time_after_eq(jiffies, started + timeout)) | ||
| 843 | goto out; | ||
| 844 | |||
| 845 | /* wait */ | ||
| 846 | dout("mount waiting for mon_map\n"); | ||
| 847 | err = wait_event_interruptible_timeout(client->auth_wq, | ||
| 848 | have_mon_and_osd_map(client) || (client->auth_err < 0), | ||
| 849 | timeout); | ||
| 850 | if (err == -EINTR || err == -ERESTARTSYS) | ||
| 851 | goto out; | ||
| 852 | if (client->auth_err < 0) { | ||
| 853 | err = client->auth_err; | ||
| 854 | goto out; | ||
| 855 | } | ||
| 856 | } | ||
| 857 | |||
| 858 | dout("mount opening root\n"); | 653 | dout("mount opening root\n"); |
| 859 | root = open_root_dentry(client, "", started); | 654 | root = open_root_dentry(fsc, "", started); |
| 860 | if (IS_ERR(root)) { | 655 | if (IS_ERR(root)) { |
| 861 | err = PTR_ERR(root); | 656 | err = PTR_ERR(root); |
| 862 | goto out; | 657 | goto out; |
| 863 | } | 658 | } |
| 864 | if (client->sb->s_root) | 659 | if (fsc->sb->s_root) { |
| 865 | dput(root); | 660 | dput(root); |
| 866 | else | 661 | } else { |
| 867 | client->sb->s_root = root; | 662 | fsc->sb->s_root = root; |
| 663 | first = 1; | ||
| 664 | |||
| 665 | err = ceph_fs_debugfs_init(fsc); | ||
| 666 | if (err < 0) | ||
| 667 | goto fail; | ||
| 668 | } | ||
| 868 | 669 | ||
| 869 | if (path[0] == 0) { | 670 | if (path[0] == 0) { |
| 870 | dget(root); | 671 | dget(root); |
| 871 | } else { | 672 | } else { |
| 872 | dout("mount opening base mountpoint\n"); | 673 | dout("mount opening base mountpoint\n"); |
| 873 | root = open_root_dentry(client, path, started); | 674 | root = open_root_dentry(fsc, path, started); |
| 874 | if (IS_ERR(root)) { | 675 | if (IS_ERR(root)) { |
| 875 | err = PTR_ERR(root); | 676 | err = PTR_ERR(root); |
| 876 | dput(client->sb->s_root); | 677 | goto fail; |
| 877 | client->sb->s_root = NULL; | ||
| 878 | goto out; | ||
| 879 | } | 678 | } |
| 880 | } | 679 | } |
| 881 | 680 | ||
| 882 | mnt->mnt_root = root; | 681 | mnt->mnt_root = root; |
| 883 | mnt->mnt_sb = client->sb; | 682 | mnt->mnt_sb = fsc->sb; |
| 884 | 683 | ||
| 885 | client->mount_state = CEPH_MOUNT_MOUNTED; | 684 | fsc->mount_state = CEPH_MOUNT_MOUNTED; |
| 886 | dout("mount success\n"); | 685 | dout("mount success\n"); |
| 887 | err = 0; | 686 | err = 0; |
| 888 | 687 | ||
| 889 | out: | 688 | out: |
| 890 | mutex_unlock(&client->mount_mutex); | 689 | mutex_unlock(&fsc->client->mount_mutex); |
| 891 | return err; | 690 | return err; |
| 691 | |||
| 692 | fail: | ||
| 693 | if (first) { | ||
| 694 | dput(fsc->sb->s_root); | ||
| 695 | fsc->sb->s_root = NULL; | ||
| 696 | } | ||
| 697 | goto out; | ||
| 892 | } | 698 | } |
| 893 | 699 | ||
| 894 | static int ceph_set_super(struct super_block *s, void *data) | 700 | static int ceph_set_super(struct super_block *s, void *data) |
| 895 | { | 701 | { |
| 896 | struct ceph_client *client = data; | 702 | struct ceph_fs_client *fsc = data; |
| 897 | int ret; | 703 | int ret; |
| 898 | 704 | ||
| 899 | dout("set_super %p data %p\n", s, data); | 705 | dout("set_super %p data %p\n", s, data); |
| 900 | 706 | ||
| 901 | s->s_flags = client->mount_args->sb_flags; | 707 | s->s_flags = fsc->mount_options->sb_flags; |
| 902 | s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ | 708 | s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ |
| 903 | 709 | ||
| 904 | s->s_fs_info = client; | 710 | s->s_fs_info = fsc; |
| 905 | client->sb = s; | 711 | fsc->sb = s; |
| 906 | 712 | ||
| 907 | s->s_op = &ceph_super_ops; | 713 | s->s_op = &ceph_super_ops; |
| 908 | s->s_export_op = &ceph_export_ops; | 714 | s->s_export_op = &ceph_export_ops; |
| @@ -917,7 +723,7 @@ static int ceph_set_super(struct super_block *s, void *data) | |||
| 917 | 723 | ||
| 918 | fail: | 724 | fail: |
| 919 | s->s_fs_info = NULL; | 725 | s->s_fs_info = NULL; |
| 920 | client->sb = NULL; | 726 | fsc->sb = NULL; |
| 921 | return ret; | 727 | return ret; |
| 922 | } | 728 | } |
| 923 | 729 | ||
| @@ -926,30 +732,23 @@ fail: | |||
| 926 | */ | 732 | */ |
| 927 | static int ceph_compare_super(struct super_block *sb, void *data) | 733 | static int ceph_compare_super(struct super_block *sb, void *data) |
| 928 | { | 734 | { |
| 929 | struct ceph_client *new = data; | 735 | struct ceph_fs_client *new = data; |
| 930 | struct ceph_mount_args *args = new->mount_args; | 736 | struct ceph_mount_options *fsopt = new->mount_options; |
| 931 | struct ceph_client *other = ceph_sb_to_client(sb); | 737 | struct ceph_options *opt = new->client->options; |
| 932 | int i; | 738 | struct ceph_fs_client *other = ceph_sb_to_client(sb); |
| 933 | 739 | ||
| 934 | dout("ceph_compare_super %p\n", sb); | 740 | dout("ceph_compare_super %p\n", sb); |
| 935 | if (args->flags & CEPH_OPT_FSID) { | 741 | |
| 936 | if (ceph_fsid_compare(&args->fsid, &other->fsid)) { | 742 | if (compare_mount_options(fsopt, opt, other)) { |
| 937 | dout("fsid doesn't match\n"); | 743 | dout("monitor(s)/mount options don't match\n"); |
| 938 | return 0; | 744 | return 0; |
| 939 | } | ||
| 940 | } else { | ||
| 941 | /* do we share (a) monitor? */ | ||
| 942 | for (i = 0; i < new->monc.monmap->num_mon; i++) | ||
| 943 | if (ceph_monmap_contains(other->monc.monmap, | ||
| 944 | &new->monc.monmap->mon_inst[i].addr)) | ||
| 945 | break; | ||
| 946 | if (i == new->monc.monmap->num_mon) { | ||
| 947 | dout("mon ip not part of monmap\n"); | ||
| 948 | return 0; | ||
| 949 | } | ||
| 950 | dout("mon ip matches existing sb %p\n", sb); | ||
| 951 | } | 745 | } |
| 952 | if (args->sb_flags != other->mount_args->sb_flags) { | 746 | if ((opt->flags & CEPH_OPT_FSID) && |
| 747 | ceph_fsid_compare(&opt->fsid, &other->client->fsid)) { | ||
| 748 | dout("fsid doesn't match\n"); | ||
| 749 | return 0; | ||
| 750 | } | ||
| 751 | if (fsopt->sb_flags != other->mount_options->sb_flags) { | ||
| 953 | dout("flags differ\n"); | 752 | dout("flags differ\n"); |
| 954 | return 0; | 753 | return 0; |
| 955 | } | 754 | } |
| @@ -961,19 +760,20 @@ static int ceph_compare_super(struct super_block *sb, void *data) | |||
| 961 | */ | 760 | */ |
| 962 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); | 761 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); |
| 963 | 762 | ||
| 964 | static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) | 763 | static int ceph_register_bdi(struct super_block *sb, |
| 764 | struct ceph_fs_client *fsc) | ||
| 965 | { | 765 | { |
| 966 | int err; | 766 | int err; |
| 967 | 767 | ||
| 968 | /* set ra_pages based on rsize mount option? */ | 768 | /* set ra_pages based on rsize mount option? */ |
| 969 | if (client->mount_args->rsize >= PAGE_CACHE_SIZE) | 769 | if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) |
| 970 | client->backing_dev_info.ra_pages = | 770 | fsc->backing_dev_info.ra_pages = |
| 971 | (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) | 771 | (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) |
| 972 | >> PAGE_SHIFT; | 772 | >> PAGE_SHIFT; |
| 973 | err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d", | 773 | err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d", |
| 974 | atomic_long_inc_return(&bdi_seq)); | 774 | atomic_long_inc_return(&bdi_seq)); |
| 975 | if (!err) | 775 | if (!err) |
| 976 | sb->s_bdi = &client->backing_dev_info; | 776 | sb->s_bdi = &fsc->backing_dev_info; |
| 977 | return err; | 777 | return err; |
| 978 | } | 778 | } |
| 979 | 779 | ||
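A quick sanity check on the ra_pages conversion in ceph_register_bdi() above: the expression (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT simply rounds the rsize mount option up to whole pages of readahead. The throwaway program below assumes 4 KiB pages (PAGE_CACHE_SIZE == 4096, PAGE_SHIFT == 12) and is purely illustrative, not part of the patch.

#include <stdio.h>

/* Illustrative stand-in for the rsize -> ra_pages conversion done in
 * ceph_register_bdi(), assuming 4 KiB pages (PAGE_SHIFT == 12). */
int main(void)
{
	unsigned long rsizes[] = { 4096, 65536, 524288, 524289 };
	int i;

	for (i = 0; i < 4; i++) {
		/* round rsize up to whole pages, as the kernel expression does */
		unsigned long ra_pages = (rsizes[i] + 4096 - 1) >> 12;

		printf("rsize=%7lu -> ra_pages=%lu\n", rsizes[i], ra_pages);
	}
	return 0;	/* 524288 gives 128 pages; 524289 rounds up to 129 */
}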
| @@ -982,46 +782,52 @@ static int ceph_get_sb(struct file_system_type *fs_type, | |||
| 982 | struct vfsmount *mnt) | 782 | struct vfsmount *mnt) |
| 983 | { | 783 | { |
| 984 | struct super_block *sb; | 784 | struct super_block *sb; |
| 985 | struct ceph_client *client; | 785 | struct ceph_fs_client *fsc; |
| 986 | int err; | 786 | int err; |
| 987 | int (*compare_super)(struct super_block *, void *) = ceph_compare_super; | 787 | int (*compare_super)(struct super_block *, void *) = ceph_compare_super; |
| 988 | const char *path = NULL; | 788 | const char *path = NULL; |
| 989 | struct ceph_mount_args *args; | 789 | struct ceph_mount_options *fsopt = NULL; |
| 790 | struct ceph_options *opt = NULL; | ||
| 990 | 791 | ||
| 991 | dout("ceph_get_sb\n"); | 792 | dout("ceph_get_sb\n"); |
| 992 | args = parse_mount_args(flags, data, dev_name, &path); | 793 | err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); |
| 993 | if (IS_ERR(args)) { | 794 | if (err < 0) |
| 994 | err = PTR_ERR(args); | ||
| 995 | goto out_final; | 795 | goto out_final; |
| 996 | } | ||
| 997 | 796 | ||
| 998 | /* create client (which we may/may not use) */ | 797 | /* create client (which we may/may not use) */ |
| 999 | client = ceph_create_client(args); | 798 | fsc = create_fs_client(fsopt, opt); |
| 1000 | if (IS_ERR(client)) { | 799 | if (IS_ERR(fsc)) { |
| 1001 | err = PTR_ERR(client); | 800 | err = PTR_ERR(fsc); |
| 801 | kfree(fsopt); | ||
| 802 | kfree(opt); | ||
| 1002 | goto out_final; | 803 | goto out_final; |
| 1003 | } | 804 | } |
| 1004 | 805 | ||
| 1005 | if (client->mount_args->flags & CEPH_OPT_NOSHARE) | 806 | err = ceph_mdsc_init(fsc); |
| 807 | if (err < 0) | ||
| 808 | goto out; | ||
| 809 | |||
| 810 | if (ceph_test_opt(fsc->client, NOSHARE)) | ||
| 1006 | compare_super = NULL; | 811 | compare_super = NULL; |
| 1007 | sb = sget(fs_type, compare_super, ceph_set_super, client); | 812 | sb = sget(fs_type, compare_super, ceph_set_super, fsc); |
| 1008 | if (IS_ERR(sb)) { | 813 | if (IS_ERR(sb)) { |
| 1009 | err = PTR_ERR(sb); | 814 | err = PTR_ERR(sb); |
| 1010 | goto out; | 815 | goto out; |
| 1011 | } | 816 | } |
| 1012 | 817 | ||
| 1013 | if (ceph_sb_to_client(sb) != client) { | 818 | if (ceph_sb_to_client(sb) != fsc) { |
| 1014 | ceph_destroy_client(client); | 819 | ceph_mdsc_destroy(fsc); |
| 1015 | client = ceph_sb_to_client(sb); | 820 | destroy_fs_client(fsc); |
| 1016 | dout("get_sb got existing client %p\n", client); | 821 | fsc = ceph_sb_to_client(sb); |
| 822 | dout("get_sb got existing client %p\n", fsc); | ||
| 1017 | } else { | 823 | } else { |
| 1018 | dout("get_sb using new client %p\n", client); | 824 | dout("get_sb using new client %p\n", fsc); |
| 1019 | err = ceph_register_bdi(sb, client); | 825 | err = ceph_register_bdi(sb, fsc); |
| 1020 | if (err < 0) | 826 | if (err < 0) |
| 1021 | goto out_splat; | 827 | goto out_splat; |
| 1022 | } | 828 | } |
| 1023 | 829 | ||
| 1024 | err = ceph_mount(client, mnt, path); | 830 | err = ceph_mount(fsc, mnt, path); |
| 1025 | if (err < 0) | 831 | if (err < 0) |
| 1026 | goto out_splat; | 832 | goto out_splat; |
| 1027 | dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root, | 833 | dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root, |
| @@ -1029,12 +835,13 @@ static int ceph_get_sb(struct file_system_type *fs_type, | |||
| 1029 | return 0; | 835 | return 0; |
| 1030 | 836 | ||
| 1031 | out_splat: | 837 | out_splat: |
| 1032 | ceph_mdsc_close_sessions(&client->mdsc); | 838 | ceph_mdsc_close_sessions(fsc->mdsc); |
| 1033 | deactivate_locked_super(sb); | 839 | deactivate_locked_super(sb); |
| 1034 | goto out_final; | 840 | goto out_final; |
| 1035 | 841 | ||
| 1036 | out: | 842 | out: |
| 1037 | ceph_destroy_client(client); | 843 | ceph_mdsc_destroy(fsc); |
| 844 | destroy_fs_client(fsc); | ||
| 1038 | out_final: | 845 | out_final: |
| 1039 | dout("ceph_get_sb fail %d\n", err); | 846 | dout("ceph_get_sb fail %d\n", err); |
| 1040 | return err; | 847 | return err; |
| @@ -1042,11 +849,12 @@ out_final: | |||
| 1042 | 849 | ||
| 1043 | static void ceph_kill_sb(struct super_block *s) | 850 | static void ceph_kill_sb(struct super_block *s) |
| 1044 | { | 851 | { |
| 1045 | struct ceph_client *client = ceph_sb_to_client(s); | 852 | struct ceph_fs_client *fsc = ceph_sb_to_client(s); |
| 1046 | dout("kill_sb %p\n", s); | 853 | dout("kill_sb %p\n", s); |
| 1047 | ceph_mdsc_pre_umount(&client->mdsc); | 854 | ceph_mdsc_pre_umount(fsc->mdsc); |
| 1048 | kill_anon_super(s); /* will call put_super after sb is r/o */ | 855 | kill_anon_super(s); /* will call put_super after sb is r/o */ |
| 1049 | ceph_destroy_client(client); | 856 | ceph_mdsc_destroy(fsc); |
| 857 | destroy_fs_client(fsc); | ||
| 1050 | } | 858 | } |
| 1051 | 859 | ||
| 1052 | static struct file_system_type ceph_fs_type = { | 860 | static struct file_system_type ceph_fs_type = { |
| @@ -1062,36 +870,20 @@ static struct file_system_type ceph_fs_type = { | |||
| 1062 | 870 | ||
| 1063 | static int __init init_ceph(void) | 871 | static int __init init_ceph(void) |
| 1064 | { | 872 | { |
| 1065 | int ret = 0; | 873 | int ret = init_caches(); |
| 1066 | |||
| 1067 | ret = ceph_debugfs_init(); | ||
| 1068 | if (ret < 0) | ||
| 1069 | goto out; | ||
| 1070 | |||
| 1071 | ret = ceph_msgr_init(); | ||
| 1072 | if (ret < 0) | ||
| 1073 | goto out_debugfs; | ||
| 1074 | |||
| 1075 | ret = init_caches(); | ||
| 1076 | if (ret) | 874 | if (ret) |
| 1077 | goto out_msgr; | 875 | goto out; |
| 1078 | 876 | ||
| 1079 | ret = register_filesystem(&ceph_fs_type); | 877 | ret = register_filesystem(&ceph_fs_type); |
| 1080 | if (ret) | 878 | if (ret) |
| 1081 | goto out_icache; | 879 | goto out_icache; |
| 1082 | 880 | ||
| 1083 | pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n", | 881 | pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL); |
| 1084 | CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL, | 882 | |
| 1085 | CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT, | ||
| 1086 | CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT); | ||
| 1087 | return 0; | 883 | return 0; |
| 1088 | 884 | ||
| 1089 | out_icache: | 885 | out_icache: |
| 1090 | destroy_caches(); | 886 | destroy_caches(); |
| 1091 | out_msgr: | ||
| 1092 | ceph_msgr_exit(); | ||
| 1093 | out_debugfs: | ||
| 1094 | ceph_debugfs_cleanup(); | ||
| 1095 | out: | 887 | out: |
| 1096 | return ret; | 888 | return ret; |
| 1097 | } | 889 | } |
| @@ -1101,8 +893,6 @@ static void __exit exit_ceph(void) | |||
| 1101 | dout("exit_ceph\n"); | 893 | dout("exit_ceph\n"); |
| 1102 | unregister_filesystem(&ceph_fs_type); | 894 | unregister_filesystem(&ceph_fs_type); |
| 1103 | destroy_caches(); | 895 | destroy_caches(); |
| 1104 | ceph_msgr_exit(); | ||
| 1105 | ceph_debugfs_cleanup(); | ||
| 1106 | } | 896 | } |
| 1107 | 897 | ||
| 1108 | module_init(init_ceph); | 898 | module_init(init_ceph); |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b87638e84c4b..1886294e12f7 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #ifndef _FS_CEPH_SUPER_H | 1 | #ifndef _FS_CEPH_SUPER_H |
| 2 | #define _FS_CEPH_SUPER_H | 2 | #define _FS_CEPH_SUPER_H |
| 3 | 3 | ||
| 4 | #include "ceph_debug.h" | 4 | #include <linux/ceph/ceph_debug.h> |
| 5 | 5 | ||
| 6 | #include <asm/unaligned.h> | 6 | #include <asm/unaligned.h> |
| 7 | #include <linux/backing-dev.h> | 7 | #include <linux/backing-dev.h> |
| @@ -14,13 +14,7 @@ | |||
| 14 | #include <linux/writeback.h> | 14 | #include <linux/writeback.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | 16 | ||
| 17 | #include "types.h" | 17 | #include <linux/ceph/libceph.h> |
| 18 | #include "messenger.h" | ||
| 19 | #include "msgpool.h" | ||
| 20 | #include "mon_client.h" | ||
| 21 | #include "mds_client.h" | ||
| 22 | #include "osd_client.h" | ||
| 23 | #include "ceph_fs.h" | ||
| 24 | 18 | ||
| 25 | /* f_type in struct statfs */ | 19 | /* f_type in struct statfs */ |
| 26 | #define CEPH_SUPER_MAGIC 0x00c36400 | 20 | #define CEPH_SUPER_MAGIC 0x00c36400 |
| @@ -30,42 +24,25 @@ | |||
| 30 | #define CEPH_BLOCK_SHIFT 20 /* 1 MB */ | 24 | #define CEPH_BLOCK_SHIFT 20 /* 1 MB */ |
| 31 | #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) | 25 | #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) |
| 32 | 26 | ||
| 33 | /* | 27 | #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */ |
| 34 | * Supported features | 28 | #define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */ |
| 35 | */ | 29 | #define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */ |
| 36 | #define CEPH_FEATURE_SUPPORTED CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK | ||
| 37 | #define CEPH_FEATURE_REQUIRED CEPH_FEATURE_NOSRCADDR | ||
| 38 | 30 | ||
| 39 | /* | 31 | #define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES) |
| 40 | * mount options | ||
| 41 | */ | ||
| 42 | #define CEPH_OPT_FSID (1<<0) | ||
| 43 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | ||
| 44 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | ||
| 45 | #define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */ | ||
| 46 | #define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */ | ||
| 47 | #define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */ | ||
| 48 | #define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */ | ||
| 49 | 32 | ||
| 50 | #define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES) | 33 | #define ceph_set_mount_opt(fsc, opt) \ |
| 34 | (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; | ||
| 35 | #define ceph_test_mount_opt(fsc, opt) \ | ||
| 36 | (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt)) | ||
| 51 | 37 | ||
| 52 | #define ceph_set_opt(client, opt) \ | 38 | #define CEPH_MAX_READDIR_DEFAULT 1024 |
| 53 | (client)->mount_args->flags |= CEPH_OPT_##opt; | 39 | #define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024) |
| 54 | #define ceph_test_opt(client, opt) \ | 40 | #define CEPH_SNAPDIRNAME_DEFAULT ".snap" |
| 55 | (!!((client)->mount_args->flags & CEPH_OPT_##opt)) | ||
| 56 | 41 | ||
| 57 | 42 | struct ceph_mount_options { | |
| 58 | struct ceph_mount_args { | ||
| 59 | int sb_flags; | ||
| 60 | int flags; | 43 | int flags; |
| 61 | struct ceph_fsid fsid; | 44 | int sb_flags; |
| 62 | struct ceph_entity_addr my_addr; | 45 | |
| 63 | int num_mon; | ||
| 64 | struct ceph_entity_addr *mon_addr; | ||
| 65 | int mount_timeout; | ||
| 66 | int osd_idle_ttl; | ||
| 67 | int osd_timeout; | ||
| 68 | int osd_keepalive_timeout; | ||
| 69 | int wsize; | 46 | int wsize; |
| 70 | int rsize; /* max readahead */ | 47 | int rsize; /* max readahead */ |
| 71 | int congestion_kb; /* max writeback in flight */ | 48 | int congestion_kb; /* max writeback in flight */ |
| @@ -73,82 +50,25 @@ struct ceph_mount_args { | |||
| 73 | int cap_release_safety; | 50 | int cap_release_safety; |
| 74 | int max_readdir; /* max readdir result (entries) */ | 51 | int max_readdir; /* max readdir result (entries) */ |
| 75 | int max_readdir_bytes; /* max readdir result (bytes) */ | 52 | int max_readdir_bytes; /* max readdir result (bytes) */ |
| 76 | char *snapdir_name; /* default ".snap" */ | ||
| 77 | char *name; | ||
| 78 | char *secret; | ||
| 79 | }; | ||
| 80 | 53 | ||
| 81 | /* | 54 | /* |
| 82 | * defaults | 55 | * everything above this point can be memcmp'd; everything below |
| 83 | */ | 56 | * is handled in compare_mount_options() |
| 84 | #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 | 57 | */ |
| 85 | #define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ | ||
| 86 | #define CEPH_OSD_KEEPALIVE_DEFAULT 5 | ||
| 87 | #define CEPH_OSD_IDLE_TTL_DEFAULT 60 | ||
| 88 | #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ | ||
| 89 | #define CEPH_MAX_READDIR_DEFAULT 1024 | ||
| 90 | #define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024) | ||
| 91 | |||
| 92 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | ||
| 93 | #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) | ||
| 94 | |||
| 95 | #define CEPH_SNAPDIRNAME_DEFAULT ".snap" | ||
| 96 | #define CEPH_AUTH_NAME_DEFAULT "guest" | ||
| 97 | /* | ||
| 98 | * Delay telling the MDS we no longer want caps, in case we reopen | ||
| 99 | * the file. Delay a minimum amount of time, even if we send a cap | ||
| 100 | * message for some other reason. Otherwise, take the oppotunity to | ||
| 101 | * update the mds to avoid sending another message later. | ||
| 102 | */ | ||
| 103 | #define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ | ||
| 104 | #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ | ||
| 105 | |||
| 106 | #define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) | ||
| 107 | |||
| 108 | /* mount state */ | ||
| 109 | enum { | ||
| 110 | CEPH_MOUNT_MOUNTING, | ||
| 111 | CEPH_MOUNT_MOUNTED, | ||
| 112 | CEPH_MOUNT_UNMOUNTING, | ||
| 113 | CEPH_MOUNT_UNMOUNTED, | ||
| 114 | CEPH_MOUNT_SHUTDOWN, | ||
| 115 | }; | ||
| 116 | |||
| 117 | /* | ||
| 118 | * subtract jiffies | ||
| 119 | */ | ||
| 120 | static inline unsigned long time_sub(unsigned long a, unsigned long b) | ||
| 121 | { | ||
| 122 | BUG_ON(time_after(b, a)); | ||
| 123 | return (long)a - (long)b; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * per-filesystem client state | ||
| 128 | * | ||
| 129 | * possibly shared by multiple mount points, if they are | ||
| 130 | * mounting the same ceph filesystem/cluster. | ||
| 131 | */ | ||
| 132 | struct ceph_client { | ||
| 133 | struct ceph_fsid fsid; | ||
| 134 | bool have_fsid; | ||
| 135 | 58 | ||
| 136 | struct mutex mount_mutex; /* serialize mount attempts */ | 59 | char *snapdir_name; /* default ".snap" */ |
| 137 | struct ceph_mount_args *mount_args; | 60 | }; |
| 138 | 61 | ||
| 62 | struct ceph_fs_client { | ||
| 139 | struct super_block *sb; | 63 | struct super_block *sb; |
| 140 | 64 | ||
| 141 | unsigned long mount_state; | 65 | struct ceph_mount_options *mount_options; |
| 142 | wait_queue_head_t auth_wq; | 66 | struct ceph_client *client; |
| 143 | |||
| 144 | int auth_err; | ||
| 145 | 67 | ||
| 68 | unsigned long mount_state; | ||
| 146 | int min_caps; /* min caps i added */ | 69 | int min_caps; /* min caps i added */ |
| 147 | 70 | ||
| 148 | struct ceph_messenger *msgr; /* messenger instance */ | 71 | struct ceph_mds_client *mdsc; |
| 149 | struct ceph_mon_client monc; | ||
| 150 | struct ceph_mds_client mdsc; | ||
| 151 | struct ceph_osd_client osdc; | ||
| 152 | 72 | ||
| 153 | /* writeback */ | 73 | /* writeback */ |
| 154 | mempool_t *wb_pagevec_pool; | 74 | mempool_t *wb_pagevec_pool; |
| @@ -160,14 +80,14 @@ struct ceph_client { | |||
| 160 | struct backing_dev_info backing_dev_info; | 80 | struct backing_dev_info backing_dev_info; |
| 161 | 81 | ||
| 162 | #ifdef CONFIG_DEBUG_FS | 82 | #ifdef CONFIG_DEBUG_FS |
| 163 | struct dentry *debugfs_monmap; | 83 | struct dentry *debugfs_dentry_lru, *debugfs_caps; |
| 164 | struct dentry *debugfs_mdsmap, *debugfs_osdmap; | ||
| 165 | struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; | ||
| 166 | struct dentry *debugfs_congestion_kb; | 84 | struct dentry *debugfs_congestion_kb; |
| 167 | struct dentry *debugfs_bdi; | 85 | struct dentry *debugfs_bdi; |
| 86 | struct dentry *debugfs_mdsc, *debugfs_mdsmap; | ||
| 168 | #endif | 87 | #endif |
| 169 | }; | 88 | }; |
| 170 | 89 | ||
| 90 | |||
| 171 | /* | 91 | /* |
| 172 | * File i/o capability. This tracks shared state with the metadata | 92 | * File i/o capability. This tracks shared state with the metadata |
| 173 | * server that allows us to cache or writeback attributes or to read | 93 | * server that allows us to cache or writeback attributes or to read |
| @@ -275,6 +195,20 @@ struct ceph_inode_xattr { | |||
| 275 | int should_free_val; | 195 | int should_free_val; |
| 276 | }; | 196 | }; |
| 277 | 197 | ||
| 198 | /* | ||
| 199 | * Ceph dentry state | ||
| 200 | */ | ||
| 201 | struct ceph_dentry_info { | ||
| 202 | struct ceph_mds_session *lease_session; | ||
| 203 | u32 lease_gen, lease_shared_gen; | ||
| 204 | u32 lease_seq; | ||
| 205 | unsigned long lease_renew_after, lease_renew_from; | ||
| 206 | struct list_head lru; | ||
| 207 | struct dentry *dentry; | ||
| 208 | u64 time; | ||
| 209 | u64 offset; | ||
| 210 | }; | ||
| 211 | |||
| 278 | struct ceph_inode_xattrs_info { | 212 | struct ceph_inode_xattrs_info { |
| 279 | /* | 213 | /* |
| 280 | * (still encoded) xattr blob. we avoid the overhead of parsing | 214 | * (still encoded) xattr blob. we avoid the overhead of parsing |
| @@ -296,11 +230,6 @@ struct ceph_inode_xattrs_info { | |||
| 296 | /* | 230 | /* |
| 297 | * Ceph inode. | 231 | * Ceph inode. |
| 298 | */ | 232 | */ |
| 299 | #define CEPH_I_COMPLETE 1 /* we have complete directory cached */ | ||
| 300 | #define CEPH_I_NODELAY 4 /* do not delay cap release */ | ||
| 301 | #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ | ||
| 302 | #define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ | ||
| 303 | |||
| 304 | struct ceph_inode_info { | 233 | struct ceph_inode_info { |
| 305 | struct ceph_vino i_vino; /* ceph ino + snap */ | 234 | struct ceph_vino i_vino; /* ceph ino + snap */ |
| 306 | 235 | ||
| @@ -391,6 +320,63 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode) | |||
| 391 | return container_of(inode, struct ceph_inode_info, vfs_inode); | 320 | return container_of(inode, struct ceph_inode_info, vfs_inode); |
| 392 | } | 321 | } |
| 393 | 322 | ||
| 323 | static inline struct ceph_vino ceph_vino(struct inode *inode) | ||
| 324 | { | ||
| 325 | return ceph_inode(inode)->i_vino; | ||
| 326 | } | ||
| 327 | |||
| 328 | /* | ||
| 329 | * ino_t is <64 bits on many architectures, blech. | ||
| 330 | * | ||
| 331 | * don't include snap in ino hash, at least for now. | ||
| 332 | */ | ||
| 333 | static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) | ||
| 334 | { | ||
| 335 | ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */ | ||
| 336 | #if BITS_PER_LONG == 32 | ||
| 337 | ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8; | ||
| 338 | if (!ino) | ||
| 339 | ino = 1; | ||
| 340 | #endif | ||
| 341 | return ino; | ||
| 342 | } | ||
| 343 | |||
| 344 | /* for printf-style formatting */ | ||
| 345 | #define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap | ||
| 346 | |||
| 347 | static inline u64 ceph_ino(struct inode *inode) | ||
| 348 | { | ||
| 349 | return ceph_inode(inode)->i_vino.ino; | ||
| 350 | } | ||
| 351 | static inline u64 ceph_snap(struct inode *inode) | ||
| 352 | { | ||
| 353 | return ceph_inode(inode)->i_vino.snap; | ||
| 354 | } | ||
| 355 | |||
| 356 | static inline int ceph_ino_compare(struct inode *inode, void *data) | ||
| 357 | { | ||
| 358 | struct ceph_vino *pvino = (struct ceph_vino *)data; | ||
| 359 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
| 360 | return ci->i_vino.ino == pvino->ino && | ||
| 361 | ci->i_vino.snap == pvino->snap; | ||
| 362 | } | ||
| 363 | |||
| 364 | static inline struct inode *ceph_find_inode(struct super_block *sb, | ||
| 365 | struct ceph_vino vino) | ||
| 366 | { | ||
| 367 | ino_t t = ceph_vino_to_ino(vino); | ||
| 368 | return ilookup5(sb, t, ceph_ino_compare, &vino); | ||
| 369 | } | ||
| 370 | |||
| 371 | |||
| 372 | /* | ||
| 373 | * Ceph inode. | ||
| 374 | */ | ||
| 375 | #define CEPH_I_COMPLETE 1 /* we have complete directory cached */ | ||
| 376 | #define CEPH_I_NODELAY 4 /* do not delay cap release */ | ||
| 377 | #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ | ||
| 378 | #define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ | ||
| 379 | |||
| 394 | static inline void ceph_i_clear(struct inode *inode, unsigned mask) | 380 | static inline void ceph_i_clear(struct inode *inode, unsigned mask) |
| 395 | { | 381 | { |
| 396 | struct ceph_inode_info *ci = ceph_inode(inode); | 382 | struct ceph_inode_info *ci = ceph_inode(inode); |
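The ceph_vino_to_ino() helper moved above folds the 64-bit ceph inode number into a 32-bit ino_t on 32-bit builds by XORing in the high word and reserving 0. The fragment below is a userspace rendering of that 32-bit branch, with uint32_t standing in for ino_t and made-up sample values; collisions are possible, which is why ceph_find_inode() still passes ceph_ino_compare to ilookup5() as the authoritative check.

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the BITS_PER_LONG == 32 branch of ceph_vino_to_ino():
 * fold the high 32 bits of the ceph ino into the low 32 and avoid 0. */
static uint32_t fold_ino32(uint64_t ino64)
{
	uint32_t ino = (uint32_t)ino64;		/* low 32 bits */

	ino ^= (uint32_t)(ino64 >> 32);		/* XOR in the high 32 bits */
	if (!ino)
		ino = 1;			/* never hand out inode 0 */
	return ino;
}

int main(void)
{
	uint64_t samples[] = { 0x1ULL, 0x100000000ULL, 0x100000001ULL };
	int i;

	for (i = 0; i < 3; i++)
		printf("ceph ino 0x%llx -> ino_t 0x%x\n",
		       (unsigned long long)samples[i], fold_ino32(samples[i]));
	return 0;
}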
| @@ -414,8 +400,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask) | |||
| 414 | struct ceph_inode_info *ci = ceph_inode(inode); | 400 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 415 | bool r; | 401 | bool r; |
| 416 | 402 | ||
| 417 | smp_mb(); | 403 | spin_lock(&inode->i_lock); |
| 418 | r = (ci->i_ceph_flags & mask) == mask; | 404 | r = (ci->i_ceph_flags & mask) == mask; |
| 405 | spin_unlock(&inode->i_lock); | ||
| 419 | return r; | 406 | return r; |
| 420 | } | 407 | } |
| 421 | 408 | ||
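The hunk above replaces the bare smp_mb() in ceph_i_test() with a read of i_ceph_flags under inode->i_lock, so the mask test is serialized against flag updates made under the same lock. A minimal userspace analog of that pattern, using a pthread mutex in place of the spinlock (the demo_* names are invented for illustration):

#include <pthread.h>
#include <stdbool.h>

struct demo_inode {
	pthread_mutex_t lock;	/* plays the role of inode->i_lock */
	unsigned flags;		/* plays the role of ci->i_ceph_flags */
};

static void demo_set(struct demo_inode *di, unsigned mask)
{
	pthread_mutex_lock(&di->lock);
	di->flags |= mask;
	pthread_mutex_unlock(&di->lock);
}

static bool demo_test(struct demo_inode *di, unsigned mask)
{
	bool r;

	/* take the writers' lock so the mask test sees a consistent value */
	pthread_mutex_lock(&di->lock);
	r = (di->flags & mask) == mask;
	pthread_mutex_unlock(&di->lock);
	return r;
}

int main(void)
{
	struct demo_inode di = { PTHREAD_MUTEX_INITIALIZER, 0 };

	demo_set(&di, 1 | 4);
	return demo_test(&di, 1 | 4) ? 0 : 1;
}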
| @@ -432,20 +419,6 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, | |||
| 432 | struct ceph_inode_frag *pfrag, | 419 | struct ceph_inode_frag *pfrag, |
| 433 | int *found); | 420 | int *found); |
| 434 | 421 | ||
| 435 | /* | ||
| 436 | * Ceph dentry state | ||
| 437 | */ | ||
| 438 | struct ceph_dentry_info { | ||
| 439 | struct ceph_mds_session *lease_session; | ||
| 440 | u32 lease_gen, lease_shared_gen; | ||
| 441 | u32 lease_seq; | ||
| 442 | unsigned long lease_renew_after, lease_renew_from; | ||
| 443 | struct list_head lru; | ||
| 444 | struct dentry *dentry; | ||
| 445 | u64 time; | ||
| 446 | u64 offset; | ||
| 447 | }; | ||
| 448 | |||
| 449 | static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry) | 422 | static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry) |
| 450 | { | 423 | { |
| 451 | return (struct ceph_dentry_info *)dentry->d_fsdata; | 424 | return (struct ceph_dentry_info *)dentry->d_fsdata; |
| @@ -456,22 +429,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off) | |||
| 456 | return ((loff_t)frag << 32) | (loff_t)off; | 429 | return ((loff_t)frag << 32) | (loff_t)off; |
| 457 | } | 430 | } |
| 458 | 431 | ||
| 459 | /* | ||
| 460 | * ino_t is <64 bits on many architectures, blech. | ||
| 461 | * | ||
| 462 | * don't include snap in ino hash, at least for now. | ||
| 463 | */ | ||
| 464 | static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) | ||
| 465 | { | ||
| 466 | ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */ | ||
| 467 | #if BITS_PER_LONG == 32 | ||
| 468 | ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8; | ||
| 469 | if (!ino) | ||
| 470 | ino = 1; | ||
| 471 | #endif | ||
| 472 | return ino; | ||
| 473 | } | ||
| 474 | |||
| 475 | static inline int ceph_set_ino_cb(struct inode *inode, void *data) | 432 | static inline int ceph_set_ino_cb(struct inode *inode, void *data) |
| 476 | { | 433 | { |
| 477 | ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; | 434 | ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; |
| @@ -479,39 +436,6 @@ static inline int ceph_set_ino_cb(struct inode *inode, void *data) | |||
| 479 | return 0; | 436 | return 0; |
| 480 | } | 437 | } |
| 481 | 438 | ||
| 482 | static inline struct ceph_vino ceph_vino(struct inode *inode) | ||
| 483 | { | ||
| 484 | return ceph_inode(inode)->i_vino; | ||
| 485 | } | ||
| 486 | |||
| 487 | /* for printf-style formatting */ | ||
| 488 | #define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap | ||
| 489 | |||
| 490 | static inline u64 ceph_ino(struct inode *inode) | ||
| 491 | { | ||
| 492 | return ceph_inode(inode)->i_vino.ino; | ||
| 493 | } | ||
| 494 | static inline u64 ceph_snap(struct inode *inode) | ||
| 495 | { | ||
| 496 | return ceph_inode(inode)->i_vino.snap; | ||
| 497 | } | ||
| 498 | |||
| 499 | static inline int ceph_ino_compare(struct inode *inode, void *data) | ||
| 500 | { | ||
| 501 | struct ceph_vino *pvino = (struct ceph_vino *)data; | ||
| 502 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
| 503 | return ci->i_vino.ino == pvino->ino && | ||
| 504 | ci->i_vino.snap == pvino->snap; | ||
| 505 | } | ||
| 506 | |||
| 507 | static inline struct inode *ceph_find_inode(struct super_block *sb, | ||
| 508 | struct ceph_vino vino) | ||
| 509 | { | ||
| 510 | ino_t t = ceph_vino_to_ino(vino); | ||
| 511 | return ilookup5(sb, t, ceph_ino_compare, &vino); | ||
| 512 | } | ||
| 513 | |||
| 514 | |||
| 515 | /* | 439 | /* |
| 516 | * caps helpers | 440 | * caps helpers |
| 517 | */ | 441 | */ |
| @@ -576,18 +500,18 @@ extern int ceph_reserve_caps(struct ceph_mds_client *mdsc, | |||
| 576 | struct ceph_cap_reservation *ctx, int need); | 500 | struct ceph_cap_reservation *ctx, int need); |
| 577 | extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc, | 501 | extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc, |
| 578 | struct ceph_cap_reservation *ctx); | 502 | struct ceph_cap_reservation *ctx); |
| 579 | extern void ceph_reservation_status(struct ceph_client *client, | 503 | extern void ceph_reservation_status(struct ceph_fs_client *client, |
| 580 | int *total, int *avail, int *used, | 504 | int *total, int *avail, int *used, |
| 581 | int *reserved, int *min); | 505 | int *reserved, int *min); |
| 582 | 506 | ||
| 583 | static inline struct ceph_client *ceph_inode_to_client(struct inode *inode) | 507 | static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode) |
| 584 | { | 508 | { |
| 585 | return (struct ceph_client *)inode->i_sb->s_fs_info; | 509 | return (struct ceph_fs_client *)inode->i_sb->s_fs_info; |
| 586 | } | 510 | } |
| 587 | 511 | ||
| 588 | static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb) | 512 | static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb) |
| 589 | { | 513 | { |
| 590 | return (struct ceph_client *)sb->s_fs_info; | 514 | return (struct ceph_fs_client *)sb->s_fs_info; |
| 591 | } | 515 | } |
| 592 | 516 | ||
| 593 | 517 | ||
| @@ -617,51 +541,6 @@ struct ceph_file_info { | |||
| 617 | 541 | ||
| 618 | 542 | ||
| 619 | /* | 543 | /* |
| 620 | * snapshots | ||
| 621 | */ | ||
| 622 | |||
| 623 | /* | ||
| 624 | * A "snap context" is the set of existing snapshots when we | ||
| 625 | * write data. It is used by the OSD to guide its COW behavior. | ||
| 626 | * | ||
| 627 | * The ceph_snap_context is refcounted, and attached to each dirty | ||
| 628 | * page, indicating which context the dirty data belonged when it was | ||
| 629 | * dirtied. | ||
| 630 | */ | ||
| 631 | struct ceph_snap_context { | ||
| 632 | atomic_t nref; | ||
| 633 | u64 seq; | ||
| 634 | int num_snaps; | ||
| 635 | u64 snaps[]; | ||
| 636 | }; | ||
| 637 | |||
| 638 | static inline struct ceph_snap_context * | ||
| 639 | ceph_get_snap_context(struct ceph_snap_context *sc) | ||
| 640 | { | ||
| 641 | /* | ||
| 642 | printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
| 643 | atomic_read(&sc->nref)+1); | ||
| 644 | */ | ||
| 645 | if (sc) | ||
| 646 | atomic_inc(&sc->nref); | ||
| 647 | return sc; | ||
| 648 | } | ||
| 649 | |||
| 650 | static inline void ceph_put_snap_context(struct ceph_snap_context *sc) | ||
| 651 | { | ||
| 652 | if (!sc) | ||
| 653 | return; | ||
| 654 | /* | ||
| 655 | printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
| 656 | atomic_read(&sc->nref)-1); | ||
| 657 | */ | ||
| 658 | if (atomic_dec_and_test(&sc->nref)) { | ||
| 659 | /*printk(" deleting snap_context %p\n", sc);*/ | ||
| 660 | kfree(sc); | ||
| 661 | } | ||
| 662 | } | ||
| 663 | |||
| 664 | /* | ||
| 665 | * A "snap realm" describes a subset of the file hierarchy sharing | 544 | * A "snap realm" describes a subset of the file hierarchy sharing |
| 666 | * the same set of snapshots that apply to it. The realms themselves | 545 | * the same set of snapshots that apply to it. The realms themselves |
| 667 | * are organized into a hierarchy, such that children inherit (some of) | 546 | * are organized into a hierarchy, such that children inherit (some of) |
| @@ -699,16 +578,33 @@ struct ceph_snap_realm { | |||
| 699 | spinlock_t inodes_with_caps_lock; | 578 | spinlock_t inodes_with_caps_lock; |
| 700 | }; | 579 | }; |
| 701 | 580 | ||
| 702 | 581 | static inline int default_congestion_kb(void) | |
| 703 | |||
| 704 | /* | ||
| 705 | * calculate the number of pages a given length and offset map onto, | ||
| 706 | * if we align the data. | ||
| 707 | */ | ||
| 708 | static inline int calc_pages_for(u64 off, u64 len) | ||
| 709 | { | 582 | { |
| 710 | return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - | 583 | int congestion_kb; |
| 711 | (off >> PAGE_CACHE_SHIFT); | 584 | |
| 585 | /* | ||
| 586 | * Copied from NFS | ||
| 587 | * | ||
| 588 | * congestion size, scale with available memory. | ||
| 589 | * | ||
| 590 | * 64MB: 8192k | ||
| 591 | * 128MB: 11585k | ||
| 592 | * 256MB: 16384k | ||
| 593 | * 512MB: 23170k | ||
| 594 | * 1GB: 32768k | ||
| 595 | * 2GB: 46340k | ||
| 596 | * 4GB: 65536k | ||
| 597 | * 8GB: 92681k | ||
| 598 | * 16GB: 131072k | ||
| 599 | * | ||
| 600 | * This allows larger machines to have larger/more transfers. | ||
| 601 | * Limit the default to 256M | ||
| 602 | */ | ||
| 603 | congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); | ||
| 604 | if (congestion_kb > 256*1024) | ||
| 605 | congestion_kb = 256*1024; | ||
| 606 | |||
| 607 | return congestion_kb; | ||
| 712 | } | 608 | } |
| 713 | 609 | ||
| 714 | 610 | ||
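The comment block above (borrowed from NFS) tabulates the default writeback congestion limit, which scales with the square root of RAM and is capped at 256 MiB. The throwaway program below reproduces the arithmetic for 4 KiB pages (PAGE_SHIFT == 12), with a naive integer square root standing in for the kernel's int_sqrt(); integer truncation leaves some results a few KiB short of the table's idealized figures.

#include <stdio.h>

/* Naive integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

/* Reproduce default_congestion_kb() assuming 4 KiB pages (PAGE_SHIFT == 12). */
static unsigned long congestion_kb(unsigned long totalram_pages)
{
	unsigned long kb = (16 * isqrt(totalram_pages)) << (12 - 10);

	return kb > 256 * 1024 ? 256 * 1024 : kb;
}

int main(void)
{
	unsigned long mib[] = { 64, 128, 256, 512, 1024, 4096, 16384 };
	int i;

	for (i = 0; i < 7; i++)
		printf("%6lu MiB RAM -> %6lu KiB congestion limit\n",
		       mib[i], congestion_kb(mib[i] * 1024 / 4));
	return 0;
}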
| @@ -741,16 +637,6 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci) | |||
| 741 | ci_item)->writing; | 637 | ci_item)->writing; |
| 742 | } | 638 | } |
| 743 | 639 | ||
| 744 | |||
| 745 | /* super.c */ | ||
| 746 | extern struct kmem_cache *ceph_inode_cachep; | ||
| 747 | extern struct kmem_cache *ceph_cap_cachep; | ||
| 748 | extern struct kmem_cache *ceph_dentry_cachep; | ||
| 749 | extern struct kmem_cache *ceph_file_cachep; | ||
| 750 | |||
| 751 | extern const char *ceph_msg_type_name(int type); | ||
| 752 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | ||
| 753 | |||
| 754 | /* inode.c */ | 640 | /* inode.c */ |
| 755 | extern const struct inode_operations ceph_file_iops; | 641 | extern const struct inode_operations ceph_file_iops; |
| 756 | 642 | ||
| @@ -857,12 +743,18 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); | |||
| 857 | /* file.c */ | 743 | /* file.c */ |
| 858 | extern const struct file_operations ceph_file_fops; | 744 | extern const struct file_operations ceph_file_fops; |
| 859 | extern const struct address_space_operations ceph_aops; | 745 | extern const struct address_space_operations ceph_aops; |
| 746 | extern int ceph_copy_to_page_vector(struct page **pages, | ||
| 747 | const char *data, | ||
| 748 | loff_t off, size_t len); | ||
| 749 | extern int ceph_copy_from_page_vector(struct page **pages, | ||
| 750 | char *data, | ||
| 751 | loff_t off, size_t len); | ||
| 752 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); | ||
| 860 | extern int ceph_open(struct inode *inode, struct file *file); | 753 | extern int ceph_open(struct inode *inode, struct file *file); |
| 861 | extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, | 754 | extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, |
| 862 | struct nameidata *nd, int mode, | 755 | struct nameidata *nd, int mode, |
| 863 | int locked_dir); | 756 | int locked_dir); |
| 864 | extern int ceph_release(struct inode *inode, struct file *filp); | 757 | extern int ceph_release(struct inode *inode, struct file *filp); |
| 865 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
| 866 | 758 | ||
| 867 | /* dir.c */ | 759 | /* dir.c */ |
| 868 | extern const struct file_operations ceph_dir_fops; | 760 | extern const struct file_operations ceph_dir_fops; |
| @@ -892,12 +784,6 @@ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | |||
| 892 | /* export.c */ | 784 | /* export.c */ |
| 893 | extern const struct export_operations ceph_export_ops; | 785 | extern const struct export_operations ceph_export_ops; |
| 894 | 786 | ||
| 895 | /* debugfs.c */ | ||
| 896 | extern int ceph_debugfs_init(void); | ||
| 897 | extern void ceph_debugfs_cleanup(void); | ||
| 898 | extern int ceph_debugfs_client_init(struct ceph_client *client); | ||
| 899 | extern void ceph_debugfs_client_cleanup(struct ceph_client *client); | ||
| 900 | |||
| 901 | /* locks.c */ | 787 | /* locks.c */ |
| 902 | extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); | 788 | extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); |
| 903 | extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); | 789 | extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); |
| @@ -914,4 +800,8 @@ static inline struct inode *get_dentry_parent_inode(struct dentry *dentry) | |||
| 914 | return NULL; | 800 | return NULL; |
| 915 | } | 801 | } |
| 916 | 802 | ||
| 803 | /* debugfs.c */ | ||
| 804 | extern int ceph_fs_debugfs_init(struct ceph_fs_client *client); | ||
| 805 | extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client); | ||
| 806 | |||
| 917 | #endif /* _FS_CEPH_SUPER_H */ | 807 | #endif /* _FS_CEPH_SUPER_H */ |
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 9578af610b73..6e12a6ba5f79 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | |||
| 2 | #include "super.h" | 3 | #include "super.h" |
| 3 | #include "decode.h" | 4 | #include "mds_client.h" |
| 5 | |||
| 6 | #include <linux/ceph/decode.h> | ||
| 4 | 7 | ||
| 5 | #include <linux/xattr.h> | 8 | #include <linux/xattr.h> |
| 6 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
| @@ -620,12 +623,12 @@ out: | |||
| 620 | static int ceph_sync_setxattr(struct dentry *dentry, const char *name, | 623 | static int ceph_sync_setxattr(struct dentry *dentry, const char *name, |
| 621 | const char *value, size_t size, int flags) | 624 | const char *value, size_t size, int flags) |
| 622 | { | 625 | { |
| 623 | struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); | 626 | struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); |
| 624 | struct inode *inode = dentry->d_inode; | 627 | struct inode *inode = dentry->d_inode; |
| 625 | struct ceph_inode_info *ci = ceph_inode(inode); | 628 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 626 | struct inode *parent_inode = dentry->d_parent->d_inode; | 629 | struct inode *parent_inode = dentry->d_parent->d_inode; |
| 627 | struct ceph_mds_request *req; | 630 | struct ceph_mds_request *req; |
| 628 | struct ceph_mds_client *mdsc = &client->mdsc; | 631 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 629 | int err; | 632 | int err; |
| 630 | int i, nr_pages; | 633 | int i, nr_pages; |
| 631 | struct page **pages = NULL; | 634 | struct page **pages = NULL; |
| @@ -713,10 +716,9 @@ int ceph_setxattr(struct dentry *dentry, const char *name, | |||
| 713 | 716 | ||
| 714 | /* preallocate memory for xattr name, value, index node */ | 717 | /* preallocate memory for xattr name, value, index node */ |
| 715 | err = -ENOMEM; | 718 | err = -ENOMEM; |
| 716 | newname = kmalloc(name_len + 1, GFP_NOFS); | 719 | newname = kmemdup(name, name_len + 1, GFP_NOFS); |
| 717 | if (!newname) | 720 | if (!newname) |
| 718 | goto out; | 721 | goto out; |
| 719 | memcpy(newname, name, name_len + 1); | ||
| 720 | 722 | ||
| 721 | if (val_len) { | 723 | if (val_len) { |
| 722 | newval = kmalloc(val_len + 1, GFP_NOFS); | 724 | newval = kmalloc(val_len + 1, GFP_NOFS); |
| @@ -777,8 +779,8 @@ out: | |||
| 777 | 779 | ||
| 778 | static int ceph_send_removexattr(struct dentry *dentry, const char *name) | 780 | static int ceph_send_removexattr(struct dentry *dentry, const char *name) |
| 779 | { | 781 | { |
| 780 | struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); | 782 | struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); |
| 781 | struct ceph_mds_client *mdsc = &client->mdsc; | 783 | struct ceph_mds_client *mdsc = fsc->mdsc; |
| 782 | struct inode *inode = dentry->d_inode; | 784 | struct inode *inode = dentry->d_inode; |
| 783 | struct inode *parent_inode = dentry->d_parent->d_inode; | 785 | struct inode *parent_inode = dentry->d_parent->d_inode; |
| 784 | struct ceph_mds_request *req; | 786 | struct ceph_mds_request *req; |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index c65c3419dd37..7e83b356cc9e 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -232,7 +232,7 @@ static int | |||
| 232 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 232 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, |
| 233 | void **request_buf) | 233 | void **request_buf) |
| 234 | { | 234 | { |
| 235 | int rc = 0; | 235 | int rc; |
| 236 | 236 | ||
| 237 | rc = cifs_reconnect_tcon(tcon, smb_command); | 237 | rc = cifs_reconnect_tcon(tcon, smb_command); |
| 238 | if (rc) | 238 | if (rc) |
| @@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
| 250 | if (tcon != NULL) | 250 | if (tcon != NULL) |
| 251 | cifs_stats_inc(&tcon->num_smbs_sent); | 251 | cifs_stats_inc(&tcon->num_smbs_sent); |
| 252 | 252 | ||
| 253 | return rc; | 253 | return 0; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | int | 256 | int |
| @@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
| 281 | 281 | ||
| 282 | /* If the return code is zero, this function must fill in request_buf pointer */ | 282 | /* If the return code is zero, this function must fill in request_buf pointer */ |
| 283 | static int | 283 | static int |
| 284 | smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 284 | __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, |
| 285 | void **request_buf /* returned */ , | 285 | void **request_buf, void **response_buf) |
| 286 | void **response_buf /* returned */ ) | ||
| 287 | { | 286 | { |
| 288 | int rc = 0; | ||
| 289 | |||
| 290 | rc = cifs_reconnect_tcon(tcon, smb_command); | ||
| 291 | if (rc) | ||
| 292 | return rc; | ||
| 293 | |||
| 294 | *request_buf = cifs_buf_get(); | 287 | *request_buf = cifs_buf_get(); |
| 295 | if (*request_buf == NULL) { | 288 | if (*request_buf == NULL) { |
| 296 | /* BB should we add a retry in here if not a writepage? */ | 289 | /* BB should we add a retry in here if not a writepage? */ |
| @@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
| 309 | if (tcon != NULL) | 302 | if (tcon != NULL) |
| 310 | cifs_stats_inc(&tcon->num_smbs_sent); | 303 | cifs_stats_inc(&tcon->num_smbs_sent); |
| 311 | 304 | ||
| 312 | return rc; | 305 | return 0; |
| 306 | } | ||
| 307 | |||
| 308 | /* If the return code is zero, this function must fill in request_buf pointer */ | ||
| 309 | static int | ||
| 310 | smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | ||
| 311 | void **request_buf, void **response_buf) | ||
| 312 | { | ||
| 313 | int rc; | ||
| 314 | |||
| 315 | rc = cifs_reconnect_tcon(tcon, smb_command); | ||
| 316 | if (rc) | ||
| 317 | return rc; | ||
| 318 | |||
| 319 | return __smb_init(smb_command, wct, tcon, request_buf, response_buf); | ||
| 320 | } | ||
| 321 | |||
| 322 | static int | ||
| 323 | smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, | ||
| 324 | void **request_buf, void **response_buf) | ||
| 325 | { | ||
| 326 | if (tcon->ses->need_reconnect || tcon->need_reconnect) | ||
| 327 | return -EHOSTDOWN; | ||
| 328 | |||
| 329 | return __smb_init(smb_command, wct, tcon, request_buf, response_buf); | ||
| 313 | } | 330 | } |
| 314 | 331 | ||
| 315 | static int validate_t2(struct smb_t2_rsp *pSMB) | 332 | static int validate_t2(struct smb_t2_rsp *pSMB) |
| @@ -4534,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon) | |||
| 4534 | 4551 | ||
| 4535 | cFYI(1, "In QFSUnixInfo"); | 4552 | cFYI(1, "In QFSUnixInfo"); |
| 4536 | QFSUnixRetry: | 4553 | QFSUnixRetry: |
| 4537 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4554 | rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, |
| 4538 | (void **) &pSMBr); | 4555 | (void **) &pSMB, (void **) &pSMBr); |
| 4539 | if (rc) | 4556 | if (rc) |
| 4540 | return rc; | 4557 | return rc; |
| 4541 | 4558 | ||
| @@ -4604,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap) | |||
| 4604 | cFYI(1, "In SETFSUnixInfo"); | 4621 | cFYI(1, "In SETFSUnixInfo"); |
| 4605 | SETFSUnixRetry: | 4622 | SETFSUnixRetry: |
| 4606 | /* BB switch to small buf init to save memory */ | 4623 | /* BB switch to small buf init to save memory */ |
| 4607 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4624 | rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, |
| 4608 | (void **) &pSMBr); | 4625 | (void **) &pSMB, (void **) &pSMBr); |
| 4609 | if (rc) | 4626 | if (rc) |
| 4610 | return rc; | 4627 | return rc; |
| 4611 | 4628 | ||
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 93f77d438d3c..53cce8cc2224 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
| @@ -801,6 +801,8 @@ retry_iget5_locked: | |||
| 801 | inode->i_flags |= S_NOATIME | S_NOCMTIME; | 801 | inode->i_flags |= S_NOATIME | S_NOCMTIME; |
| 802 | if (inode->i_state & I_NEW) { | 802 | if (inode->i_state & I_NEW) { |
| 803 | inode->i_ino = hash; | 803 | inode->i_ino = hash; |
| 804 | if (S_ISREG(inode->i_mode)) | ||
| 805 | inode->i_data.backing_dev_info = sb->s_bdi; | ||
| 804 | #ifdef CONFIG_CIFS_FSCACHE | 806 | #ifdef CONFIG_CIFS_FSCACHE |
| 805 | /* initialize per-inode cache cookie pointer */ | 807 | /* initialize per-inode cache cookie pointer */ |
| 806 | CIFS_I(inode)->fscache = NULL; | 808 | CIFS_I(inode)->fscache = NULL; |
| @@ -2014,3 +2014,43 @@ fail_creds: | |||
| 2014 | fail: | 2014 | fail: |
| 2015 | return; | 2015 | return; |
| 2016 | } | 2016 | } |
| 2017 | |||
| 2018 | /* | ||
| 2019 | * Core dumping helper functions. These are the only things you should | ||
| 2020 | * do on a core-file: use only these functions to write out all the | ||
| 2021 | * necessary info. | ||
| 2022 | */ | ||
| 2023 | int dump_write(struct file *file, const void *addr, int nr) | ||
| 2024 | { | ||
| 2025 | return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; | ||
| 2026 | } | ||
| 2027 | EXPORT_SYMBOL(dump_write); | ||
| 2028 | |||
| 2029 | int dump_seek(struct file *file, loff_t off) | ||
| 2030 | { | ||
| 2031 | int ret = 1; | ||
| 2032 | |||
| 2033 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | ||
| 2034 | if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | ||
| 2035 | return 0; | ||
| 2036 | } else { | ||
| 2037 | char *buf = (char *)get_zeroed_page(GFP_KERNEL); | ||
| 2038 | |||
| 2039 | if (!buf) | ||
| 2040 | return 0; | ||
| 2041 | while (off > 0) { | ||
| 2042 | unsigned long n = off; | ||
| 2043 | |||
| 2044 | if (n > PAGE_SIZE) | ||
| 2045 | n = PAGE_SIZE; | ||
| 2046 | if (!dump_write(file, buf, n)) { | ||
| 2047 | ret = 0; | ||
| 2048 | break; | ||
| 2049 | } | ||
| 2050 | off -= n; | ||
| 2051 | } | ||
| 2052 | free_page((unsigned long)buf); | ||
| 2053 | } | ||
| 2054 | return ret; | ||
| 2055 | } | ||
| 2056 | EXPORT_SYMBOL(dump_seek); | ||
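The dump_write() and dump_seek() helpers added above are the two primitives a core-dump writer is expected to use: dump_write() bounds-checks and writes, while dump_seek() skips ahead either by seeking or, when the target cannot seek, by writing zeroed pages. The standalone sketch below mirrors that skip logic from userspace; skip_bytes() and the output file name are invented for the demo, and error handling is kept minimal.

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

/* Userspace analog of dump_seek(): skip `off' bytes of output either by
 * seeking (leaving a hole) or, if the fd is not seekable, by writing
 * explicit zeros one page-sized chunk at a time. */
static int skip_bytes(int fd, off_t off, int seekable)
{
	char *buf;

	if (seekable)
		return lseek(fd, off, SEEK_CUR) != (off_t)-1;

	buf = calloc(1, 4096);
	if (!buf)
		return 0;
	while (off > 0) {
		size_t n = off > 4096 ? 4096 : (size_t)off;

		if (write(fd, buf, n) != (ssize_t)n) {
			free(buf);
			return 0;
		}
		off -= n;
	}
	free(buf);
	return 1;
}

int main(void)
{
	int fd = open("core.demo", O_CREAT | O_TRUNC | O_WRONLY, 0600);

	if (fd < 0)
		return 1;
	if (write(fd, "HDR", 3) != 3)	/* like a dump_write() of a header */
		return 1;
	skip_bytes(fd, 4093, 1);	/* pad out to the next 4 KiB boundary */
	if (write(fd, "DATA", 4) != 4)
		return 1;
	close(fd);
	return 0;
}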
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index eb7368ebd8cd..3eadd97324b1 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
| @@ -54,6 +54,9 @@ struct page_collect { | |||
| 54 | unsigned nr_pages; | 54 | unsigned nr_pages; |
| 55 | unsigned long length; | 55 | unsigned long length; |
| 56 | loff_t pg_first; /* keep 64bit also in 32-arches */ | 56 | loff_t pg_first; /* keep 64bit also in 32-arches */ |
| 57 | bool read_4_write; /* This means two things: that the read is sync | ||
| 58 | * And the pages should not be unlocked. | ||
| 59 | */ | ||
| 57 | }; | 60 | }; |
| 58 | 61 | ||
| 59 | static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, | 62 | static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, |
| @@ -71,6 +74,7 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, | |||
| 71 | pcol->nr_pages = 0; | 74 | pcol->nr_pages = 0; |
| 72 | pcol->length = 0; | 75 | pcol->length = 0; |
| 73 | pcol->pg_first = -1; | 76 | pcol->pg_first = -1; |
| 77 | pcol->read_4_write = false; | ||
| 74 | } | 78 | } |
| 75 | 79 | ||
| 76 | static void _pcol_reset(struct page_collect *pcol) | 80 | static void _pcol_reset(struct page_collect *pcol) |
| @@ -347,7 +351,8 @@ static int readpage_strip(void *data, struct page *page) | |||
| 347 | if (PageError(page)) | 351 | if (PageError(page)) |
| 348 | ClearPageError(page); | 352 | ClearPageError(page); |
| 349 | 353 | ||
| 350 | unlock_page(page); | 354 | if (!pcol->read_4_write) |
| 355 | unlock_page(page); | ||
| 351 | EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page," | 356 | EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page," |
| 352 | " splitting\n", inode->i_ino, page->index); | 357 | " splitting\n", inode->i_ino, page->index); |
| 353 | 358 | ||
| @@ -428,6 +433,7 @@ static int _readpage(struct page *page, bool is_sync) | |||
| 428 | /* readpage_strip might call read_exec(,is_sync==false) at several | 433 | /* readpage_strip might call read_exec(,is_sync==false) at several |
| 429 | * places but not if we have a single page. | 434 | * places but not if we have a single page. |
| 430 | */ | 435 | */ |
| 436 | pcol.read_4_write = is_sync; | ||
| 431 | ret = readpage_strip(&pcol, page); | 437 | ret = readpage_strip(&pcol, page); |
| 432 | if (ret) { | 438 | if (ret) { |
| 433 | EXOFS_ERR("_readpage => %d\n", ret); | 439 | EXOFS_ERR("_readpage => %d\n", ret); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5581122bd2c0..ab38fef1c9a1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -72,22 +72,11 @@ int writeback_in_progress(struct backing_dev_info *bdi) | |||
| 72 | static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) | 72 | static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) |
| 73 | { | 73 | { |
| 74 | struct super_block *sb = inode->i_sb; | 74 | struct super_block *sb = inode->i_sb; |
| 75 | struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; | ||
| 76 | 75 | ||
| 77 | /* | 76 | if (strcmp(sb->s_type->name, "bdev") == 0) |
| 78 | * For inodes on standard filesystems, we use superblock's bdi. For | 77 | return inode->i_mapping->backing_dev_info; |
| 79 | * inodes on virtual filesystems, we want to use inode mapping's bdi | 78 | |
| 80 | * because they can possibly point to something useful (think about | 79 | return sb->s_bdi; |
| 81 | * block_dev filesystem). | ||
| 82 | */ | ||
| 83 | if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) { | ||
| 84 | /* Some device inodes could play dirty tricks. Catch them... */ | ||
| 85 | WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi), | ||
| 86 | "Dirtiable inode bdi %s != sb bdi %s\n", | ||
| 87 | bdi->name, sb->s_bdi->name); | ||
| 88 | return sb->s_bdi; | ||
| 89 | } | ||
| 90 | return bdi; | ||
| 91 | } | 80 | } |
| 92 | 81 | ||
| 93 | static void bdi_queue_work(struct backing_dev_info *bdi, | 82 | static void bdi_queue_work(struct backing_dev_info *bdi, |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index d367af1514ef..cde755cca564 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
| @@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
| 1354 | loff_t file_size; | 1354 | loff_t file_size; |
| 1355 | unsigned int num; | 1355 | unsigned int num; |
| 1356 | unsigned int offset; | 1356 | unsigned int offset; |
| 1357 | size_t total_len; | 1357 | size_t total_len = 0; |
| 1358 | 1358 | ||
| 1359 | req = fuse_get_req(fc); | 1359 | req = fuse_get_req(fc); |
| 1360 | if (IS_ERR(req)) | 1360 | if (IS_ERR(req)) |
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index cc9665522148..c465ae066c62 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config GFS2_FS | 1 | config GFS2_FS |
| 2 | tristate "GFS2 file system support" | 2 | tristate "GFS2 file system support" |
| 3 | depends on EXPERIMENTAL && (64BIT || LBDAF) | 3 | depends on (64BIT || LBDAF) |
| 4 | select DLM if GFS2_FS_LOCKING_DLM | 4 | select DLM if GFS2_FS_LOCKING_DLM |
| 5 | select CONFIGFS_FS if GFS2_FS_LOCKING_DLM | 5 | select CONFIGFS_FS if GFS2_FS_LOCKING_DLM |
| 6 | select SYSFS if GFS2_FS_LOCKING_DLM | 6 | select SYSFS if GFS2_FS_LOCKING_DLM |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 194fe16d8418..6b24afb96aae 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
| @@ -36,8 +36,8 @@ | |||
| 36 | #include "glops.h" | 36 | #include "glops.h" |
| 37 | 37 | ||
| 38 | 38 | ||
| 39 | static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, | 39 | void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, |
| 40 | unsigned int from, unsigned int to) | 40 | unsigned int from, unsigned int to) |
| 41 | { | 41 | { |
| 42 | struct buffer_head *head = page_buffers(page); | 42 | struct buffer_head *head = page_buffers(page); |
| 43 | unsigned int bsize = head->b_size; | 43 | unsigned int bsize = head->b_size; |
| @@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, | |||
| 615 | unsigned int data_blocks = 0, ind_blocks = 0, rblocks; | 615 | unsigned int data_blocks = 0, ind_blocks = 0, rblocks; |
| 616 | int alloc_required; | 616 | int alloc_required; |
| 617 | int error = 0; | 617 | int error = 0; |
| 618 | struct gfs2_alloc *al; | 618 | struct gfs2_alloc *al = NULL; |
| 619 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 619 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; |
| 620 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); | 620 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); |
| 621 | unsigned to = from + len; | 621 | unsigned to = from + len; |
| @@ -663,6 +663,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, | |||
| 663 | rblocks += RES_STATFS + RES_QUOTA; | 663 | rblocks += RES_STATFS + RES_QUOTA; |
| 664 | if (&ip->i_inode == sdp->sd_rindex) | 664 | if (&ip->i_inode == sdp->sd_rindex) |
| 665 | rblocks += 2 * RES_STATFS; | 665 | rblocks += 2 * RES_STATFS; |
| 666 | if (alloc_required) | ||
| 667 | rblocks += gfs2_rg_blocks(al); | ||
| 666 | 668 | ||
| 667 | error = gfs2_trans_begin(sdp, rblocks, | 669 | error = gfs2_trans_begin(sdp, rblocks, |
| 668 | PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); | 670 | PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); |
| @@ -696,13 +698,11 @@ out: | |||
| 696 | 698 | ||
| 697 | page_cache_release(page); | 699 | page_cache_release(page); |
| 698 | 700 | ||
| 699 | /* | 701 | gfs2_trans_end(sdp); |
| 700 | * XXX(truncate): the call below should probably be replaced with | ||
| 701 | * a call to the gfs2-specific truncate blocks helper to actually | ||
| 702 | * release disk blocks.. | ||
| 703 | */ | ||
| 704 | if (pos + len > ip->i_inode.i_size) | 702 | if (pos + len > ip->i_inode.i_size) |
| 705 | truncate_setsize(&ip->i_inode, ip->i_inode.i_size); | 703 | gfs2_trim_blocks(&ip->i_inode); |
| 704 | goto out_trans_fail; | ||
| 705 | |||
| 706 | out_endtrans: | 706 | out_endtrans: |
| 707 | gfs2_trans_end(sdp); | 707 | gfs2_trans_end(sdp); |
| 708 | out_trans_fail: | 708 | out_trans_fail: |
| @@ -802,10 +802,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, | |||
| 802 | page_cache_release(page); | 802 | page_cache_release(page); |
| 803 | 803 | ||
| 804 | if (copied) { | 804 | if (copied) { |
| 805 | if (inode->i_size < to) { | 805 | if (inode->i_size < to) |
| 806 | i_size_write(inode, to); | 806 | i_size_write(inode, to); |
| 807 | ip->i_disksize = inode->i_size; | ||
| 808 | } | ||
| 809 | gfs2_dinode_out(ip, di); | 807 | gfs2_dinode_out(ip, di); |
| 810 | mark_inode_dirty(inode); | 808 | mark_inode_dirty(inode); |
| 811 | } | 809 | } |
| @@ -876,8 +874,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, | |||
| 876 | 874 | ||
| 877 | ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); | 875 | ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); |
| 878 | if (ret > 0) { | 876 | if (ret > 0) { |
| 879 | if (inode->i_size > ip->i_disksize) | ||
| 880 | ip->i_disksize = inode->i_size; | ||
| 881 | gfs2_dinode_out(ip, dibh->b_data); | 877 | gfs2_dinode_out(ip, dibh->b_data); |
| 882 | mark_inode_dirty(inode); | 878 | mark_inode_dirty(inode); |
| 883 | } | 879 | } |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6f482809d1a3..5476c066d4ee 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
| @@ -50,7 +50,7 @@ struct strip_mine { | |||
| 50 | * @ip: the inode | 50 | * @ip: the inode |
| 51 | * @dibh: the dinode buffer | 51 | * @dibh: the dinode buffer |
| 52 | * @block: the block number that was allocated | 52 | * @block: the block number that was allocated |
| 53 | * @private: any locked page held by the caller process | 53 | * @page: The (optional) page. This is looked up if @page is NULL |
| 54 | * | 54 | * |
| 55 | * Returns: errno | 55 | * Returns: errno |
| 56 | */ | 56 | */ |
| @@ -109,8 +109,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 109 | /** | 109 | /** |
| 110 | * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big | 110 | * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big |
| 111 | * @ip: The GFS2 inode to unstuff | 111 | * @ip: The GFS2 inode to unstuff |
| 112 | * @unstuffer: the routine that handles unstuffing a non-zero length file | 112 | * @page: The (optional) page. This is looked up if the @page is NULL |
| 113 | * @private: private data for the unstuffer | ||
| 114 | * | 113 | * |
| 115 | * This routine unstuffs a dinode and returns it to a "normal" state such | 114 | * This routine unstuffs a dinode and returns it to a "normal" state such |
| 116 | * that the height can be grown in the traditional way. | 115 | * that the height can be grown in the traditional way. |
| @@ -132,7 +131,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) | |||
| 132 | if (error) | 131 | if (error) |
| 133 | goto out; | 132 | goto out; |
| 134 | 133 | ||
| 135 | if (ip->i_disksize) { | 134 | if (i_size_read(&ip->i_inode)) { |
| 136 | /* Get a free block, fill it with the stuffed data, | 135 | /* Get a free block, fill it with the stuffed data, |
| 137 | and write it out to disk */ | 136 | and write it out to disk */ |
| 138 | 137 | ||
| @@ -161,7 +160,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) | |||
| 161 | di = (struct gfs2_dinode *)dibh->b_data; | 160 | di = (struct gfs2_dinode *)dibh->b_data; |
| 162 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); | 161 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); |
| 163 | 162 | ||
| 164 | if (ip->i_disksize) { | 163 | if (i_size_read(&ip->i_inode)) { |
| 165 | *(__be64 *)(di + 1) = cpu_to_be64(block); | 164 | *(__be64 *)(di + 1) = cpu_to_be64(block); |
| 166 | gfs2_add_inode_blocks(&ip->i_inode, 1); | 165 | gfs2_add_inode_blocks(&ip->i_inode, 1); |
| 167 | di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); | 166 | di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); |
| @@ -885,83 +884,14 @@ out: | |||
| 885 | } | 884 | } |
| 886 | 885 | ||
| 887 | /** | 886 | /** |
| 888 | * do_grow - Make a file look bigger than it is | ||
| 889 | * @ip: the inode | ||
| 890 | * @size: the size to set the file to | ||
| 891 | * | ||
| 892 | * Called with an exclusive lock on @ip. | ||
| 893 | * | ||
| 894 | * Returns: errno | ||
| 895 | */ | ||
| 896 | |||
| 897 | static int do_grow(struct gfs2_inode *ip, u64 size) | ||
| 898 | { | ||
| 899 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
| 900 | struct gfs2_alloc *al; | ||
| 901 | struct buffer_head *dibh; | ||
| 902 | int error; | ||
| 903 | |||
| 904 | al = gfs2_alloc_get(ip); | ||
| 905 | if (!al) | ||
| 906 | return -ENOMEM; | ||
| 907 | |||
| 908 | error = gfs2_quota_lock_check(ip); | ||
| 909 | if (error) | ||
| 910 | goto out; | ||
| 911 | |||
| 912 | al->al_requested = sdp->sd_max_height + RES_DATA; | ||
| 913 | |||
| 914 | error = gfs2_inplace_reserve(ip); | ||
| 915 | if (error) | ||
| 916 | goto out_gunlock_q; | ||
| 917 | |||
| 918 | error = gfs2_trans_begin(sdp, | ||
| 919 | sdp->sd_max_height + al->al_rgd->rd_length + | ||
| 920 | RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0); | ||
| 921 | if (error) | ||
| 922 | goto out_ipres; | ||
| 923 | |||
| 924 | error = gfs2_meta_inode_buffer(ip, &dibh); | ||
| 925 | if (error) | ||
| 926 | goto out_end_trans; | ||
| 927 | |||
| 928 | if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { | ||
| 929 | if (gfs2_is_stuffed(ip)) { | ||
| 930 | error = gfs2_unstuff_dinode(ip, NULL); | ||
| 931 | if (error) | ||
| 932 | goto out_brelse; | ||
| 933 | } | ||
| 934 | } | ||
| 935 | |||
| 936 | ip->i_disksize = size; | ||
| 937 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | ||
| 938 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
| 939 | gfs2_dinode_out(ip, dibh->b_data); | ||
| 940 | |||
| 941 | out_brelse: | ||
| 942 | brelse(dibh); | ||
| 943 | out_end_trans: | ||
| 944 | gfs2_trans_end(sdp); | ||
| 945 | out_ipres: | ||
| 946 | gfs2_inplace_release(ip); | ||
| 947 | out_gunlock_q: | ||
| 948 | gfs2_quota_unlock(ip); | ||
| 949 | out: | ||
| 950 | gfs2_alloc_put(ip); | ||
| 951 | return error; | ||
| 952 | } | ||
| 953 | |||
| 954 | |||
| 955 | /** | ||
| 956 | * gfs2_block_truncate_page - Deal with zeroing out data for truncate | 887 | * gfs2_block_truncate_page - Deal with zeroing out data for truncate |
| 957 | * | 888 | * |
| 958 | * This is partly borrowed from ext3. | 889 | * This is partly borrowed from ext3. |
| 959 | */ | 890 | */ |
| 960 | static int gfs2_block_truncate_page(struct address_space *mapping) | 891 | static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) |
| 961 | { | 892 | { |
| 962 | struct inode *inode = mapping->host; | 893 | struct inode *inode = mapping->host; |
| 963 | struct gfs2_inode *ip = GFS2_I(inode); | 894 | struct gfs2_inode *ip = GFS2_I(inode); |
| 964 | loff_t from = inode->i_size; | ||
| 965 | unsigned long index = from >> PAGE_CACHE_SHIFT; | 895 | unsigned long index = from >> PAGE_CACHE_SHIFT; |
| 966 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 896 | unsigned offset = from & (PAGE_CACHE_SIZE-1); |
| 967 | unsigned blocksize, iblock, length, pos; | 897 | unsigned blocksize, iblock, length, pos; |
| @@ -1023,9 +953,11 @@ unlock: | |||
| 1023 | return err; | 953 | return err; |
| 1024 | } | 954 | } |
| 1025 | 955 | ||
| 1026 | static int trunc_start(struct gfs2_inode *ip, u64 size) | 956 | static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize) |
| 1027 | { | 957 | { |
| 1028 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 958 | struct gfs2_inode *ip = GFS2_I(inode); |
| 959 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
| 960 | struct address_space *mapping = inode->i_mapping; | ||
| 1029 | struct buffer_head *dibh; | 961 | struct buffer_head *dibh; |
| 1030 | int journaled = gfs2_is_jdata(ip); | 962 | int journaled = gfs2_is_jdata(ip); |
| 1031 | int error; | 963 | int error; |
| @@ -1039,31 +971,26 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) | |||
| 1039 | if (error) | 971 | if (error) |
| 1040 | goto out; | 972 | goto out; |
| 1041 | 973 | ||
| 974 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
| 975 | |||
| 1042 | if (gfs2_is_stuffed(ip)) { | 976 | if (gfs2_is_stuffed(ip)) { |
| 1043 | u64 dsize = size + sizeof(struct gfs2_dinode); | 977 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); |
| 1044 | ip->i_disksize = size; | ||
| 1045 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | ||
| 1046 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
| 1047 | gfs2_dinode_out(ip, dibh->b_data); | ||
| 1048 | if (dsize > dibh->b_size) | ||
| 1049 | dsize = dibh->b_size; | ||
| 1050 | gfs2_buffer_clear_tail(dibh, dsize); | ||
| 1051 | error = 1; | ||
| 1052 | } else { | 978 | } else { |
| 1053 | if (size & (u64)(sdp->sd_sb.sb_bsize - 1)) | 979 | if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) { |
| 1054 | error = gfs2_block_truncate_page(ip->i_inode.i_mapping); | 980 | error = gfs2_block_truncate_page(mapping, newsize); |
| 1055 | 981 | if (error) | |
| 1056 | if (!error) { | 982 | goto out_brelse; |
| 1057 | ip->i_disksize = size; | ||
| 1058 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | ||
| 1059 | ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; | ||
| 1060 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
| 1061 | gfs2_dinode_out(ip, dibh->b_data); | ||
| 1062 | } | 983 | } |
| 984 | ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; | ||
| 1063 | } | 985 | } |
| 1064 | 986 | ||
| 1065 | brelse(dibh); | 987 | i_size_write(inode, newsize); |
| 988 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | ||
| 989 | gfs2_dinode_out(ip, dibh->b_data); | ||
| 1066 | 990 | ||
| 991 | truncate_pagecache(inode, oldsize, newsize); | ||
| 992 | out_brelse: | ||
| 993 | brelse(dibh); | ||
| 1067 | out: | 994 | out: |
| 1068 | gfs2_trans_end(sdp); | 995 | gfs2_trans_end(sdp); |
| 1069 | return error; | 996 | return error; |
| @@ -1123,7 +1050,7 @@ static int trunc_end(struct gfs2_inode *ip) | |||
| 1123 | if (error) | 1050 | if (error) |
| 1124 | goto out; | 1051 | goto out; |
| 1125 | 1052 | ||
| 1126 | if (!ip->i_disksize) { | 1053 | if (!i_size_read(&ip->i_inode)) { |
| 1127 | ip->i_height = 0; | 1054 | ip->i_height = 0; |
| 1128 | ip->i_goal = ip->i_no_addr; | 1055 | ip->i_goal = ip->i_no_addr; |
| 1129 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); | 1056 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); |
| @@ -1143,92 +1070,154 @@ out: | |||
| 1143 | 1070 | ||
| 1144 | /** | 1071 | /** |
| 1145 | * do_shrink - make a file smaller | 1072 | * do_shrink - make a file smaller |
| 1146 | * @ip: the inode | 1073 | * @inode: the inode |
| 1147 | * @size: the size to make the file | 1074 | * @oldsize: the current inode size |
| 1148 | * @truncator: function to truncate the last partial block | 1075 | * @newsize: the size to make the file |
| 1149 | * | 1076 | * |
| 1150 | * Called with an exclusive lock on @ip. | 1077 | * Called with an exclusive lock on @inode. The @newsize must |
| 1078 | * be equal to or smaller than the current inode size. | ||
| 1151 | * | 1079 | * |
| 1152 | * Returns: errno | 1080 | * Returns: errno |
| 1153 | */ | 1081 | */ |
| 1154 | 1082 | ||
| 1155 | static int do_shrink(struct gfs2_inode *ip, u64 size) | 1083 | static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize) |
| 1156 | { | 1084 | { |
| 1085 | struct gfs2_inode *ip = GFS2_I(inode); | ||
| 1157 | int error; | 1086 | int error; |
| 1158 | 1087 | ||
| 1159 | error = trunc_start(ip, size); | 1088 | error = trunc_start(inode, oldsize, newsize); |
| 1160 | if (error < 0) | 1089 | if (error < 0) |
| 1161 | return error; | 1090 | return error; |
| 1162 | if (error > 0) | 1091 | if (gfs2_is_stuffed(ip)) |
| 1163 | return 0; | 1092 | return 0; |
| 1164 | 1093 | ||
| 1165 | error = trunc_dealloc(ip, size); | 1094 | error = trunc_dealloc(ip, newsize); |
| 1166 | if (!error) | 1095 | if (error == 0) |
| 1167 | error = trunc_end(ip); | 1096 | error = trunc_end(ip); |
| 1168 | 1097 | ||
| 1169 | return error; | 1098 | return error; |
| 1170 | } | 1099 | } |
| 1171 | 1100 | ||
| 1172 | static int do_touch(struct gfs2_inode *ip, u64 size) | 1101 | void gfs2_trim_blocks(struct inode *inode) |
| 1173 | { | 1102 | { |
| 1174 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1103 | u64 size = inode->i_size; |
| 1104 | int ret; | ||
| 1105 | |||
| 1106 | ret = do_shrink(inode, size, size); | ||
| 1107 | WARN_ON(ret != 0); | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | /** | ||
| 1111 | * do_grow - Touch and update inode size | ||
| 1112 | * @inode: The inode | ||
| 1113 | * @size: The new size | ||
| 1114 | * | ||
| 1115 | * This function updates the timestamps on the inode and | ||
| 1116 | * may also increase the size of the inode. This function | ||
| 1117 | * must not be called with @size any smaller than the current | ||
| 1118 | * inode size. | ||
| 1119 | * | ||
| 1120 | * Although it is not strictly required to unstuff files here, | ||
| 1121 | * earlier versions of GFS2 have a bug in the stuffed file reading | ||
| 1122 | * code which will result in a buffer overrun if the size is larger | ||
| 1123 | * than the max stuffed file size. In order to prevent this from | ||
| 1124 | * occurring, such files are unstuffed, but in other cases we can | ||
| 1125 | * just update the inode size directly. | ||
| 1126 | * | ||
| 1127 | * Returns: 0 on success, or -ve on error | ||
| 1128 | */ | ||
| 1129 | |||
| 1130 | static int do_grow(struct inode *inode, u64 size) | ||
| 1131 | { | ||
| 1132 | struct gfs2_inode *ip = GFS2_I(inode); | ||
| 1133 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
| 1175 | struct buffer_head *dibh; | 1134 | struct buffer_head *dibh; |
| 1135 | struct gfs2_alloc *al = NULL; | ||
| 1176 | int error; | 1136 | int error; |
| 1177 | 1137 | ||
| 1178 | error = gfs2_trans_begin(sdp, RES_DINODE, 0); | 1138 | if (gfs2_is_stuffed(ip) && |
| 1139 | (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) { | ||
| 1140 | al = gfs2_alloc_get(ip); | ||
| 1141 | if (al == NULL) | ||
| 1142 | return -ENOMEM; | ||
| 1143 | |||
| 1144 | error = gfs2_quota_lock_check(ip); | ||
| 1145 | if (error) | ||
| 1146 | goto do_grow_alloc_put; | ||
| 1147 | |||
| 1148 | al->al_requested = 1; | ||
| 1149 | error = gfs2_inplace_reserve(ip); | ||
| 1150 | if (error) | ||
| 1151 | goto do_grow_qunlock; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); | ||
| 1179 | if (error) | 1155 | if (error) |
| 1180 | return error; | 1156 | goto do_grow_release; |
| 1181 | 1157 | ||
| 1182 | down_write(&ip->i_rw_mutex); | 1158 | if (al) { |
| 1159 | error = gfs2_unstuff_dinode(ip, NULL); | ||
| 1160 | if (error) | ||
| 1161 | goto do_end_trans; | ||
| 1162 | } | ||
| 1183 | 1163 | ||
| 1184 | error = gfs2_meta_inode_buffer(ip, &dibh); | 1164 | error = gfs2_meta_inode_buffer(ip, &dibh); |
| 1185 | if (error) | 1165 | if (error) |
| 1186 | goto do_touch_out; | 1166 | goto do_end_trans; |
| 1187 | 1167 | ||
| 1168 | i_size_write(inode, size); | ||
| 1188 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 1169 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
| 1189 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1170 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| 1190 | gfs2_dinode_out(ip, dibh->b_data); | 1171 | gfs2_dinode_out(ip, dibh->b_data); |
| 1191 | brelse(dibh); | 1172 | brelse(dibh); |
| 1192 | 1173 | ||
| 1193 | do_touch_out: | 1174 | do_end_trans: |
| 1194 | up_write(&ip->i_rw_mutex); | ||
| 1195 | gfs2_trans_end(sdp); | 1175 | gfs2_trans_end(sdp); |
| 1176 | do_grow_release: | ||
| 1177 | if (al) { | ||
| 1178 | gfs2_inplace_release(ip); | ||
| 1179 | do_grow_qunlock: | ||
| 1180 | gfs2_quota_unlock(ip); | ||
| 1181 | do_grow_alloc_put: | ||
| 1182 | gfs2_alloc_put(ip); | ||
| 1183 | } | ||
| 1196 | return error; | 1184 | return error; |
| 1197 | } | 1185 | } |
| 1198 | 1186 | ||
| 1199 | /** | 1187 | /** |
| 1200 | * gfs2_truncatei - make a file a given size | 1188 | * gfs2_setattr_size - make a file a given size |
| 1201 | * @ip: the inode | 1189 | * @inode: the inode |
| 1202 | * @size: the size to make the file | 1190 | * @newsize: the size to make the file |
| 1203 | * @truncator: function to truncate the last partial block | ||
| 1204 | * | 1191 | * |
| 1205 | * The file size can grow, shrink, or stay the same size. | 1192 | * The file size can grow, shrink, or stay the same size. This |
| 1193 | * is called holding i_mutex and an exclusive glock on the inode | ||
| 1194 | * in question. | ||
| 1206 | * | 1195 | * |
| 1207 | * Returns: errno | 1196 | * Returns: errno |
| 1208 | */ | 1197 | */ |
| 1209 | 1198 | ||
| 1210 | int gfs2_truncatei(struct gfs2_inode *ip, u64 size) | 1199 | int gfs2_setattr_size(struct inode *inode, u64 newsize) |
| 1211 | { | 1200 | { |
| 1212 | int error; | 1201 | int ret; |
| 1202 | u64 oldsize; | ||
| 1213 | 1203 | ||
| 1214 | if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode))) | 1204 | BUG_ON(!S_ISREG(inode->i_mode)); |
| 1215 | return -EINVAL; | ||
| 1216 | 1205 | ||
| 1217 | if (size > ip->i_disksize) | 1206 | ret = inode_newsize_ok(inode, newsize); |
| 1218 | error = do_grow(ip, size); | 1207 | if (ret) |
| 1219 | else if (size < ip->i_disksize) | 1208 | return ret; |
| 1220 | error = do_shrink(ip, size); | ||
| 1221 | else | ||
| 1222 | /* update time stamps */ | ||
| 1223 | error = do_touch(ip, size); | ||
| 1224 | 1209 | ||
| 1225 | return error; | 1210 | oldsize = inode->i_size; |
| 1211 | if (newsize >= oldsize) | ||
| 1212 | return do_grow(inode, newsize); | ||
| 1213 | |||
| 1214 | return do_shrink(inode, oldsize, newsize); | ||
| 1226 | } | 1215 | } |
| 1227 | 1216 | ||
| 1228 | int gfs2_truncatei_resume(struct gfs2_inode *ip) | 1217 | int gfs2_truncatei_resume(struct gfs2_inode *ip) |
| 1229 | { | 1218 | { |
| 1230 | int error; | 1219 | int error; |
| 1231 | error = trunc_dealloc(ip, ip->i_disksize); | 1220 | error = trunc_dealloc(ip, i_size_read(&ip->i_inode)); |
| 1232 | if (!error) | 1221 | if (!error) |
| 1233 | error = trunc_end(ip); | 1222 | error = trunc_end(ip); |
| 1234 | return error; | 1223 | return error; |
| @@ -1269,7 +1258,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | |||
| 1269 | 1258 | ||
| 1270 | shift = sdp->sd_sb.sb_bsize_shift; | 1259 | shift = sdp->sd_sb.sb_bsize_shift; |
| 1271 | BUG_ON(gfs2_is_dir(ip)); | 1260 | BUG_ON(gfs2_is_dir(ip)); |
| 1272 | end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; | 1261 | end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; |
| 1273 | lblock = offset >> shift; | 1262 | lblock = offset >> shift; |
| 1274 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; | 1263 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; |
| 1275 | if (lblock_stop > end_of_file) | 1264 | if (lblock_stop > end_of_file) |
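The bmap.c rework above replaces gfs2_truncatei()/do_touch() with gfs2_setattr_size(), which validates the requested size and then dispatches to do_grow() or do_shrink(). The following is a compilable sketch of just that dispatch decision; check_newsize() and the do_* helpers are simplified stand-ins for inode_newsize_ok() and the kernel functions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers, illustration only. */
static int check_newsize(uint64_t newsize) { (void)newsize; return 0; }
static int do_grow(uint64_t newsize)
{
        printf("grow/touch to %llu\n", (unsigned long long)newsize);
        return 0;
}
static int do_shrink(uint64_t oldsize, uint64_t newsize)
{
        printf("shrink %llu -> %llu\n", (unsigned long long)oldsize, (unsigned long long)newsize);
        return 0;
}

/* Mirrors the new gfs2_setattr_size() control flow: growing and "same size"
 * both go through do_grow() (which also updates timestamps); shrinking goes
 * through do_shrink(). */
static int setattr_size(uint64_t oldsize, uint64_t newsize)
{
        int ret = check_newsize(newsize);
        if (ret)
                return ret;
        if (newsize >= oldsize)
                return do_grow(newsize);
        return do_shrink(oldsize, newsize);
}

int main(void)
{
        setattr_size(4096, 8192);
        setattr_size(8192, 100);
        return 0;
}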
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h index a20a5213135a..42fea03e2bd9 100644 --- a/fs/gfs2/bmap.h +++ b/fs/gfs2/bmap.h | |||
| @@ -44,14 +44,16 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip, | |||
| 44 | } | 44 | } |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page); | 47 | extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page); |
| 48 | int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create); | 48 | extern int gfs2_block_map(struct inode *inode, sector_t lblock, |
| 49 | int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen); | 49 | struct buffer_head *bh, int create); |
| 50 | 50 | extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, | |
| 51 | int gfs2_truncatei(struct gfs2_inode *ip, u64 size); | 51 | u64 *dblock, unsigned *extlen); |
| 52 | int gfs2_truncatei_resume(struct gfs2_inode *ip); | 52 | extern int gfs2_setattr_size(struct inode *inode, u64 size); |
| 53 | int gfs2_file_dealloc(struct gfs2_inode *ip); | 53 | extern void gfs2_trim_blocks(struct inode *inode); |
| 54 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | 54 | extern int gfs2_truncatei_resume(struct gfs2_inode *ip); |
| 55 | unsigned int len); | 55 | extern int gfs2_file_dealloc(struct gfs2_inode *ip); |
| 56 | extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | ||
| 57 | unsigned int len); | ||
| 56 | 58 | ||
| 57 | #endif /* __BMAP_DOT_H__ */ | 59 | #endif /* __BMAP_DOT_H__ */ |
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index bb7907bde3d8..6798755b3858 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c | |||
| @@ -49,7 +49,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 49 | ip = GFS2_I(inode); | 49 | ip = GFS2_I(inode); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | if (sdp->sd_args.ar_localcaching) | 52 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) |
| 53 | goto valid; | 53 | goto valid; |
| 54 | 54 | ||
| 55 | had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); | 55 | had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index b9dd88a78dd4..5c356d09c321 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
| @@ -79,6 +79,9 @@ | |||
| 79 | #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1) | 79 | #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1) |
| 80 | #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1)) | 80 | #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1)) |
| 81 | 81 | ||
| 82 | struct qstr gfs2_qdot __read_mostly; | ||
| 83 | struct qstr gfs2_qdotdot __read_mostly; | ||
| 84 | |||
| 82 | typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len, | 85 | typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len, |
| 83 | u64 leaf_no, void *data); | 86 | u64 leaf_no, void *data); |
| 84 | typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent, | 87 | typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent, |
| @@ -127,8 +130,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf, | |||
| 127 | 130 | ||
| 128 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 131 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| 129 | memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); | 132 | memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); |
| 130 | if (ip->i_disksize < offset + size) | 133 | if (ip->i_inode.i_size < offset + size) |
| 131 | ip->i_disksize = offset + size; | 134 | i_size_write(&ip->i_inode, offset + size); |
| 132 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 135 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
| 133 | gfs2_dinode_out(ip, dibh->b_data); | 136 | gfs2_dinode_out(ip, dibh->b_data); |
| 134 | 137 | ||
| @@ -225,8 +228,8 @@ out: | |||
| 225 | if (error) | 228 | if (error) |
| 226 | return error; | 229 | return error; |
| 227 | 230 | ||
| 228 | if (ip->i_disksize < offset + copied) | 231 | if (ip->i_inode.i_size < offset + copied) |
| 229 | ip->i_disksize = offset + copied; | 232 | i_size_write(&ip->i_inode, offset + copied); |
| 230 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 233 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
| 231 | 234 | ||
| 232 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 235 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| @@ -275,12 +278,13 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset, | |||
| 275 | unsigned int o; | 278 | unsigned int o; |
| 276 | int copied = 0; | 279 | int copied = 0; |
| 277 | int error = 0; | 280 | int error = 0; |
| 281 | u64 disksize = i_size_read(&ip->i_inode); | ||
| 278 | 282 | ||
| 279 | if (offset >= ip->i_disksize) | 283 | if (offset >= disksize) |
| 280 | return 0; | 284 | return 0; |
| 281 | 285 | ||
| 282 | if (offset + size > ip->i_disksize) | 286 | if (offset + size > disksize) |
| 283 | size = ip->i_disksize - offset; | 287 | size = disksize - offset; |
| 284 | 288 | ||
| 285 | if (!size) | 289 | if (!size) |
| 286 | return 0; | 290 | return 0; |
| @@ -727,7 +731,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode, | |||
| 727 | unsigned hsize = 1 << ip->i_depth; | 731 | unsigned hsize = 1 << ip->i_depth; |
| 728 | unsigned index; | 732 | unsigned index; |
| 729 | u64 ln; | 733 | u64 ln; |
| 730 | if (hsize * sizeof(u64) != ip->i_disksize) { | 734 | if (hsize * sizeof(u64) != i_size_read(inode)) { |
| 731 | gfs2_consist_inode(ip); | 735 | gfs2_consist_inode(ip); |
| 732 | return ERR_PTR(-EIO); | 736 | return ERR_PTR(-EIO); |
| 733 | } | 737 | } |
| @@ -879,7 +883,7 @@ static int dir_make_exhash(struct inode *inode) | |||
| 879 | for (x = sdp->sd_hash_ptrs; x--; lp++) | 883 | for (x = sdp->sd_hash_ptrs; x--; lp++) |
| 880 | *lp = cpu_to_be64(bn); | 884 | *lp = cpu_to_be64(bn); |
| 881 | 885 | ||
| 882 | dip->i_disksize = sdp->sd_sb.sb_bsize / 2; | 886 | i_size_write(inode, sdp->sd_sb.sb_bsize / 2); |
| 883 | gfs2_add_inode_blocks(&dip->i_inode, 1); | 887 | gfs2_add_inode_blocks(&dip->i_inode, 1); |
| 884 | dip->i_diskflags |= GFS2_DIF_EXHASH; | 888 | dip->i_diskflags |= GFS2_DIF_EXHASH; |
| 885 | 889 | ||
| @@ -1057,11 +1061,12 @@ static int dir_double_exhash(struct gfs2_inode *dip) | |||
| 1057 | u64 *buf; | 1061 | u64 *buf; |
| 1058 | u64 *from, *to; | 1062 | u64 *from, *to; |
| 1059 | u64 block; | 1063 | u64 block; |
| 1064 | u64 disksize = i_size_read(&dip->i_inode); | ||
| 1060 | int x; | 1065 | int x; |
| 1061 | int error = 0; | 1066 | int error = 0; |
| 1062 | 1067 | ||
| 1063 | hsize = 1 << dip->i_depth; | 1068 | hsize = 1 << dip->i_depth; |
| 1064 | if (hsize * sizeof(u64) != dip->i_disksize) { | 1069 | if (hsize * sizeof(u64) != disksize) { |
| 1065 | gfs2_consist_inode(dip); | 1070 | gfs2_consist_inode(dip); |
| 1066 | return -EIO; | 1071 | return -EIO; |
| 1067 | } | 1072 | } |
| @@ -1072,7 +1077,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) | |||
| 1072 | if (!buf) | 1077 | if (!buf) |
| 1073 | return -ENOMEM; | 1078 | return -ENOMEM; |
| 1074 | 1079 | ||
| 1075 | for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { | 1080 | for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) { |
| 1076 | error = gfs2_dir_read_data(dip, (char *)buf, | 1081 | error = gfs2_dir_read_data(dip, (char *)buf, |
| 1077 | block * sdp->sd_hash_bsize, | 1082 | block * sdp->sd_hash_bsize, |
| 1078 | sdp->sd_hash_bsize, 1); | 1083 | sdp->sd_hash_bsize, 1); |
| @@ -1370,7 +1375,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, | |||
| 1370 | unsigned depth = 0; | 1375 | unsigned depth = 0; |
| 1371 | 1376 | ||
| 1372 | hsize = 1 << dip->i_depth; | 1377 | hsize = 1 << dip->i_depth; |
| 1373 | if (hsize * sizeof(u64) != dip->i_disksize) { | 1378 | if (hsize * sizeof(u64) != i_size_read(inode)) { |
| 1374 | gfs2_consist_inode(dip); | 1379 | gfs2_consist_inode(dip); |
| 1375 | return -EIO; | 1380 | return -EIO; |
| 1376 | } | 1381 | } |
| @@ -1784,7 +1789,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data) | |||
| 1784 | int error = 0; | 1789 | int error = 0; |
| 1785 | 1790 | ||
| 1786 | hsize = 1 << dip->i_depth; | 1791 | hsize = 1 << dip->i_depth; |
| 1787 | if (hsize * sizeof(u64) != dip->i_disksize) { | 1792 | if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) { |
| 1788 | gfs2_consist_inode(dip); | 1793 | gfs2_consist_inode(dip); |
| 1789 | return -EIO; | 1794 | return -EIO; |
| 1790 | } | 1795 | } |
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h index 4f919440c3be..a98f644bd3df 100644 --- a/fs/gfs2/dir.h +++ b/fs/gfs2/dir.h | |||
| @@ -17,23 +17,24 @@ struct inode; | |||
| 17 | struct gfs2_inode; | 17 | struct gfs2_inode; |
| 18 | struct gfs2_inum; | 18 | struct gfs2_inum; |
| 19 | 19 | ||
| 20 | struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *filename); | 20 | extern struct inode *gfs2_dir_search(struct inode *dir, |
| 21 | int gfs2_dir_check(struct inode *dir, const struct qstr *filename, | 21 | const struct qstr *filename); |
| 22 | const struct gfs2_inode *ip); | 22 | extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename, |
| 23 | int gfs2_dir_add(struct inode *inode, const struct qstr *filename, | 23 | const struct gfs2_inode *ip); |
| 24 | const struct gfs2_inode *ip, unsigned int type); | 24 | extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename, |
| 25 | int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); | 25 | const struct gfs2_inode *ip, unsigned int type); |
| 26 | int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, | 26 | extern int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); |
| 27 | filldir_t filldir); | 27 | extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, |
| 28 | int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, | 28 | filldir_t filldir); |
| 29 | const struct gfs2_inode *nip, unsigned int new_type); | 29 | extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, |
| 30 | const struct gfs2_inode *nip, unsigned int new_type); | ||
| 30 | 31 | ||
| 31 | int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); | 32 | extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); |
| 32 | 33 | ||
| 33 | int gfs2_diradd_alloc_required(struct inode *dir, | 34 | extern int gfs2_diradd_alloc_required(struct inode *dir, |
| 34 | const struct qstr *filename); | 35 | const struct qstr *filename); |
| 35 | int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, | 36 | extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, |
| 36 | struct buffer_head **bhp); | 37 | struct buffer_head **bhp); |
| 37 | 38 | ||
| 38 | static inline u32 gfs2_disk_hash(const char *data, int len) | 39 | static inline u32 gfs2_disk_hash(const char *data, int len) |
| 39 | { | 40 | { |
| @@ -61,4 +62,7 @@ static inline void gfs2_qstr2dirent(const struct qstr *name, u16 reclen, struct | |||
| 61 | memcpy(dent + 1, name->name, name->len); | 62 | memcpy(dent + 1, name->name, name->len); |
| 62 | } | 63 | } |
| 63 | 64 | ||
| 65 | extern struct qstr gfs2_qdot; | ||
| 66 | extern struct qstr gfs2_qdotdot; | ||
| 67 | |||
| 64 | #endif /* __DIR_DOT_H__ */ | 68 | #endif /* __DIR_DOT_H__ */ |
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index dfe237a3f8ad..06d582732d34 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c | |||
| @@ -126,16 +126,9 @@ static int gfs2_get_name(struct dentry *parent, char *name, | |||
| 126 | 126 | ||
| 127 | static struct dentry *gfs2_get_parent(struct dentry *child) | 127 | static struct dentry *gfs2_get_parent(struct dentry *child) |
| 128 | { | 128 | { |
| 129 | struct qstr dotdot; | ||
| 130 | struct dentry *dentry; | 129 | struct dentry *dentry; |
| 131 | 130 | ||
| 132 | /* | 131 | dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); |
| 133 | * XXX(hch): it would be a good idea to keep this around as a | ||
| 134 | * static variable. | ||
| 135 | */ | ||
| 136 | gfs2_str2qstr(&dotdot, ".."); | ||
| 137 | |||
| 138 | dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &dotdot, 1)); | ||
| 139 | if (!IS_ERR(dentry)) | 132 | if (!IS_ERR(dentry)) |
| 140 | dentry->d_op = &gfs2_dops; | 133 | dentry->d_op = &gfs2_dops; |
| 141 | return dentry; | 134 | return dentry; |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 4edd662c8232..237ee6a940df 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
| @@ -382,8 +382,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 382 | rblocks = RES_DINODE + ind_blocks; | 382 | rblocks = RES_DINODE + ind_blocks; |
| 383 | if (gfs2_is_jdata(ip)) | 383 | if (gfs2_is_jdata(ip)) |
| 384 | rblocks += data_blocks ? data_blocks : 1; | 384 | rblocks += data_blocks ? data_blocks : 1; |
| 385 | if (ind_blocks || data_blocks) | 385 | if (ind_blocks || data_blocks) { |
| 386 | rblocks += RES_STATFS + RES_QUOTA; | 386 | rblocks += RES_STATFS + RES_QUOTA; |
| 387 | rblocks += gfs2_rg_blocks(al); | ||
| 388 | } | ||
| 387 | ret = gfs2_trans_begin(sdp, rblocks, 0); | 389 | ret = gfs2_trans_begin(sdp, rblocks, 0); |
| 388 | if (ret) | 390 | if (ret) |
| 389 | goto out_trans_fail; | 391 | goto out_trans_fail; |
| @@ -491,7 +493,7 @@ static int gfs2_open(struct inode *inode, struct file *file) | |||
| 491 | goto fail; | 493 | goto fail; |
| 492 | 494 | ||
| 493 | if (!(file->f_flags & O_LARGEFILE) && | 495 | if (!(file->f_flags & O_LARGEFILE) && |
| 494 | ip->i_disksize > MAX_NON_LFS) { | 496 | i_size_read(inode) > MAX_NON_LFS) { |
| 495 | error = -EOVERFLOW; | 497 | error = -EOVERFLOW; |
| 496 | goto fail_gunlock; | 498 | goto fail_gunlock; |
| 497 | } | 499 | } |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9adf8f924e08..87778857f099 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
| @@ -441,6 +441,8 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) | |||
| 441 | else | 441 | else |
| 442 | gfs2_glock_put_nolock(gl); | 442 | gfs2_glock_put_nolock(gl); |
| 443 | } | 443 | } |
| 444 | if (held1 && held2 && list_empty(&gl->gl_holders)) | ||
| 445 | clear_bit(GLF_QUEUED, &gl->gl_flags); | ||
| 444 | 446 | ||
| 445 | gl->gl_state = new_state; | 447 | gl->gl_state = new_state; |
| 446 | gl->gl_tchange = jiffies; | 448 | gl->gl_tchange = jiffies; |
| @@ -1012,6 +1014,7 @@ fail: | |||
| 1012 | if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) | 1014 | if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) |
| 1013 | insert_pt = &gh2->gh_list; | 1015 | insert_pt = &gh2->gh_list; |
| 1014 | } | 1016 | } |
| 1017 | set_bit(GLF_QUEUED, &gl->gl_flags); | ||
| 1015 | if (likely(insert_pt == NULL)) { | 1018 | if (likely(insert_pt == NULL)) { |
| 1016 | list_add_tail(&gh->gh_list, &gl->gl_holders); | 1019 | list_add_tail(&gh->gh_list, &gl->gl_holders); |
| 1017 | if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) | 1020 | if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) |
| @@ -1310,10 +1313,12 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) | |||
| 1310 | 1313 | ||
| 1311 | gfs2_glock_hold(gl); | 1314 | gfs2_glock_hold(gl); |
| 1312 | holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; | 1315 | holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; |
| 1313 | if (time_before(now, holdtime)) | 1316 | if (test_bit(GLF_QUEUED, &gl->gl_flags)) { |
| 1314 | delay = holdtime - now; | 1317 | if (time_before(now, holdtime)) |
| 1315 | if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) | 1318 | delay = holdtime - now; |
| 1316 | delay = gl->gl_ops->go_min_hold_time; | 1319 | if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) |
| 1320 | delay = gl->gl_ops->go_min_hold_time; | ||
| 1321 | } | ||
| 1317 | 1322 | ||
| 1318 | spin_lock(&gl->gl_spin); | 1323 | spin_lock(&gl->gl_spin); |
| 1319 | handle_callback(gl, state, delay); | 1324 | handle_callback(gl, state, delay); |
| @@ -1512,7 +1517,7 @@ static void clear_glock(struct gfs2_glock *gl) | |||
| 1512 | spin_unlock(&lru_lock); | 1517 | spin_unlock(&lru_lock); |
| 1513 | 1518 | ||
| 1514 | spin_lock(&gl->gl_spin); | 1519 | spin_lock(&gl->gl_spin); |
| 1515 | if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) | 1520 | if (gl->gl_state != LM_ST_UNLOCKED) |
| 1516 | handle_callback(gl, LM_ST_UNLOCKED, 0); | 1521 | handle_callback(gl, LM_ST_UNLOCKED, 0); |
| 1517 | spin_unlock(&gl->gl_spin); | 1522 | spin_unlock(&gl->gl_spin); |
| 1518 | gfs2_glock_hold(gl); | 1523 | gfs2_glock_hold(gl); |
| @@ -1660,6 +1665,8 @@ static const char *gflags2str(char *buf, const unsigned long *gflags) | |||
| 1660 | *p++ = 'I'; | 1665 | *p++ = 'I'; |
| 1661 | if (test_bit(GLF_FROZEN, gflags)) | 1666 | if (test_bit(GLF_FROZEN, gflags)) |
| 1662 | *p++ = 'F'; | 1667 | *p++ = 'F'; |
| 1668 | if (test_bit(GLF_QUEUED, gflags)) | ||
| 1669 | *p++ = 'q'; | ||
| 1663 | *p = 0; | 1670 | *p = 0; |
| 1664 | return buf; | 1671 | return buf; |
| 1665 | } | 1672 | } |
| @@ -1776,10 +1783,12 @@ int __init gfs2_glock_init(void) | |||
| 1776 | } | 1783 | } |
| 1777 | #endif | 1784 | #endif |
| 1778 | 1785 | ||
| 1779 | glock_workqueue = create_workqueue("glock_workqueue"); | 1786 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER | |
| 1787 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | ||
| 1780 | if (IS_ERR(glock_workqueue)) | 1788 | if (IS_ERR(glock_workqueue)) |
| 1781 | return PTR_ERR(glock_workqueue); | 1789 | return PTR_ERR(glock_workqueue); |
| 1782 | gfs2_delete_workqueue = create_workqueue("delete_workqueue"); | 1790 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER | |
| 1791 | WQ_FREEZEABLE, 0); | ||
| 1783 | if (IS_ERR(gfs2_delete_workqueue)) { | 1792 | if (IS_ERR(gfs2_delete_workqueue)) { |
| 1784 | destroy_workqueue(glock_workqueue); | 1793 | destroy_workqueue(glock_workqueue); |
| 1785 | return PTR_ERR(gfs2_delete_workqueue); | 1794 | return PTR_ERR(gfs2_delete_workqueue); |
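The glock.c hunks introduce a GLF_QUEUED flag so that a remote demand for a glock is only delayed (to honour the minimum hold time) when a holder has actually been queued locally. A small stand-alone sketch of the delay decision in gfs2_glock_cb(), with the flag bits and jiffies values passed in as plain parameters rather than read from the glock:

#include <stdbool.h>
#include <stdio.h>

/* Returns how long (in jiffies) to delay servicing a remote callback.
 * Mirrors the patched gfs2_glock_cb(): no delay at all unless the glock
 * has queued holders (GLF_QUEUED); otherwise respect the minimum hold
 * time, and bump the delay when a reply is still pending. */
static unsigned long callback_delay(bool queued, bool reply_pending,
                                    unsigned long now, unsigned long holdtime,
                                    unsigned long min_hold_time)
{
        unsigned long delay = 0;

        if (queued) {
                if ((long)(holdtime - now) > 0)   /* time_before(now, holdtime) */
                        delay = holdtime - now;
                if (reply_pending)
                        delay = min_hold_time;
        }
        return delay;
}

int main(void)
{
        printf("%lu\n", callback_delay(true, false, 100, 150, 200));  /* 50 */
        printf("%lu\n", callback_delay(false, false, 100, 150, 200)); /* 0  */
        return 0;
}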
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 2bda1911b156..db1c26d6d220 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h | |||
| @@ -215,7 +215,7 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); | |||
| 215 | void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...); | 215 | void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...); |
| 216 | 216 | ||
| 217 | /** | 217 | /** |
| 218 | * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock | 218 | * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock |
| 219 | * @gl: the glock | 219 | * @gl: the glock |
| 220 | * @state: the state we're requesting | 220 | * @state: the state we're requesting |
| 221 | * @flags: the modifier flags | 221 | * @flags: the modifier flags |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 49f97d3bb690..0d149dcc04e5 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
| @@ -262,13 +262,12 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) | |||
| 262 | const struct gfs2_inode *ip = gl->gl_object; | 262 | const struct gfs2_inode *ip = gl->gl_object; |
| 263 | if (ip == NULL) | 263 | if (ip == NULL) |
| 264 | return 0; | 264 | return 0; |
| 265 | gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n", | 265 | gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n", |
| 266 | (unsigned long long)ip->i_no_formal_ino, | 266 | (unsigned long long)ip->i_no_formal_ino, |
| 267 | (unsigned long long)ip->i_no_addr, | 267 | (unsigned long long)ip->i_no_addr, |
| 268 | IF2DT(ip->i_inode.i_mode), ip->i_flags, | 268 | IF2DT(ip->i_inode.i_mode), ip->i_flags, |
| 269 | (unsigned int)ip->i_diskflags, | 269 | (unsigned int)ip->i_diskflags, |
| 270 | (unsigned long long)ip->i_inode.i_size, | 270 | (unsigned long long)i_size_read(&ip->i_inode)); |
| 271 | (unsigned long long)ip->i_disksize); | ||
| 272 | return 0; | 271 | return 0; |
| 273 | } | 272 | } |
| 274 | 273 | ||
| @@ -453,7 +452,6 @@ const struct gfs2_glock_operations *gfs2_glops_list[] = { | |||
| 453 | [LM_TYPE_META] = &gfs2_meta_glops, | 452 | [LM_TYPE_META] = &gfs2_meta_glops, |
| 454 | [LM_TYPE_INODE] = &gfs2_inode_glops, | 453 | [LM_TYPE_INODE] = &gfs2_inode_glops, |
| 455 | [LM_TYPE_RGRP] = &gfs2_rgrp_glops, | 454 | [LM_TYPE_RGRP] = &gfs2_rgrp_glops, |
| 456 | [LM_TYPE_NONDISK] = &gfs2_trans_glops, | ||
| 457 | [LM_TYPE_IOPEN] = &gfs2_iopen_glops, | 455 | [LM_TYPE_IOPEN] = &gfs2_iopen_glops, |
| 458 | [LM_TYPE_FLOCK] = &gfs2_flock_glops, | 456 | [LM_TYPE_FLOCK] = &gfs2_flock_glops, |
| 459 | [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, | 457 | [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index fdbf4b366fa5..764fbb49efc8 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
| @@ -196,6 +196,7 @@ enum { | |||
| 196 | GLF_REPLY_PENDING = 9, | 196 | GLF_REPLY_PENDING = 9, |
| 197 | GLF_INITIAL = 10, | 197 | GLF_INITIAL = 10, |
| 198 | GLF_FROZEN = 11, | 198 | GLF_FROZEN = 11, |
| 199 | GLF_QUEUED = 12, | ||
| 199 | }; | 200 | }; |
| 200 | 201 | ||
| 201 | struct gfs2_glock { | 202 | struct gfs2_glock { |
| @@ -267,7 +268,6 @@ struct gfs2_inode { | |||
| 267 | u64 i_no_formal_ino; | 268 | u64 i_no_formal_ino; |
| 268 | u64 i_generation; | 269 | u64 i_generation; |
| 269 | u64 i_eattr; | 270 | u64 i_eattr; |
| 270 | loff_t i_disksize; | ||
| 271 | unsigned long i_flags; /* GIF_... */ | 271 | unsigned long i_flags; /* GIF_... */ |
| 272 | struct gfs2_glock *i_gl; /* Move into i_gh? */ | 272 | struct gfs2_glock *i_gl; /* Move into i_gh? */ |
| 273 | struct gfs2_holder i_iopen_gh; | 273 | struct gfs2_holder i_iopen_gh; |
| @@ -416,11 +416,8 @@ struct gfs2_args { | |||
| 416 | char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */ | 416 | char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */ |
| 417 | char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */ | 417 | char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */ |
| 418 | unsigned int ar_spectator:1; /* Don't get a journal */ | 418 | unsigned int ar_spectator:1; /* Don't get a journal */ |
| 419 | unsigned int ar_ignore_local_fs:1; /* Ignore optimisations */ | ||
| 420 | unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */ | 419 | unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */ |
| 421 | unsigned int ar_localcaching:1; /* Local caching */ | ||
| 422 | unsigned int ar_debug:1; /* Oops on errors */ | 420 | unsigned int ar_debug:1; /* Oops on errors */ |
| 423 | unsigned int ar_upgrade:1; /* Upgrade ondisk format */ | ||
| 424 | unsigned int ar_posix_acl:1; /* Enable posix acls */ | 421 | unsigned int ar_posix_acl:1; /* Enable posix acls */ |
| 425 | unsigned int ar_quota:2; /* off/account/on */ | 422 | unsigned int ar_quota:2; /* off/account/on */ |
| 426 | unsigned int ar_suiddir:1; /* suiddir support */ | 423 | unsigned int ar_suiddir:1; /* suiddir support */ |
| @@ -497,7 +494,7 @@ struct gfs2_sb_host { | |||
| 497 | */ | 494 | */ |
| 498 | 495 | ||
| 499 | struct lm_lockstruct { | 496 | struct lm_lockstruct { |
| 500 | unsigned int ls_jid; | 497 | int ls_jid; |
| 501 | unsigned int ls_first; | 498 | unsigned int ls_first; |
| 502 | unsigned int ls_first_done; | 499 | unsigned int ls_first_done; |
| 503 | unsigned int ls_nodir; | 500 | unsigned int ls_nodir; |
| @@ -572,6 +569,7 @@ struct gfs2_sbd { | |||
| 572 | struct list_head sd_rindex_mru_list; | 569 | struct list_head sd_rindex_mru_list; |
| 573 | struct gfs2_rgrpd *sd_rindex_forward; | 570 | struct gfs2_rgrpd *sd_rindex_forward; |
| 574 | unsigned int sd_rgrps; | 571 | unsigned int sd_rgrps; |
| 572 | unsigned int sd_max_rg_data; | ||
| 575 | 573 | ||
| 576 | /* Journal index stuff */ | 574 | /* Journal index stuff */ |
| 577 | 575 | ||
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 08140f185a37..06370f8bd8cf 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
| @@ -359,8 +359,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) | |||
| 359 | * to do that. | 359 | * to do that. |
| 360 | */ | 360 | */ |
| 361 | ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); | 361 | ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); |
| 362 | ip->i_disksize = be64_to_cpu(str->di_size); | 362 | i_size_write(&ip->i_inode, be64_to_cpu(str->di_size)); |
| 363 | i_size_write(&ip->i_inode, ip->i_disksize); | ||
| 364 | gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); | 363 | gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); |
| 365 | atime.tv_sec = be64_to_cpu(str->di_atime); | 364 | atime.tv_sec = be64_to_cpu(str->di_atime); |
| 366 | atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); | 365 | atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); |
| @@ -1055,7 +1054,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) | |||
| 1055 | str->di_uid = cpu_to_be32(ip->i_inode.i_uid); | 1054 | str->di_uid = cpu_to_be32(ip->i_inode.i_uid); |
| 1056 | str->di_gid = cpu_to_be32(ip->i_inode.i_gid); | 1055 | str->di_gid = cpu_to_be32(ip->i_inode.i_gid); |
| 1057 | str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); | 1056 | str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); |
| 1058 | str->di_size = cpu_to_be64(ip->i_disksize); | 1057 | str->di_size = cpu_to_be64(i_size_read(&ip->i_inode)); |
| 1059 | str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); | 1058 | str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); |
| 1060 | str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); | 1059 | str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); |
| 1061 | str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); | 1060 | str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); |
| @@ -1085,8 +1084,8 @@ void gfs2_dinode_print(const struct gfs2_inode *ip) | |||
| 1085 | (unsigned long long)ip->i_no_formal_ino); | 1084 | (unsigned long long)ip->i_no_formal_ino); |
| 1086 | printk(KERN_INFO " no_addr = %llu\n", | 1085 | printk(KERN_INFO " no_addr = %llu\n", |
| 1087 | (unsigned long long)ip->i_no_addr); | 1086 | (unsigned long long)ip->i_no_addr); |
| 1088 | printk(KERN_INFO " i_disksize = %llu\n", | 1087 | printk(KERN_INFO " i_size = %llu\n", |
| 1089 | (unsigned long long)ip->i_disksize); | 1088 | (unsigned long long)i_size_read(&ip->i_inode)); |
| 1090 | printk(KERN_INFO " blocks = %llu\n", | 1089 | printk(KERN_INFO " blocks = %llu\n", |
| 1091 | (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); | 1090 | (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); |
| 1092 | printk(KERN_INFO " i_goal = %llu\n", | 1091 | printk(KERN_INFO " i_goal = %llu\n", |
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 300ada3f21de..6720d7d5fbc6 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h | |||
| @@ -19,6 +19,8 @@ extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask); | |||
| 19 | extern int gfs2_internal_read(struct gfs2_inode *ip, | 19 | extern int gfs2_internal_read(struct gfs2_inode *ip, |
| 20 | struct file_ra_state *ra_state, | 20 | struct file_ra_state *ra_state, |
| 21 | char *buf, loff_t *pos, unsigned size); | 21 | char *buf, loff_t *pos, unsigned size); |
| 22 | extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, | ||
| 23 | unsigned int from, unsigned int to); | ||
| 22 | extern void gfs2_set_aops(struct inode *inode); | 24 | extern void gfs2_set_aops(struct inode *inode); |
| 23 | 25 | ||
| 24 | static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) | 26 | static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) |
| @@ -80,6 +82,19 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip, | |||
| 80 | dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr); | 82 | dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr); |
| 81 | } | 83 | } |
| 82 | 84 | ||
| 85 | static inline int gfs2_check_internal_file_size(struct inode *inode, | ||
| 86 | u64 minsize, u64 maxsize) | ||
| 87 | { | ||
| 88 | u64 size = i_size_read(inode); | ||
| 89 | if (size < minsize || size > maxsize) | ||
| 90 | goto err; | ||
| 91 | if (size & ((1 << inode->i_blkbits) - 1)) | ||
| 92 | goto err; | ||
| 93 | return 0; | ||
| 94 | err: | ||
| 95 | gfs2_consist_inode(GFS2_I(inode)); | ||
| 96 | return -EIO; | ||
| 97 | } | ||
| 83 | 98 | ||
| 84 | extern void gfs2_set_iop(struct inode *inode); | 99 | extern void gfs2_set_iop(struct inode *inode); |
| 85 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, | 100 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, |
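inode.h gains gfs2_check_internal_file_size(), used to sanity-check internal system inodes whose size must fall within a given range and be block aligned, now that i_disksize is gone. A compilable user-space sketch of the same check follows; the EIO value here is just a stand-in errno for illustration.

#include <stdint.h>
#include <stdio.h>

#define EIO 5  /* stand-in errno value for illustration */

/* Mirrors gfs2_check_internal_file_size(): the size must lie within
 * [minsize, maxsize] and be a multiple of the block size (1 << blkbits). */
static int check_internal_file_size(uint64_t size, unsigned int blkbits,
                                    uint64_t minsize, uint64_t maxsize)
{
        if (size < minsize || size > maxsize)
                return -EIO;
        if (size & ((1ULL << blkbits) - 1))
                return -EIO;
        return 0;
}

int main(void)
{
        printf("%d\n", check_internal_file_size(8192, 12, 4096, 1 << 20)); /* 0    */
        printf("%d\n", check_internal_file_size(8000, 12, 4096, 1 << 20)); /* -EIO */
        return 0;
}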
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 0e0470ed34c2..1c09425b45fd 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c | |||
| @@ -42,9 +42,9 @@ static void gdlm_ast(void *arg) | |||
| 42 | ret |= LM_OUT_CANCELED; | 42 | ret |= LM_OUT_CANCELED; |
| 43 | goto out; | 43 | goto out; |
| 44 | case -EAGAIN: /* Try lock fails */ | 44 | case -EAGAIN: /* Try lock fails */ |
| 45 | case -EDEADLK: /* Deadlock detected */ | ||
| 45 | goto out; | 46 | goto out; |
| 46 | case -EINVAL: /* Invalid */ | 47 | case -ETIMEDOUT: /* Canceled due to timeout */ |
| 47 | case -ENOMEM: /* Out of memory */ | ||
| 48 | ret |= LM_OUT_ERROR; | 48 | ret |= LM_OUT_ERROR; |
| 49 | goto out; | 49 | goto out; |
| 50 | case 0: /* Success */ | 50 | case 0: /* Success */ |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index b1e9630eb46a..d7eb1e209aa8 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "glock.h" | 24 | #include "glock.h" |
| 25 | #include "quota.h" | 25 | #include "quota.h" |
| 26 | #include "recovery.h" | 26 | #include "recovery.h" |
| 27 | #include "dir.h" | ||
| 27 | 28 | ||
| 28 | static struct shrinker qd_shrinker = { | 29 | static struct shrinker qd_shrinker = { |
| 29 | .shrink = gfs2_shrink_qd_memory, | 30 | .shrink = gfs2_shrink_qd_memory, |
| @@ -78,6 +79,9 @@ static int __init init_gfs2_fs(void) | |||
| 78 | { | 79 | { |
| 79 | int error; | 80 | int error; |
| 80 | 81 | ||
| 82 | gfs2_str2qstr(&gfs2_qdot, "."); | ||
| 83 | gfs2_str2qstr(&gfs2_qdotdot, ".."); | ||
| 84 | |||
| 81 | error = gfs2_sys_init(); | 85 | error = gfs2_sys_init(); |
| 82 | if (error) | 86 | if (error) |
| 83 | return error; | 87 | return error; |
| @@ -140,7 +144,7 @@ static int __init init_gfs2_fs(void) | |||
| 140 | 144 | ||
| 141 | error = -ENOMEM; | 145 | error = -ENOMEM; |
| 142 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", | 146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", |
| 143 | WQ_NON_REENTRANT | WQ_RESCUER, 0); | 147 | WQ_RESCUER | WQ_FREEZEABLE, 0); |
| 144 | if (!gfs_recovery_wq) | 148 | if (!gfs_recovery_wq) |
| 145 | goto fail_wq; | 149 | goto fail_wq; |
| 146 | 150 | ||
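main.c now fills in the shared gfs2_qdot / gfs2_qdotdot qstrs once at module init, so callers in export.c and ops_inode.c no longer rebuild "." and ".." on every lookup. A tiny sketch of that initialize-once pattern, using a simplified qstr type (the real struct qstr also carries a hash):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct qstr, illustration only. */
struct qstr { const char *name; unsigned int len; };

static struct qstr qdot, qdotdot;   /* shared, filled in once at init time */

static void str2qstr(struct qstr *q, const char *s)
{
        q->name = s;
        q->len = (unsigned int)strlen(s);
}

static void module_init_once(void)
{
        str2qstr(&qdot, ".");
        str2qstr(&qdotdot, "..");
}

int main(void)
{
        module_init_once();
        /* Later callers just reuse the shared qstrs instead of rebuilding them. */
        printf("%s (%u), %s (%u)\n", qdot.name, qdot.len, qdotdot.name, qdotdot.len);
        return 0;
}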
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 4d4b1e8ac64c..aeafc233dc89 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
| @@ -38,14 +38,6 @@ | |||
| 38 | #define DO 0 | 38 | #define DO 0 |
| 39 | #define UNDO 1 | 39 | #define UNDO 1 |
| 40 | 40 | ||
| 41 | static const u32 gfs2_old_fs_formats[] = { | ||
| 42 | 0 | ||
| 43 | }; | ||
| 44 | |||
| 45 | static const u32 gfs2_old_multihost_formats[] = { | ||
| 46 | 0 | ||
| 47 | }; | ||
| 48 | |||
| 49 | /** | 41 | /** |
| 50 | * gfs2_tune_init - Fill a gfs2_tune structure with default values | 42 | * gfs2_tune_init - Fill a gfs2_tune structure with default values |
| 51 | * @gt: tune | 43 | * @gt: tune |
| @@ -135,8 +127,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
| 135 | 127 | ||
| 136 | static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent) | 128 | static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent) |
| 137 | { | 129 | { |
| 138 | unsigned int x; | ||
| 139 | |||
| 140 | if (sb->sb_magic != GFS2_MAGIC || | 130 | if (sb->sb_magic != GFS2_MAGIC || |
| 141 | sb->sb_type != GFS2_METATYPE_SB) { | 131 | sb->sb_type != GFS2_METATYPE_SB) { |
| 142 | if (!silent) | 132 | if (!silent) |
| @@ -150,55 +140,9 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int sile | |||
| 150 | sb->sb_multihost_format == GFS2_FORMAT_MULTI) | 140 | sb->sb_multihost_format == GFS2_FORMAT_MULTI) |
| 151 | return 0; | 141 | return 0; |
| 152 | 142 | ||
| 153 | if (sb->sb_fs_format != GFS2_FORMAT_FS) { | 143 | fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); |
| 154 | for (x = 0; gfs2_old_fs_formats[x]; x++) | ||
| 155 | if (gfs2_old_fs_formats[x] == sb->sb_fs_format) | ||
| 156 | break; | ||
| 157 | 144 | ||
| 158 | if (!gfs2_old_fs_formats[x]) { | 145 | return -EINVAL; |
| 159 | printk(KERN_WARNING | ||
| 160 | "GFS2: code version (%u, %u) is incompatible " | ||
| 161 | "with ondisk format (%u, %u)\n", | ||
| 162 | GFS2_FORMAT_FS, GFS2_FORMAT_MULTI, | ||
| 163 | sb->sb_fs_format, sb->sb_multihost_format); | ||
| 164 | printk(KERN_WARNING | ||
| 165 | "GFS2: I don't know how to upgrade this FS\n"); | ||
| 166 | return -EINVAL; | ||
| 167 | } | ||
| 168 | } | ||
| 169 | |||
| 170 | if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) { | ||
| 171 | for (x = 0; gfs2_old_multihost_formats[x]; x++) | ||
| 172 | if (gfs2_old_multihost_formats[x] == | ||
| 173 | sb->sb_multihost_format) | ||
| 174 | break; | ||
| 175 | |||
| 176 | if (!gfs2_old_multihost_formats[x]) { | ||
| 177 | printk(KERN_WARNING | ||
| 178 | "GFS2: code version (%u, %u) is incompatible " | ||
| 179 | "with ondisk format (%u, %u)\n", | ||
| 180 | GFS2_FORMAT_FS, GFS2_FORMAT_MULTI, | ||
| 181 | sb->sb_fs_format, sb->sb_multihost_format); | ||
| 182 | printk(KERN_WARNING | ||
| 183 | "GFS2: I don't know how to upgrade this FS\n"); | ||
| 184 | return -EINVAL; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
| 188 | if (!sdp->sd_args.ar_upgrade) { | ||
| 189 | printk(KERN_WARNING | ||
| 190 | "GFS2: code version (%u, %u) is incompatible " | ||
| 191 | "with ondisk format (%u, %u)\n", | ||
| 192 | GFS2_FORMAT_FS, GFS2_FORMAT_MULTI, | ||
| 193 | sb->sb_fs_format, sb->sb_multihost_format); | ||
| 194 | printk(KERN_INFO | ||
| 195 | "GFS2: Use the \"upgrade\" mount option to upgrade " | ||
| 196 | "the FS\n"); | ||
| 197 | printk(KERN_INFO "GFS2: See the manual for more details\n"); | ||
| 198 | return -EINVAL; | ||
| 199 | } | ||
| 200 | |||
| 201 | return 0; | ||
| 202 | } | 146 | } |
| 203 | 147 | ||
| 204 | static void end_bio_io_page(struct bio *bio, int error) | 148 | static void end_bio_io_page(struct bio *bio, int error) |
| @@ -586,7 +530,7 @@ static int map_journal_extents(struct gfs2_sbd *sdp) | |||
| 586 | 530 | ||
| 587 | prev_db = 0; | 531 | prev_db = 0; |
| 588 | 532 | ||
| 589 | for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) { | 533 | for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) { |
| 590 | bh.b_state = 0; | 534 | bh.b_state = 0; |
| 591 | bh.b_blocknr = 0; | 535 | bh.b_blocknr = 0; |
| 592 | bh.b_size = 1 << ip->i_inode.i_blkbits; | 536 | bh.b_size = 1 << ip->i_inode.i_blkbits; |
| @@ -1022,7 +966,6 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) | |||
| 1022 | if (!strcmp("lock_nolock", proto)) { | 966 | if (!strcmp("lock_nolock", proto)) { |
| 1023 | lm = &nolock_ops; | 967 | lm = &nolock_ops; |
| 1024 | sdp->sd_args.ar_localflocks = 1; | 968 | sdp->sd_args.ar_localflocks = 1; |
| 1025 | sdp->sd_args.ar_localcaching = 1; | ||
| 1026 | #ifdef CONFIG_GFS2_FS_LOCKING_DLM | 969 | #ifdef CONFIG_GFS2_FS_LOCKING_DLM |
| 1027 | } else if (!strcmp("lock_dlm", proto)) { | 970 | } else if (!strcmp("lock_dlm", proto)) { |
| 1028 | lm = &gfs2_dlm_ops; | 971 | lm = &gfs2_dlm_ops; |
| @@ -1113,8 +1056,6 @@ static int gfs2_journalid_wait(void *word) | |||
| 1113 | 1056 | ||
| 1114 | static int wait_on_journal(struct gfs2_sbd *sdp) | 1057 | static int wait_on_journal(struct gfs2_sbd *sdp) |
| 1115 | { | 1058 | { |
| 1116 | if (sdp->sd_args.ar_spectator) | ||
| 1117 | return 0; | ||
| 1118 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | 1059 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) |
| 1119 | return 0; | 1060 | return 0; |
| 1120 | 1061 | ||
| @@ -1217,6 +1158,20 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent | |||
| 1217 | if (error) | 1158 | if (error) |
| 1218 | goto fail_sb; | 1159 | goto fail_sb; |
| 1219 | 1160 | ||
| 1161 | /* | ||
| 1162 | * If user space has failed to join the cluster or some similar | ||
| 1163 | * failure has occurred, then the journal id will contain a | ||
| 1164 | * negative (error) number. This will then be returned to the | ||
| 1165 | * caller (of the mount syscall). We do this even for spectator | ||
| 1166 | * mounts (which just write a jid of 0 to indicate "ok" even though | ||
| 1167 | * the jid is unused in the spectator case) | ||
| 1168 | */ | ||
| 1169 | if (sdp->sd_lockstruct.ls_jid < 0) { | ||
| 1170 | error = sdp->sd_lockstruct.ls_jid; | ||
| 1171 | sdp->sd_lockstruct.ls_jid = 0; | ||
| 1172 | goto fail_sb; | ||
| 1173 | } | ||
| 1174 | |||
| 1220 | error = init_inodes(sdp, DO); | 1175 | error = init_inodes(sdp, DO); |
| 1221 | if (error) | 1176 | if (error) |
| 1222 | goto fail_sb; | 1177 | goto fail_sb; |
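ops_fstype.c also changes ls_jid to a signed int elsewhere in this series, so user space can report a cluster-join failure by writing a negative journal id; fill_super() then fails the mount with that value. A minimal sketch of the check, with the surrounding mount state reduced to a single integer:

#include <stdio.h>

/* Mirrors the new check in fill_super(): a negative jid written by user
 * space is treated as an errno and aborts the mount; spectator mounts
 * write 0 to indicate "ok". */
static int check_journal_id(int *ls_jid)
{
        if (*ls_jid < 0) {
                int error = *ls_jid;
                *ls_jid = 0;      /* reset so later code sees a sane value */
                return error;
        }
        return 0;
}

int main(void)
{
        int jid = -22;            /* e.g. an -EINVAL reported by user space */
        printf("mount result: %d\n", check_journal_id(&jid));
        return 0;
}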
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 1009be2c9737..0534510200d5 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <linux/gfs2_ondisk.h> | 18 | #include <linux/gfs2_ondisk.h> |
| 19 | #include <linux/crc32.h> | 19 | #include <linux/crc32.h> |
| 20 | #include <linux/fiemap.h> | 20 | #include <linux/fiemap.h> |
| 21 | #include <linux/swap.h> | ||
| 22 | #include <linux/falloc.h> | ||
| 21 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
| 22 | 24 | ||
| 23 | #include "gfs2.h" | 25 | #include "gfs2.h" |
| @@ -217,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, | |||
| 217 | goto out_gunlock_q; | 219 | goto out_gunlock_q; |
| 218 | 220 | ||
| 219 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + | 221 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + |
| 220 | al->al_rgd->rd_length + | 222 | gfs2_rg_blocks(al) + |
| 221 | 2 * RES_DINODE + RES_STATFS + | 223 | 2 * RES_DINODE + RES_STATFS + |
| 222 | RES_QUOTA, 0); | 224 | RES_QUOTA, 0); |
| 223 | if (error) | 225 | if (error) |
| @@ -406,7 +408,6 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, | |||
| 406 | 408 | ||
| 407 | ip = ghs[1].gh_gl->gl_object; | 409 | ip = ghs[1].gh_gl->gl_object; |
| 408 | 410 | ||
| 409 | ip->i_disksize = size; | ||
| 410 | i_size_write(inode, size); | 411 | i_size_write(inode, size); |
| 411 | 412 | ||
| 412 | error = gfs2_meta_inode_buffer(ip, &dibh); | 413 | error = gfs2_meta_inode_buffer(ip, &dibh); |
| @@ -461,7 +462,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 461 | ip = ghs[1].gh_gl->gl_object; | 462 | ip = ghs[1].gh_gl->gl_object; |
| 462 | 463 | ||
| 463 | ip->i_inode.i_nlink = 2; | 464 | ip->i_inode.i_nlink = 2; |
| 464 | ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); | 465 | i_size_write(inode, sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)); |
| 465 | ip->i_diskflags |= GFS2_DIF_JDATA; | 466 | ip->i_diskflags |= GFS2_DIF_JDATA; |
| 466 | ip->i_entries = 2; | 467 | ip->i_entries = 2; |
| 467 | 468 | ||
| @@ -470,18 +471,15 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 470 | if (!gfs2_assert_withdraw(sdp, !error)) { | 471 | if (!gfs2_assert_withdraw(sdp, !error)) { |
| 471 | struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; | 472 | struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; |
| 472 | struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1); | 473 | struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1); |
| 473 | struct qstr str; | ||
| 474 | 474 | ||
| 475 | gfs2_str2qstr(&str, "."); | ||
| 476 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 475 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| 477 | gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent); | 476 | gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent); |
| 478 | dent->de_inum = di->di_num; /* already GFS2 endian */ | 477 | dent->de_inum = di->di_num; /* already GFS2 endian */ |
| 479 | dent->de_type = cpu_to_be16(DT_DIR); | 478 | dent->de_type = cpu_to_be16(DT_DIR); |
| 480 | di->di_entries = cpu_to_be32(1); | 479 | di->di_entries = cpu_to_be32(1); |
| 481 | 480 | ||
| 482 | gfs2_str2qstr(&str, ".."); | ||
| 483 | dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1)); | 481 | dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1)); |
| 484 | gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent); | 482 | gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent); |
| 485 | 483 | ||
| 486 | gfs2_inum_out(dip, dent); | 484 | gfs2_inum_out(dip, dent); |
| 487 | dent->de_type = cpu_to_be16(DT_DIR); | 485 | dent->de_type = cpu_to_be16(DT_DIR); |
| @@ -522,7 +520,6 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 522 | static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, | 520 | static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, |
| 523 | struct gfs2_inode *ip) | 521 | struct gfs2_inode *ip) |
| 524 | { | 522 | { |
| 525 | struct qstr dotname; | ||
| 526 | int error; | 523 | int error; |
| 527 | 524 | ||
| 528 | if (ip->i_entries != 2) { | 525 | if (ip->i_entries != 2) { |
| @@ -539,13 +536,11 @@ static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, | |||
| 539 | if (error) | 536 | if (error) |
| 540 | return error; | 537 | return error; |
| 541 | 538 | ||
| 542 | gfs2_str2qstr(&dotname, "."); | 539 | error = gfs2_dir_del(ip, &gfs2_qdot); |
| 543 | error = gfs2_dir_del(ip, &dotname); | ||
| 544 | if (error) | 540 | if (error) |
| 545 | return error; | 541 | return error; |
| 546 | 542 | ||
| 547 | gfs2_str2qstr(&dotname, ".."); | 543 | error = gfs2_dir_del(ip, &gfs2_qdotdot); |
| 548 | error = gfs2_dir_del(ip, &dotname); | ||
| 549 | if (error) | 544 | if (error) |
| 550 | return error; | 545 | return error; |
| 551 | 546 | ||
| @@ -694,11 +689,8 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to) | |||
| 694 | struct inode *dir = &to->i_inode; | 689 | struct inode *dir = &to->i_inode; |
| 695 | struct super_block *sb = dir->i_sb; | 690 | struct super_block *sb = dir->i_sb; |
| 696 | struct inode *tmp; | 691 | struct inode *tmp; |
| 697 | struct qstr dotdot; | ||
| 698 | int error = 0; | 692 | int error = 0; |
| 699 | 693 | ||
| 700 | gfs2_str2qstr(&dotdot, ".."); | ||
| 701 | |||
| 702 | igrab(dir); | 694 | igrab(dir); |
| 703 | 695 | ||
| 704 | for (;;) { | 696 | for (;;) { |
| @@ -711,7 +703,7 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to) | |||
| 711 | break; | 703 | break; |
| 712 | } | 704 | } |
| 713 | 705 | ||
| 714 | tmp = gfs2_lookupi(dir, &dotdot, 1); | 706 | tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1); |
| 715 | if (IS_ERR(tmp)) { | 707 | if (IS_ERR(tmp)) { |
| 716 | error = PTR_ERR(tmp); | 708 | error = PTR_ERR(tmp); |
| 717 | break; | 709 | break; |
| @@ -744,7 +736,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
| 744 | struct gfs2_inode *ip = GFS2_I(odentry->d_inode); | 736 | struct gfs2_inode *ip = GFS2_I(odentry->d_inode); |
| 745 | struct gfs2_inode *nip = NULL; | 737 | struct gfs2_inode *nip = NULL; |
| 746 | struct gfs2_sbd *sdp = GFS2_SB(odir); | 738 | struct gfs2_sbd *sdp = GFS2_SB(odir); |
| 747 | struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }; | 739 | struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }, ri_gh; |
| 748 | struct gfs2_rgrpd *nrgd; | 740 | struct gfs2_rgrpd *nrgd; |
| 749 | unsigned int num_gh; | 741 | unsigned int num_gh; |
| 750 | int dir_rename = 0; | 742 | int dir_rename = 0; |
| @@ -758,6 +750,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
| 758 | return 0; | 750 | return 0; |
| 759 | } | 751 | } |
| 760 | 752 | ||
| 753 | error = gfs2_rindex_hold(sdp, &ri_gh); | ||
| 754 | if (error) | ||
| 755 | return error; | ||
| 761 | 756 | ||
| 762 | if (odip != ndip) { | 757 | if (odip != ndip) { |
| 763 | error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, | 758 | error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, |
| @@ -887,12 +882,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
| 887 | 882 | ||
| 888 | al->al_requested = sdp->sd_max_dirres; | 883 | al->al_requested = sdp->sd_max_dirres; |
| 889 | 884 | ||
| 890 | error = gfs2_inplace_reserve(ndip); | 885 | error = gfs2_inplace_reserve_ri(ndip); |
| 891 | if (error) | 886 | if (error) |
| 892 | goto out_gunlock_q; | 887 | goto out_gunlock_q; |
| 893 | 888 | ||
| 894 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + | 889 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + |
| 895 | al->al_rgd->rd_length + | 890 | gfs2_rg_blocks(al) + |
| 896 | 4 * RES_DINODE + 4 * RES_LEAF + | 891 | 4 * RES_DINODE + 4 * RES_LEAF + |
| 897 | RES_STATFS + RES_QUOTA + 4, 0); | 892 | RES_STATFS + RES_QUOTA + 4, 0); |
| 898 | if (error) | 893 | if (error) |
| @@ -920,9 +915,6 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
| 920 | } | 915 | } |
| 921 | 916 | ||
| 922 | if (dir_rename) { | 917 | if (dir_rename) { |
| 923 | struct qstr name; | ||
| 924 | gfs2_str2qstr(&name, ".."); | ||
| 925 | |||
| 926 | error = gfs2_change_nlink(ndip, +1); | 918 | error = gfs2_change_nlink(ndip, +1); |
| 927 | if (error) | 919 | if (error) |
| 928 | goto out_end_trans; | 920 | goto out_end_trans; |
| @@ -930,7 +922,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
| 930 | if (error) | 922 | if (error) |
| 931 | goto out_end_trans; | 923 | goto out_end_trans; |
| 932 | 924 | ||
| 933 | error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR); | 925 | error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR); |
| 934 | if (error) | 926 | if (error) |
| 935 | goto out_end_trans; | 927 | goto out_end_trans; |
| 936 | } else { | 928 | } else { |
| @@ -972,6 +964,7 @@ out_gunlock_r: | |||
| 972 | if (r_gh.gh_gl) | 964 | if (r_gh.gh_gl) |
| 973 | gfs2_glock_dq_uninit(&r_gh); | 965 | gfs2_glock_dq_uninit(&r_gh); |
| 974 | out: | 966 | out: |
| 967 | gfs2_glock_dq_uninit(&ri_gh); | ||
| 975 | return error; | 968 | return error; |
| 976 | } | 969 | } |
| 977 | 970 | ||
| @@ -990,7 +983,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
| 990 | struct gfs2_inode *ip = GFS2_I(dentry->d_inode); | 983 | struct gfs2_inode *ip = GFS2_I(dentry->d_inode); |
| 991 | struct gfs2_holder i_gh; | 984 | struct gfs2_holder i_gh; |
| 992 | struct buffer_head *dibh; | 985 | struct buffer_head *dibh; |
| 993 | unsigned int x; | 986 | unsigned int x, size; |
| 994 | char *buf; | 987 | char *buf; |
| 995 | int error; | 988 | int error; |
| 996 | 989 | ||
| @@ -1002,7 +995,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
| 1002 | return NULL; | 995 | return NULL; |
| 1003 | } | 996 | } |
| 1004 | 997 | ||
| 1005 | if (!ip->i_disksize) { | 998 | size = (unsigned int)i_size_read(&ip->i_inode); |
| 999 | if (size == 0) { | ||
| 1006 | gfs2_consist_inode(ip); | 1000 | gfs2_consist_inode(ip); |
| 1007 | buf = ERR_PTR(-EIO); | 1001 | buf = ERR_PTR(-EIO); |
| 1008 | goto out; | 1002 | goto out; |
| @@ -1014,7 +1008,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
| 1014 | goto out; | 1008 | goto out; |
| 1015 | } | 1009 | } |
| 1016 | 1010 | ||
| 1017 | x = ip->i_disksize + 1; | 1011 | x = size + 1; |
| 1018 | buf = kmalloc(x, GFP_NOFS); | 1012 | buf = kmalloc(x, GFP_NOFS); |
| 1019 | if (!buf) | 1013 | if (!buf) |
| 1020 | buf = ERR_PTR(-ENOMEM); | 1014 | buf = ERR_PTR(-ENOMEM); |
| @@ -1071,30 +1065,6 @@ int gfs2_permission(struct inode *inode, int mask) | |||
| 1071 | return error; | 1065 | return error; |
| 1072 | } | 1066 | } |
| 1073 | 1067 | ||
| 1074 | /* | ||
| 1075 | * XXX(truncate): the truncate_setsize calls should be moved to the end. | ||
| 1076 | */ | ||
| 1077 | static int setattr_size(struct inode *inode, struct iattr *attr) | ||
| 1078 | { | ||
| 1079 | struct gfs2_inode *ip = GFS2_I(inode); | ||
| 1080 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
| 1081 | int error; | ||
| 1082 | |||
| 1083 | if (attr->ia_size != ip->i_disksize) { | ||
| 1084 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); | ||
| 1085 | if (error) | ||
| 1086 | return error; | ||
| 1087 | truncate_setsize(inode, attr->ia_size); | ||
| 1088 | gfs2_trans_end(sdp); | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | error = gfs2_truncatei(ip, attr->ia_size); | ||
| 1092 | if (error && (inode->i_size != ip->i_disksize)) | ||
| 1093 | i_size_write(inode, ip->i_disksize); | ||
| 1094 | |||
| 1095 | return error; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | static int setattr_chown(struct inode *inode, struct iattr *attr) | 1068 | static int setattr_chown(struct inode *inode, struct iattr *attr) |
| 1099 | { | 1069 | { |
| 1100 | struct gfs2_inode *ip = GFS2_I(inode); | 1070 | struct gfs2_inode *ip = GFS2_I(inode); |
| @@ -1195,7 +1165,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 1195 | goto out; | 1165 | goto out; |
| 1196 | 1166 | ||
| 1197 | if (attr->ia_valid & ATTR_SIZE) | 1167 | if (attr->ia_valid & ATTR_SIZE) |
| 1198 | error = setattr_size(inode, attr); | 1168 | error = gfs2_setattr_size(inode, attr->ia_size); |
| 1199 | else if (attr->ia_valid & (ATTR_UID | ATTR_GID)) | 1169 | else if (attr->ia_valid & (ATTR_UID | ATTR_GID)) |
| 1200 | error = setattr_chown(inode, attr); | 1170 | error = setattr_chown(inode, attr); |
| 1201 | else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode)) | 1171 | else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode)) |
| @@ -1301,6 +1271,257 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name) | |||
| 1301 | return ret; | 1271 | return ret; |
| 1302 | } | 1272 | } |
| 1303 | 1273 | ||
| 1274 | static void empty_write_end(struct page *page, unsigned from, | ||
| 1275 | unsigned to) | ||
| 1276 | { | ||
| 1277 | struct gfs2_inode *ip = GFS2_I(page->mapping->host); | ||
| 1278 | |||
| 1279 | page_zero_new_buffers(page, from, to); | ||
| 1280 | flush_dcache_page(page); | ||
| 1281 | mark_page_accessed(page); | ||
| 1282 | |||
| 1283 | if (!gfs2_is_writeback(ip)) | ||
| 1284 | gfs2_page_add_databufs(ip, page, from, to); | ||
| 1285 | |||
| 1286 | block_commit_write(page, from, to); | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | |||
| 1290 | static int write_empty_blocks(struct page *page, unsigned from, unsigned to) | ||
| 1291 | { | ||
| 1292 | unsigned start, end, next; | ||
| 1293 | struct buffer_head *bh, *head; | ||
| 1294 | int error; | ||
| 1295 | |||
| 1296 | if (!page_has_buffers(page)) { | ||
| 1297 | error = block_prepare_write(page, from, to, gfs2_block_map); | ||
| 1298 | if (unlikely(error)) | ||
| 1299 | return error; | ||
| 1300 | |||
| 1301 | empty_write_end(page, from, to); | ||
| 1302 | return 0; | ||
| 1303 | } | ||
| 1304 | |||
| 1305 | bh = head = page_buffers(page); | ||
| 1306 | next = end = 0; | ||
| 1307 | while (next < from) { | ||
| 1308 | next += bh->b_size; | ||
| 1309 | bh = bh->b_this_page; | ||
| 1310 | } | ||
| 1311 | start = next; | ||
| 1312 | do { | ||
| 1313 | next += bh->b_size; | ||
| 1314 | if (buffer_mapped(bh)) { | ||
| 1315 | if (end) { | ||
| 1316 | error = block_prepare_write(page, start, end, | ||
| 1317 | gfs2_block_map); | ||
| 1318 | if (unlikely(error)) | ||
| 1319 | return error; | ||
| 1320 | empty_write_end(page, start, end); | ||
| 1321 | end = 0; | ||
| 1322 | } | ||
| 1323 | start = next; | ||
| 1324 | } | ||
| 1325 | else | ||
| 1326 | end = next; | ||
| 1327 | bh = bh->b_this_page; | ||
| 1328 | } while (next < to); | ||
| 1329 | |||
| 1330 | if (end) { | ||
| 1331 | error = block_prepare_write(page, start, end, gfs2_block_map); | ||
| 1332 | if (unlikely(error)) | ||
| 1333 | return error; | ||
| 1334 | empty_write_end(page, start, end); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | return 0; | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, | ||
| 1341 | int mode) | ||
| 1342 | { | ||
| 1343 | struct gfs2_inode *ip = GFS2_I(inode); | ||
| 1344 | struct buffer_head *dibh; | ||
| 1345 | int error; | ||
| 1346 | u64 start = offset >> PAGE_CACHE_SHIFT; | ||
| 1347 | unsigned int start_offset = offset & ~PAGE_CACHE_MASK; | ||
| 1348 | u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT; | ||
| 1349 | pgoff_t curr; | ||
| 1350 | struct page *page; | ||
| 1351 | unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK; | ||
| 1352 | unsigned int from, to; | ||
| 1353 | |||
| 1354 | if (!end_offset) | ||
| 1355 | end_offset = PAGE_CACHE_SIZE; | ||
| 1356 | |||
| 1357 | error = gfs2_meta_inode_buffer(ip, &dibh); | ||
| 1358 | if (unlikely(error)) | ||
| 1359 | goto out; | ||
| 1360 | |||
| 1361 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
| 1362 | |||
| 1363 | if (gfs2_is_stuffed(ip)) { | ||
| 1364 | error = gfs2_unstuff_dinode(ip, NULL); | ||
| 1365 | if (unlikely(error)) | ||
| 1366 | goto out; | ||
| 1367 | } | ||
| 1368 | |||
| 1369 | curr = start; | ||
| 1370 | offset = start << PAGE_CACHE_SHIFT; | ||
| 1371 | from = start_offset; | ||
| 1372 | to = PAGE_CACHE_SIZE; | ||
| 1373 | while (curr <= end) { | ||
| 1374 | page = grab_cache_page_write_begin(inode->i_mapping, curr, | ||
| 1375 | AOP_FLAG_NOFS); | ||
| 1376 | if (unlikely(!page)) { | ||
| 1377 | error = -ENOMEM; | ||
| 1378 | goto out; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | if (curr == end) | ||
| 1382 | to = end_offset; | ||
| 1383 | error = write_empty_blocks(page, from, to); | ||
| 1384 | if (!error && offset + to > inode->i_size && | ||
| 1385 | !(mode & FALLOC_FL_KEEP_SIZE)) { | ||
| 1386 | i_size_write(inode, offset + to); | ||
| 1387 | } | ||
| 1388 | unlock_page(page); | ||
| 1389 | page_cache_release(page); | ||
| 1390 | if (error) | ||
| 1391 | goto out; | ||
| 1392 | curr++; | ||
| 1393 | offset += PAGE_CACHE_SIZE; | ||
| 1394 | from = 0; | ||
| 1395 | } | ||
| 1396 | |||
| 1397 | gfs2_dinode_out(ip, dibh->b_data); | ||
| 1398 | mark_inode_dirty(inode); | ||
| 1399 | |||
| 1400 | brelse(dibh); | ||
| 1401 | |||
| 1402 | out: | ||
| 1403 | return error; | ||
| 1404 | } | ||
| 1405 | |||
| 1406 | static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len, | ||
| 1407 | unsigned int *data_blocks, unsigned int *ind_blocks) | ||
| 1408 | { | ||
| 1409 | const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
| 1410 | unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone; | ||
| 1411 | unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); | ||
| 1412 | |||
| 1413 | for (tmp = max_data; tmp > sdp->sd_diptrs;) { | ||
| 1414 | tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); | ||
| 1415 | max_data -= tmp; | ||
| 1416 | } | ||
| 1417 | /* This calculation isn't the exact reverse of gfs2_write_calc_reserve, | ||
| 1418 | so it might end up with fewer data blocks */ | ||
| 1419 | if (max_data <= *data_blocks) | ||
| 1420 | return; | ||
| 1421 | *data_blocks = max_data; | ||
| 1422 | *ind_blocks = max_blocks - max_data; | ||
| 1423 | *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; | ||
| 1424 | if (*len > max) { | ||
| 1425 | *len = max; | ||
| 1426 | gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); | ||
| 1427 | } | ||
| 1428 | } | ||
| 1429 | |||
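calc_max_reserv() above estimates how many of the blocks left in the chosen resource group can hold data once indirect (metadata) blocks are subtracted. The same loop modelled as a standalone program; the pointer counts are invented and the 3 * (sd_max_height - 1) height adjustment is omitted for brevity:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Split max_blocks into data and indirect blocks the way
     * calc_max_reserv() does: keep peeling off one level of indirect
     * pointers until the remainder fits without another level. */
    static void split_blocks(unsigned int max_blocks, unsigned int diptrs,
                             unsigned int inptrs, unsigned int *data,
                             unsigned int *ind)
    {
            unsigned int tmp, max_data = max_blocks;

            for (tmp = max_data; tmp > diptrs; ) {
                    tmp = DIV_ROUND_UP(tmp, inptrs);
                    max_data -= tmp;
            }
            *data = max_data;
            *ind  = max_blocks - max_data;
    }

    int main(void)
    {
            unsigned int data, ind;

            /* invented geometry: 48 direct pointers, 509 per indirect block */
            split_blocks(100000, 48, 509, &data, &ind);
            printf("data blocks=%u indirect blocks=%u\n", data, ind);
            return 0;
    }
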
| 1430 | static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset, | ||
| 1431 | loff_t len) | ||
| 1432 | { | ||
| 1433 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
| 1434 | struct gfs2_inode *ip = GFS2_I(inode); | ||
| 1435 | unsigned int data_blocks = 0, ind_blocks = 0, rblocks; | ||
| 1436 | loff_t bytes, max_bytes; | ||
| 1437 | struct gfs2_alloc *al; | ||
| 1438 | int error; | ||
| 1439 | loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; | ||
| 1440 | next = (next + 1) << sdp->sd_sb.sb_bsize_shift; | ||
| 1441 | |||
| 1442 | offset = (offset >> sdp->sd_sb.sb_bsize_shift) << | ||
| 1443 | sdp->sd_sb.sb_bsize_shift; | ||
| 1444 | |||
| 1445 | len = next - offset; | ||
| 1446 | bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; | ||
| 1447 | if (!bytes) | ||
| 1448 | bytes = UINT_MAX; | ||
| 1449 | |||
| 1450 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); | ||
| 1451 | error = gfs2_glock_nq(&ip->i_gh); | ||
| 1452 | if (unlikely(error)) | ||
| 1453 | goto out_uninit; | ||
| 1454 | |||
| 1455 | if (!gfs2_write_alloc_required(ip, offset, len)) | ||
| 1456 | goto out_unlock; | ||
| 1457 | |||
| 1458 | while (len > 0) { | ||
| 1459 | if (len < bytes) | ||
| 1460 | bytes = len; | ||
| 1461 | al = gfs2_alloc_get(ip); | ||
| 1462 | if (!al) { | ||
| 1463 | error = -ENOMEM; | ||
| 1464 | goto out_unlock; | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | error = gfs2_quota_lock_check(ip); | ||
| 1468 | if (error) | ||
| 1469 | goto out_alloc_put; | ||
| 1470 | |||
| 1471 | retry: | ||
| 1472 | gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); | ||
| 1473 | |||
| 1474 | al->al_requested = data_blocks + ind_blocks; | ||
| 1475 | error = gfs2_inplace_reserve(ip); | ||
| 1476 | if (error) { | ||
| 1477 | if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) { | ||
| 1478 | bytes >>= 1; | ||
| 1479 | goto retry; | ||
| 1480 | } | ||
| 1481 | goto out_qunlock; | ||
| 1482 | } | ||
| 1483 | max_bytes = bytes; | ||
| 1484 | calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks); | ||
| 1485 | al->al_requested = data_blocks + ind_blocks; | ||
| 1486 | |||
| 1487 | rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + | ||
| 1488 | RES_RG_HDR + gfs2_rg_blocks(al); | ||
| 1489 | if (gfs2_is_jdata(ip)) | ||
| 1490 | rblocks += data_blocks ? data_blocks : 1; | ||
| 1491 | |||
| 1492 | error = gfs2_trans_begin(sdp, rblocks, | ||
| 1493 | PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); | ||
| 1494 | if (error) | ||
| 1495 | goto out_trans_fail; | ||
| 1496 | |||
| 1497 | error = fallocate_chunk(inode, offset, max_bytes, mode); | ||
| 1498 | gfs2_trans_end(sdp); | ||
| 1499 | |||
| 1500 | if (error) | ||
| 1501 | goto out_trans_fail; | ||
| 1502 | |||
| 1503 | len -= max_bytes; | ||
| 1504 | offset += max_bytes; | ||
| 1505 | gfs2_inplace_release(ip); | ||
| 1506 | gfs2_quota_unlock(ip); | ||
| 1507 | gfs2_alloc_put(ip); | ||
| 1508 | } | ||
| 1509 | goto out_unlock; | ||
| 1510 | |||
| 1511 | out_trans_fail: | ||
| 1512 | gfs2_inplace_release(ip); | ||
| 1513 | out_qunlock: | ||
| 1514 | gfs2_quota_unlock(ip); | ||
| 1515 | out_alloc_put: | ||
| 1516 | gfs2_alloc_put(ip); | ||
| 1517 | out_unlock: | ||
| 1518 | gfs2_glock_dq(&ip->i_gh); | ||
| 1519 | out_uninit: | ||
| 1520 | gfs2_holder_uninit(&ip->i_gh); | ||
| 1521 | return error; | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | |||
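gfs2_fallocate() above preallocates in chunks: when a reservation fails with -ENOSPC it halves the chunk size and retries, and on success it advances the offset by however much was committed. The control flow, stripped of glocks, quota and transactions, as a purely illustrative sketch (the reserve() stub and its limits are made up):

    #include <stdio.h>

    #define BSIZE 4096UL

    /* Fake reservation: pretend nothing larger than 64KiB can be reserved. */
    static int reserve(unsigned long bytes)
    {
            return bytes <= 65536UL ? 0 : -1;
    }

    int main(void)
    {
            unsigned long offset = 0, len = 300000, bytes = 1UL << 20;

            while (len > 0) {
                    if (bytes > len)
                            bytes = len;

                    /* the -ENOSPC path: halve the chunk and retry, down to one block */
                    while (reserve(bytes) != 0) {
                            if (bytes <= BSIZE)
                                    return 1;
                            bytes >>= 1;
                    }

                    printf("allocated %lu bytes at offset %lu\n", bytes, offset);
                    offset += bytes;
                    len    -= bytes;
            }
            return 0;
    }
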
| 1304 | static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 1525 | static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
| 1305 | u64 start, u64 len) | 1526 | u64 start, u64 len) |
| 1306 | { | 1527 | { |
| @@ -1351,6 +1572,7 @@ const struct inode_operations gfs2_file_iops = { | |||
| 1351 | .getxattr = gfs2_getxattr, | 1572 | .getxattr = gfs2_getxattr, |
| 1352 | .listxattr = gfs2_listxattr, | 1573 | .listxattr = gfs2_listxattr, |
| 1353 | .removexattr = gfs2_removexattr, | 1574 | .removexattr = gfs2_removexattr, |
| 1575 | .fallocate = gfs2_fallocate, | ||
| 1354 | .fiemap = gfs2_fiemap, | 1576 | .fiemap = gfs2_fiemap, |
| 1355 | }; | 1577 | }; |
| 1356 | 1578 | ||
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 1bc6b5695e6d..58a9b9998b42 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
| @@ -735,10 +735,8 @@ get_a_page: | |||
| 735 | goto out; | 735 | goto out; |
| 736 | 736 | ||
| 737 | size = loc + sizeof(struct gfs2_quota); | 737 | size = loc + sizeof(struct gfs2_quota); |
| 738 | if (size > inode->i_size) { | 738 | if (size > inode->i_size) |
| 739 | ip->i_disksize = size; | ||
| 740 | i_size_write(inode, size); | 739 | i_size_write(inode, size); |
| 741 | } | ||
| 742 | inode->i_mtime = inode->i_atime = CURRENT_TIME; | 740 | inode->i_mtime = inode->i_atime = CURRENT_TIME; |
| 743 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 741 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| 744 | gfs2_dinode_out(ip, dibh->b_data); | 742 | gfs2_dinode_out(ip, dibh->b_data); |
| @@ -817,7 +815,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
| 817 | goto out_alloc; | 815 | goto out_alloc; |
| 818 | 816 | ||
| 819 | if (nalloc) | 817 | if (nalloc) |
| 820 | blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS; | 818 | blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS; |
| 821 | 819 | ||
| 822 | error = gfs2_trans_begin(sdp, blocks, 0); | 820 | error = gfs2_trans_begin(sdp, blocks, 0); |
| 823 | if (error) | 821 | if (error) |
| @@ -1190,18 +1188,17 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void * | |||
| 1190 | int gfs2_quota_init(struct gfs2_sbd *sdp) | 1188 | int gfs2_quota_init(struct gfs2_sbd *sdp) |
| 1191 | { | 1189 | { |
| 1192 | struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); | 1190 | struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); |
| 1193 | unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; | 1191 | u64 size = i_size_read(sdp->sd_qc_inode); |
| 1192 | unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift; | ||
| 1194 | unsigned int x, slot = 0; | 1193 | unsigned int x, slot = 0; |
| 1195 | unsigned int found = 0; | 1194 | unsigned int found = 0; |
| 1196 | u64 dblock; | 1195 | u64 dblock; |
| 1197 | u32 extlen = 0; | 1196 | u32 extlen = 0; |
| 1198 | int error; | 1197 | int error; |
| 1199 | 1198 | ||
| 1200 | if (!ip->i_disksize || ip->i_disksize > (64 << 20) || | 1199 | if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20)) |
| 1201 | ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) { | ||
| 1202 | gfs2_consist_inode(ip); | ||
| 1203 | return -EIO; | 1200 | return -EIO; |
| 1204 | } | 1201 | |
| 1205 | sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; | 1202 | sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; |
| 1206 | sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); | 1203 | sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); |
| 1207 | 1204 | ||
| @@ -1589,6 +1586,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, | |||
| 1589 | error = gfs2_inplace_reserve(ip); | 1586 | error = gfs2_inplace_reserve(ip); |
| 1590 | if (error) | 1587 | if (error) |
| 1591 | goto out_alloc; | 1588 | goto out_alloc; |
| 1589 | blocks += gfs2_rg_blocks(al); | ||
| 1592 | } | 1590 | } |
| 1593 | 1591 | ||
| 1594 | error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); | 1592 | error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); |
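The quota-init hunk above (and the journal-descriptor hunk in fs/gfs2/super.c below) replaces open-coded size sanity tests with gfs2_check_internal_file_size(). The helper itself is not part of this portion of the diff; judging from its call sites it presumably enforces something like the following, shown here as a hedged standalone guess rather than the actual kernel code:

    #include <stdio.h>
    #include <stdint.h>

    /* Guess at the semantics: the internal file must be at least minsize,
     * at most maxsize, and a whole multiple of the block size. */
    static int check_internal_file_size(uint64_t size, uint64_t minsize,
                                        uint64_t maxsize, uint64_t bsize)
    {
            if (size < minsize || size > maxsize || (size & (bsize - 1)))
                    return -1;      /* the kernel would return -EIO */
            return 0;
    }

    int main(void)
    {
            uint64_t bsize = 4096;

            /* journal check: 8MiB..1GiB, block aligned -> ok */
            printf("%d\n", check_internal_file_size(8 << 20, 8 << 20, 1 << 30, bsize));
            /* quota check: unaligned size -> rejected */
            printf("%d\n", check_internal_file_size(4097, 1, 64 << 20, bsize));
            return 0;
    }
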
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index f7f89a94a5a4..f2a02edcac8f 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c | |||
| @@ -455,11 +455,13 @@ void gfs2_recover_func(struct work_struct *work) | |||
| 455 | int ro = 0; | 455 | int ro = 0; |
| 456 | unsigned int pass; | 456 | unsigned int pass; |
| 457 | int error; | 457 | int error; |
| 458 | int jlocked = 0; | ||
| 458 | 459 | ||
| 459 | if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) { | 460 | if (sdp->sd_args.ar_spectator || |
| 461 | (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) { | ||
| 460 | fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n", | 462 | fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n", |
| 461 | jd->jd_jid); | 463 | jd->jd_jid); |
| 462 | 464 | jlocked = 1; | |
| 463 | /* Acquire the journal lock so we can do recovery */ | 465 | /* Acquire the journal lock so we can do recovery */ |
| 464 | 466 | ||
| 465 | error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops, | 467 | error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops, |
| @@ -554,13 +556,12 @@ void gfs2_recover_func(struct work_struct *work) | |||
| 554 | jd->jd_jid, t); | 556 | jd->jd_jid, t); |
| 555 | } | 557 | } |
| 556 | 558 | ||
| 557 | if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) | ||
| 558 | gfs2_glock_dq_uninit(&ji_gh); | ||
| 559 | |||
| 560 | gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); | 559 | gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); |
| 561 | 560 | ||
| 562 | if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) | 561 | if (jlocked) { |
| 562 | gfs2_glock_dq_uninit(&ji_gh); | ||
| 563 | gfs2_glock_dq_uninit(&j_gh); | 563 | gfs2_glock_dq_uninit(&j_gh); |
| 564 | } | ||
| 564 | 565 | ||
| 565 | fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); | 566 | fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); |
| 566 | goto done; | 567 | goto done; |
| @@ -568,7 +569,7 @@ void gfs2_recover_func(struct work_struct *work) | |||
| 568 | fail_gunlock_tr: | 569 | fail_gunlock_tr: |
| 569 | gfs2_glock_dq_uninit(&t_gh); | 570 | gfs2_glock_dq_uninit(&t_gh); |
| 570 | fail_gunlock_ji: | 571 | fail_gunlock_ji: |
| 571 | if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) { | 572 | if (jlocked) { |
| 572 | gfs2_glock_dq_uninit(&ji_gh); | 573 | gfs2_glock_dq_uninit(&ji_gh); |
| 573 | fail_gunlock_j: | 574 | fail_gunlock_j: |
| 574 | gfs2_glock_dq_uninit(&j_gh); | 575 | gfs2_glock_dq_uninit(&j_gh); |
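The recovery change above swaps the repeated jid comparison for an explicit jlocked flag, so the journal glocks are dropped exactly when they were taken, including the new spectator case. The same take-flag-release shape in plain C, using a pthread mutex purely as an analogy for the glocks:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Only take the lock when recovering a journal that is not our own,
     * or when running as a spectator (which owns no journal at all). */
    static void recover(int own_journal, int spectator)
    {
            int jlocked = 0;

            if (spectator || !own_journal) {
                    pthread_mutex_lock(&journal_lock);
                    jlocked = 1;
            }

            /* ... replay the log here ... */

            if (jlocked)
                    pthread_mutex_unlock(&journal_lock);
    }

    int main(void)
    {
            recover(1, 0);  /* our own journal: lock already held elsewhere */
            recover(0, 0);  /* someone else's journal: lock and unlock      */
            recover(1, 1);  /* spectator: also lock and unlock              */
            printf("done\n");
            return 0;
    }
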
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 171a744f8e45..fb67f593f408 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
| @@ -500,7 +500,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) | |||
| 500 | for (rgrps = 0;; rgrps++) { | 500 | for (rgrps = 0;; rgrps++) { |
| 501 | loff_t pos = rgrps * sizeof(struct gfs2_rindex); | 501 | loff_t pos = rgrps * sizeof(struct gfs2_rindex); |
| 502 | 502 | ||
| 503 | if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize) | 503 | if (pos + sizeof(struct gfs2_rindex) >= i_size_read(inode)) |
| 504 | break; | 504 | break; |
| 505 | error = gfs2_internal_read(ip, &ra_state, buf, &pos, | 505 | error = gfs2_internal_read(ip, &ra_state, buf, &pos, |
| 506 | sizeof(struct gfs2_rindex)); | 506 | sizeof(struct gfs2_rindex)); |
| @@ -588,7 +588,9 @@ static int gfs2_ri_update(struct gfs2_inode *ip) | |||
| 588 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 588 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 589 | struct inode *inode = &ip->i_inode; | 589 | struct inode *inode = &ip->i_inode; |
| 590 | struct file_ra_state ra_state; | 590 | struct file_ra_state ra_state; |
| 591 | u64 rgrp_count = ip->i_disksize; | 591 | u64 rgrp_count = i_size_read(inode); |
| 592 | struct gfs2_rgrpd *rgd; | ||
| 593 | unsigned int max_data = 0; | ||
| 592 | int error; | 594 | int error; |
| 593 | 595 | ||
| 594 | do_div(rgrp_count, sizeof(struct gfs2_rindex)); | 596 | do_div(rgrp_count, sizeof(struct gfs2_rindex)); |
| @@ -603,6 +605,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip) | |||
| 603 | } | 605 | } |
| 604 | } | 606 | } |
| 605 | 607 | ||
| 608 | list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list) | ||
| 609 | if (rgd->rd_data > max_data) | ||
| 610 | max_data = rgd->rd_data; | ||
| 611 | sdp->sd_max_rg_data = max_data; | ||
| 606 | sdp->sd_rindex_uptodate = 1; | 612 | sdp->sd_rindex_uptodate = 1; |
| 607 | return 0; | 613 | return 0; |
| 608 | } | 614 | } |
| @@ -622,13 +628,15 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip) | |||
| 622 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 628 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 623 | struct inode *inode = &ip->i_inode; | 629 | struct inode *inode = &ip->i_inode; |
| 624 | struct file_ra_state ra_state; | 630 | struct file_ra_state ra_state; |
| 631 | struct gfs2_rgrpd *rgd; | ||
| 632 | unsigned int max_data = 0; | ||
| 625 | int error; | 633 | int error; |
| 626 | 634 | ||
| 627 | file_ra_state_init(&ra_state, inode->i_mapping); | 635 | file_ra_state_init(&ra_state, inode->i_mapping); |
| 628 | for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) { | 636 | for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) { |
| 629 | /* Ignore partials */ | 637 | /* Ignore partials */ |
| 630 | if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) > | 638 | if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) > |
| 631 | ip->i_disksize) | 639 | i_size_read(inode)) |
| 632 | break; | 640 | break; |
| 633 | error = read_rindex_entry(ip, &ra_state); | 641 | error = read_rindex_entry(ip, &ra_state); |
| 634 | if (error) { | 642 | if (error) { |
| @@ -636,6 +644,10 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip) | |||
| 636 | return error; | 644 | return error; |
| 637 | } | 645 | } |
| 638 | } | 646 | } |
| 647 | list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list) | ||
| 648 | if (rgd->rd_data > max_data) | ||
| 649 | max_data = rgd->rd_data; | ||
| 650 | sdp->sd_max_rg_data = max_data; | ||
| 639 | 651 | ||
| 640 | sdp->sd_rindex_uptodate = 1; | 652 | sdp->sd_rindex_uptodate = 1; |
| 641 | return 0; | 653 | return 0; |
| @@ -1188,7 +1200,8 @@ out: | |||
| 1188 | * Returns: errno | 1200 | * Returns: errno |
| 1189 | */ | 1201 | */ |
| 1190 | 1202 | ||
| 1191 | int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line) | 1203 | int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, |
| 1204 | char *file, unsigned int line) | ||
| 1192 | { | 1205 | { |
| 1193 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1206 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 1194 | struct gfs2_alloc *al = ip->i_alloc; | 1207 | struct gfs2_alloc *al = ip->i_alloc; |
| @@ -1199,12 +1212,15 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line) | |||
| 1199 | return -EINVAL; | 1212 | return -EINVAL; |
| 1200 | 1213 | ||
| 1201 | try_again: | 1214 | try_again: |
| 1202 | /* We need to hold the rindex unless the inode we're using is | 1215 | if (hold_rindex) { |
| 1203 | the rindex itself, in which case it's already held. */ | 1216 | /* We need to hold the rindex unless the inode we're using is |
| 1204 | if (ip != GFS2_I(sdp->sd_rindex)) | 1217 | the rindex itself, in which case it's already held. */ |
| 1205 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); | 1218 | if (ip != GFS2_I(sdp->sd_rindex)) |
| 1206 | else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */ | 1219 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); |
| 1207 | error = gfs2_ri_update_special(ip); | 1220 | else if (!sdp->sd_rgrps) /* We may not have the rindex read |
| 1221 | in, so: */ | ||
| 1222 | error = gfs2_ri_update_special(ip); | ||
| 1223 | } | ||
| 1208 | 1224 | ||
| 1209 | if (error) | 1225 | if (error) |
| 1210 | return error; | 1226 | return error; |
| @@ -1215,7 +1231,7 @@ try_again: | |||
| 1215 | try to free it, and try the allocation again. */ | 1231 | try to free it, and try the allocation again. */ |
| 1216 | error = get_local_rgrp(ip, &unlinked, &last_unlinked); | 1232 | error = get_local_rgrp(ip, &unlinked, &last_unlinked); |
| 1217 | if (error) { | 1233 | if (error) { |
| 1218 | if (ip != GFS2_I(sdp->sd_rindex)) | 1234 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) |
| 1219 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1235 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
| 1220 | if (error != -EAGAIN) | 1236 | if (error != -EAGAIN) |
| 1221 | return error; | 1237 | return error; |
| @@ -1257,7 +1273,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip) | |||
| 1257 | al->al_rgd = NULL; | 1273 | al->al_rgd = NULL; |
| 1258 | if (al->al_rgd_gh.gh_gl) | 1274 | if (al->al_rgd_gh.gh_gl) |
| 1259 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1275 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
| 1260 | if (ip != GFS2_I(sdp->sd_rindex)) | 1276 | if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl) |
| 1261 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1277 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
| 1262 | } | 1278 | } |
| 1263 | 1279 | ||
| @@ -1496,11 +1512,19 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n) | |||
| 1496 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1512 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 1497 | struct buffer_head *dibh; | 1513 | struct buffer_head *dibh; |
| 1498 | struct gfs2_alloc *al = ip->i_alloc; | 1514 | struct gfs2_alloc *al = ip->i_alloc; |
| 1499 | struct gfs2_rgrpd *rgd = al->al_rgd; | 1515 | struct gfs2_rgrpd *rgd; |
| 1500 | u32 goal, blk; | 1516 | u32 goal, blk; |
| 1501 | u64 block; | 1517 | u64 block; |
| 1502 | int error; | 1518 | int error; |
| 1503 | 1519 | ||
| 1520 | /* Only happens if there is a bug in gfs2, return something distinctive | ||
| 1521 | * to ensure that it is noticed. | ||
| 1522 | */ | ||
| 1523 | if (al == NULL) | ||
| 1524 | return -ECANCELED; | ||
| 1525 | |||
| 1526 | rgd = al->al_rgd; | ||
| 1527 | |||
| 1504 | if (rgrp_contains_block(rgd, ip->i_goal)) | 1528 | if (rgrp_contains_block(rgd, ip->i_goal)) |
| 1505 | goal = ip->i_goal - rgd->rd_data0; | 1529 | goal = ip->i_goal - rgd->rd_data0; |
| 1506 | else | 1530 | else |
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index f07119d89557..0e35c0466f9a 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h | |||
| @@ -39,10 +39,12 @@ static inline void gfs2_alloc_put(struct gfs2_inode *ip) | |||
| 39 | ip->i_alloc = NULL; | 39 | ip->i_alloc = NULL; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, | 42 | extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, |
| 43 | unsigned int line); | 43 | char *file, unsigned int line); |
| 44 | #define gfs2_inplace_reserve(ip) \ | 44 | #define gfs2_inplace_reserve(ip) \ |
| 45 | gfs2_inplace_reserve_i((ip), __FILE__, __LINE__) | 45 | gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__) |
| 46 | #define gfs2_inplace_reserve_ri(ip) \ | ||
| 47 | gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__) | ||
| 46 | 48 | ||
| 47 | extern void gfs2_inplace_release(struct gfs2_inode *ip); | 49 | extern void gfs2_inplace_release(struct gfs2_inode *ip); |
| 48 | 50 | ||
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 77cb9f830ee4..047d1176096c 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
| @@ -85,6 +85,7 @@ static const match_table_t tokens = { | |||
| 85 | {Opt_locktable, "locktable=%s"}, | 85 | {Opt_locktable, "locktable=%s"}, |
| 86 | {Opt_hostdata, "hostdata=%s"}, | 86 | {Opt_hostdata, "hostdata=%s"}, |
| 87 | {Opt_spectator, "spectator"}, | 87 | {Opt_spectator, "spectator"}, |
| 88 | {Opt_spectator, "norecovery"}, | ||
| 88 | {Opt_ignore_local_fs, "ignore_local_fs"}, | 89 | {Opt_ignore_local_fs, "ignore_local_fs"}, |
| 89 | {Opt_localflocks, "localflocks"}, | 90 | {Opt_localflocks, "localflocks"}, |
| 90 | {Opt_localcaching, "localcaching"}, | 91 | {Opt_localcaching, "localcaching"}, |
| @@ -159,13 +160,13 @@ int gfs2_mount_args(struct gfs2_args *args, char *options) | |||
| 159 | args->ar_spectator = 1; | 160 | args->ar_spectator = 1; |
| 160 | break; | 161 | break; |
| 161 | case Opt_ignore_local_fs: | 162 | case Opt_ignore_local_fs: |
| 162 | args->ar_ignore_local_fs = 1; | 163 | /* Retained for backwards compat only */ |
| 163 | break; | 164 | break; |
| 164 | case Opt_localflocks: | 165 | case Opt_localflocks: |
| 165 | args->ar_localflocks = 1; | 166 | args->ar_localflocks = 1; |
| 166 | break; | 167 | break; |
| 167 | case Opt_localcaching: | 168 | case Opt_localcaching: |
| 168 | args->ar_localcaching = 1; | 169 | /* Retained for backwards compat only */ |
| 169 | break; | 170 | break; |
| 170 | case Opt_debug: | 171 | case Opt_debug: |
| 171 | if (args->ar_errors == GFS2_ERRORS_PANIC) { | 172 | if (args->ar_errors == GFS2_ERRORS_PANIC) { |
| @@ -179,7 +180,7 @@ int gfs2_mount_args(struct gfs2_args *args, char *options) | |||
| 179 | args->ar_debug = 0; | 180 | args->ar_debug = 0; |
| 180 | break; | 181 | break; |
| 181 | case Opt_upgrade: | 182 | case Opt_upgrade: |
| 182 | args->ar_upgrade = 1; | 183 | /* Retained for backwards compat only */ |
| 183 | break; | 184 | break; |
| 184 | case Opt_acl: | 185 | case Opt_acl: |
| 185 | args->ar_posix_acl = 1; | 186 | args->ar_posix_acl = 1; |
| @@ -342,15 +343,14 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) | |||
| 342 | { | 343 | { |
| 343 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); | 344 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); |
| 344 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | 345 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); |
| 346 | u64 size = i_size_read(jd->jd_inode); | ||
| 345 | 347 | ||
| 346 | if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || | 348 | if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30)) |
| 347 | (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { | ||
| 348 | gfs2_consist_inode(ip); | ||
| 349 | return -EIO; | 349 | return -EIO; |
| 350 | } | ||
| 351 | jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; | ||
| 352 | 350 | ||
| 353 | if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) { | 351 | jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift; |
| 352 | |||
| 353 | if (gfs2_write_alloc_required(ip, 0, size)) { | ||
| 354 | gfs2_consist_inode(ip); | 354 | gfs2_consist_inode(ip); |
| 355 | return -EIO; | 355 | return -EIO; |
| 356 | } | 356 | } |
| @@ -1129,9 +1129,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) | |||
| 1129 | 1129 | ||
| 1130 | /* Some flags must not be changed */ | 1130 | /* Some flags must not be changed */ |
| 1131 | if (args_neq(&args, &sdp->sd_args, spectator) || | 1131 | if (args_neq(&args, &sdp->sd_args, spectator) || |
| 1132 | args_neq(&args, &sdp->sd_args, ignore_local_fs) || | ||
| 1133 | args_neq(&args, &sdp->sd_args, localflocks) || | 1132 | args_neq(&args, &sdp->sd_args, localflocks) || |
| 1134 | args_neq(&args, &sdp->sd_args, localcaching) || | ||
| 1135 | args_neq(&args, &sdp->sd_args, meta)) | 1133 | args_neq(&args, &sdp->sd_args, meta)) |
| 1136 | return -EINVAL; | 1134 | return -EINVAL; |
| 1137 | 1135 | ||
| @@ -1234,16 +1232,10 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
| 1234 | seq_printf(s, ",hostdata=%s", args->ar_hostdata); | 1232 | seq_printf(s, ",hostdata=%s", args->ar_hostdata); |
| 1235 | if (args->ar_spectator) | 1233 | if (args->ar_spectator) |
| 1236 | seq_printf(s, ",spectator"); | 1234 | seq_printf(s, ",spectator"); |
| 1237 | if (args->ar_ignore_local_fs) | ||
| 1238 | seq_printf(s, ",ignore_local_fs"); | ||
| 1239 | if (args->ar_localflocks) | 1235 | if (args->ar_localflocks) |
| 1240 | seq_printf(s, ",localflocks"); | 1236 | seq_printf(s, ",localflocks"); |
| 1241 | if (args->ar_localcaching) | ||
| 1242 | seq_printf(s, ",localcaching"); | ||
| 1243 | if (args->ar_debug) | 1237 | if (args->ar_debug) |
| 1244 | seq_printf(s, ",debug"); | 1238 | seq_printf(s, ",debug"); |
| 1245 | if (args->ar_upgrade) | ||
| 1246 | seq_printf(s, ",upgrade"); | ||
| 1247 | if (args->ar_posix_acl) | 1239 | if (args->ar_posix_acl) |
| 1248 | seq_printf(s, ",acl"); | 1240 | seq_printf(s, ",acl"); |
| 1249 | if (args->ar_quota != GFS2_QUOTA_DEFAULT) { | 1241 | if (args->ar_quota != GFS2_QUOTA_DEFAULT) { |
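In the mount-option table above, "norecovery" is aliased to the existing spectator flag, while ignore_local_fs, localcaching and upgrade are still accepted but now ignored for backwards compatibility. The alias idea in a small standalone parser; the token names match the options but everything else is illustrative:

    #include <stdio.h>
    #include <string.h>

    enum opt { OPT_SPECTATOR, OPT_LOCALFLOCKS, OPT_COMPAT_NOOP, OPT_UNKNOWN };

    static const struct { const char *name; enum opt token; } tokens[] = {
            { "spectator",       OPT_SPECTATOR   },
            { "norecovery",      OPT_SPECTATOR   },  /* alias for spectator */
            { "localflocks",     OPT_LOCALFLOCKS },
            { "ignore_local_fs", OPT_COMPAT_NOOP },  /* accepted, ignored   */
            { "localcaching",    OPT_COMPAT_NOOP },
            { "upgrade",         OPT_COMPAT_NOOP },
    };

    static enum opt lookup(const char *s)
    {
            unsigned int i;

            for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++)
                    if (!strcmp(tokens[i].name, s))
                            return tokens[i].token;
            return OPT_UNKNOWN;
    }

    int main(void)
    {
            printf("norecovery   -> %d\n", lookup("norecovery"));   /* OPT_SPECTATOR   */
            printf("localcaching -> %d\n", lookup("localcaching")); /* OPT_COMPAT_NOOP */
            return 0;
    }
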
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index ccacffd2faaa..748ccb557c18 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
| @@ -230,7 +230,10 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len | |||
| 230 | 230 | ||
| 231 | if (gltype > LM_TYPE_JOURNAL) | 231 | if (gltype > LM_TYPE_JOURNAL) |
| 232 | return -EINVAL; | 232 | return -EINVAL; |
| 233 | glops = gfs2_glops_list[gltype]; | 233 | if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK) |
| 234 | glops = &gfs2_trans_glops; | ||
| 235 | else | ||
| 236 | glops = gfs2_glops_list[gltype]; | ||
| 234 | if (glops == NULL) | 237 | if (glops == NULL) |
| 235 | return -EINVAL; | 238 | return -EINVAL; |
| 236 | if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags)) | 239 | if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags)) |
| @@ -399,31 +402,32 @@ static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf) | |||
| 399 | 402 | ||
| 400 | static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf) | 403 | static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf) |
| 401 | { | 404 | { |
| 402 | return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); | 405 | return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid); |
| 403 | } | 406 | } |
| 404 | 407 | ||
| 405 | static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | 408 | static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) |
| 406 | { | 409 | { |
| 407 | unsigned jid; | 410 | int jid; |
| 408 | int rv; | 411 | int rv; |
| 409 | 412 | ||
| 410 | rv = sscanf(buf, "%u", &jid); | 413 | rv = sscanf(buf, "%d", &jid); |
| 411 | if (rv != 1) | 414 | if (rv != 1) |
| 412 | return -EINVAL; | 415 | return -EINVAL; |
| 413 | 416 | ||
| 414 | spin_lock(&sdp->sd_jindex_spin); | 417 | spin_lock(&sdp->sd_jindex_spin); |
| 415 | rv = -EINVAL; | 418 | rv = -EINVAL; |
| 416 | if (sdp->sd_args.ar_spectator) | ||
| 417 | goto out; | ||
| 418 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | 419 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) |
| 419 | goto out; | 420 | goto out; |
| 420 | rv = -EBUSY; | 421 | rv = -EBUSY; |
| 421 | if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | 422 | if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) |
| 422 | goto out; | 423 | goto out; |
| 424 | rv = 0; | ||
| 425 | if (sdp->sd_args.ar_spectator && jid > 0) | ||
| 426 | rv = jid = -EINVAL; | ||
| 423 | sdp->sd_lockstruct.ls_jid = jid; | 427 | sdp->sd_lockstruct.ls_jid = jid; |
| 428 | clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); | ||
| 424 | smp_mb__after_clear_bit(); | 429 | smp_mb__after_clear_bit(); |
| 425 | wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); | 430 | wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); |
| 426 | rv = 0; | ||
| 427 | out: | 431 | out: |
| 428 | spin_unlock(&sdp->sd_jindex_spin); | 432 | spin_unlock(&sdp->sd_jindex_spin); |
| 429 | return rv ? rv : len; | 433 | return rv ? rv : len; |
| @@ -617,7 +621,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj, | |||
| 617 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); | 621 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); |
| 618 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); | 622 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); |
| 619 | if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) | 623 | if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) |
| 620 | add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); | 624 | add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid); |
| 621 | if (gfs2_uuid_valid(uuid)) | 625 | if (gfs2_uuid_valid(uuid)) |
| 622 | add_uevent_var(env, "UUID=%pUB", uuid); | 626 | add_uevent_var(env, "UUID=%pUB", uuid); |
| 623 | return 0; | 627 | return 0; |
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index 148d55c14171..cedb0bb96d96 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h | |||
| @@ -39,7 +39,8 @@ | |||
| 39 | {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \ | 39 | {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \ |
| 40 | {(1UL << GLF_REPLY_PENDING), "r" }, \ | 40 | {(1UL << GLF_REPLY_PENDING), "r" }, \ |
| 41 | {(1UL << GLF_INITIAL), "I" }, \ | 41 | {(1UL << GLF_INITIAL), "I" }, \ |
| 42 | {(1UL << GLF_FROZEN), "F" }) | 42 | {(1UL << GLF_FROZEN), "F" }, \ |
| 43 | {(1UL << GLF_QUEUED), "q" }) | ||
| 43 | 44 | ||
| 44 | #ifndef NUMPTY | 45 | #ifndef NUMPTY |
| 45 | #define NUMPTY | 46 | #define NUMPTY |
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h index edf9d4bd908e..fb56b783e028 100644 --- a/fs/gfs2/trans.h +++ b/fs/gfs2/trans.h | |||
| @@ -20,11 +20,20 @@ struct gfs2_glock; | |||
| 20 | #define RES_JDATA 1 | 20 | #define RES_JDATA 1 |
| 21 | #define RES_DATA 1 | 21 | #define RES_DATA 1 |
| 22 | #define RES_LEAF 1 | 22 | #define RES_LEAF 1 |
| 23 | #define RES_RG_HDR 1 | ||
| 23 | #define RES_RG_BIT 2 | 24 | #define RES_RG_BIT 2 |
| 24 | #define RES_EATTR 1 | 25 | #define RES_EATTR 1 |
| 25 | #define RES_STATFS 1 | 26 | #define RES_STATFS 1 |
| 26 | #define RES_QUOTA 2 | 27 | #define RES_QUOTA 2 |
| 27 | 28 | ||
| 29 | /* reserve either the number of blocks to be allocated plus the rg header | ||
| 30 | * block, or all of the blocks in the rg, whichever is smaller */ | ||
| 31 | static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al) | ||
| 32 | { | ||
| 33 | return (al->al_requested < al->al_rgd->rd_length)? | ||
| 34 | al->al_requested + 1 : al->al_rgd->rd_length; | ||
| 35 | } | ||
| 36 | |||
| 28 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, | 37 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, |
| 29 | unsigned int revokes); | 38 | unsigned int revokes); |
| 30 | 39 | ||
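The new gfs2_rg_blocks() helper above caps a transaction's resource-group reservation at whichever is smaller: the requested block count plus one header block, or the whole resource group. A standalone model of that arithmetic; the struct fields mirror the kernel names but the values are invented:

    #include <stdio.h>

    /* Cut-down stand-ins for struct gfs2_alloc and struct gfs2_rgrpd. */
    struct rgrpd { unsigned int rd_length; };   /* rgrp size in blocks */
    struct alloc { unsigned int al_requested; struct rgrpd *al_rgd; };

    static unsigned int rg_blocks(const struct alloc *al)
    {
            /* requested blocks + 1 header block, unless the rgrp is smaller */
            return (al->al_requested < al->al_rgd->rd_length) ?
                    al->al_requested + 1 : al->al_rgd->rd_length;
    }

    int main(void)
    {
            struct rgrpd rgd   = { .rd_length = 16 };
            struct alloc small = { .al_requested = 4,  .al_rgd = &rgd };
            struct alloc big   = { .al_requested = 64, .al_rgd = &rgd };

            printf("small request -> reserve %u blocks\n", rg_blocks(&small)); /* 5  */
            printf("large request -> reserve %u blocks\n", rg_blocks(&big));   /* 16 */
            return 0;
    }

With a 16-block resource group, a 4-block request reserves 5 blocks (data plus header), while a 64-block request is clamped to 16, which is why the callers above can drop the blanket rd_length term from gfs2_trans_begin().
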
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 776af6eb4bcb..30b58f07c8a6 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c | |||
| @@ -734,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, | |||
| 734 | goto out_gunlock_q; | 734 | goto out_gunlock_q; |
| 735 | 735 | ||
| 736 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), | 736 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), |
| 737 | blks + al->al_rgd->rd_length + | 737 | blks + gfs2_rg_blocks(al) + |
| 738 | RES_DINODE + RES_STATFS + RES_QUOTA, 0); | 738 | RES_DINODE + RES_STATFS + RES_QUOTA, 0); |
| 739 | if (error) | 739 | if (error) |
| 740 | goto out_ipres; | 740 | goto out_ipres; |
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c index 5007a41f1be9..d182438c7ae4 100644 --- a/fs/hfsplus/bfind.c +++ b/fs/hfsplus/bfind.c | |||
| @@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) | |||
| 23 | fd->search_key = ptr; | 23 | fd->search_key = ptr; |
| 24 | fd->key = ptr + tree->max_key_len + 2; | 24 | fd->key = ptr + tree->max_key_len + 2; |
| 25 | dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); | 25 | dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); |
| 26 | down(&tree->tree_lock); | 26 | mutex_lock(&tree->tree_lock); |
| 27 | return 0; | 27 | return 0; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| @@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd) | |||
| 32 | hfs_bnode_put(fd->bnode); | 32 | hfs_bnode_put(fd->bnode); |
| 33 | kfree(fd->search_key); | 33 | kfree(fd->search_key); |
| 34 | dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); | 34 | dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); |
| 35 | up(&fd->tree->tree_lock); | 35 | mutex_unlock(&fd->tree->tree_lock); |
| 36 | fd->tree = NULL; | 36 | fd->tree = NULL; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| @@ -52,6 +52,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) | |||
| 52 | rec = (e + b) / 2; | 52 | rec = (e + b) / 2; |
| 53 | len = hfs_brec_lenoff(bnode, rec, &off); | 53 | len = hfs_brec_lenoff(bnode, rec, &off); |
| 54 | keylen = hfs_brec_keylen(bnode, rec); | 54 | keylen = hfs_brec_keylen(bnode, rec); |
| 55 | if (keylen == 0) { | ||
| 56 | res = -EINVAL; | ||
| 57 | goto fail; | ||
| 58 | } | ||
| 55 | hfs_bnode_read(bnode, fd->key, off, keylen); | 59 | hfs_bnode_read(bnode, fd->key, off, keylen); |
| 56 | cmpval = bnode->tree->keycmp(fd->key, fd->search_key); | 60 | cmpval = bnode->tree->keycmp(fd->key, fd->search_key); |
| 57 | if (!cmpval) { | 61 | if (!cmpval) { |
| @@ -67,6 +71,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) | |||
| 67 | if (rec != e && e >= 0) { | 71 | if (rec != e && e >= 0) { |
| 68 | len = hfs_brec_lenoff(bnode, e, &off); | 72 | len = hfs_brec_lenoff(bnode, e, &off); |
| 69 | keylen = hfs_brec_keylen(bnode, e); | 73 | keylen = hfs_brec_keylen(bnode, e); |
| 74 | if (keylen == 0) { | ||
| 75 | res = -EINVAL; | ||
| 76 | goto fail; | ||
| 77 | } | ||
| 70 | hfs_bnode_read(bnode, fd->key, off, keylen); | 78 | hfs_bnode_read(bnode, fd->key, off, keylen); |
| 71 | } | 79 | } |
| 72 | done: | 80 | done: |
| @@ -75,6 +83,7 @@ done: | |||
| 75 | fd->keylength = keylen; | 83 | fd->keylength = keylen; |
| 76 | fd->entryoffset = off + keylen; | 84 | fd->entryoffset = off + keylen; |
| 77 | fd->entrylength = len - keylen; | 85 | fd->entrylength = len - keylen; |
| 86 | fail: | ||
| 78 | return res; | 87 | return res; |
| 79 | } | 88 | } |
| 80 | 89 | ||
| @@ -198,6 +207,10 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt) | |||
| 198 | 207 | ||
| 199 | len = hfs_brec_lenoff(bnode, fd->record, &off); | 208 | len = hfs_brec_lenoff(bnode, fd->record, &off); |
| 200 | keylen = hfs_brec_keylen(bnode, fd->record); | 209 | keylen = hfs_brec_keylen(bnode, fd->record); |
| 210 | if (keylen == 0) { | ||
| 211 | res = -EINVAL; | ||
| 212 | goto out; | ||
| 213 | } | ||
| 201 | fd->keyoffset = off; | 214 | fd->keyoffset = off; |
| 202 | fd->keylength = keylen; | 215 | fd->keylength = keylen; |
| 203 | fd->entryoffset = off + keylen; | 216 | fd->entryoffset = off + keylen; |
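The new keylen == 0 checks above, together with the hfs_brec_keylen() change further down in fs/hfsplus/brec.c, turn a corrupt on-disk key length into -EINVAL instead of an out-of-bounds read. A toy version of the validation; the maximum key length and the sample values are examples only:

    #include <stdio.h>
    #include <stdint.h>

    /* Return the record's key length (key size field + 2 bytes for the
     * field itself), or 0 if the on-disk value is impossibly large. */
    static uint16_t brec_keylen(uint16_t ondisk_keysize, uint16_t max_key_len)
    {
            uint16_t retval = ondisk_keysize + 2;

            if (retval > max_key_len + 2) {
                    fprintf(stderr, "keylen %u too large\n", retval);
                    return 0;       /* callers map 0 to -EINVAL */
            }
            return retval;
    }

    int main(void)
    {
            printf("%u\n", brec_keylen(10, 255));   /* 12 */
            printf("%u\n", brec_keylen(900, 255));  /* 0  */
            return 0;
    }
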
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c index ea30afc2a03c..ad57f5991eb1 100644 --- a/fs/hfsplus/bitmap.c +++ b/fs/hfsplus/bitmap.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) | 18 | int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) |
| 19 | { | 19 | { |
| 20 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 20 | struct page *page; | 21 | struct page *page; |
| 21 | struct address_space *mapping; | 22 | struct address_space *mapping; |
| 22 | __be32 *pptr, *curr, *end; | 23 | __be32 *pptr, *curr, *end; |
| @@ -29,8 +30,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma | |||
| 29 | return size; | 30 | return size; |
| 30 | 31 | ||
| 31 | dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); | 32 | dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); |
| 32 | mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex); | 33 | mutex_lock(&sbi->alloc_mutex); |
| 33 | mapping = HFSPLUS_SB(sb).alloc_file->i_mapping; | 34 | mapping = sbi->alloc_file->i_mapping; |
| 34 | page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); | 35 | page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); |
| 35 | if (IS_ERR(page)) { | 36 | if (IS_ERR(page)) { |
| 36 | start = size; | 37 | start = size; |
| @@ -150,16 +151,17 @@ done: | |||
| 150 | set_page_dirty(page); | 151 | set_page_dirty(page); |
| 151 | kunmap(page); | 152 | kunmap(page); |
| 152 | *max = offset + (curr - pptr) * 32 + i - start; | 153 | *max = offset + (curr - pptr) * 32 + i - start; |
| 153 | HFSPLUS_SB(sb).free_blocks -= *max; | 154 | sbi->free_blocks -= *max; |
| 154 | sb->s_dirt = 1; | 155 | sb->s_dirt = 1; |
| 155 | dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); | 156 | dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); |
| 156 | out: | 157 | out: |
| 157 | mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); | 158 | mutex_unlock(&sbi->alloc_mutex); |
| 158 | return start; | 159 | return start; |
| 159 | } | 160 | } |
| 160 | 161 | ||
| 161 | int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) | 162 | int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) |
| 162 | { | 163 | { |
| 164 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 163 | struct page *page; | 165 | struct page *page; |
| 164 | struct address_space *mapping; | 166 | struct address_space *mapping; |
| 165 | __be32 *pptr, *curr, *end; | 167 | __be32 *pptr, *curr, *end; |
| @@ -172,11 +174,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) | |||
| 172 | 174 | ||
| 173 | dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count); | 175 | dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count); |
| 174 | /* are all of the bits in range? */ | 176 | /* are all of the bits in range? */ |
| 175 | if ((offset + count) > HFSPLUS_SB(sb).total_blocks) | 177 | if ((offset + count) > sbi->total_blocks) |
| 176 | return -2; | 178 | return -2; |
| 177 | 179 | ||
| 178 | mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex); | 180 | mutex_lock(&sbi->alloc_mutex); |
| 179 | mapping = HFSPLUS_SB(sb).alloc_file->i_mapping; | 181 | mapping = sbi->alloc_file->i_mapping; |
| 180 | pnr = offset / PAGE_CACHE_BITS; | 182 | pnr = offset / PAGE_CACHE_BITS; |
| 181 | page = read_mapping_page(mapping, pnr, NULL); | 183 | page = read_mapping_page(mapping, pnr, NULL); |
| 182 | pptr = kmap(page); | 184 | pptr = kmap(page); |
| @@ -224,9 +226,9 @@ done: | |||
| 224 | out: | 226 | out: |
| 225 | set_page_dirty(page); | 227 | set_page_dirty(page); |
| 226 | kunmap(page); | 228 | kunmap(page); |
| 227 | HFSPLUS_SB(sb).free_blocks += len; | 229 | sbi->free_blocks += len; |
| 228 | sb->s_dirt = 1; | 230 | sb->s_dirt = 1; |
| 229 | mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); | 231 | mutex_unlock(&sbi->alloc_mutex); |
| 230 | 232 | ||
| 231 | return 0; | 233 | return 0; |
| 232 | } | 234 | } |
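The bitmap hunks above stop serializing allocations on the allocation file's i_mutex and use a dedicated per-superblock alloc_mutex instead, caching the HFSPLUS_SB(sb) pointer once per function. A minimal userspace analogue of that locking pattern; the sb_info fields below are invented, not the real hfsplus_sb_info layout:

    #include <stdio.h>
    #include <pthread.h>

    /* Stand-in for struct hfsplus_sb_info with its new alloc_mutex. */
    struct sb_info {
            pthread_mutex_t alloc_mutex;
            unsigned int    free_blocks;
    };

    static struct sb_info sbi = {
            .alloc_mutex = PTHREAD_MUTEX_INITIALIZER,
            .free_blocks = 1000,
    };

    static unsigned int block_allocate(struct sb_info *s, unsigned int want)
    {
            unsigned int got;

            pthread_mutex_lock(&s->alloc_mutex);
            got = want < s->free_blocks ? want : s->free_blocks;
            s->free_blocks -= got;          /* the real bitmap scan goes here */
            pthread_mutex_unlock(&s->alloc_mutex);
            return got;
    }

    int main(void)
    {
            printf("allocated %u\n", block_allocate(&sbi, 64));
            printf("allocated %u\n", block_allocate(&sbi, 2000));
            return 0;
    }
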
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index c88e5d72a402..2f39d05443e1 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c | |||
| @@ -42,10 +42,13 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) | |||
| 42 | recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); | 42 | recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); |
| 43 | if (!recoff) | 43 | if (!recoff) |
| 44 | return 0; | 44 | return 0; |
| 45 | if (node->tree->attributes & HFS_TREE_BIGKEYS) | 45 | |
| 46 | retval = hfs_bnode_read_u16(node, recoff) + 2; | 46 | retval = hfs_bnode_read_u16(node, recoff) + 2; |
| 47 | else | 47 | if (retval > node->tree->max_key_len + 2) { |
| 48 | retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; | 48 | printk(KERN_ERR "hfs: keylen %d too large\n", |
| 49 | retval); | ||
| 50 | retval = 0; | ||
| 51 | } | ||
| 49 | } | 52 | } |
| 50 | return retval; | 53 | return retval; |
| 51 | } | 54 | } |
| @@ -216,7 +219,7 @@ skip: | |||
| 216 | static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) | 219 | static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) |
| 217 | { | 220 | { |
| 218 | struct hfs_btree *tree; | 221 | struct hfs_btree *tree; |
| 219 | struct hfs_bnode *node, *new_node; | 222 | struct hfs_bnode *node, *new_node, *next_node; |
| 220 | struct hfs_bnode_desc node_desc; | 223 | struct hfs_bnode_desc node_desc; |
| 221 | int num_recs, new_rec_off, new_off, old_rec_off; | 224 | int num_recs, new_rec_off, new_off, old_rec_off; |
| 222 | int data_start, data_end, size; | 225 | int data_start, data_end, size; |
| @@ -235,6 +238,17 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) | |||
| 235 | new_node->type = node->type; | 238 | new_node->type = node->type; |
| 236 | new_node->height = node->height; | 239 | new_node->height = node->height; |
| 237 | 240 | ||
| 241 | if (node->next) | ||
| 242 | next_node = hfs_bnode_find(tree, node->next); | ||
| 243 | else | ||
| 244 | next_node = NULL; | ||
| 245 | |||
| 246 | if (IS_ERR(next_node)) { | ||
| 247 | hfs_bnode_put(node); | ||
| 248 | hfs_bnode_put(new_node); | ||
| 249 | return next_node; | ||
| 250 | } | ||
| 251 | |||
| 238 | size = tree->node_size / 2 - node->num_recs * 2 - 14; | 252 | size = tree->node_size / 2 - node->num_recs * 2 - 14; |
| 239 | old_rec_off = tree->node_size - 4; | 253 | old_rec_off = tree->node_size - 4; |
| 240 | num_recs = 1; | 254 | num_recs = 1; |
| @@ -248,6 +262,8 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) | |||
| 248 | /* panic? */ | 262 | /* panic? */ |
| 249 | hfs_bnode_put(node); | 263 | hfs_bnode_put(node); |
| 250 | hfs_bnode_put(new_node); | 264 | hfs_bnode_put(new_node); |
| 265 | if (next_node) | ||
| 266 | hfs_bnode_put(next_node); | ||
| 251 | return ERR_PTR(-ENOSPC); | 267 | return ERR_PTR(-ENOSPC); |
| 252 | } | 268 | } |
| 253 | 269 | ||
| @@ -302,8 +318,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) | |||
| 302 | hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); | 318 | hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); |
| 303 | 319 | ||
| 304 | /* update next bnode header */ | 320 | /* update next bnode header */ |
| 305 | if (new_node->next) { | 321 | if (next_node) { |
| 306 | struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next); | ||
| 307 | next_node->prev = new_node->this; | 322 | next_node->prev = new_node->this; |
| 308 | hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); | 323 | hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); |
| 309 | node_desc.prev = cpu_to_be32(next_node->prev); | 324 | node_desc.prev = cpu_to_be32(next_node->prev); |
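
Note on the brec.c hunks above: hfs_brec_keylen() now assumes 16-bit key lengths (the HFS_TREE_BIGKEYS flag is validated at mount time, see btree.c below) and rejects key lengths larger than the tree allows, while hfs_bnode_split() looks up the successor node before committing to the split so a failed lookup can be unwound cleanly. A small userspace sketch of the key-length check; the function name and the sample limit are assumptions, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: validate a raw 16-bit key length read from a record
 * against the tree's maximum, mirroring the new hfs_brec_keylen() check. */
static uint16_t checked_keylen(uint16_t raw_keylen, uint16_t max_key_len)
{
        uint16_t retval = raw_keylen + 2;       /* key data plus the length field */

        if (retval > max_key_len + 2) {
                fprintf(stderr, "keylen %u too large\n", (unsigned)retval);
                return 0;                       /* treat the record as corrupt */
        }
        return retval;
}

int main(void)
{
        printf("%u\n", (unsigned)checked_keylen(6, 516));   /* accepted */
        printf("%u\n", (unsigned)checked_keylen(600, 516)); /* rejected -> 0 */
        return 0;
}
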
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index e49fcee1e293..22e4d4e32999 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c | |||
| @@ -30,7 +30,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
| 30 | if (!tree) | 30 | if (!tree) |
| 31 | return NULL; | 31 | return NULL; |
| 32 | 32 | ||
| 33 | init_MUTEX(&tree->tree_lock); | 33 | mutex_init(&tree->tree_lock); |
| 34 | spin_lock_init(&tree->hash_lock); | 34 | spin_lock_init(&tree->hash_lock); |
| 35 | tree->sb = sb; | 35 | tree->sb = sb; |
| 36 | tree->cnid = id; | 36 | tree->cnid = id; |
| @@ -39,10 +39,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
| 39 | goto free_tree; | 39 | goto free_tree; |
| 40 | tree->inode = inode; | 40 | tree->inode = inode; |
| 41 | 41 | ||
| 42 | if (!HFSPLUS_I(tree->inode)->first_blocks) { | ||
| 43 | printk(KERN_ERR | ||
| 44 | "hfs: invalid btree extent records (0 size).\n"); | ||
| 45 | goto free_inode; | ||
| 46 | } | ||
| 47 | |||
| 42 | mapping = tree->inode->i_mapping; | 48 | mapping = tree->inode->i_mapping; |
| 43 | page = read_mapping_page(mapping, 0, NULL); | 49 | page = read_mapping_page(mapping, 0, NULL); |
| 44 | if (IS_ERR(page)) | 50 | if (IS_ERR(page)) |
| 45 | goto free_tree; | 51 | goto free_inode; |
| 46 | 52 | ||
| 47 | /* Load the header */ | 53 | /* Load the header */ |
| 48 | head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); | 54 | head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); |
| @@ -57,27 +63,56 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
| 57 | tree->max_key_len = be16_to_cpu(head->max_key_len); | 63 | tree->max_key_len = be16_to_cpu(head->max_key_len); |
| 58 | tree->depth = be16_to_cpu(head->depth); | 64 | tree->depth = be16_to_cpu(head->depth); |
| 59 | 65 | ||
| 60 | /* Set the correct compare function */ | 66 | /* Verify the tree and set the correct compare function */ |
| 61 | if (id == HFSPLUS_EXT_CNID) { | 67 | switch (id) { |
| 68 | case HFSPLUS_EXT_CNID: | ||
| 69 | if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) { | ||
| 70 | printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", | ||
| 71 | tree->max_key_len); | ||
| 72 | goto fail_page; | ||
| 73 | } | ||
| 74 | if (tree->attributes & HFS_TREE_VARIDXKEYS) { | ||
| 75 | printk(KERN_ERR "hfs: invalid extent btree flag\n"); | ||
| 76 | goto fail_page; | ||
| 77 | } | ||
| 78 | |||
| 62 | tree->keycmp = hfsplus_ext_cmp_key; | 79 | tree->keycmp = hfsplus_ext_cmp_key; |
| 63 | } else if (id == HFSPLUS_CAT_CNID) { | 80 | break; |
| 64 | if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) && | 81 | case HFSPLUS_CAT_CNID: |
| 82 | if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) { | ||
| 83 | printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", | ||
| 84 | tree->max_key_len); | ||
| 85 | goto fail_page; | ||
| 86 | } | ||
| 87 | if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { | ||
| 88 | printk(KERN_ERR "hfs: invalid catalog btree flag\n"); | ||
| 89 | goto fail_page; | ||
| 90 | } | ||
| 91 | |||
| 92 | if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) && | ||
| 65 | (head->key_type == HFSPLUS_KEY_BINARY)) | 93 | (head->key_type == HFSPLUS_KEY_BINARY)) |
| 66 | tree->keycmp = hfsplus_cat_bin_cmp_key; | 94 | tree->keycmp = hfsplus_cat_bin_cmp_key; |
| 67 | else { | 95 | else { |
| 68 | tree->keycmp = hfsplus_cat_case_cmp_key; | 96 | tree->keycmp = hfsplus_cat_case_cmp_key; |
| 69 | HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD; | 97 | set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); |
| 70 | } | 98 | } |
| 71 | } else { | 99 | break; |
| 100 | default: | ||
| 72 | printk(KERN_ERR "hfs: unknown B*Tree requested\n"); | 101 | printk(KERN_ERR "hfs: unknown B*Tree requested\n"); |
| 73 | goto fail_page; | 102 | goto fail_page; |
| 74 | } | 103 | } |
| 75 | 104 | ||
| 105 | if (!(tree->attributes & HFS_TREE_BIGKEYS)) { | ||
| 106 | printk(KERN_ERR "hfs: invalid btree flag\n"); | ||
| 107 | goto fail_page; | ||
| 108 | } | ||
| 109 | |||
| 76 | size = tree->node_size; | 110 | size = tree->node_size; |
| 77 | if (!is_power_of_2(size)) | 111 | if (!is_power_of_2(size)) |
| 78 | goto fail_page; | 112 | goto fail_page; |
| 79 | if (!tree->node_count) | 113 | if (!tree->node_count) |
| 80 | goto fail_page; | 114 | goto fail_page; |
| 115 | |||
| 81 | tree->node_size_shift = ffs(size) - 1; | 116 | tree->node_size_shift = ffs(size) - 1; |
| 82 | 117 | ||
| 83 | tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 118 | tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| @@ -87,10 +122,11 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
| 87 | return tree; | 122 | return tree; |
| 88 | 123 | ||
| 89 | fail_page: | 124 | fail_page: |
| 90 | tree->inode->i_mapping->a_ops = &hfsplus_aops; | ||
| 91 | page_cache_release(page); | 125 | page_cache_release(page); |
| 92 | free_tree: | 126 | free_inode: |
| 127 | tree->inode->i_mapping->a_ops = &hfsplus_aops; | ||
| 93 | iput(tree->inode); | 128 | iput(tree->inode); |
| 129 | free_tree: | ||
| 94 | kfree(tree); | 130 | kfree(tree); |
| 95 | return NULL; | 131 | return NULL; |
| 96 | } | 132 | } |
| @@ -192,17 +228,18 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) | |||
| 192 | 228 | ||
| 193 | while (!tree->free_nodes) { | 229 | while (!tree->free_nodes) { |
| 194 | struct inode *inode = tree->inode; | 230 | struct inode *inode = tree->inode; |
| 231 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 195 | u32 count; | 232 | u32 count; |
| 196 | int res; | 233 | int res; |
| 197 | 234 | ||
| 198 | res = hfsplus_file_extend(inode); | 235 | res = hfsplus_file_extend(inode); |
| 199 | if (res) | 236 | if (res) |
| 200 | return ERR_PTR(res); | 237 | return ERR_PTR(res); |
| 201 | HFSPLUS_I(inode).phys_size = inode->i_size = | 238 | hip->phys_size = inode->i_size = |
| 202 | (loff_t)HFSPLUS_I(inode).alloc_blocks << | 239 | (loff_t)hip->alloc_blocks << |
| 203 | HFSPLUS_SB(tree->sb).alloc_blksz_shift; | 240 | HFSPLUS_SB(tree->sb)->alloc_blksz_shift; |
| 204 | HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks << | 241 | hip->fs_blocks = |
| 205 | HFSPLUS_SB(tree->sb).fs_shift; | 242 | hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift; |
| 206 | inode_set_bytes(inode, inode->i_size); | 243 | inode_set_bytes(inode, inode->i_size); |
| 207 | count = inode->i_size >> tree->node_size_shift; | 244 | count = inode->i_size >> tree->node_size_shift; |
| 208 | tree->free_nodes = count - tree->node_count; | 245 | tree->free_nodes = count - tree->node_count; |
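
Note on the btree.c hunks above: hfs_btree_open() now verifies the header record before choosing a compare function — max_key_len and the VARIDXKEYS/BIGKEYS attribute bits must match the requested tree, and a tree inode with zero extent records is rejected outright. A compact sketch of that verification follows; the flag bits and key-length constants are assumed illustrative values standing in for the kernel's HFS_TREE_* and HFSPLUS_*_KEYLEN definitions.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define TREE_BIGKEYS      2   /* assumed value of HFS_TREE_BIGKEYS */
#define TREE_VARIDXKEYS   4   /* assumed value of HFS_TREE_VARIDXKEYS */
#define EXT_MAX_KEYLEN   10   /* assumed: HFSPLUS_EXT_KEYLEN - sizeof(u16) */
#define CAT_MAX_KEYLEN  516   /* assumed: HFSPLUS_CAT_KEYLEN - sizeof(u16) */

enum tree_id { EXT_TREE, CAT_TREE };

/* Mirrors the new switch in hfs_btree_open(): reject header records whose
 * max_key_len or attribute flags do not match the requested tree type. */
static bool btree_header_valid(enum tree_id id, uint16_t max_key_len,
                               uint32_t attributes)
{
        switch (id) {
        case EXT_TREE:
                if (max_key_len != EXT_MAX_KEYLEN)
                        return false;
                if (attributes & TREE_VARIDXKEYS)   /* extent keys are fixed size */
                        return false;
                break;
        case CAT_TREE:
                if (max_key_len != CAT_MAX_KEYLEN)
                        return false;
                if (!(attributes & TREE_VARIDXKEYS)) /* catalog keys vary in size */
                        return false;
                break;
        }
        return (attributes & TREE_BIGKEYS) != 0;    /* 16-bit key lengths required */
}

int main(void)
{
        printf("%d\n", btree_header_valid(CAT_TREE, 516,
                                          TREE_VARIDXKEYS | TREE_BIGKEYS)); /* 1 */
        printf("%d\n", btree_header_valid(EXT_TREE, 10, TREE_VARIDXKEYS));  /* 0 */
        return 0;
}
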
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index f6874acb2cf2..8af45fc5b051 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c | |||
| @@ -67,7 +67,7 @@ static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent, | |||
| 67 | key->key_len = cpu_to_be16(6 + ustrlen); | 67 | key->key_len = cpu_to_be16(6 + ustrlen); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms) | 70 | void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms) |
| 71 | { | 71 | { |
| 72 | if (inode->i_flags & S_IMMUTABLE) | 72 | if (inode->i_flags & S_IMMUTABLE) |
| 73 | perms->rootflags |= HFSPLUS_FLG_IMMUTABLE; | 73 | perms->rootflags |= HFSPLUS_FLG_IMMUTABLE; |
| @@ -77,15 +77,24 @@ static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms) | |||
| 77 | perms->rootflags |= HFSPLUS_FLG_APPEND; | 77 | perms->rootflags |= HFSPLUS_FLG_APPEND; |
| 78 | else | 78 | else |
| 79 | perms->rootflags &= ~HFSPLUS_FLG_APPEND; | 79 | perms->rootflags &= ~HFSPLUS_FLG_APPEND; |
| 80 | HFSPLUS_I(inode).rootflags = perms->rootflags; | 80 | |
| 81 | HFSPLUS_I(inode).userflags = perms->userflags; | 81 | perms->userflags = HFSPLUS_I(inode)->userflags; |
| 82 | perms->mode = cpu_to_be16(inode->i_mode); | 82 | perms->mode = cpu_to_be16(inode->i_mode); |
| 83 | perms->owner = cpu_to_be32(inode->i_uid); | 83 | perms->owner = cpu_to_be32(inode->i_uid); |
| 84 | perms->group = cpu_to_be32(inode->i_gid); | 84 | perms->group = cpu_to_be32(inode->i_gid); |
| 85 | |||
| 86 | if (S_ISREG(inode->i_mode)) | ||
| 87 | perms->dev = cpu_to_be32(inode->i_nlink); | ||
| 88 | else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) | ||
| 89 | perms->dev = cpu_to_be32(inode->i_rdev); | ||
| 90 | else | ||
| 91 | perms->dev = 0; | ||
| 85 | } | 92 | } |
| 86 | 93 | ||
| 87 | static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode) | 94 | static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode) |
| 88 | { | 95 | { |
| 96 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); | ||
| 97 | |||
| 89 | if (S_ISDIR(inode->i_mode)) { | 98 | if (S_ISDIR(inode->i_mode)) { |
| 90 | struct hfsplus_cat_folder *folder; | 99 | struct hfsplus_cat_folder *folder; |
| 91 | 100 | ||
| @@ -93,13 +102,13 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i | |||
| 93 | memset(folder, 0, sizeof(*folder)); | 102 | memset(folder, 0, sizeof(*folder)); |
| 94 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); | 103 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); |
| 95 | folder->id = cpu_to_be32(inode->i_ino); | 104 | folder->id = cpu_to_be32(inode->i_ino); |
| 96 | HFSPLUS_I(inode).create_date = | 105 | HFSPLUS_I(inode)->create_date = |
| 97 | folder->create_date = | 106 | folder->create_date = |
| 98 | folder->content_mod_date = | 107 | folder->content_mod_date = |
| 99 | folder->attribute_mod_date = | 108 | folder->attribute_mod_date = |
| 100 | folder->access_date = hfsp_now2mt(); | 109 | folder->access_date = hfsp_now2mt(); |
| 101 | hfsplus_set_perms(inode, &folder->permissions); | 110 | hfsplus_cat_set_perms(inode, &folder->permissions); |
| 102 | if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir) | 111 | if (inode == sbi->hidden_dir) |
| 103 | /* invisible and namelocked */ | 112 | /* invisible and namelocked */ |
| 104 | folder->user_info.frFlags = cpu_to_be16(0x5000); | 113 | folder->user_info.frFlags = cpu_to_be16(0x5000); |
| 105 | return sizeof(*folder); | 114 | return sizeof(*folder); |
| @@ -111,19 +120,19 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i | |||
| 111 | file->type = cpu_to_be16(HFSPLUS_FILE); | 120 | file->type = cpu_to_be16(HFSPLUS_FILE); |
| 112 | file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS); | 121 | file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS); |
| 113 | file->id = cpu_to_be32(cnid); | 122 | file->id = cpu_to_be32(cnid); |
| 114 | HFSPLUS_I(inode).create_date = | 123 | HFSPLUS_I(inode)->create_date = |
| 115 | file->create_date = | 124 | file->create_date = |
| 116 | file->content_mod_date = | 125 | file->content_mod_date = |
| 117 | file->attribute_mod_date = | 126 | file->attribute_mod_date = |
| 118 | file->access_date = hfsp_now2mt(); | 127 | file->access_date = hfsp_now2mt(); |
| 119 | if (cnid == inode->i_ino) { | 128 | if (cnid == inode->i_ino) { |
| 120 | hfsplus_set_perms(inode, &file->permissions); | 129 | hfsplus_cat_set_perms(inode, &file->permissions); |
| 121 | if (S_ISLNK(inode->i_mode)) { | 130 | if (S_ISLNK(inode->i_mode)) { |
| 122 | file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE); | 131 | file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE); |
| 123 | file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR); | 132 | file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR); |
| 124 | } else { | 133 | } else { |
| 125 | file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type); | 134 | file->user_info.fdType = cpu_to_be32(sbi->type); |
| 126 | file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator); | 135 | file->user_info.fdCreator = cpu_to_be32(sbi->creator); |
| 127 | } | 136 | } |
| 128 | if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) | 137 | if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) |
| 129 | file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); | 138 | file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); |
| @@ -131,8 +140,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i | |||
| 131 | file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE); | 140 | file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE); |
| 132 | file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR); | 141 | file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR); |
| 133 | file->user_info.fdFlags = cpu_to_be16(0x100); | 142 | file->user_info.fdFlags = cpu_to_be16(0x100); |
| 134 | file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date; | 143 | file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date; |
| 135 | file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev); | 144 | file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid); |
| 136 | } | 145 | } |
| 137 | return sizeof(*file); | 146 | return sizeof(*file); |
| 138 | } | 147 | } |
| @@ -180,15 +189,14 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid, | |||
| 180 | 189 | ||
| 181 | int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode) | 190 | int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode) |
| 182 | { | 191 | { |
| 192 | struct super_block *sb = dir->i_sb; | ||
| 183 | struct hfs_find_data fd; | 193 | struct hfs_find_data fd; |
| 184 | struct super_block *sb; | ||
| 185 | hfsplus_cat_entry entry; | 194 | hfsplus_cat_entry entry; |
| 186 | int entry_size; | 195 | int entry_size; |
| 187 | int err; | 196 | int err; |
| 188 | 197 | ||
| 189 | dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); | 198 | dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); |
| 190 | sb = dir->i_sb; | 199 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); |
| 191 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | ||
| 192 | 200 | ||
| 193 | hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); | 201 | hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); |
| 194 | entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ? | 202 | entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ? |
| @@ -234,7 +242,7 @@ err2: | |||
| 234 | 242 | ||
| 235 | int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) | 243 | int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) |
| 236 | { | 244 | { |
| 237 | struct super_block *sb; | 245 | struct super_block *sb = dir->i_sb; |
| 238 | struct hfs_find_data fd; | 246 | struct hfs_find_data fd; |
| 239 | struct hfsplus_fork_raw fork; | 247 | struct hfsplus_fork_raw fork; |
| 240 | struct list_head *pos; | 248 | struct list_head *pos; |
| @@ -242,8 +250,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) | |||
| 242 | u16 type; | 250 | u16 type; |
| 243 | 251 | ||
| 244 | dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); | 252 | dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); |
| 245 | sb = dir->i_sb; | 253 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); |
| 246 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | ||
| 247 | 254 | ||
| 248 | if (!str) { | 255 | if (!str) { |
| 249 | int len; | 256 | int len; |
| @@ -279,7 +286,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) | |||
| 279 | hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); | 286 | hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); |
| 280 | } | 287 | } |
| 281 | 288 | ||
| 282 | list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) { | 289 | list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) { |
| 283 | struct hfsplus_readdir_data *rd = | 290 | struct hfsplus_readdir_data *rd = |
| 284 | list_entry(pos, struct hfsplus_readdir_data, list); | 291 | list_entry(pos, struct hfsplus_readdir_data, list); |
| 285 | if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) | 292 | if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) |
| @@ -312,7 +319,7 @@ int hfsplus_rename_cat(u32 cnid, | |||
| 312 | struct inode *src_dir, struct qstr *src_name, | 319 | struct inode *src_dir, struct qstr *src_name, |
| 313 | struct inode *dst_dir, struct qstr *dst_name) | 320 | struct inode *dst_dir, struct qstr *dst_name) |
| 314 | { | 321 | { |
| 315 | struct super_block *sb; | 322 | struct super_block *sb = src_dir->i_sb; |
| 316 | struct hfs_find_data src_fd, dst_fd; | 323 | struct hfs_find_data src_fd, dst_fd; |
| 317 | hfsplus_cat_entry entry; | 324 | hfsplus_cat_entry entry; |
| 318 | int entry_size, type; | 325 | int entry_size, type; |
| @@ -320,8 +327,7 @@ int hfsplus_rename_cat(u32 cnid, | |||
| 320 | 327 | ||
| 321 | dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, | 328 | dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, |
| 322 | dst_dir->i_ino, dst_name->name); | 329 | dst_dir->i_ino, dst_name->name); |
| 323 | sb = src_dir->i_sb; | 330 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd); |
| 324 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd); | ||
| 325 | dst_fd = src_fd; | 331 | dst_fd = src_fd; |
| 326 | 332 | ||
| 327 | /* find the old dir entry and read the data */ | 333 | /* find the old dir entry and read the data */ |
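
Note on the catalog.c hunks above: hfsplus_cat_set_perms() is made non-static, no longer writes flags back into the in-core inode, and now fills the on-disk dev slot with the link count for regular files or the device number for block/character nodes. A small sketch of that selection logic; the struct and helper names are stand-ins and the byte-order conversions of the real code are omitted.

#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>

/* Hypothetical miniature of the on-disk permission record. */
struct perm_rec { uint32_t dev; };

/* Mirrors the new branch in hfsplus_cat_set_perms(): "dev" holds the link
 * count for regular files, the device number for device nodes, else zero. */
static void set_dev_field(struct perm_rec *perms, mode_t mode,
                          uint32_t nlink, uint32_t rdev)
{
        if (S_ISREG(mode))
                perms->dev = nlink;
        else if (S_ISBLK(mode) || S_ISCHR(mode))
                perms->dev = rdev;
        else
                perms->dev = 0;
}

int main(void)
{
        struct perm_rec p;

        set_dev_field(&p, S_IFREG | 0644, 3, 0);
        printf("regular file: dev=%u\n", (unsigned)p.dev);   /* link count */
        set_dev_field(&p, S_IFCHR | 0600, 1, 0x0501);
        printf("char device:  dev=%#x\n", (unsigned)p.dev);  /* raw rdev */
        return 0;
}
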
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 764fd1bdca88..d236d85ec9d7 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c | |||
| @@ -39,7 +39,7 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, | |||
| 39 | 39 | ||
| 40 | dentry->d_op = &hfsplus_dentry_operations; | 40 | dentry->d_op = &hfsplus_dentry_operations; |
| 41 | dentry->d_fsdata = NULL; | 41 | dentry->d_fsdata = NULL; |
| 42 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | 42 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); |
| 43 | hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); | 43 | hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); |
| 44 | again: | 44 | again: |
| 45 | err = hfs_brec_read(&fd, &entry, sizeof(entry)); | 45 | err = hfs_brec_read(&fd, &entry, sizeof(entry)); |
| @@ -68,9 +68,9 @@ again: | |||
| 68 | cnid = be32_to_cpu(entry.file.id); | 68 | cnid = be32_to_cpu(entry.file.id); |
| 69 | if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && | 69 | if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && |
| 70 | entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && | 70 | entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && |
| 71 | (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date || | 71 | (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->create_date || |
| 72 | entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) && | 72 | entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)->create_date) && |
| 73 | HFSPLUS_SB(sb).hidden_dir) { | 73 | HFSPLUS_SB(sb)->hidden_dir) { |
| 74 | struct qstr str; | 74 | struct qstr str; |
| 75 | char name[32]; | 75 | char name[32]; |
| 76 | 76 | ||
| @@ -86,7 +86,8 @@ again: | |||
| 86 | linkid = be32_to_cpu(entry.file.permissions.dev); | 86 | linkid = be32_to_cpu(entry.file.permissions.dev); |
| 87 | str.len = sprintf(name, "iNode%d", linkid); | 87 | str.len = sprintf(name, "iNode%d", linkid); |
| 88 | str.name = name; | 88 | str.name = name; |
| 89 | hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str); | 89 | hfsplus_cat_build_key(sb, fd.search_key, |
| 90 | HFSPLUS_SB(sb)->hidden_dir->i_ino, &str); | ||
| 90 | goto again; | 91 | goto again; |
| 91 | } | 92 | } |
| 92 | } else if (!dentry->d_fsdata) | 93 | } else if (!dentry->d_fsdata) |
| @@ -101,7 +102,7 @@ again: | |||
| 101 | if (IS_ERR(inode)) | 102 | if (IS_ERR(inode)) |
| 102 | return ERR_CAST(inode); | 103 | return ERR_CAST(inode); |
| 103 | if (S_ISREG(inode->i_mode)) | 104 | if (S_ISREG(inode->i_mode)) |
| 104 | HFSPLUS_I(inode).dev = linkid; | 105 | HFSPLUS_I(inode)->linkid = linkid; |
| 105 | out: | 106 | out: |
| 106 | d_add(dentry, inode); | 107 | d_add(dentry, inode); |
| 107 | return NULL; | 108 | return NULL; |
| @@ -124,7 +125,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
| 124 | if (filp->f_pos >= inode->i_size) | 125 | if (filp->f_pos >= inode->i_size) |
| 125 | return 0; | 126 | return 0; |
| 126 | 127 | ||
| 127 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | 128 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); |
| 128 | hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); | 129 | hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); |
| 129 | err = hfs_brec_find(&fd); | 130 | err = hfs_brec_find(&fd); |
| 130 | if (err) | 131 | if (err) |
| @@ -180,8 +181,9 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
| 180 | err = -EIO; | 181 | err = -EIO; |
| 181 | goto out; | 182 | goto out; |
| 182 | } | 183 | } |
| 183 | if (HFSPLUS_SB(sb).hidden_dir && | 184 | if (HFSPLUS_SB(sb)->hidden_dir && |
| 184 | HFSPLUS_SB(sb).hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) | 185 | HFSPLUS_SB(sb)->hidden_dir->i_ino == |
| 186 | be32_to_cpu(entry.folder.id)) | ||
| 185 | goto next; | 187 | goto next; |
| 186 | if (filldir(dirent, strbuf, len, filp->f_pos, | 188 | if (filldir(dirent, strbuf, len, filp->f_pos, |
| 187 | be32_to_cpu(entry.folder.id), DT_DIR)) | 189 | be32_to_cpu(entry.folder.id), DT_DIR)) |
| @@ -217,7 +219,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
| 217 | } | 219 | } |
| 218 | filp->private_data = rd; | 220 | filp->private_data = rd; |
| 219 | rd->file = filp; | 221 | rd->file = filp; |
| 220 | list_add(&rd->list, &HFSPLUS_I(inode).open_dir_list); | 222 | list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); |
| 221 | } | 223 | } |
| 222 | memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); | 224 | memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); |
| 223 | out: | 225 | out: |
| @@ -229,38 +231,18 @@ static int hfsplus_dir_release(struct inode *inode, struct file *file) | |||
| 229 | { | 231 | { |
| 230 | struct hfsplus_readdir_data *rd = file->private_data; | 232 | struct hfsplus_readdir_data *rd = file->private_data; |
| 231 | if (rd) { | 233 | if (rd) { |
| 234 | mutex_lock(&inode->i_mutex); | ||
| 232 | list_del(&rd->list); | 235 | list_del(&rd->list); |
| 236 | mutex_unlock(&inode->i_mutex); | ||
| 233 | kfree(rd); | 237 | kfree(rd); |
| 234 | } | 238 | } |
| 235 | return 0; | 239 | return 0; |
| 236 | } | 240 | } |
| 237 | 241 | ||
| 238 | static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode, | ||
| 239 | struct nameidata *nd) | ||
| 240 | { | ||
| 241 | struct inode *inode; | ||
| 242 | int res; | ||
| 243 | |||
| 244 | inode = hfsplus_new_inode(dir->i_sb, mode); | ||
| 245 | if (!inode) | ||
| 246 | return -ENOSPC; | ||
| 247 | |||
| 248 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); | ||
| 249 | if (res) { | ||
| 250 | inode->i_nlink = 0; | ||
| 251 | hfsplus_delete_inode(inode); | ||
| 252 | iput(inode); | ||
| 253 | return res; | ||
| 254 | } | ||
| 255 | hfsplus_instantiate(dentry, inode, inode->i_ino); | ||
| 256 | mark_inode_dirty(inode); | ||
| 257 | return 0; | ||
| 258 | } | ||
| 259 | |||
| 260 | static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, | 242 | static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, |
| 261 | struct dentry *dst_dentry) | 243 | struct dentry *dst_dentry) |
| 262 | { | 244 | { |
| 263 | struct super_block *sb = dst_dir->i_sb; | 245 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); |
| 264 | struct inode *inode = src_dentry->d_inode; | 246 | struct inode *inode = src_dentry->d_inode; |
| 265 | struct inode *src_dir = src_dentry->d_parent->d_inode; | 247 | struct inode *src_dir = src_dentry->d_parent->d_inode; |
| 266 | struct qstr str; | 248 | struct qstr str; |
| @@ -270,7 +252,10 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, | |||
| 270 | 252 | ||
| 271 | if (HFSPLUS_IS_RSRC(inode)) | 253 | if (HFSPLUS_IS_RSRC(inode)) |
| 272 | return -EPERM; | 254 | return -EPERM; |
| 255 | if (!S_ISREG(inode->i_mode)) | ||
| 256 | return -EPERM; | ||
| 273 | 257 | ||
| 258 | mutex_lock(&sbi->vh_mutex); | ||
| 274 | if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) { | 259 | if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) { |
| 275 | for (;;) { | 260 | for (;;) { |
| 276 | get_random_bytes(&id, sizeof(cnid)); | 261 | get_random_bytes(&id, sizeof(cnid)); |
| @@ -279,40 +264,41 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, | |||
| 279 | str.len = sprintf(name, "iNode%d", id); | 264 | str.len = sprintf(name, "iNode%d", id); |
| 280 | res = hfsplus_rename_cat(inode->i_ino, | 265 | res = hfsplus_rename_cat(inode->i_ino, |
| 281 | src_dir, &src_dentry->d_name, | 266 | src_dir, &src_dentry->d_name, |
| 282 | HFSPLUS_SB(sb).hidden_dir, &str); | 267 | sbi->hidden_dir, &str); |
| 283 | if (!res) | 268 | if (!res) |
| 284 | break; | 269 | break; |
| 285 | if (res != -EEXIST) | 270 | if (res != -EEXIST) |
| 286 | return res; | 271 | goto out; |
| 287 | } | 272 | } |
| 288 | HFSPLUS_I(inode).dev = id; | 273 | HFSPLUS_I(inode)->linkid = id; |
| 289 | cnid = HFSPLUS_SB(sb).next_cnid++; | 274 | cnid = sbi->next_cnid++; |
| 290 | src_dentry->d_fsdata = (void *)(unsigned long)cnid; | 275 | src_dentry->d_fsdata = (void *)(unsigned long)cnid; |
| 291 | res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode); | 276 | res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode); |
| 292 | if (res) | 277 | if (res) |
| 293 | /* panic? */ | 278 | /* panic? */ |
| 294 | return res; | 279 | goto out; |
| 295 | HFSPLUS_SB(sb).file_count++; | 280 | sbi->file_count++; |
| 296 | } | 281 | } |
| 297 | cnid = HFSPLUS_SB(sb).next_cnid++; | 282 | cnid = sbi->next_cnid++; |
| 298 | res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode); | 283 | res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode); |
| 299 | if (res) | 284 | if (res) |
| 300 | return res; | 285 | goto out; |
| 301 | 286 | ||
| 302 | inc_nlink(inode); | 287 | inc_nlink(inode); |
| 303 | hfsplus_instantiate(dst_dentry, inode, cnid); | 288 | hfsplus_instantiate(dst_dentry, inode, cnid); |
| 304 | atomic_inc(&inode->i_count); | 289 | atomic_inc(&inode->i_count); |
| 305 | inode->i_ctime = CURRENT_TIME_SEC; | 290 | inode->i_ctime = CURRENT_TIME_SEC; |
| 306 | mark_inode_dirty(inode); | 291 | mark_inode_dirty(inode); |
| 307 | HFSPLUS_SB(sb).file_count++; | 292 | sbi->file_count++; |
| 308 | sb->s_dirt = 1; | 293 | dst_dir->i_sb->s_dirt = 1; |
| 309 | 294 | out: | |
| 310 | return 0; | 295 | mutex_unlock(&sbi->vh_mutex); |
| 296 | return res; | ||
| 311 | } | 297 | } |
| 312 | 298 | ||
| 313 | static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) | 299 | static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) |
| 314 | { | 300 | { |
| 315 | struct super_block *sb = dir->i_sb; | 301 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); |
| 316 | struct inode *inode = dentry->d_inode; | 302 | struct inode *inode = dentry->d_inode; |
| 317 | struct qstr str; | 303 | struct qstr str; |
| 318 | char name[32]; | 304 | char name[32]; |
| @@ -322,21 +308,22 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) | |||
| 322 | if (HFSPLUS_IS_RSRC(inode)) | 308 | if (HFSPLUS_IS_RSRC(inode)) |
| 323 | return -EPERM; | 309 | return -EPERM; |
| 324 | 310 | ||
| 311 | mutex_lock(&sbi->vh_mutex); | ||
| 325 | cnid = (u32)(unsigned long)dentry->d_fsdata; | 312 | cnid = (u32)(unsigned long)dentry->d_fsdata; |
| 326 | if (inode->i_ino == cnid && | 313 | if (inode->i_ino == cnid && |
| 327 | atomic_read(&HFSPLUS_I(inode).opencnt)) { | 314 | atomic_read(&HFSPLUS_I(inode)->opencnt)) { |
| 328 | str.name = name; | 315 | str.name = name; |
| 329 | str.len = sprintf(name, "temp%lu", inode->i_ino); | 316 | str.len = sprintf(name, "temp%lu", inode->i_ino); |
| 330 | res = hfsplus_rename_cat(inode->i_ino, | 317 | res = hfsplus_rename_cat(inode->i_ino, |
| 331 | dir, &dentry->d_name, | 318 | dir, &dentry->d_name, |
| 332 | HFSPLUS_SB(sb).hidden_dir, &str); | 319 | sbi->hidden_dir, &str); |
| 333 | if (!res) | 320 | if (!res) |
| 334 | inode->i_flags |= S_DEAD; | 321 | inode->i_flags |= S_DEAD; |
| 335 | return res; | 322 | goto out; |
| 336 | } | 323 | } |
| 337 | res = hfsplus_delete_cat(cnid, dir, &dentry->d_name); | 324 | res = hfsplus_delete_cat(cnid, dir, &dentry->d_name); |
| 338 | if (res) | 325 | if (res) |
| 339 | return res; | 326 | goto out; |
| 340 | 327 | ||
| 341 | if (inode->i_nlink > 0) | 328 | if (inode->i_nlink > 0) |
| 342 | drop_nlink(inode); | 329 | drop_nlink(inode); |
| @@ -344,10 +331,10 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) | |||
| 344 | clear_nlink(inode); | 331 | clear_nlink(inode); |
| 345 | if (!inode->i_nlink) { | 332 | if (!inode->i_nlink) { |
| 346 | if (inode->i_ino != cnid) { | 333 | if (inode->i_ino != cnid) { |
| 347 | HFSPLUS_SB(sb).file_count--; | 334 | sbi->file_count--; |
| 348 | if (!atomic_read(&HFSPLUS_I(inode).opencnt)) { | 335 | if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) { |
| 349 | res = hfsplus_delete_cat(inode->i_ino, | 336 | res = hfsplus_delete_cat(inode->i_ino, |
| 350 | HFSPLUS_SB(sb).hidden_dir, | 337 | sbi->hidden_dir, |
| 351 | NULL); | 338 | NULL); |
| 352 | if (!res) | 339 | if (!res) |
| 353 | hfsplus_delete_inode(inode); | 340 | hfsplus_delete_inode(inode); |
| @@ -356,107 +343,108 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) | |||
| 356 | } else | 343 | } else |
| 357 | hfsplus_delete_inode(inode); | 344 | hfsplus_delete_inode(inode); |
| 358 | } else | 345 | } else |
| 359 | HFSPLUS_SB(sb).file_count--; | 346 | sbi->file_count--; |
| 360 | inode->i_ctime = CURRENT_TIME_SEC; | 347 | inode->i_ctime = CURRENT_TIME_SEC; |
| 361 | mark_inode_dirty(inode); | 348 | mark_inode_dirty(inode); |
| 362 | 349 | out: | |
| 350 | mutex_unlock(&sbi->vh_mutex); | ||
| 363 | return res; | 351 | return res; |
| 364 | } | 352 | } |
| 365 | 353 | ||
| 366 | static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
| 367 | { | ||
| 368 | struct inode *inode; | ||
| 369 | int res; | ||
| 370 | |||
| 371 | inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode); | ||
| 372 | if (!inode) | ||
| 373 | return -ENOSPC; | ||
| 374 | |||
| 375 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); | ||
| 376 | if (res) { | ||
| 377 | inode->i_nlink = 0; | ||
| 378 | hfsplus_delete_inode(inode); | ||
| 379 | iput(inode); | ||
| 380 | return res; | ||
| 381 | } | ||
| 382 | hfsplus_instantiate(dentry, inode, inode->i_ino); | ||
| 383 | mark_inode_dirty(inode); | ||
| 384 | return 0; | ||
| 385 | } | ||
| 386 | |||
| 387 | static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) | 354 | static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) |
| 388 | { | 355 | { |
| 389 | struct inode *inode; | 356 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); |
| 357 | struct inode *inode = dentry->d_inode; | ||
| 390 | int res; | 358 | int res; |
| 391 | 359 | ||
| 392 | inode = dentry->d_inode; | ||
| 393 | if (inode->i_size != 2) | 360 | if (inode->i_size != 2) |
| 394 | return -ENOTEMPTY; | 361 | return -ENOTEMPTY; |
| 362 | |||
| 363 | mutex_lock(&sbi->vh_mutex); | ||
| 395 | res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); | 364 | res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); |
| 396 | if (res) | 365 | if (res) |
| 397 | return res; | 366 | goto out; |
| 398 | clear_nlink(inode); | 367 | clear_nlink(inode); |
| 399 | inode->i_ctime = CURRENT_TIME_SEC; | 368 | inode->i_ctime = CURRENT_TIME_SEC; |
| 400 | hfsplus_delete_inode(inode); | 369 | hfsplus_delete_inode(inode); |
| 401 | mark_inode_dirty(inode); | 370 | mark_inode_dirty(inode); |
| 402 | return 0; | 371 | out: |
| 372 | mutex_unlock(&sbi->vh_mutex); | ||
| 373 | return res; | ||
| 403 | } | 374 | } |
| 404 | 375 | ||
| 405 | static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, | 376 | static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, |
| 406 | const char *symname) | 377 | const char *symname) |
| 407 | { | 378 | { |
| 408 | struct super_block *sb; | 379 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); |
| 409 | struct inode *inode; | 380 | struct inode *inode; |
| 410 | int res; | 381 | int res = -ENOSPC; |
| 411 | 382 | ||
| 412 | sb = dir->i_sb; | 383 | mutex_lock(&sbi->vh_mutex); |
| 413 | inode = hfsplus_new_inode(sb, S_IFLNK | S_IRWXUGO); | 384 | inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO); |
| 414 | if (!inode) | 385 | if (!inode) |
| 415 | return -ENOSPC; | 386 | goto out; |
| 416 | 387 | ||
| 417 | res = page_symlink(inode, symname, strlen(symname) + 1); | 388 | res = page_symlink(inode, symname, strlen(symname) + 1); |
| 418 | if (res) { | 389 | if (res) |
| 419 | inode->i_nlink = 0; | 390 | goto out_err; |
| 420 | hfsplus_delete_inode(inode); | ||
| 421 | iput(inode); | ||
| 422 | return res; | ||
| 423 | } | ||
| 424 | 391 | ||
| 425 | mark_inode_dirty(inode); | ||
| 426 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); | 392 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); |
| 393 | if (res) | ||
| 394 | goto out_err; | ||
| 427 | 395 | ||
| 428 | if (!res) { | 396 | hfsplus_instantiate(dentry, inode, inode->i_ino); |
| 429 | hfsplus_instantiate(dentry, inode, inode->i_ino); | 397 | mark_inode_dirty(inode); |
| 430 | mark_inode_dirty(inode); | 398 | goto out; |
| 431 | } | ||
| 432 | 399 | ||
| 400 | out_err: | ||
| 401 | inode->i_nlink = 0; | ||
| 402 | hfsplus_delete_inode(inode); | ||
| 403 | iput(inode); | ||
| 404 | out: | ||
| 405 | mutex_unlock(&sbi->vh_mutex); | ||
| 433 | return res; | 406 | return res; |
| 434 | } | 407 | } |
| 435 | 408 | ||
| 436 | static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, | 409 | static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, |
| 437 | int mode, dev_t rdev) | 410 | int mode, dev_t rdev) |
| 438 | { | 411 | { |
| 439 | struct super_block *sb; | 412 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); |
| 440 | struct inode *inode; | 413 | struct inode *inode; |
| 441 | int res; | 414 | int res = -ENOSPC; |
| 442 | 415 | ||
| 443 | sb = dir->i_sb; | 416 | mutex_lock(&sbi->vh_mutex); |
| 444 | inode = hfsplus_new_inode(sb, mode); | 417 | inode = hfsplus_new_inode(dir->i_sb, mode); |
| 445 | if (!inode) | 418 | if (!inode) |
| 446 | return -ENOSPC; | 419 | goto out; |
| 420 | |||
| 421 | if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) | ||
| 422 | init_special_inode(inode, mode, rdev); | ||
| 447 | 423 | ||
| 448 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); | 424 | res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); |
| 449 | if (res) { | 425 | if (res) { |
| 450 | inode->i_nlink = 0; | 426 | inode->i_nlink = 0; |
| 451 | hfsplus_delete_inode(inode); | 427 | hfsplus_delete_inode(inode); |
| 452 | iput(inode); | 428 | iput(inode); |
| 453 | return res; | 429 | goto out; |
| 454 | } | 430 | } |
| 455 | init_special_inode(inode, mode, rdev); | 431 | |
| 456 | hfsplus_instantiate(dentry, inode, inode->i_ino); | 432 | hfsplus_instantiate(dentry, inode, inode->i_ino); |
| 457 | mark_inode_dirty(inode); | 433 | mark_inode_dirty(inode); |
| 434 | out: | ||
| 435 | mutex_unlock(&sbi->vh_mutex); | ||
| 436 | return res; | ||
| 437 | } | ||
| 458 | 438 | ||
| 459 | return 0; | 439 | static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode, |
| 440 | struct nameidata *nd) | ||
| 441 | { | ||
| 442 | return hfsplus_mknod(dir, dentry, mode, 0); | ||
| 443 | } | ||
| 444 | |||
| 445 | static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
| 446 | { | ||
| 447 | return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0); | ||
| 460 | } | 448 | } |
| 461 | 449 | ||
| 462 | static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, | 450 | static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, |
| @@ -466,7 +454,10 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 466 | 454 | ||
| 467 | /* Unlink destination if it already exists */ | 455 | /* Unlink destination if it already exists */ |
| 468 | if (new_dentry->d_inode) { | 456 | if (new_dentry->d_inode) { |
| 469 | res = hfsplus_unlink(new_dir, new_dentry); | 457 | if (S_ISDIR(new_dentry->d_inode->i_mode)) |
| 458 | res = hfsplus_rmdir(new_dir, new_dentry); | ||
| 459 | else | ||
| 460 | res = hfsplus_unlink(new_dir, new_dentry); | ||
| 470 | if (res) | 461 | if (res) |
| 471 | return res; | 462 | return res; |
| 472 | } | 463 | } |
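
Note on the dir.c hunks above: the directory operations (link, unlink, rmdir, symlink, mknod) are now serialized by the new sbi->vh_mutex and unwind through shared out: labels; create and mkdir become thin wrappers around mknod; and rename uses rmdir rather than unlink when the replaced target is a directory. A minimal sketch of the lock-and-unwind shape these operations share, with all names hypothetical and pthreads standing in for the kernel mutex API.

#include <stdio.h>
#include <pthread.h>

/* Hypothetical stand-ins for the superblock info and an inode. */
struct sb_info { pthread_mutex_t vh_mutex; };
struct inode   { int ino; };

static struct inode *new_inode(void) { static struct inode i = { 42 }; return &i; }
static int create_cat_record(struct inode *inode) { (void)inode; return 0; }
static void delete_inode(struct inode *inode) { (void)inode; }

/* Take vh_mutex, allocate, unwind through a label on failure, and release
 * the mutex on every path -- the shape of the reworked hfsplus_mknod(). */
static int do_mknod(struct sb_info *sbi)
{
        struct inode *inode;
        int res = -1;                           /* default error, like -ENOSPC */

        pthread_mutex_lock(&sbi->vh_mutex);
        inode = new_inode();
        if (!inode)
                goto out;

        res = create_cat_record(inode);
        if (res) {
                delete_inode(inode);            /* undo the allocation */
                goto out;
        }
        printf("created inode %d\n", inode->ino);
out:
        pthread_mutex_unlock(&sbi->vh_mutex);
        return res;
}

int main(void)
{
        struct sb_info sbi;

        pthread_mutex_init(&sbi.vh_mutex, NULL);
        return do_mknod(&sbi);
}
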
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index 0022eec63cda..0c9cb1820a52 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c | |||
| @@ -85,35 +85,49 @@ static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext) | |||
| 85 | 85 | ||
| 86 | static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) | 86 | static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) |
| 87 | { | 87 | { |
| 88 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 88 | int res; | 89 | int res; |
| 89 | 90 | ||
| 90 | hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start, | 91 | WARN_ON(!mutex_is_locked(&hip->extents_lock)); |
| 91 | HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); | 92 | |
| 93 | hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start, | ||
| 94 | HFSPLUS_IS_RSRC(inode) ? | ||
| 95 | HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); | ||
| 96 | |||
| 92 | res = hfs_brec_find(fd); | 97 | res = hfs_brec_find(fd); |
| 93 | if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) { | 98 | if (hip->flags & HFSPLUS_FLG_EXT_NEW) { |
| 94 | if (res != -ENOENT) | 99 | if (res != -ENOENT) |
| 95 | return; | 100 | return; |
| 96 | hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec)); | 101 | hfs_brec_insert(fd, hip->cached_extents, |
| 97 | HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); | 102 | sizeof(hfsplus_extent_rec)); |
| 103 | hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); | ||
| 98 | } else { | 104 | } else { |
| 99 | if (res) | 105 | if (res) |
| 100 | return; | 106 | return; |
| 101 | hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength); | 107 | hfs_bnode_write(fd->bnode, hip->cached_extents, |
| 102 | HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY; | 108 | fd->entryoffset, fd->entrylength); |
| 109 | hip->flags &= ~HFSPLUS_FLG_EXT_DIRTY; | ||
| 103 | } | 110 | } |
| 104 | } | 111 | } |
| 105 | 112 | ||
| 106 | void hfsplus_ext_write_extent(struct inode *inode) | 113 | static void hfsplus_ext_write_extent_locked(struct inode *inode) |
| 107 | { | 114 | { |
| 108 | if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) { | 115 | if (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_EXT_DIRTY) { |
| 109 | struct hfs_find_data fd; | 116 | struct hfs_find_data fd; |
| 110 | 117 | ||
| 111 | hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); | 118 | hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); |
| 112 | __hfsplus_ext_write_extent(inode, &fd); | 119 | __hfsplus_ext_write_extent(inode, &fd); |
| 113 | hfs_find_exit(&fd); | 120 | hfs_find_exit(&fd); |
| 114 | } | 121 | } |
| 115 | } | 122 | } |
| 116 | 123 | ||
| 124 | void hfsplus_ext_write_extent(struct inode *inode) | ||
| 125 | { | ||
| 126 | mutex_lock(&HFSPLUS_I(inode)->extents_lock); | ||
| 127 | hfsplus_ext_write_extent_locked(inode); | ||
| 128 | mutex_unlock(&HFSPLUS_I(inode)->extents_lock); | ||
| 129 | } | ||
| 130 | |||
| 117 | static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, | 131 | static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, |
| 118 | struct hfsplus_extent *extent, | 132 | struct hfsplus_extent *extent, |
| 119 | u32 cnid, u32 block, u8 type) | 133 | u32 cnid, u32 block, u8 type) |
| @@ -136,33 +150,39 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, | |||
| 136 | 150 | ||
| 137 | static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block) | 151 | static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block) |
| 138 | { | 152 | { |
| 153 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 139 | int res; | 154 | int res; |
| 140 | 155 | ||
| 141 | if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) | 156 | WARN_ON(!mutex_is_locked(&hip->extents_lock)); |
| 157 | |||
| 158 | if (hip->flags & HFSPLUS_FLG_EXT_DIRTY) | ||
| 142 | __hfsplus_ext_write_extent(inode, fd); | 159 | __hfsplus_ext_write_extent(inode, fd); |
| 143 | 160 | ||
| 144 | res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino, | 161 | res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino, |
| 145 | block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); | 162 | block, HFSPLUS_IS_RSRC(inode) ? |
| 163 | HFSPLUS_TYPE_RSRC : | ||
| 164 | HFSPLUS_TYPE_DATA); | ||
| 146 | if (!res) { | 165 | if (!res) { |
| 147 | HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block); | 166 | hip->cached_start = be32_to_cpu(fd->key->ext.start_block); |
| 148 | HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents); | 167 | hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents); |
| 149 | } else { | 168 | } else { |
| 150 | HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0; | 169 | hip->cached_start = hip->cached_blocks = 0; |
| 151 | HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); | 170 | hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); |
| 152 | } | 171 | } |
| 153 | return res; | 172 | return res; |
| 154 | } | 173 | } |
| 155 | 174 | ||
| 156 | static int hfsplus_ext_read_extent(struct inode *inode, u32 block) | 175 | static int hfsplus_ext_read_extent(struct inode *inode, u32 block) |
| 157 | { | 176 | { |
| 177 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 158 | struct hfs_find_data fd; | 178 | struct hfs_find_data fd; |
| 159 | int res; | 179 | int res; |
| 160 | 180 | ||
| 161 | if (block >= HFSPLUS_I(inode).cached_start && | 181 | if (block >= hip->cached_start && |
| 162 | block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks) | 182 | block < hip->cached_start + hip->cached_blocks) |
| 163 | return 0; | 183 | return 0; |
| 164 | 184 | ||
| 165 | hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); | 185 | hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); |
| 166 | res = __hfsplus_ext_cache_extent(&fd, inode, block); | 186 | res = __hfsplus_ext_cache_extent(&fd, inode, block); |
| 167 | hfs_find_exit(&fd); | 187 | hfs_find_exit(&fd); |
| 168 | return res; | 188 | return res; |
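
Note on the extents.c hunks so far: hfsplus_ext_write_extent() is split into a locked helper so callers that already hold the per-inode extents_lock (such as hfsplus_file_extend() below) can flush the cached extent record without re-taking the mutex, and WARN_ON(!mutex_is_locked()) assertions document that contract. A small sketch of the locked/unlocked pairing, with the struct, flag, and helper names all assumed.

#include <stdio.h>
#include <pthread.h>
#include <assert.h>

/* Hypothetical per-inode info with a cached, possibly dirty extent record. */
struct inode_info {
        pthread_mutex_t extents_lock;
        int lock_held;          /* stand-in for the kernel's mutex_is_locked() */
        int ext_dirty;
};

/* Locked helper: the caller must already hold extents_lock, mirroring
 * hfsplus_ext_write_extent_locked() and its WARN_ON assertion. */
static void write_extent_locked(struct inode_info *hip)
{
        assert(hip->lock_held);
        if (hip->ext_dirty) {
                printf("flushing cached extent record\n");
                hip->ext_dirty = 0;
        }
}

/* Public wrapper takes the lock itself, like hfsplus_ext_write_extent(). */
static void write_extent(struct inode_info *hip)
{
        pthread_mutex_lock(&hip->extents_lock);
        hip->lock_held = 1;
        write_extent_locked(hip);
        hip->lock_held = 0;
        pthread_mutex_unlock(&hip->extents_lock);
}

int main(void)
{
        struct inode_info hip = { .ext_dirty = 1 };

        pthread_mutex_init(&hip.extents_lock, NULL);
        write_extent(&hip);
        return 0;
}
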
| @@ -172,21 +192,21 @@ static int hfsplus_ext_read_extent(struct inode *inode, u32 block) | |||
| 172 | int hfsplus_get_block(struct inode *inode, sector_t iblock, | 192 | int hfsplus_get_block(struct inode *inode, sector_t iblock, |
| 173 | struct buffer_head *bh_result, int create) | 193 | struct buffer_head *bh_result, int create) |
| 174 | { | 194 | { |
| 175 | struct super_block *sb; | 195 | struct super_block *sb = inode->i_sb; |
| 196 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 197 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 176 | int res = -EIO; | 198 | int res = -EIO; |
| 177 | u32 ablock, dblock, mask; | 199 | u32 ablock, dblock, mask; |
| 178 | int shift; | 200 | int shift; |
| 179 | 201 | ||
| 180 | sb = inode->i_sb; | ||
| 181 | |||
| 182 | /* Convert inode block to disk allocation block */ | 202 | /* Convert inode block to disk allocation block */ |
| 183 | shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits; | 203 | shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits; |
| 184 | ablock = iblock >> HFSPLUS_SB(sb).fs_shift; | 204 | ablock = iblock >> sbi->fs_shift; |
| 185 | 205 | ||
| 186 | if (iblock >= HFSPLUS_I(inode).fs_blocks) { | 206 | if (iblock >= hip->fs_blocks) { |
| 187 | if (iblock > HFSPLUS_I(inode).fs_blocks || !create) | 207 | if (iblock > hip->fs_blocks || !create) |
| 188 | return -EIO; | 208 | return -EIO; |
| 189 | if (ablock >= HFSPLUS_I(inode).alloc_blocks) { | 209 | if (ablock >= hip->alloc_blocks) { |
| 190 | res = hfsplus_file_extend(inode); | 210 | res = hfsplus_file_extend(inode); |
| 191 | if (res) | 211 | if (res) |
| 192 | return res; | 212 | return res; |
| @@ -194,33 +214,33 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, | |||
| 194 | } else | 214 | } else |
| 195 | create = 0; | 215 | create = 0; |
| 196 | 216 | ||
| 197 | if (ablock < HFSPLUS_I(inode).first_blocks) { | 217 | if (ablock < hip->first_blocks) { |
| 198 | dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock); | 218 | dblock = hfsplus_ext_find_block(hip->first_extents, ablock); |
| 199 | goto done; | 219 | goto done; |
| 200 | } | 220 | } |
| 201 | 221 | ||
| 202 | if (inode->i_ino == HFSPLUS_EXT_CNID) | 222 | if (inode->i_ino == HFSPLUS_EXT_CNID) |
| 203 | return -EIO; | 223 | return -EIO; |
| 204 | 224 | ||
| 205 | mutex_lock(&HFSPLUS_I(inode).extents_lock); | 225 | mutex_lock(&hip->extents_lock); |
| 206 | res = hfsplus_ext_read_extent(inode, ablock); | 226 | res = hfsplus_ext_read_extent(inode, ablock); |
| 207 | if (!res) { | 227 | if (!res) { |
| 208 | dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock - | 228 | dblock = hfsplus_ext_find_block(hip->cached_extents, |
| 209 | HFSPLUS_I(inode).cached_start); | 229 | ablock - hip->cached_start); |
| 210 | } else { | 230 | } else { |
| 211 | mutex_unlock(&HFSPLUS_I(inode).extents_lock); | 231 | mutex_unlock(&hip->extents_lock); |
| 212 | return -EIO; | 232 | return -EIO; |
| 213 | } | 233 | } |
| 214 | mutex_unlock(&HFSPLUS_I(inode).extents_lock); | 234 | mutex_unlock(&hip->extents_lock); |
| 215 | 235 | ||
| 216 | done: | 236 | done: |
| 217 | dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); | 237 | dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); |
| 218 | mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1; | 238 | mask = (1 << sbi->fs_shift) - 1; |
| 219 | map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask)); | 239 | map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask)); |
| 220 | if (create) { | 240 | if (create) { |
| 221 | set_buffer_new(bh_result); | 241 | set_buffer_new(bh_result); |
| 222 | HFSPLUS_I(inode).phys_size += sb->s_blocksize; | 242 | hip->phys_size += sb->s_blocksize; |
| 223 | HFSPLUS_I(inode).fs_blocks++; | 243 | hip->fs_blocks++; |
| 224 | inode_add_bytes(inode, sb->s_blocksize); | 244 | inode_add_bytes(inode, sb->s_blocksize); |
| 225 | mark_inode_dirty(inode); | 245 | mark_inode_dirty(inode); |
| 226 | } | 246 | } |
| @@ -327,7 +347,7 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw | |||
| 327 | if (total_blocks == blocks) | 347 | if (total_blocks == blocks) |
| 328 | return 0; | 348 | return 0; |
| 329 | 349 | ||
| 330 | hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd); | 350 | hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); |
| 331 | do { | 351 | do { |
| 332 | res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, | 352 | res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, |
| 333 | total_blocks, type); | 353 | total_blocks, type); |
| @@ -348,29 +368,33 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw | |||
| 348 | int hfsplus_file_extend(struct inode *inode) | 368 | int hfsplus_file_extend(struct inode *inode) |
| 349 | { | 369 | { |
| 350 | struct super_block *sb = inode->i_sb; | 370 | struct super_block *sb = inode->i_sb; |
| 371 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 372 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 351 | u32 start, len, goal; | 373 | u32 start, len, goal; |
| 352 | int res; | 374 | int res; |
| 353 | 375 | ||
| 354 | if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) { | 376 | if (sbi->alloc_file->i_size * 8 < |
| 377 | sbi->total_blocks - sbi->free_blocks + 8) { | ||
| 355 | // extend alloc file | 378 | // extend alloc file |
| 356 | printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8, | 379 | printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", |
| 357 | HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks); | 380 | sbi->alloc_file->i_size * 8, |
| 381 | sbi->total_blocks, sbi->free_blocks); | ||
| 358 | return -ENOSPC; | 382 | return -ENOSPC; |
| 359 | } | 383 | } |
| 360 | 384 | ||
| 361 | mutex_lock(&HFSPLUS_I(inode).extents_lock); | 385 | mutex_lock(&hip->extents_lock); |
| 362 | if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks) | 386 | if (hip->alloc_blocks == hip->first_blocks) |
| 363 | goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents); | 387 | goal = hfsplus_ext_lastblock(hip->first_extents); |
| 364 | else { | 388 | else { |
| 365 | res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks); | 389 | res = hfsplus_ext_read_extent(inode, hip->alloc_blocks); |
| 366 | if (res) | 390 | if (res) |
| 367 | goto out; | 391 | goto out; |
| 368 | goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents); | 392 | goal = hfsplus_ext_lastblock(hip->cached_extents); |
| 369 | } | 393 | } |
| 370 | 394 | ||
| 371 | len = HFSPLUS_I(inode).clump_blocks; | 395 | len = hip->clump_blocks; |
| 372 | start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len); | 396 | start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len); |
| 373 | if (start >= HFSPLUS_SB(sb).total_blocks) { | 397 | if (start >= sbi->total_blocks) { |
| 374 | start = hfsplus_block_allocate(sb, goal, 0, &len); | 398 | start = hfsplus_block_allocate(sb, goal, 0, &len); |
| 375 | if (start >= goal) { | 399 | if (start >= goal) { |
| 376 | res = -ENOSPC; | 400 | res = -ENOSPC; |
| @@ -379,56 +403,56 @@ int hfsplus_file_extend(struct inode *inode) | |||
| 379 | } | 403 | } |
| 380 | 404 | ||
| 381 | dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); | 405 | dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); |
| 382 | if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) { | 406 | |
| 383 | if (!HFSPLUS_I(inode).first_blocks) { | 407 | if (hip->alloc_blocks <= hip->first_blocks) { |
| 408 | if (!hip->first_blocks) { | ||
| 384 | dprint(DBG_EXTENT, "first extents\n"); | 409 | dprint(DBG_EXTENT, "first extents\n"); |
| 385 | /* no extents yet */ | 410 | /* no extents yet */ |
| 386 | HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start); | 411 | hip->first_extents[0].start_block = cpu_to_be32(start); |
| 387 | HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len); | 412 | hip->first_extents[0].block_count = cpu_to_be32(len); |
| 388 | res = 0; | 413 | res = 0; |
| 389 | } else { | 414 | } else { |
| 390 | /* try to append to extents in inode */ | 415 | /* try to append to extents in inode */ |
| 391 | res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents, | 416 | res = hfsplus_add_extent(hip->first_extents, |
| 392 | HFSPLUS_I(inode).alloc_blocks, | 417 | hip->alloc_blocks, |
| 393 | start, len); | 418 | start, len); |
| 394 | if (res == -ENOSPC) | 419 | if (res == -ENOSPC) |
| 395 | goto insert_extent; | 420 | goto insert_extent; |
| 396 | } | 421 | } |
| 397 | if (!res) { | 422 | if (!res) { |
| 398 | hfsplus_dump_extent(HFSPLUS_I(inode).first_extents); | 423 | hfsplus_dump_extent(hip->first_extents); |
| 399 | HFSPLUS_I(inode).first_blocks += len; | 424 | hip->first_blocks += len; |
| 400 | } | 425 | } |
| 401 | } else { | 426 | } else { |
| 402 | res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents, | 427 | res = hfsplus_add_extent(hip->cached_extents, |
| 403 | HFSPLUS_I(inode).alloc_blocks - | 428 | hip->alloc_blocks - hip->cached_start, |
| 404 | HFSPLUS_I(inode).cached_start, | ||
| 405 | start, len); | 429 | start, len); |
| 406 | if (!res) { | 430 | if (!res) { |
| 407 | hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); | 431 | hfsplus_dump_extent(hip->cached_extents); |
| 408 | HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY; | 432 | hip->flags |= HFSPLUS_FLG_EXT_DIRTY; |
| 409 | HFSPLUS_I(inode).cached_blocks += len; | 433 | hip->cached_blocks += len; |
| 410 | } else if (res == -ENOSPC) | 434 | } else if (res == -ENOSPC) |
| 411 | goto insert_extent; | 435 | goto insert_extent; |
| 412 | } | 436 | } |
| 413 | out: | 437 | out: |
| 414 | mutex_unlock(&HFSPLUS_I(inode).extents_lock); | 438 | mutex_unlock(&hip->extents_lock); |
| 415 | if (!res) { | 439 | if (!res) { |
| 416 | HFSPLUS_I(inode).alloc_blocks += len; | 440 | hip->alloc_blocks += len; |
| 417 | mark_inode_dirty(inode); | 441 | mark_inode_dirty(inode); |
| 418 | } | 442 | } |
| 419 | return res; | 443 | return res; |
| 420 | 444 | ||
| 421 | insert_extent: | 445 | insert_extent: |
| 422 | dprint(DBG_EXTENT, "insert new extent\n"); | 446 | dprint(DBG_EXTENT, "insert new extent\n"); |
| 423 | hfsplus_ext_write_extent(inode); | 447 | hfsplus_ext_write_extent_locked(inode); |
| 424 | 448 | ||
| 425 | memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); | 449 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); |
| 426 | HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start); | 450 | hip->cached_extents[0].start_block = cpu_to_be32(start); |
| 427 | HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len); | 451 | hip->cached_extents[0].block_count = cpu_to_be32(len); |
| 428 | hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); | 452 | hfsplus_dump_extent(hip->cached_extents); |
| 429 | HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW; | 453 | hip->flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW; |
| 430 | HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks; | 454 | hip->cached_start = hip->alloc_blocks; |
| 431 | HFSPLUS_I(inode).cached_blocks = len; | 455 | hip->cached_blocks = len; |
| 432 | 456 | ||
| 433 | res = 0; | 457 | res = 0; |
| 434 | goto out; | 458 | goto out; |
| @@ -437,13 +461,15 @@ insert_extent: | |||
| 437 | void hfsplus_file_truncate(struct inode *inode) | 461 | void hfsplus_file_truncate(struct inode *inode) |
| 438 | { | 462 | { |
| 439 | struct super_block *sb = inode->i_sb; | 463 | struct super_block *sb = inode->i_sb; |
| 464 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 440 | struct hfs_find_data fd; | 465 | struct hfs_find_data fd; |
| 441 | u32 alloc_cnt, blk_cnt, start; | 466 | u32 alloc_cnt, blk_cnt, start; |
| 442 | int res; | 467 | int res; |
| 443 | 468 | ||
| 444 | dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino, | 469 | dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", |
| 445 | (long long)HFSPLUS_I(inode).phys_size, inode->i_size); | 470 | inode->i_ino, (long long)hip->phys_size, inode->i_size); |
| 446 | if (inode->i_size > HFSPLUS_I(inode).phys_size) { | 471 | |
| 472 | if (inode->i_size > hip->phys_size) { | ||
| 447 | struct address_space *mapping = inode->i_mapping; | 473 | struct address_space *mapping = inode->i_mapping; |
| 448 | struct page *page; | 474 | struct page *page; |
| 449 | void *fsdata; | 475 | void *fsdata; |
| @@ -460,47 +486,48 @@ void hfsplus_file_truncate(struct inode *inode) | |||
| 460 | return; | 486 | return; |
| 461 | mark_inode_dirty(inode); | 487 | mark_inode_dirty(inode); |
| 462 | return; | 488 | return; |
| 463 | } else if (inode->i_size == HFSPLUS_I(inode).phys_size) | 489 | } else if (inode->i_size == hip->phys_size) |
| 464 | return; | 490 | return; |
| 465 | 491 | ||
| 466 | blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift; | 492 | blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> |
| 467 | alloc_cnt = HFSPLUS_I(inode).alloc_blocks; | 493 | HFSPLUS_SB(sb)->alloc_blksz_shift; |
| 494 | alloc_cnt = hip->alloc_blocks; | ||
| 468 | if (blk_cnt == alloc_cnt) | 495 | if (blk_cnt == alloc_cnt) |
| 469 | goto out; | 496 | goto out; |
| 470 | 497 | ||
| 471 | mutex_lock(&HFSPLUS_I(inode).extents_lock); | 498 | mutex_lock(&hip->extents_lock); |
| 472 | hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd); | 499 | hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); |
| 473 | while (1) { | 500 | while (1) { |
| 474 | if (alloc_cnt == HFSPLUS_I(inode).first_blocks) { | 501 | if (alloc_cnt == hip->first_blocks) { |
| 475 | hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents, | 502 | hfsplus_free_extents(sb, hip->first_extents, |
| 476 | alloc_cnt, alloc_cnt - blk_cnt); | 503 | alloc_cnt, alloc_cnt - blk_cnt); |
| 477 | hfsplus_dump_extent(HFSPLUS_I(inode).first_extents); | 504 | hfsplus_dump_extent(hip->first_extents); |
| 478 | HFSPLUS_I(inode).first_blocks = blk_cnt; | 505 | hip->first_blocks = blk_cnt; |
| 479 | break; | 506 | break; |
| 480 | } | 507 | } |
| 481 | res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); | 508 | res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); |
| 482 | if (res) | 509 | if (res) |
| 483 | break; | 510 | break; |
| 484 | start = HFSPLUS_I(inode).cached_start; | 511 | start = hip->cached_start; |
| 485 | hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents, | 512 | hfsplus_free_extents(sb, hip->cached_extents, |
| 486 | alloc_cnt - start, alloc_cnt - blk_cnt); | 513 | alloc_cnt - start, alloc_cnt - blk_cnt); |
| 487 | hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); | 514 | hfsplus_dump_extent(hip->cached_extents); |
| 488 | if (blk_cnt > start) { | 515 | if (blk_cnt > start) { |
| 489 | HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY; | 516 | hip->flags |= HFSPLUS_FLG_EXT_DIRTY; |
| 490 | break; | 517 | break; |
| 491 | } | 518 | } |
| 492 | alloc_cnt = start; | 519 | alloc_cnt = start; |
| 493 | HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0; | 520 | hip->cached_start = hip->cached_blocks = 0; |
| 494 | HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); | 521 | hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); |
| 495 | hfs_brec_remove(&fd); | 522 | hfs_brec_remove(&fd); |
| 496 | } | 523 | } |
| 497 | hfs_find_exit(&fd); | 524 | hfs_find_exit(&fd); |
| 498 | mutex_unlock(&HFSPLUS_I(inode).extents_lock); | 525 | mutex_unlock(&hip->extents_lock); |
| 499 | 526 | ||
| 500 | HFSPLUS_I(inode).alloc_blocks = blk_cnt; | 527 | hip->alloc_blocks = blk_cnt; |
| 501 | out: | 528 | out: |
| 502 | HFSPLUS_I(inode).phys_size = inode->i_size; | 529 | hip->phys_size = inode->i_size; |
| 503 | HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; | 530 | hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; |
| 504 | inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits); | 531 | inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits); |
| 505 | mark_inode_dirty(inode); | 532 | mark_inode_dirty(inode); |
| 506 | } | 533 | } |
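
The extents.c and truncate hunks above follow one pattern throughout: the per-inode private data is fetched once into a local hip pointer instead of re-evaluating HFSPLUS_I(inode) (now an inline accessor rather than a structure-dereferencing macro) at every use. A minimal sketch of that pattern, not taken from the patch itself; the helper name example_grow() is made up, the fields and accessor are the ones defined in hfsplus_fs.h below:

	/* Sketch only; assumes hfsplus_fs.h for HFSPLUS_I() and the field names. */
	#include "hfsplus_fs.h"

	static void example_grow(struct inode *inode, u32 len)
	{
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);

		mutex_lock(&hip->extents_lock);
		hip->alloc_blocks += len;	/* was: HFSPLUS_I(inode).alloc_blocks += len; */
		mutex_unlock(&hip->extents_lock);
		mark_inode_dirty(inode);
	}
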
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index dc856be3c2b0..cb3653efb57a 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
| @@ -62,7 +62,7 @@ struct hfs_btree { | |||
| 62 | unsigned int depth; | 62 | unsigned int depth; |
| 63 | 63 | ||
| 64 | //unsigned int map1_size, map_size; | 64 | //unsigned int map1_size, map_size; |
| 65 | struct semaphore tree_lock; | 65 | struct mutex tree_lock; |
| 66 | 66 | ||
| 67 | unsigned int pages_per_bnode; | 67 | unsigned int pages_per_bnode; |
| 68 | spinlock_t hash_lock; | 68 | spinlock_t hash_lock; |
| @@ -121,16 +121,21 @@ struct hfsplus_sb_info { | |||
| 121 | u32 sect_count; | 121 | u32 sect_count; |
| 122 | int fs_shift; | 122 | int fs_shift; |
| 123 | 123 | ||
| 124 | /* Stuff in host order from Vol Header */ | 124 | /* immutable data from the volume header */ |
| 125 | u32 alloc_blksz; | 125 | u32 alloc_blksz; |
| 126 | int alloc_blksz_shift; | 126 | int alloc_blksz_shift; |
| 127 | u32 total_blocks; | 127 | u32 total_blocks; |
| 128 | u32 data_clump_blocks, rsrc_clump_blocks; | ||
| 129 | |||
| 130 | /* mutable data from the volume header, protected by alloc_mutex */ | ||
| 128 | u32 free_blocks; | 131 | u32 free_blocks; |
| 129 | u32 next_alloc; | 132 | struct mutex alloc_mutex; |
| 133 | |||
| 134 | /* mutable data from the volume header, protected by vh_mutex */ | ||
| 130 | u32 next_cnid; | 135 | u32 next_cnid; |
| 131 | u32 file_count; | 136 | u32 file_count; |
| 132 | u32 folder_count; | 137 | u32 folder_count; |
| 133 | u32 data_clump_blocks, rsrc_clump_blocks; | 138 | struct mutex vh_mutex; |
| 134 | 139 | ||
| 135 | /* Config options */ | 140 | /* Config options */ |
| 136 | u32 creator; | 141 | u32 creator; |
| @@ -143,40 +148,50 @@ struct hfsplus_sb_info { | |||
| 143 | int part, session; | 148 | int part, session; |
| 144 | 149 | ||
| 145 | unsigned long flags; | 150 | unsigned long flags; |
| 146 | |||
| 147 | struct hlist_head rsrc_inodes; | ||
| 148 | }; | 151 | }; |
| 149 | 152 | ||
| 150 | #define HFSPLUS_SB_WRITEBACKUP 0x0001 | 153 | #define HFSPLUS_SB_WRITEBACKUP 0 |
| 151 | #define HFSPLUS_SB_NODECOMPOSE 0x0002 | 154 | #define HFSPLUS_SB_NODECOMPOSE 1 |
| 152 | #define HFSPLUS_SB_FORCE 0x0004 | 155 | #define HFSPLUS_SB_FORCE 2 |
| 153 | #define HFSPLUS_SB_HFSX 0x0008 | 156 | #define HFSPLUS_SB_HFSX 3 |
| 154 | #define HFSPLUS_SB_CASEFOLD 0x0010 | 157 | #define HFSPLUS_SB_CASEFOLD 4 |
| 155 | 158 | ||
| 156 | 159 | ||
| 157 | struct hfsplus_inode_info { | 160 | struct hfsplus_inode_info { |
| 158 | struct mutex extents_lock; | ||
| 159 | u32 clump_blocks, alloc_blocks; | ||
| 160 | sector_t fs_blocks; | ||
| 161 | /* Allocation extents from catalog record or volume header */ | ||
| 162 | hfsplus_extent_rec first_extents; | ||
| 163 | u32 first_blocks; | ||
| 164 | hfsplus_extent_rec cached_extents; | ||
| 165 | u32 cached_start, cached_blocks; | ||
| 166 | atomic_t opencnt; | 161 | atomic_t opencnt; |
| 167 | 162 | ||
| 168 | struct inode *rsrc_inode; | 163 | /* |
| 164 | * Extent allocation information, protected by extents_lock. | ||
| 165 | */ | ||
| 166 | u32 first_blocks; | ||
| 167 | u32 clump_blocks; | ||
| 168 | u32 alloc_blocks; | ||
| 169 | u32 cached_start; | ||
| 170 | u32 cached_blocks; | ||
| 171 | hfsplus_extent_rec first_extents; | ||
| 172 | hfsplus_extent_rec cached_extents; | ||
| 169 | unsigned long flags; | 173 | unsigned long flags; |
| 174 | struct mutex extents_lock; | ||
| 170 | 175 | ||
| 176 | /* | ||
| 177 | * Immutable data. | ||
| 178 | */ | ||
| 179 | struct inode *rsrc_inode; | ||
| 171 | __be32 create_date; | 180 | __be32 create_date; |
| 172 | /* Device number in hfsplus_permissions in catalog */ | ||
| 173 | u32 dev; | ||
| 174 | /* BSD system and user file flags */ | ||
| 175 | u8 rootflags; | ||
| 176 | u8 userflags; | ||
| 177 | 181 | ||
| 182 | /* | ||
| 183 | * Protected by sbi->vh_mutex. | ||
| 184 | */ | ||
| 185 | u32 linkid; | ||
| 186 | |||
| 187 | /* | ||
| 188 | * Protected by i_mutex. | ||
| 189 | */ | ||
| 190 | sector_t fs_blocks; | ||
| 191 | u8 userflags; /* BSD user file flags */ | ||
| 178 | struct list_head open_dir_list; | 192 | struct list_head open_dir_list; |
| 179 | loff_t phys_size; | 193 | loff_t phys_size; |
| 194 | |||
| 180 | struct inode vfs_inode; | 195 | struct inode vfs_inode; |
| 181 | }; | 196 | }; |
| 182 | 197 | ||
| @@ -184,8 +199,8 @@ struct hfsplus_inode_info { | |||
| 184 | #define HFSPLUS_FLG_EXT_DIRTY 0x0002 | 199 | #define HFSPLUS_FLG_EXT_DIRTY 0x0002 |
| 185 | #define HFSPLUS_FLG_EXT_NEW 0x0004 | 200 | #define HFSPLUS_FLG_EXT_NEW 0x0004 |
| 186 | 201 | ||
| 187 | #define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)) | 202 | #define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC)) |
| 188 | #define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC) | 203 | #define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC) |
| 189 | 204 | ||
| 190 | struct hfs_find_data { | 205 | struct hfs_find_data { |
| 191 | /* filled by caller */ | 206 | /* filled by caller */ |
| @@ -311,6 +326,7 @@ int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *); | |||
| 311 | int hfsplus_delete_cat(u32, struct inode *, struct qstr *); | 326 | int hfsplus_delete_cat(u32, struct inode *, struct qstr *); |
| 312 | int hfsplus_rename_cat(u32, struct inode *, struct qstr *, | 327 | int hfsplus_rename_cat(u32, struct inode *, struct qstr *, |
| 313 | struct inode *, struct qstr *); | 328 | struct inode *, struct qstr *); |
| 329 | void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms); | ||
| 314 | 330 | ||
| 315 | /* dir.c */ | 331 | /* dir.c */ |
| 316 | extern const struct inode_operations hfsplus_dir_inode_operations; | 332 | extern const struct inode_operations hfsplus_dir_inode_operations; |
| @@ -372,26 +388,15 @@ int hfsplus_read_wrapper(struct super_block *); | |||
| 372 | int hfs_part_find(struct super_block *, sector_t *, sector_t *); | 388 | int hfs_part_find(struct super_block *, sector_t *, sector_t *); |
| 373 | 389 | ||
| 374 | /* access macros */ | 390 | /* access macros */ |
| 375 | /* | ||
| 376 | static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb) | 391 | static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb) |
| 377 | { | 392 | { |
| 378 | return sb->s_fs_info; | 393 | return sb->s_fs_info; |
| 379 | } | 394 | } |
| 395 | |||
| 380 | static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) | 396 | static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) |
| 381 | { | 397 | { |
| 382 | return list_entry(inode, struct hfsplus_inode_info, vfs_inode); | 398 | return list_entry(inode, struct hfsplus_inode_info, vfs_inode); |
| 383 | } | 399 | } |
| 384 | */ | ||
| 385 | #define HFSPLUS_SB(super) (*(struct hfsplus_sb_info *)(super)->s_fs_info) | ||
| 386 | #define HFSPLUS_I(inode) (*list_entry(inode, struct hfsplus_inode_info, vfs_inode)) | ||
| 387 | |||
| 388 | #if 1 | ||
| 389 | #define hfsplus_kmap(p) ({ struct page *__p = (p); kmap(__p); }) | ||
| 390 | #define hfsplus_kunmap(p) ({ struct page *__p = (p); kunmap(__p); __p; }) | ||
| 391 | #else | ||
| 392 | #define hfsplus_kmap(p) kmap(p) | ||
| 393 | #define hfsplus_kunmap(p) kunmap(p) | ||
| 394 | #endif | ||
| 395 | 400 | ||
| 396 | #define sb_bread512(sb, sec, data) ({ \ | 401 | #define sb_bread512(sb, sec, data) ({ \ |
| 397 | struct buffer_head *__bh; \ | 402 | struct buffer_head *__bh; \ |
| @@ -419,6 +424,4 @@ static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) | |||
| 419 | #define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec) | 424 | #define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec) |
| 420 | #define hfsp_now2mt() __hfsp_ut2mt(get_seconds()) | 425 | #define hfsp_now2mt() __hfsp_ut2mt(get_seconds()) |
| 421 | 426 | ||
| 422 | #define kdev_t_to_nr(x) (x) | ||
| 423 | |||
| 424 | #endif | 427 | #endif |
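
The superblock flags in hfsplus_fs.h change from bitmask values (0x0001, 0x0002, ...) to bit numbers (0, 1, ...), so the unsigned long flags word can be updated with the atomic bit operations instead of plain |=/&= that would need an external lock. A minimal sketch, not from the patch; the helper names are made up, the flag and field names are the ones defined above:

	#include <linux/bitops.h>
	#include "hfsplus_fs.h"

	/* Sketch only: set_bit()/test_and_clear_bit() take a bit number and an
	 * unsigned long *, and are atomic with respect to other bitops. */
	static void example_mark_backup(struct hfsplus_sb_info *sbi)
	{
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);	/* was: flags |= 0x0001; */
	}

	static int example_need_backup(struct hfsplus_sb_info *sbi)
	{
		return test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
	}

The options.c and super.c hunks below use exactly this set_bit()/clear_bit()/test_bit() style on sbi->flags.
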
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h index fe99fe8db61a..6892899fd6fb 100644 --- a/fs/hfsplus/hfsplus_raw.h +++ b/fs/hfsplus/hfsplus_raw.h | |||
| @@ -200,6 +200,7 @@ struct hfsplus_cat_key { | |||
| 200 | struct hfsplus_unistr name; | 200 | struct hfsplus_unistr name; |
| 201 | } __packed; | 201 | } __packed; |
| 202 | 202 | ||
| 203 | #define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key)) | ||
| 203 | 204 | ||
| 204 | /* Structs from hfs.h */ | 205 | /* Structs from hfs.h */ |
| 205 | struct hfsp_point { | 206 | struct hfsp_point { |
| @@ -323,7 +324,7 @@ struct hfsplus_ext_key { | |||
| 323 | __be32 start_block; | 324 | __be32 start_block; |
| 324 | } __packed; | 325 | } __packed; |
| 325 | 326 | ||
| 326 | #define HFSPLUS_EXT_KEYLEN 12 | 327 | #define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key) |
| 327 | 328 | ||
| 328 | /* HFS+ generic BTree key */ | 329 | /* HFS+ generic BTree key */ |
| 329 | typedef union { | 330 | typedef union { |
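
Replacing the hard-coded extent key length with sizeof(struct hfsplus_ext_key) ties the constant to the packed on-disk layout (the packed key is 12 bytes) instead of a magic number. A sketch of a compile-time check for that invariant, not from the patch; the function name is made up:

	#include <linux/kernel.h>
	#include "hfsplus_raw.h"

	static inline void example_check_keylen(void)
	{
		/* fails to build if the packed struct ever stops being 12 bytes */
		BUILD_BUG_ON(HFSPLUS_EXT_KEYLEN != 12);
	}
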
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index c5a979d62c65..78449280dae0 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
| @@ -36,7 +36,7 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping, | |||
| 36 | *pagep = NULL; | 36 | *pagep = NULL; |
| 37 | ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 37 | ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, |
| 38 | hfsplus_get_block, | 38 | hfsplus_get_block, |
| 39 | &HFSPLUS_I(mapping->host).phys_size); | 39 | &HFSPLUS_I(mapping->host)->phys_size); |
| 40 | if (unlikely(ret)) { | 40 | if (unlikely(ret)) { |
| 41 | loff_t isize = mapping->host->i_size; | 41 | loff_t isize = mapping->host->i_size; |
| 42 | if (pos + len > isize) | 42 | if (pos + len > isize) |
| @@ -62,13 +62,13 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) | |||
| 62 | 62 | ||
| 63 | switch (inode->i_ino) { | 63 | switch (inode->i_ino) { |
| 64 | case HFSPLUS_EXT_CNID: | 64 | case HFSPLUS_EXT_CNID: |
| 65 | tree = HFSPLUS_SB(sb).ext_tree; | 65 | tree = HFSPLUS_SB(sb)->ext_tree; |
| 66 | break; | 66 | break; |
| 67 | case HFSPLUS_CAT_CNID: | 67 | case HFSPLUS_CAT_CNID: |
| 68 | tree = HFSPLUS_SB(sb).cat_tree; | 68 | tree = HFSPLUS_SB(sb)->cat_tree; |
| 69 | break; | 69 | break; |
| 70 | case HFSPLUS_ATTR_CNID: | 70 | case HFSPLUS_ATTR_CNID: |
| 71 | tree = HFSPLUS_SB(sb).attr_tree; | 71 | tree = HFSPLUS_SB(sb)->attr_tree; |
| 72 | break; | 72 | break; |
| 73 | default: | 73 | default: |
| 74 | BUG(); | 74 | BUG(); |
| @@ -172,12 +172,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent | |||
| 172 | struct hfs_find_data fd; | 172 | struct hfs_find_data fd; |
| 173 | struct super_block *sb = dir->i_sb; | 173 | struct super_block *sb = dir->i_sb; |
| 174 | struct inode *inode = NULL; | 174 | struct inode *inode = NULL; |
| 175 | struct hfsplus_inode_info *hip; | ||
| 175 | int err; | 176 | int err; |
| 176 | 177 | ||
| 177 | if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) | 178 | if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) |
| 178 | goto out; | 179 | goto out; |
| 179 | 180 | ||
| 180 | inode = HFSPLUS_I(dir).rsrc_inode; | 181 | inode = HFSPLUS_I(dir)->rsrc_inode; |
| 181 | if (inode) | 182 | if (inode) |
| 182 | goto out; | 183 | goto out; |
| 183 | 184 | ||
| @@ -185,12 +186,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent | |||
| 185 | if (!inode) | 186 | if (!inode) |
| 186 | return ERR_PTR(-ENOMEM); | 187 | return ERR_PTR(-ENOMEM); |
| 187 | 188 | ||
| 189 | hip = HFSPLUS_I(inode); | ||
| 188 | inode->i_ino = dir->i_ino; | 190 | inode->i_ino = dir->i_ino; |
| 189 | INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); | 191 | INIT_LIST_HEAD(&hip->open_dir_list); |
| 190 | mutex_init(&HFSPLUS_I(inode).extents_lock); | 192 | mutex_init(&hip->extents_lock); |
| 191 | HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC; | 193 | hip->flags = HFSPLUS_FLG_RSRC; |
| 192 | 194 | ||
| 193 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | 195 | hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); |
| 194 | err = hfsplus_find_cat(sb, dir->i_ino, &fd); | 196 | err = hfsplus_find_cat(sb, dir->i_ino, &fd); |
| 195 | if (!err) | 197 | if (!err) |
| 196 | err = hfsplus_cat_read_inode(inode, &fd); | 198 | err = hfsplus_cat_read_inode(inode, &fd); |
| @@ -199,10 +201,18 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent | |||
| 199 | iput(inode); | 201 | iput(inode); |
| 200 | return ERR_PTR(err); | 202 | return ERR_PTR(err); |
| 201 | } | 203 | } |
| 202 | HFSPLUS_I(inode).rsrc_inode = dir; | 204 | hip->rsrc_inode = dir; |
| 203 | HFSPLUS_I(dir).rsrc_inode = inode; | 205 | HFSPLUS_I(dir)->rsrc_inode = inode; |
| 204 | igrab(dir); | 206 | igrab(dir); |
| 205 | hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes); | 207 | |
| 208 | /* | ||
| 209 | * __mark_inode_dirty expects inodes to be hashed. Since we don't | ||
| 210 | * want resource fork inodes in the regular inode space, we make them | ||
| 211 | * appear hashed, but do not put them on any lists. hlist_del() | ||
| 212 | * will work fine and require no locking. | ||
| 213 | */ | ||
| 214 | inode->i_hash.pprev = &inode->i_hash.next; | ||
| 215 | |||
| 206 | mark_inode_dirty(inode); | 216 | mark_inode_dirty(inode); |
| 207 | out: | 217 | out: |
| 208 | d_add(dentry, inode); | 218 | d_add(dentry, inode); |
| @@ -211,30 +221,27 @@ out: | |||
| 211 | 221 | ||
| 212 | static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir) | 222 | static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir) |
| 213 | { | 223 | { |
| 214 | struct super_block *sb = inode->i_sb; | 224 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); |
| 215 | u16 mode; | 225 | u16 mode; |
| 216 | 226 | ||
| 217 | mode = be16_to_cpu(perms->mode); | 227 | mode = be16_to_cpu(perms->mode); |
| 218 | 228 | ||
| 219 | inode->i_uid = be32_to_cpu(perms->owner); | 229 | inode->i_uid = be32_to_cpu(perms->owner); |
| 220 | if (!inode->i_uid && !mode) | 230 | if (!inode->i_uid && !mode) |
| 221 | inode->i_uid = HFSPLUS_SB(sb).uid; | 231 | inode->i_uid = sbi->uid; |
| 222 | 232 | ||
| 223 | inode->i_gid = be32_to_cpu(perms->group); | 233 | inode->i_gid = be32_to_cpu(perms->group); |
| 224 | if (!inode->i_gid && !mode) | 234 | if (!inode->i_gid && !mode) |
| 225 | inode->i_gid = HFSPLUS_SB(sb).gid; | 235 | inode->i_gid = sbi->gid; |
| 226 | 236 | ||
| 227 | if (dir) { | 237 | if (dir) { |
| 228 | mode = mode ? (mode & S_IALLUGO) : | 238 | mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask)); |
| 229 | (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask)); | ||
| 230 | mode |= S_IFDIR; | 239 | mode |= S_IFDIR; |
| 231 | } else if (!mode) | 240 | } else if (!mode) |
| 232 | mode = S_IFREG | ((S_IRUGO|S_IWUGO) & | 241 | mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask)); |
| 233 | ~(HFSPLUS_SB(sb).umask)); | ||
| 234 | inode->i_mode = mode; | 242 | inode->i_mode = mode; |
| 235 | 243 | ||
| 236 | HFSPLUS_I(inode).rootflags = perms->rootflags; | 244 | HFSPLUS_I(inode)->userflags = perms->userflags; |
| 237 | HFSPLUS_I(inode).userflags = perms->userflags; | ||
| 238 | if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE) | 245 | if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE) |
| 239 | inode->i_flags |= S_IMMUTABLE; | 246 | inode->i_flags |= S_IMMUTABLE; |
| 240 | else | 247 | else |
| @@ -245,30 +252,13 @@ static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, i | |||
| 245 | inode->i_flags &= ~S_APPEND; | 252 | inode->i_flags &= ~S_APPEND; |
| 246 | } | 253 | } |
| 247 | 254 | ||
| 248 | static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms) | ||
| 249 | { | ||
| 250 | if (inode->i_flags & S_IMMUTABLE) | ||
| 251 | perms->rootflags |= HFSPLUS_FLG_IMMUTABLE; | ||
| 252 | else | ||
| 253 | perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE; | ||
| 254 | if (inode->i_flags & S_APPEND) | ||
| 255 | perms->rootflags |= HFSPLUS_FLG_APPEND; | ||
| 256 | else | ||
| 257 | perms->rootflags &= ~HFSPLUS_FLG_APPEND; | ||
| 258 | perms->userflags = HFSPLUS_I(inode).userflags; | ||
| 259 | perms->mode = cpu_to_be16(inode->i_mode); | ||
| 260 | perms->owner = cpu_to_be32(inode->i_uid); | ||
| 261 | perms->group = cpu_to_be32(inode->i_gid); | ||
| 262 | perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev); | ||
| 263 | } | ||
| 264 | |||
| 265 | static int hfsplus_file_open(struct inode *inode, struct file *file) | 255 | static int hfsplus_file_open(struct inode *inode, struct file *file) |
| 266 | { | 256 | { |
| 267 | if (HFSPLUS_IS_RSRC(inode)) | 257 | if (HFSPLUS_IS_RSRC(inode)) |
| 268 | inode = HFSPLUS_I(inode).rsrc_inode; | 258 | inode = HFSPLUS_I(inode)->rsrc_inode; |
| 269 | if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) | 259 | if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) |
| 270 | return -EOVERFLOW; | 260 | return -EOVERFLOW; |
| 271 | atomic_inc(&HFSPLUS_I(inode).opencnt); | 261 | atomic_inc(&HFSPLUS_I(inode)->opencnt); |
| 272 | return 0; | 262 | return 0; |
| 273 | } | 263 | } |
| 274 | 264 | ||
| @@ -277,12 +267,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file) | |||
| 277 | struct super_block *sb = inode->i_sb; | 267 | struct super_block *sb = inode->i_sb; |
| 278 | 268 | ||
| 279 | if (HFSPLUS_IS_RSRC(inode)) | 269 | if (HFSPLUS_IS_RSRC(inode)) |
| 280 | inode = HFSPLUS_I(inode).rsrc_inode; | 270 | inode = HFSPLUS_I(inode)->rsrc_inode; |
| 281 | if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) { | 271 | if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) { |
| 282 | mutex_lock(&inode->i_mutex); | 272 | mutex_lock(&inode->i_mutex); |
| 283 | hfsplus_file_truncate(inode); | 273 | hfsplus_file_truncate(inode); |
| 284 | if (inode->i_flags & S_DEAD) { | 274 | if (inode->i_flags & S_DEAD) { |
| 285 | hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL); | 275 | hfsplus_delete_cat(inode->i_ino, |
| 276 | HFSPLUS_SB(sb)->hidden_dir, NULL); | ||
| 286 | hfsplus_delete_inode(inode); | 277 | hfsplus_delete_inode(inode); |
| 287 | } | 278 | } |
| 288 | mutex_unlock(&inode->i_mutex); | 279 | mutex_unlock(&inode->i_mutex); |
| @@ -361,47 +352,52 @@ static const struct file_operations hfsplus_file_operations = { | |||
| 361 | 352 | ||
| 362 | struct inode *hfsplus_new_inode(struct super_block *sb, int mode) | 353 | struct inode *hfsplus_new_inode(struct super_block *sb, int mode) |
| 363 | { | 354 | { |
| 355 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 364 | struct inode *inode = new_inode(sb); | 356 | struct inode *inode = new_inode(sb); |
| 357 | struct hfsplus_inode_info *hip; | ||
| 358 | |||
| 365 | if (!inode) | 359 | if (!inode) |
| 366 | return NULL; | 360 | return NULL; |
| 367 | 361 | ||
| 368 | inode->i_ino = HFSPLUS_SB(sb).next_cnid++; | 362 | inode->i_ino = sbi->next_cnid++; |
| 369 | inode->i_mode = mode; | 363 | inode->i_mode = mode; |
| 370 | inode->i_uid = current_fsuid(); | 364 | inode->i_uid = current_fsuid(); |
| 371 | inode->i_gid = current_fsgid(); | 365 | inode->i_gid = current_fsgid(); |
| 372 | inode->i_nlink = 1; | 366 | inode->i_nlink = 1; |
| 373 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; | 367 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; |
| 374 | INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); | 368 | |
| 375 | mutex_init(&HFSPLUS_I(inode).extents_lock); | 369 | hip = HFSPLUS_I(inode); |
| 376 | atomic_set(&HFSPLUS_I(inode).opencnt, 0); | 370 | INIT_LIST_HEAD(&hip->open_dir_list); |
| 377 | HFSPLUS_I(inode).flags = 0; | 371 | mutex_init(&hip->extents_lock); |
| 378 | memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec)); | 372 | atomic_set(&hip->opencnt, 0); |
| 379 | memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); | 373 | hip->flags = 0; |
| 380 | HFSPLUS_I(inode).alloc_blocks = 0; | 374 | memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); |
| 381 | HFSPLUS_I(inode).first_blocks = 0; | 375 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); |
| 382 | HFSPLUS_I(inode).cached_start = 0; | 376 | hip->alloc_blocks = 0; |
| 383 | HFSPLUS_I(inode).cached_blocks = 0; | 377 | hip->first_blocks = 0; |
| 384 | HFSPLUS_I(inode).phys_size = 0; | 378 | hip->cached_start = 0; |
| 385 | HFSPLUS_I(inode).fs_blocks = 0; | 379 | hip->cached_blocks = 0; |
| 386 | HFSPLUS_I(inode).rsrc_inode = NULL; | 380 | hip->phys_size = 0; |
| 381 | hip->fs_blocks = 0; | ||
| 382 | hip->rsrc_inode = NULL; | ||
| 387 | if (S_ISDIR(inode->i_mode)) { | 383 | if (S_ISDIR(inode->i_mode)) { |
| 388 | inode->i_size = 2; | 384 | inode->i_size = 2; |
| 389 | HFSPLUS_SB(sb).folder_count++; | 385 | sbi->folder_count++; |
| 390 | inode->i_op = &hfsplus_dir_inode_operations; | 386 | inode->i_op = &hfsplus_dir_inode_operations; |
| 391 | inode->i_fop = &hfsplus_dir_operations; | 387 | inode->i_fop = &hfsplus_dir_operations; |
| 392 | } else if (S_ISREG(inode->i_mode)) { | 388 | } else if (S_ISREG(inode->i_mode)) { |
| 393 | HFSPLUS_SB(sb).file_count++; | 389 | sbi->file_count++; |
| 394 | inode->i_op = &hfsplus_file_inode_operations; | 390 | inode->i_op = &hfsplus_file_inode_operations; |
| 395 | inode->i_fop = &hfsplus_file_operations; | 391 | inode->i_fop = &hfsplus_file_operations; |
| 396 | inode->i_mapping->a_ops = &hfsplus_aops; | 392 | inode->i_mapping->a_ops = &hfsplus_aops; |
| 397 | HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks; | 393 | hip->clump_blocks = sbi->data_clump_blocks; |
| 398 | } else if (S_ISLNK(inode->i_mode)) { | 394 | } else if (S_ISLNK(inode->i_mode)) { |
| 399 | HFSPLUS_SB(sb).file_count++; | 395 | sbi->file_count++; |
| 400 | inode->i_op = &page_symlink_inode_operations; | 396 | inode->i_op = &page_symlink_inode_operations; |
| 401 | inode->i_mapping->a_ops = &hfsplus_aops; | 397 | inode->i_mapping->a_ops = &hfsplus_aops; |
| 402 | HFSPLUS_I(inode).clump_blocks = 1; | 398 | hip->clump_blocks = 1; |
| 403 | } else | 399 | } else |
| 404 | HFSPLUS_SB(sb).file_count++; | 400 | sbi->file_count++; |
| 405 | insert_inode_hash(inode); | 401 | insert_inode_hash(inode); |
| 406 | mark_inode_dirty(inode); | 402 | mark_inode_dirty(inode); |
| 407 | sb->s_dirt = 1; | 403 | sb->s_dirt = 1; |
| @@ -414,11 +410,11 @@ void hfsplus_delete_inode(struct inode *inode) | |||
| 414 | struct super_block *sb = inode->i_sb; | 410 | struct super_block *sb = inode->i_sb; |
| 415 | 411 | ||
| 416 | if (S_ISDIR(inode->i_mode)) { | 412 | if (S_ISDIR(inode->i_mode)) { |
| 417 | HFSPLUS_SB(sb).folder_count--; | 413 | HFSPLUS_SB(sb)->folder_count--; |
| 418 | sb->s_dirt = 1; | 414 | sb->s_dirt = 1; |
| 419 | return; | 415 | return; |
| 420 | } | 416 | } |
| 421 | HFSPLUS_SB(sb).file_count--; | 417 | HFSPLUS_SB(sb)->file_count--; |
| 422 | if (S_ISREG(inode->i_mode)) { | 418 | if (S_ISREG(inode->i_mode)) { |
| 423 | if (!inode->i_nlink) { | 419 | if (!inode->i_nlink) { |
| 424 | inode->i_size = 0; | 420 | inode->i_size = 0; |
| @@ -434,34 +430,39 @@ void hfsplus_delete_inode(struct inode *inode) | |||
| 434 | void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork) | 430 | void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork) |
| 435 | { | 431 | { |
| 436 | struct super_block *sb = inode->i_sb; | 432 | struct super_block *sb = inode->i_sb; |
| 433 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 434 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 437 | u32 count; | 435 | u32 count; |
| 438 | int i; | 436 | int i; |
| 439 | 437 | ||
| 440 | memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents, | 438 | memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec)); |
| 441 | sizeof(hfsplus_extent_rec)); | ||
| 442 | for (count = 0, i = 0; i < 8; i++) | 439 | for (count = 0, i = 0; i < 8; i++) |
| 443 | count += be32_to_cpu(fork->extents[i].block_count); | 440 | count += be32_to_cpu(fork->extents[i].block_count); |
| 444 | HFSPLUS_I(inode).first_blocks = count; | 441 | hip->first_blocks = count; |
| 445 | memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); | 442 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); |
| 446 | HFSPLUS_I(inode).cached_start = 0; | 443 | hip->cached_start = 0; |
| 447 | HFSPLUS_I(inode).cached_blocks = 0; | 444 | hip->cached_blocks = 0; |
| 448 | 445 | ||
| 449 | HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks); | 446 | hip->alloc_blocks = be32_to_cpu(fork->total_blocks); |
| 450 | inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size); | 447 | hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size); |
| 451 | HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; | 448 | hip->fs_blocks = |
| 452 | inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits); | 449 | (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; |
| 453 | HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift; | 450 | inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits); |
| 454 | if (!HFSPLUS_I(inode).clump_blocks) | 451 | hip->clump_blocks = |
| 455 | HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks : | 452 | be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift; |
| 456 | HFSPLUS_SB(sb).data_clump_blocks; | 453 | if (!hip->clump_blocks) { |
| 454 | hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ? | ||
| 455 | sbi->rsrc_clump_blocks : | ||
| 456 | sbi->data_clump_blocks; | ||
| 457 | } | ||
| 457 | } | 458 | } |
| 458 | 459 | ||
| 459 | void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork) | 460 | void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork) |
| 460 | { | 461 | { |
| 461 | memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents, | 462 | memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents, |
| 462 | sizeof(hfsplus_extent_rec)); | 463 | sizeof(hfsplus_extent_rec)); |
| 463 | fork->total_size = cpu_to_be64(inode->i_size); | 464 | fork->total_size = cpu_to_be64(inode->i_size); |
| 464 | fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks); | 465 | fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks); |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | 468 | int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) |
| @@ -472,7 +473,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | |||
| 472 | 473 | ||
| 473 | type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset); | 474 | type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset); |
| 474 | 475 | ||
| 475 | HFSPLUS_I(inode).dev = 0; | 476 | HFSPLUS_I(inode)->linkid = 0; |
| 476 | if (type == HFSPLUS_FOLDER) { | 477 | if (type == HFSPLUS_FOLDER) { |
| 477 | struct hfsplus_cat_folder *folder = &entry.folder; | 478 | struct hfsplus_cat_folder *folder = &entry.folder; |
| 478 | 479 | ||
| @@ -486,8 +487,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | |||
| 486 | inode->i_atime = hfsp_mt2ut(folder->access_date); | 487 | inode->i_atime = hfsp_mt2ut(folder->access_date); |
| 487 | inode->i_mtime = hfsp_mt2ut(folder->content_mod_date); | 488 | inode->i_mtime = hfsp_mt2ut(folder->content_mod_date); |
| 488 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); | 489 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); |
| 489 | HFSPLUS_I(inode).create_date = folder->create_date; | 490 | HFSPLUS_I(inode)->create_date = folder->create_date; |
| 490 | HFSPLUS_I(inode).fs_blocks = 0; | 491 | HFSPLUS_I(inode)->fs_blocks = 0; |
| 491 | inode->i_op = &hfsplus_dir_inode_operations; | 492 | inode->i_op = &hfsplus_dir_inode_operations; |
| 492 | inode->i_fop = &hfsplus_dir_operations; | 493 | inode->i_fop = &hfsplus_dir_operations; |
| 493 | } else if (type == HFSPLUS_FILE) { | 494 | } else if (type == HFSPLUS_FILE) { |
| @@ -518,7 +519,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | |||
| 518 | inode->i_atime = hfsp_mt2ut(file->access_date); | 519 | inode->i_atime = hfsp_mt2ut(file->access_date); |
| 519 | inode->i_mtime = hfsp_mt2ut(file->content_mod_date); | 520 | inode->i_mtime = hfsp_mt2ut(file->content_mod_date); |
| 520 | inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date); | 521 | inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date); |
| 521 | HFSPLUS_I(inode).create_date = file->create_date; | 522 | HFSPLUS_I(inode)->create_date = file->create_date; |
| 522 | } else { | 523 | } else { |
| 523 | printk(KERN_ERR "hfs: bad catalog entry used to create inode\n"); | 524 | printk(KERN_ERR "hfs: bad catalog entry used to create inode\n"); |
| 524 | res = -EIO; | 525 | res = -EIO; |
| @@ -533,12 +534,12 @@ int hfsplus_cat_write_inode(struct inode *inode) | |||
| 533 | hfsplus_cat_entry entry; | 534 | hfsplus_cat_entry entry; |
| 534 | 535 | ||
| 535 | if (HFSPLUS_IS_RSRC(inode)) | 536 | if (HFSPLUS_IS_RSRC(inode)) |
| 536 | main_inode = HFSPLUS_I(inode).rsrc_inode; | 537 | main_inode = HFSPLUS_I(inode)->rsrc_inode; |
| 537 | 538 | ||
| 538 | if (!main_inode->i_nlink) | 539 | if (!main_inode->i_nlink) |
| 539 | return 0; | 540 | return 0; |
| 540 | 541 | ||
| 541 | if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd)) | 542 | if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd)) |
| 542 | /* panic? */ | 543 | /* panic? */ |
| 543 | return -EIO; | 544 | return -EIO; |
| 544 | 545 | ||
| @@ -554,7 +555,7 @@ int hfsplus_cat_write_inode(struct inode *inode) | |||
| 554 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, | 555 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, |
| 555 | sizeof(struct hfsplus_cat_folder)); | 556 | sizeof(struct hfsplus_cat_folder)); |
| 556 | /* simple node checks? */ | 557 | /* simple node checks? */ |
| 557 | hfsplus_set_perms(inode, &folder->permissions); | 558 | hfsplus_cat_set_perms(inode, &folder->permissions); |
| 558 | folder->access_date = hfsp_ut2mt(inode->i_atime); | 559 | folder->access_date = hfsp_ut2mt(inode->i_atime); |
| 559 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); | 560 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); |
| 560 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); | 561 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); |
| @@ -576,11 +577,7 @@ int hfsplus_cat_write_inode(struct inode *inode) | |||
| 576 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, | 577 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, |
| 577 | sizeof(struct hfsplus_cat_file)); | 578 | sizeof(struct hfsplus_cat_file)); |
| 578 | hfsplus_inode_write_fork(inode, &file->data_fork); | 579 | hfsplus_inode_write_fork(inode, &file->data_fork); |
| 579 | if (S_ISREG(inode->i_mode)) | 580 | hfsplus_cat_set_perms(inode, &file->permissions); |
| 580 | HFSPLUS_I(inode).dev = inode->i_nlink; | ||
| 581 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) | ||
| 582 | HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev); | ||
| 583 | hfsplus_set_perms(inode, &file->permissions); | ||
| 584 | if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) | 581 | if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) |
| 585 | file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); | 582 | file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); |
| 586 | else | 583 | else |
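
The comment added in hfsplus_file_lookup() above is worth unpacking: it notes that __mark_inode_dirty() expects inodes to be hashed, and hlist_unhashed() only checks whether pprev is NULL, so pointing pprev at the node's own next field makes a resource-fork inode look hashed without ever putting it on a real hash chain; hlist_del() on such a node touches only the node itself and therefore needs no locking. A sketch of the trick in isolation, not from the patch; the helper name is made up, and newer kernels provide an hlist_add_fake() helper for the same thing:

	#include <linux/fs.h>
	#include <linux/list.h>

	static void example_fake_hash(struct inode *inode)
	{
		/* looks hashed to hlist_unhashed(), never on any chain */
		inode->i_hash.pprev = &inode->i_hash.next;
	}
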
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c index ac405f099026..5b4667e08ef7 100644 --- a/fs/hfsplus/ioctl.c +++ b/fs/hfsplus/ioctl.c | |||
| @@ -17,83 +17,98 @@ | |||
| 17 | #include <linux/mount.h> | 17 | #include <linux/mount.h> |
| 18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
| 19 | #include <linux/xattr.h> | 19 | #include <linux/xattr.h> |
| 20 | #include <linux/smp_lock.h> | ||
| 21 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
| 22 | #include "hfsplus_fs.h" | 21 | #include "hfsplus_fs.h" |
| 23 | 22 | ||
| 24 | long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 23 | static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags) |
| 25 | { | 24 | { |
| 26 | struct inode *inode = filp->f_path.dentry->d_inode; | 25 | struct inode *inode = file->f_path.dentry->d_inode; |
| 26 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 27 | unsigned int flags = 0; | ||
| 28 | |||
| 29 | if (inode->i_flags & S_IMMUTABLE) | ||
| 30 | flags |= FS_IMMUTABLE_FL; | ||
| 31 | if (inode->i_flags & S_APPEND) | ||
| 32 | flags |= FS_APPEND_FL; | ||
| 33 | if (hip->userflags & HFSPLUS_FLG_NODUMP) | ||
| 34 | flags |= FS_NODUMP_FL; | ||
| 35 | |||
| 36 | return put_user(flags, user_flags); | ||
| 37 | } | ||
| 38 | |||
| 39 | static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags) | ||
| 40 | { | ||
| 41 | struct inode *inode = file->f_path.dentry->d_inode; | ||
| 42 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); | ||
| 27 | unsigned int flags; | 43 | unsigned int flags; |
| 44 | int err = 0; | ||
| 28 | 45 | ||
| 29 | lock_kernel(); | 46 | err = mnt_want_write(file->f_path.mnt); |
| 30 | switch (cmd) { | 47 | if (err) |
| 31 | case HFSPLUS_IOC_EXT2_GETFLAGS: | 48 | goto out; |
| 32 | flags = 0; | ||
| 33 | if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE) | ||
| 34 | flags |= FS_IMMUTABLE_FL; /* EXT2_IMMUTABLE_FL */ | ||
| 35 | if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND) | ||
| 36 | flags |= FS_APPEND_FL; /* EXT2_APPEND_FL */ | ||
| 37 | if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP) | ||
| 38 | flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */ | ||
| 39 | return put_user(flags, (int __user *)arg); | ||
| 40 | case HFSPLUS_IOC_EXT2_SETFLAGS: { | ||
| 41 | int err = 0; | ||
| 42 | err = mnt_want_write(filp->f_path.mnt); | ||
| 43 | if (err) { | ||
| 44 | unlock_kernel(); | ||
| 45 | return err; | ||
| 46 | } | ||
| 47 | 49 | ||
| 48 | if (!is_owner_or_cap(inode)) { | 50 | if (!is_owner_or_cap(inode)) { |
| 49 | err = -EACCES; | 51 | err = -EACCES; |
| 50 | goto setflags_out; | 52 | goto out_drop_write; |
| 51 | } | 53 | } |
| 52 | if (get_user(flags, (int __user *)arg)) { | ||
| 53 | err = -EFAULT; | ||
| 54 | goto setflags_out; | ||
| 55 | } | ||
| 56 | if (flags & (FS_IMMUTABLE_FL|FS_APPEND_FL) || | ||
| 57 | HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) { | ||
| 58 | if (!capable(CAP_LINUX_IMMUTABLE)) { | ||
| 59 | err = -EPERM; | ||
| 60 | goto setflags_out; | ||
| 61 | } | ||
| 62 | } | ||
| 63 | 54 | ||
| 64 | /* don't silently ignore unsupported ext2 flags */ | 55 | if (get_user(flags, user_flags)) { |
| 65 | if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) { | 56 | err = -EFAULT; |
| 66 | err = -EOPNOTSUPP; | 57 | goto out_drop_write; |
| 67 | goto setflags_out; | 58 | } |
| 68 | } | 59 | |
| 69 | if (flags & FS_IMMUTABLE_FL) { /* EXT2_IMMUTABLE_FL */ | 60 | mutex_lock(&inode->i_mutex); |
| 70 | inode->i_flags |= S_IMMUTABLE; | 61 | |
| 71 | HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE; | 62 | if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) || |
| 72 | } else { | 63 | inode->i_flags & (S_IMMUTABLE|S_APPEND)) { |
| 73 | inode->i_flags &= ~S_IMMUTABLE; | 64 | if (!capable(CAP_LINUX_IMMUTABLE)) { |
| 74 | HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE; | 65 | err = -EPERM; |
| 75 | } | 66 | goto out_unlock_inode; |
| 76 | if (flags & FS_APPEND_FL) { /* EXT2_APPEND_FL */ | ||
| 77 | inode->i_flags |= S_APPEND; | ||
| 78 | HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND; | ||
| 79 | } else { | ||
| 80 | inode->i_flags &= ~S_APPEND; | ||
| 81 | HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND; | ||
| 82 | } | 67 | } |
| 83 | if (flags & FS_NODUMP_FL) /* EXT2_NODUMP_FL */ | ||
| 84 | HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP; | ||
| 85 | else | ||
| 86 | HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP; | ||
| 87 | |||
| 88 | inode->i_ctime = CURRENT_TIME_SEC; | ||
| 89 | mark_inode_dirty(inode); | ||
| 90 | setflags_out: | ||
| 91 | mnt_drop_write(filp->f_path.mnt); | ||
| 92 | unlock_kernel(); | ||
| 93 | return err; | ||
| 94 | } | 68 | } |
| 69 | |||
| 70 | /* don't silently ignore unsupported ext2 flags */ | ||
| 71 | if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) { | ||
| 72 | err = -EOPNOTSUPP; | ||
| 73 | goto out_unlock_inode; | ||
| 74 | } | ||
| 75 | |||
| 76 | if (flags & FS_IMMUTABLE_FL) | ||
| 77 | inode->i_flags |= S_IMMUTABLE; | ||
| 78 | else | ||
| 79 | inode->i_flags &= ~S_IMMUTABLE; | ||
| 80 | |||
| 81 | if (flags & FS_APPEND_FL) | ||
| 82 | inode->i_flags |= S_APPEND; | ||
| 83 | else | ||
| 84 | inode->i_flags &= ~S_APPEND; | ||
| 85 | |||
| 86 | if (flags & FS_NODUMP_FL) | ||
| 87 | hip->userflags |= HFSPLUS_FLG_NODUMP; | ||
| 88 | else | ||
| 89 | hip->userflags &= ~HFSPLUS_FLG_NODUMP; | ||
| 90 | |||
| 91 | inode->i_ctime = CURRENT_TIME_SEC; | ||
| 92 | mark_inode_dirty(inode); | ||
| 93 | |||
| 94 | out_unlock_inode: | ||
| 95 | mutex_unlock(&inode->i_mutex); | ||
| 96 | out_drop_write: | ||
| 97 | mnt_drop_write(file->f_path.mnt); | ||
| 98 | out: | ||
| 99 | return err; | ||
| 100 | } | ||
| 101 | |||
| 102 | long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
| 103 | { | ||
| 104 | void __user *argp = (void __user *)arg; | ||
| 105 | |||
| 106 | switch (cmd) { | ||
| 107 | case HFSPLUS_IOC_EXT2_GETFLAGS: | ||
| 108 | return hfsplus_ioctl_getflags(file, argp); | ||
| 109 | case HFSPLUS_IOC_EXT2_SETFLAGS: | ||
| 110 | return hfsplus_ioctl_setflags(file, argp); | ||
| 95 | default: | 111 | default: |
| 96 | unlock_kernel(); | ||
| 97 | return -ENOTTY; | 112 | return -ENOTTY; |
| 98 | } | 113 | } |
| 99 | } | 114 | } |
| @@ -110,7 +125,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name, | |||
| 110 | if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode)) | 125 | if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode)) |
| 111 | return -EOPNOTSUPP; | 126 | return -EOPNOTSUPP; |
| 112 | 127 | ||
| 113 | res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); | 128 | res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); |
| 114 | if (res) | 129 | if (res) |
| 115 | return res; | 130 | return res; |
| 116 | res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); | 131 | res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); |
| @@ -153,7 +168,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, | |||
| 153 | return -EOPNOTSUPP; | 168 | return -EOPNOTSUPP; |
| 154 | 169 | ||
| 155 | if (size) { | 170 | if (size) { |
| 156 | res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); | 171 | res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); |
| 157 | if (res) | 172 | if (res) |
| 158 | return res; | 173 | return res; |
| 159 | res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); | 174 | res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); |
| @@ -177,7 +192,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, | |||
| 177 | } else | 192 | } else |
| 178 | res = size ? -ERANGE : 4; | 193 | res = size ? -ERANGE : 4; |
| 179 | } else | 194 | } else |
| 180 | res = -ENODATA; | 195 | res = -EOPNOTSUPP; |
| 181 | out: | 196 | out: |
| 182 | if (size) | 197 | if (size) |
| 183 | hfs_find_exit(&fd); | 198 | hfs_find_exit(&fd); |
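
The rewritten ioctl handlers above map between the ext2-style FS_*_FL bits and hfsplus state (S_IMMUTABLE/S_APPEND on the VFS inode, HFSPLUS_FLG_NODUMP in userflags). Assuming HFSPLUS_IOC_EXT2_GETFLAGS/SETFLAGS are defined with the same numbers as the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS (that definition is not part of this hunk), a plain userspace program can query them; the file path below is made up:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		int flags = 0;
		int fd = open("/mnt/hfsplus/somefile", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
			perror("FS_IOC_GETFLAGS");
			close(fd);
			return 1;
		}
		printf("immutable=%d append-only=%d nodump=%d\n",
		       !!(flags & FS_IMMUTABLE_FL),
		       !!(flags & FS_APPEND_FL),
		       !!(flags & FS_NODUMP_FL));
		close(fd);
		return 0;
	}
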
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 572628b4b07d..f9ab276a4d8d 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c | |||
| @@ -143,13 +143,13 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) | |||
| 143 | kfree(p); | 143 | kfree(p); |
| 144 | break; | 144 | break; |
| 145 | case opt_decompose: | 145 | case opt_decompose: |
| 146 | sbi->flags &= ~HFSPLUS_SB_NODECOMPOSE; | 146 | clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags); |
| 147 | break; | 147 | break; |
| 148 | case opt_nodecompose: | 148 | case opt_nodecompose: |
| 149 | sbi->flags |= HFSPLUS_SB_NODECOMPOSE; | 149 | set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags); |
| 150 | break; | 150 | break; |
| 151 | case opt_force: | 151 | case opt_force: |
| 152 | sbi->flags |= HFSPLUS_SB_FORCE; | 152 | set_bit(HFSPLUS_SB_FORCE, &sbi->flags); |
| 153 | break; | 153 | break; |
| 154 | default: | 154 | default: |
| 155 | return 0; | 155 | return 0; |
| @@ -171,7 +171,7 @@ done: | |||
| 171 | 171 | ||
| 172 | int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt) | 172 | int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt) |
| 173 | { | 173 | { |
| 174 | struct hfsplus_sb_info *sbi = &HFSPLUS_SB(mnt->mnt_sb); | 174 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb); |
| 175 | 175 | ||
| 176 | if (sbi->creator != HFSPLUS_DEF_CR_TYPE) | 176 | if (sbi->creator != HFSPLUS_DEF_CR_TYPE) |
| 177 | seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator); | 177 | seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator); |
| @@ -184,7 +184,7 @@ int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt) | |||
| 184 | seq_printf(seq, ",session=%u", sbi->session); | 184 | seq_printf(seq, ",session=%u", sbi->session); |
| 185 | if (sbi->nls) | 185 | if (sbi->nls) |
| 186 | seq_printf(seq, ",nls=%s", sbi->nls->charset); | 186 | seq_printf(seq, ",nls=%s", sbi->nls->charset); |
| 187 | if (sbi->flags & HFSPLUS_SB_NODECOMPOSE) | 187 | if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags)) |
| 188 | seq_printf(seq, ",nodecompose"); | 188 | seq_printf(seq, ",nodecompose"); |
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c index 1528a6fd0299..208b16c645cc 100644 --- a/fs/hfsplus/part_tbl.c +++ b/fs/hfsplus/part_tbl.c | |||
| @@ -74,6 +74,7 @@ struct old_pmap { | |||
| 74 | int hfs_part_find(struct super_block *sb, | 74 | int hfs_part_find(struct super_block *sb, |
| 75 | sector_t *part_start, sector_t *part_size) | 75 | sector_t *part_start, sector_t *part_size) |
| 76 | { | 76 | { |
| 77 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 77 | struct buffer_head *bh; | 78 | struct buffer_head *bh; |
| 78 | __be16 *data; | 79 | __be16 *data; |
| 79 | int i, size, res; | 80 | int i, size, res; |
| @@ -95,7 +96,7 @@ int hfs_part_find(struct super_block *sb, | |||
| 95 | for (i = 0; i < size; p++, i++) { | 96 | for (i = 0; i < size; p++, i++) { |
| 96 | if (p->pdStart && p->pdSize && | 97 | if (p->pdStart && p->pdSize && |
| 97 | p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ && | 98 | p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ && |
| 98 | (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) { | 99 | (sbi->part < 0 || sbi->part == i)) { |
| 99 | *part_start += be32_to_cpu(p->pdStart); | 100 | *part_start += be32_to_cpu(p->pdStart); |
| 100 | *part_size = be32_to_cpu(p->pdSize); | 101 | *part_size = be32_to_cpu(p->pdSize); |
| 101 | res = 0; | 102 | res = 0; |
| @@ -111,7 +112,7 @@ int hfs_part_find(struct super_block *sb, | |||
| 111 | size = be32_to_cpu(pm->pmMapBlkCnt); | 112 | size = be32_to_cpu(pm->pmMapBlkCnt); |
| 112 | for (i = 0; i < size;) { | 113 | for (i = 0; i < size;) { |
| 113 | if (!memcmp(pm->pmPartType,"Apple_HFS", 9) && | 114 | if (!memcmp(pm->pmPartType,"Apple_HFS", 9) && |
| 114 | (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) { | 115 | (sbi->part < 0 || sbi->part == i)) { |
| 115 | *part_start += be32_to_cpu(pm->pmPyPartStart); | 116 | *part_start += be32_to_cpu(pm->pmPyPartStart); |
| 116 | *part_size = be32_to_cpu(pm->pmPartBlkCnt); | 117 | *part_size = be32_to_cpu(pm->pmPartBlkCnt); |
| 117 | res = 0; | 118 | res = 0; |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 3b55c050c742..9a88d7536103 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
| 13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
| 14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 15 | #include <linux/smp_lock.h> | ||
| 16 | #include <linux/vfs.h> | 15 | #include <linux/vfs.h> |
| 17 | #include <linux/nls.h> | 16 | #include <linux/nls.h> |
| 18 | 17 | ||
| @@ -21,40 +20,11 @@ static void hfsplus_destroy_inode(struct inode *inode); | |||
| 21 | 20 | ||
| 22 | #include "hfsplus_fs.h" | 21 | #include "hfsplus_fs.h" |
| 23 | 22 | ||
| 24 | struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) | 23 | static int hfsplus_system_read_inode(struct inode *inode) |
| 25 | { | 24 | { |
| 26 | struct hfs_find_data fd; | 25 | struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr; |
| 27 | struct hfsplus_vh *vhdr; | ||
| 28 | struct inode *inode; | ||
| 29 | long err = -EIO; | ||
| 30 | |||
| 31 | inode = iget_locked(sb, ino); | ||
| 32 | if (!inode) | ||
| 33 | return ERR_PTR(-ENOMEM); | ||
| 34 | if (!(inode->i_state & I_NEW)) | ||
| 35 | return inode; | ||
| 36 | 26 | ||
| 37 | INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); | 27 | switch (inode->i_ino) { |
| 38 | mutex_init(&HFSPLUS_I(inode).extents_lock); | ||
| 39 | HFSPLUS_I(inode).flags = 0; | ||
| 40 | HFSPLUS_I(inode).rsrc_inode = NULL; | ||
| 41 | atomic_set(&HFSPLUS_I(inode).opencnt, 0); | ||
| 42 | |||
| 43 | if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) { | ||
| 44 | read_inode: | ||
| 45 | hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); | ||
| 46 | err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); | ||
| 47 | if (!err) | ||
| 48 | err = hfsplus_cat_read_inode(inode, &fd); | ||
| 49 | hfs_find_exit(&fd); | ||
| 50 | if (err) | ||
| 51 | goto bad_inode; | ||
| 52 | goto done; | ||
| 53 | } | ||
| 54 | vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr; | ||
| 55 | switch(inode->i_ino) { | ||
| 56 | case HFSPLUS_ROOT_CNID: | ||
| 57 | goto read_inode; | ||
| 58 | case HFSPLUS_EXT_CNID: | 28 | case HFSPLUS_EXT_CNID: |
| 59 | hfsplus_inode_read_fork(inode, &vhdr->ext_file); | 29 | hfsplus_inode_read_fork(inode, &vhdr->ext_file); |
| 60 | inode->i_mapping->a_ops = &hfsplus_btree_aops; | 30 | inode->i_mapping->a_ops = &hfsplus_btree_aops; |
| @@ -75,74 +45,101 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) | |||
| 75 | inode->i_mapping->a_ops = &hfsplus_btree_aops; | 45 | inode->i_mapping->a_ops = &hfsplus_btree_aops; |
| 76 | break; | 46 | break; |
| 77 | default: | 47 | default: |
| 78 | goto bad_inode; | 48 | return -EIO; |
| 49 | } | ||
| 50 | |||
| 51 | return 0; | ||
| 52 | } | ||
| 53 | |||
| 54 | struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) | ||
| 55 | { | ||
| 56 | struct hfs_find_data fd; | ||
| 57 | struct inode *inode; | ||
| 58 | int err; | ||
| 59 | |||
| 60 | inode = iget_locked(sb, ino); | ||
| 61 | if (!inode) | ||
| 62 | return ERR_PTR(-ENOMEM); | ||
| 63 | if (!(inode->i_state & I_NEW)) | ||
| 64 | return inode; | ||
| 65 | |||
| 66 | INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list); | ||
| 67 | mutex_init(&HFSPLUS_I(inode)->extents_lock); | ||
| 68 | HFSPLUS_I(inode)->flags = 0; | ||
| 69 | HFSPLUS_I(inode)->rsrc_inode = NULL; | ||
| 70 | atomic_set(&HFSPLUS_I(inode)->opencnt, 0); | ||
| 71 | |||
| 72 | if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || | ||
| 73 | inode->i_ino == HFSPLUS_ROOT_CNID) { | ||
| 74 | hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); | ||
| 75 | err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); | ||
| 76 | if (!err) | ||
| 77 | err = hfsplus_cat_read_inode(inode, &fd); | ||
| 78 | hfs_find_exit(&fd); | ||
| 79 | } else { | ||
| 80 | err = hfsplus_system_read_inode(inode); | ||
| 81 | } | ||
| 82 | |||
| 83 | if (err) { | ||
| 84 | iget_failed(inode); | ||
| 85 | return ERR_PTR(err); | ||
| 79 | } | 86 | } |
| 80 | 87 | ||
| 81 | done: | ||
| 82 | unlock_new_inode(inode); | 88 | unlock_new_inode(inode); |
| 83 | return inode; | 89 | return inode; |
| 84 | |||
| 85 | bad_inode: | ||
| 86 | iget_failed(inode); | ||
| 87 | return ERR_PTR(err); | ||
| 88 | } | 90 | } |
| 89 | 91 | ||
| 90 | static int hfsplus_write_inode(struct inode *inode, | 92 | static int hfsplus_system_write_inode(struct inode *inode) |
| 91 | struct writeback_control *wbc) | ||
| 92 | { | 93 | { |
| 93 | struct hfsplus_vh *vhdr; | 94 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); |
| 94 | int ret = 0; | 95 | struct hfsplus_vh *vhdr = sbi->s_vhdr; |
| 96 | struct hfsplus_fork_raw *fork; | ||
| 97 | struct hfs_btree *tree = NULL; | ||
| 95 | 98 | ||
| 96 | dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); | ||
| 97 | hfsplus_ext_write_extent(inode); | ||
| 98 | if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) { | ||
| 99 | return hfsplus_cat_write_inode(inode); | ||
| 100 | } | ||
| 101 | vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr; | ||
| 102 | switch (inode->i_ino) { | 99 | switch (inode->i_ino) { |
| 103 | case HFSPLUS_ROOT_CNID: | ||
| 104 | ret = hfsplus_cat_write_inode(inode); | ||
| 105 | break; | ||
| 106 | case HFSPLUS_EXT_CNID: | 100 | case HFSPLUS_EXT_CNID: |
| 107 | if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) { | 101 | fork = &vhdr->ext_file; |
| 108 | HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; | 102 | tree = sbi->ext_tree; |
| 109 | inode->i_sb->s_dirt = 1; | ||
| 110 | } | ||
| 111 | hfsplus_inode_write_fork(inode, &vhdr->ext_file); | ||
| 112 | hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree); | ||
| 113 | break; | 103 | break; |
| 114 | case HFSPLUS_CAT_CNID: | 104 | case HFSPLUS_CAT_CNID: |
| 115 | if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) { | 105 | fork = &vhdr->cat_file; |
| 116 | HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; | 106 | tree = sbi->cat_tree; |
| 117 | inode->i_sb->s_dirt = 1; | ||
| 118 | } | ||
| 119 | hfsplus_inode_write_fork(inode, &vhdr->cat_file); | ||
| 120 | hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree); | ||
| 121 | break; | 107 | break; |
| 122 | case HFSPLUS_ALLOC_CNID: | 108 | case HFSPLUS_ALLOC_CNID: |
| 123 | if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) { | 109 | fork = &vhdr->alloc_file; |
| 124 | HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; | ||
| 125 | inode->i_sb->s_dirt = 1; | ||
| 126 | } | ||
| 127 | hfsplus_inode_write_fork(inode, &vhdr->alloc_file); | ||
| 128 | break; | 110 | break; |
| 129 | case HFSPLUS_START_CNID: | 111 | case HFSPLUS_START_CNID: |
| 130 | if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) { | 112 | fork = &vhdr->start_file; |
| 131 | HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; | ||
| 132 | inode->i_sb->s_dirt = 1; | ||
| 133 | } | ||
| 134 | hfsplus_inode_write_fork(inode, &vhdr->start_file); | ||
| 135 | break; | 113 | break; |
| 136 | case HFSPLUS_ATTR_CNID: | 114 | case HFSPLUS_ATTR_CNID: |
| 137 | if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) { | 115 | fork = &vhdr->attr_file; |
| 138 | HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; | 116 | tree = sbi->attr_tree; |
| 139 | inode->i_sb->s_dirt = 1; | 117 | default: |
| 140 | } | 118 | return -EIO; |
| 141 | hfsplus_inode_write_fork(inode, &vhdr->attr_file); | 119 | } |
| 142 | hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree); | 120 | |
| 143 | break; | 121 | if (fork->total_size != cpu_to_be64(inode->i_size)) { |
| 122 | set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); | ||
| 123 | inode->i_sb->s_dirt = 1; | ||
| 144 | } | 124 | } |
| 145 | return ret; | 125 | hfsplus_inode_write_fork(inode, fork); |
| 126 | if (tree) | ||
| 127 | hfs_btree_write(tree); | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | static int hfsplus_write_inode(struct inode *inode, | ||
| 132 | struct writeback_control *wbc) | ||
| 133 | { | ||
| 134 | dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); | ||
| 135 | |||
| 136 | hfsplus_ext_write_extent(inode); | ||
| 137 | |||
| 138 | if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || | ||
| 139 | inode->i_ino == HFSPLUS_ROOT_CNID) | ||
| 140 | return hfsplus_cat_write_inode(inode); | ||
| 141 | else | ||
| 142 | return hfsplus_system_write_inode(inode); | ||
| 146 | } | 143 | } |
| 147 | 144 | ||
| 148 | static void hfsplus_evict_inode(struct inode *inode) | 145 | static void hfsplus_evict_inode(struct inode *inode) |
| @@ -151,51 +148,53 @@ static void hfsplus_evict_inode(struct inode *inode) | |||
| 151 | truncate_inode_pages(&inode->i_data, 0); | 148 | truncate_inode_pages(&inode->i_data, 0); |
| 152 | end_writeback(inode); | 149 | end_writeback(inode); |
| 153 | if (HFSPLUS_IS_RSRC(inode)) { | 150 | if (HFSPLUS_IS_RSRC(inode)) { |
| 154 | HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL; | 151 | HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL; |
| 155 | iput(HFSPLUS_I(inode).rsrc_inode); | 152 | iput(HFSPLUS_I(inode)->rsrc_inode); |
| 156 | } | 153 | } |
| 157 | } | 154 | } |
| 158 | 155 | ||
| 159 | int hfsplus_sync_fs(struct super_block *sb, int wait) | 156 | int hfsplus_sync_fs(struct super_block *sb, int wait) |
| 160 | { | 157 | { |
| 161 | struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr; | 158 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); |
| 159 | struct hfsplus_vh *vhdr = sbi->s_vhdr; | ||
| 162 | 160 | ||
| 163 | dprint(DBG_SUPER, "hfsplus_write_super\n"); | 161 | dprint(DBG_SUPER, "hfsplus_write_super\n"); |
| 164 | 162 | ||
| 165 | lock_super(sb); | 163 | mutex_lock(&sbi->vh_mutex); |
| 164 | mutex_lock(&sbi->alloc_mutex); | ||
| 166 | sb->s_dirt = 0; | 165 | sb->s_dirt = 0; |
| 167 | 166 | ||
| 168 | vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks); | 167 | vhdr->free_blocks = cpu_to_be32(sbi->free_blocks); |
| 169 | vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc); | 168 | vhdr->next_cnid = cpu_to_be32(sbi->next_cnid); |
| 170 | vhdr->next_cnid = cpu_to_be32(HFSPLUS_SB(sb).next_cnid); | 169 | vhdr->folder_count = cpu_to_be32(sbi->folder_count); |
| 171 | vhdr->folder_count = cpu_to_be32(HFSPLUS_SB(sb).folder_count); | 170 | vhdr->file_count = cpu_to_be32(sbi->file_count); |
| 172 | vhdr->file_count = cpu_to_be32(HFSPLUS_SB(sb).file_count); | ||
| 173 | 171 | ||
| 174 | mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh); | 172 | mark_buffer_dirty(sbi->s_vhbh); |
| 175 | if (HFSPLUS_SB(sb).flags & HFSPLUS_SB_WRITEBACKUP) { | 173 | if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) { |
| 176 | if (HFSPLUS_SB(sb).sect_count) { | 174 | if (sbi->sect_count) { |
| 177 | struct buffer_head *bh; | 175 | struct buffer_head *bh; |
| 178 | u32 block, offset; | 176 | u32 block, offset; |
| 179 | 177 | ||
| 180 | block = HFSPLUS_SB(sb).blockoffset; | 178 | block = sbi->blockoffset; |
| 181 | block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9); | 179 | block += (sbi->sect_count - 2) >> (sb->s_blocksize_bits - 9); |
| 182 | offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1); | 180 | offset = ((sbi->sect_count - 2) << 9) & (sb->s_blocksize - 1); |
| 183 | printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset, | 181 | printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", |
| 184 | HFSPLUS_SB(sb).sect_count, block, offset); | 182 | sbi->blockoffset, sbi->sect_count, |
| 183 | block, offset); | ||
| 185 | bh = sb_bread(sb, block); | 184 | bh = sb_bread(sb, block); |
| 186 | if (bh) { | 185 | if (bh) { |
| 187 | vhdr = (struct hfsplus_vh *)(bh->b_data + offset); | 186 | vhdr = (struct hfsplus_vh *)(bh->b_data + offset); |
| 188 | if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) { | 187 | if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) { |
| 189 | memcpy(vhdr, HFSPLUS_SB(sb).s_vhdr, sizeof(*vhdr)); | 188 | memcpy(vhdr, sbi->s_vhdr, sizeof(*vhdr)); |
| 190 | mark_buffer_dirty(bh); | 189 | mark_buffer_dirty(bh); |
| 191 | brelse(bh); | 190 | brelse(bh); |
| 192 | } else | 191 | } else |
| 193 | printk(KERN_WARNING "hfs: backup not found!\n"); | 192 | printk(KERN_WARNING "hfs: backup not found!\n"); |
| 194 | } | 193 | } |
| 195 | } | 194 | } |
| 196 | HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP; | ||
| 197 | } | 195 | } |
| 198 | unlock_super(sb); | 196 | mutex_unlock(&sbi->alloc_mutex); |
| 197 | mutex_unlock(&sbi->vh_mutex); | ||
| 199 | return 0; | 198 | return 0; |
| 200 | } | 199 | } |
| 201 | 200 | ||
| @@ -209,48 +208,48 @@ static void hfsplus_write_super(struct super_block *sb) | |||
| 209 | 208 | ||
| 210 | static void hfsplus_put_super(struct super_block *sb) | 209 | static void hfsplus_put_super(struct super_block *sb) |
| 211 | { | 210 | { |
| 211 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 212 | |||
| 212 | dprint(DBG_SUPER, "hfsplus_put_super\n"); | 213 | dprint(DBG_SUPER, "hfsplus_put_super\n"); |
| 214 | |||
| 213 | if (!sb->s_fs_info) | 215 | if (!sb->s_fs_info) |
| 214 | return; | 216 | return; |
| 215 | 217 | ||
| 216 | lock_kernel(); | ||
| 217 | |||
| 218 | if (sb->s_dirt) | 218 | if (sb->s_dirt) |
| 219 | hfsplus_write_super(sb); | 219 | hfsplus_write_super(sb); |
| 220 | if (!(sb->s_flags & MS_RDONLY) && HFSPLUS_SB(sb).s_vhdr) { | 220 | if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) { |
| 221 | struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr; | 221 | struct hfsplus_vh *vhdr = sbi->s_vhdr; |
| 222 | 222 | ||
| 223 | vhdr->modify_date = hfsp_now2mt(); | 223 | vhdr->modify_date = hfsp_now2mt(); |
| 224 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT); | 224 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT); |
| 225 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT); | 225 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT); |
| 226 | mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh); | 226 | mark_buffer_dirty(sbi->s_vhbh); |
| 227 | sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh); | 227 | sync_dirty_buffer(sbi->s_vhbh); |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | hfs_btree_close(HFSPLUS_SB(sb).cat_tree); | 230 | hfs_btree_close(sbi->cat_tree); |
| 231 | hfs_btree_close(HFSPLUS_SB(sb).ext_tree); | 231 | hfs_btree_close(sbi->ext_tree); |
| 232 | iput(HFSPLUS_SB(sb).alloc_file); | 232 | iput(sbi->alloc_file); |
| 233 | iput(HFSPLUS_SB(sb).hidden_dir); | 233 | iput(sbi->hidden_dir); |
| 234 | brelse(HFSPLUS_SB(sb).s_vhbh); | 234 | brelse(sbi->s_vhbh); |
| 235 | unload_nls(HFSPLUS_SB(sb).nls); | 235 | unload_nls(sbi->nls); |
| 236 | kfree(sb->s_fs_info); | 236 | kfree(sb->s_fs_info); |
| 237 | sb->s_fs_info = NULL; | 237 | sb->s_fs_info = NULL; |
| 238 | |||
| 239 | unlock_kernel(); | ||
| 240 | } | 238 | } |
| 241 | 239 | ||
| 242 | static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf) | 240 | static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf) |
| 243 | { | 241 | { |
| 244 | struct super_block *sb = dentry->d_sb; | 242 | struct super_block *sb = dentry->d_sb; |
| 243 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 245 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | 244 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); |
| 246 | 245 | ||
| 247 | buf->f_type = HFSPLUS_SUPER_MAGIC; | 246 | buf->f_type = HFSPLUS_SUPER_MAGIC; |
| 248 | buf->f_bsize = sb->s_blocksize; | 247 | buf->f_bsize = sb->s_blocksize; |
| 249 | buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift; | 248 | buf->f_blocks = sbi->total_blocks << sbi->fs_shift; |
| 250 | buf->f_bfree = HFSPLUS_SB(sb).free_blocks << HFSPLUS_SB(sb).fs_shift; | 249 | buf->f_bfree = sbi->free_blocks << sbi->fs_shift; |
| 251 | buf->f_bavail = buf->f_bfree; | 250 | buf->f_bavail = buf->f_bfree; |
| 252 | buf->f_files = 0xFFFFFFFF; | 251 | buf->f_files = 0xFFFFFFFF; |
| 253 | buf->f_ffree = 0xFFFFFFFF - HFSPLUS_SB(sb).next_cnid; | 252 | buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid; |
| 254 | buf->f_fsid.val[0] = (u32)id; | 253 | buf->f_fsid.val[0] = (u32)id; |
| 255 | buf->f_fsid.val[1] = (u32)(id >> 32); | 254 | buf->f_fsid.val[1] = (u32)(id >> 32); |
| 256 | buf->f_namelen = HFSPLUS_MAX_STRLEN; | 255 | buf->f_namelen = HFSPLUS_MAX_STRLEN; |
| @@ -263,11 +262,11 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data) | |||
| 263 | if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) | 262 | if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) |
| 264 | return 0; | 263 | return 0; |
| 265 | if (!(*flags & MS_RDONLY)) { | 264 | if (!(*flags & MS_RDONLY)) { |
| 266 | struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr; | 265 | struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr; |
| 267 | struct hfsplus_sb_info sbi; | 266 | struct hfsplus_sb_info sbi; |
| 268 | 267 | ||
| 269 | memset(&sbi, 0, sizeof(struct hfsplus_sb_info)); | 268 | memset(&sbi, 0, sizeof(struct hfsplus_sb_info)); |
| 270 | sbi.nls = HFSPLUS_SB(sb).nls; | 269 | sbi.nls = HFSPLUS_SB(sb)->nls; |
| 271 | if (!hfsplus_parse_options(data, &sbi)) | 270 | if (!hfsplus_parse_options(data, &sbi)) |
| 272 | return -EINVAL; | 271 | return -EINVAL; |
| 273 | 272 | ||
| @@ -276,7 +275,7 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data) | |||
| 276 | "running fsck.hfsplus is recommended. leaving read-only.\n"); | 275 | "running fsck.hfsplus is recommended. leaving read-only.\n"); |
| 277 | sb->s_flags |= MS_RDONLY; | 276 | sb->s_flags |= MS_RDONLY; |
| 278 | *flags |= MS_RDONLY; | 277 | *flags |= MS_RDONLY; |
| 279 | } else if (sbi.flags & HFSPLUS_SB_FORCE) { | 278 | } else if (test_bit(HFSPLUS_SB_FORCE, &sbi.flags)) { |
| 280 | /* nothing */ | 279 | /* nothing */ |
| 281 | } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { | 280 | } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { |
| 282 | printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n"); | 281 | printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n"); |
| @@ -320,7 +319,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 320 | return -ENOMEM; | 319 | return -ENOMEM; |
| 321 | 320 | ||
| 322 | sb->s_fs_info = sbi; | 321 | sb->s_fs_info = sbi; |
| 323 | INIT_HLIST_HEAD(&sbi->rsrc_inodes); | 322 | mutex_init(&sbi->alloc_mutex); |
| 323 | mutex_init(&sbi->vh_mutex); | ||
| 324 | hfsplus_fill_defaults(sbi); | 324 | hfsplus_fill_defaults(sbi); |
| 325 | if (!hfsplus_parse_options(data, sbi)) { | 325 | if (!hfsplus_parse_options(data, sbi)) { |
| 326 | printk(KERN_ERR "hfs: unable to parse mount options\n"); | 326 | printk(KERN_ERR "hfs: unable to parse mount options\n"); |
| @@ -344,7 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 344 | err = -EINVAL; | 344 | err = -EINVAL; |
| 345 | goto cleanup; | 345 | goto cleanup; |
| 346 | } | 346 | } |
| 347 | vhdr = HFSPLUS_SB(sb).s_vhdr; | 347 | vhdr = sbi->s_vhdr; |
| 348 | 348 | ||
| 349 | /* Copy parts of the volume header into the superblock */ | 349 | /* Copy parts of the volume header into the superblock */ |
| 350 | sb->s_magic = HFSPLUS_VOLHEAD_SIG; | 350 | sb->s_magic = HFSPLUS_VOLHEAD_SIG; |
| @@ -353,18 +353,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 353 | printk(KERN_ERR "hfs: wrong filesystem version\n"); | 353 | printk(KERN_ERR "hfs: wrong filesystem version\n"); |
| 354 | goto cleanup; | 354 | goto cleanup; |
| 355 | } | 355 | } |
| 356 | HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks); | 356 | sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); |
| 357 | HFSPLUS_SB(sb).free_blocks = be32_to_cpu(vhdr->free_blocks); | 357 | sbi->free_blocks = be32_to_cpu(vhdr->free_blocks); |
| 358 | HFSPLUS_SB(sb).next_alloc = be32_to_cpu(vhdr->next_alloc); | 358 | sbi->next_cnid = be32_to_cpu(vhdr->next_cnid); |
| 359 | HFSPLUS_SB(sb).next_cnid = be32_to_cpu(vhdr->next_cnid); | 359 | sbi->file_count = be32_to_cpu(vhdr->file_count); |
| 360 | HFSPLUS_SB(sb).file_count = be32_to_cpu(vhdr->file_count); | 360 | sbi->folder_count = be32_to_cpu(vhdr->folder_count); |
| 361 | HFSPLUS_SB(sb).folder_count = be32_to_cpu(vhdr->folder_count); | 361 | sbi->data_clump_blocks = |
| 362 | HFSPLUS_SB(sb).data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift; | 362 | be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift; |
| 363 | if (!HFSPLUS_SB(sb).data_clump_blocks) | 363 | if (!sbi->data_clump_blocks) |
| 364 | HFSPLUS_SB(sb).data_clump_blocks = 1; | 364 | sbi->data_clump_blocks = 1; |
| 365 | HFSPLUS_SB(sb).rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift; | 365 | sbi->rsrc_clump_blocks = |
| 366 | if (!HFSPLUS_SB(sb).rsrc_clump_blocks) | 366 | be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift; |
| 367 | HFSPLUS_SB(sb).rsrc_clump_blocks = 1; | 367 | if (!sbi->rsrc_clump_blocks) |
| 368 | sbi->rsrc_clump_blocks = 1; | ||
| 368 | 369 | ||
| 369 | /* Set up operations so we can load metadata */ | 370 | /* Set up operations so we can load metadata */ |
| 370 | sb->s_op = &hfsplus_sops; | 371 | sb->s_op = &hfsplus_sops; |
| @@ -374,7 +375,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 374 | printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, " | 375 | printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, " |
| 375 | "running fsck.hfsplus is recommended. mounting read-only.\n"); | 376 | "running fsck.hfsplus is recommended. mounting read-only.\n"); |
| 376 | sb->s_flags |= MS_RDONLY; | 377 | sb->s_flags |= MS_RDONLY; |
| 377 | } else if (sbi->flags & HFSPLUS_SB_FORCE) { | 378 | } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) { |
| 378 | /* nothing */ | 379 | /* nothing */ |
| 379 | } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { | 380 | } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { |
| 380 | printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n"); | 381 | printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n"); |
| @@ -384,16 +385,15 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 384 | "use the force option at your own risk, mounting read-only.\n"); | 385 | "use the force option at your own risk, mounting read-only.\n"); |
| 385 | sb->s_flags |= MS_RDONLY; | 386 | sb->s_flags |= MS_RDONLY; |
| 386 | } | 387 | } |
| 387 | sbi->flags &= ~HFSPLUS_SB_FORCE; | ||
| 388 | 388 | ||
| 389 | /* Load metadata objects (B*Trees) */ | 389 | /* Load metadata objects (B*Trees) */ |
| 390 | HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); | 390 | sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); |
| 391 | if (!HFSPLUS_SB(sb).ext_tree) { | 391 | if (!sbi->ext_tree) { |
| 392 | printk(KERN_ERR "hfs: failed to load extents file\n"); | 392 | printk(KERN_ERR "hfs: failed to load extents file\n"); |
| 393 | goto cleanup; | 393 | goto cleanup; |
| 394 | } | 394 | } |
| 395 | HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); | 395 | sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); |
| 396 | if (!HFSPLUS_SB(sb).cat_tree) { | 396 | if (!sbi->cat_tree) { |
| 397 | printk(KERN_ERR "hfs: failed to load catalog file\n"); | 397 | printk(KERN_ERR "hfs: failed to load catalog file\n"); |
| 398 | goto cleanup; | 398 | goto cleanup; |
| 399 | } | 399 | } |
| @@ -404,7 +404,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 404 | err = PTR_ERR(inode); | 404 | err = PTR_ERR(inode); |
| 405 | goto cleanup; | 405 | goto cleanup; |
| 406 | } | 406 | } |
| 407 | HFSPLUS_SB(sb).alloc_file = inode; | 407 | sbi->alloc_file = inode; |
| 408 | 408 | ||
| 409 | /* Load the root directory */ | 409 | /* Load the root directory */ |
| 410 | root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID); | 410 | root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID); |
| @@ -423,7 +423,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 423 | 423 | ||
| 424 | str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; | 424 | str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; |
| 425 | str.name = HFSP_HIDDENDIR_NAME; | 425 | str.name = HFSP_HIDDENDIR_NAME; |
| 426 | hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); | 426 | hfs_find_init(sbi->cat_tree, &fd); |
| 427 | hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); | 427 | hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); |
| 428 | if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { | 428 | if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { |
| 429 | hfs_find_exit(&fd); | 429 | hfs_find_exit(&fd); |
| @@ -434,7 +434,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 434 | err = PTR_ERR(inode); | 434 | err = PTR_ERR(inode); |
| 435 | goto cleanup; | 435 | goto cleanup; |
| 436 | } | 436 | } |
| 437 | HFSPLUS_SB(sb).hidden_dir = inode; | 437 | sbi->hidden_dir = inode; |
| 438 | } else | 438 | } else |
| 439 | hfs_find_exit(&fd); | 439 | hfs_find_exit(&fd); |
| 440 | 440 | ||
| @@ -449,15 +449,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 449 | be32_add_cpu(&vhdr->write_count, 1); | 449 | be32_add_cpu(&vhdr->write_count, 1); |
| 450 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); | 450 | vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); |
| 451 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); | 451 | vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); |
| 452 | mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh); | 452 | mark_buffer_dirty(sbi->s_vhbh); |
| 453 | sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh); | 453 | sync_dirty_buffer(sbi->s_vhbh); |
| 454 | 454 | ||
| 455 | if (!HFSPLUS_SB(sb).hidden_dir) { | 455 | if (!sbi->hidden_dir) { |
| 456 | printk(KERN_DEBUG "hfs: create hidden dir...\n"); | 456 | printk(KERN_DEBUG "hfs: create hidden dir...\n"); |
| 457 | HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR); | 457 | |
| 458 | hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode, | 458 | mutex_lock(&sbi->vh_mutex); |
| 459 | &str, HFSPLUS_SB(sb).hidden_dir); | 459 | sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); |
| 460 | mark_inode_dirty(HFSPLUS_SB(sb).hidden_dir); | 460 | hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode, |
| 461 | &str, sbi->hidden_dir); | ||
| 462 | mutex_unlock(&sbi->vh_mutex); | ||
| 463 | |||
| 464 | mark_inode_dirty(sbi->hidden_dir); | ||
| 461 | } | 465 | } |
| 462 | out: | 466 | out: |
| 463 | unload_nls(sbi->nls); | 467 | unload_nls(sbi->nls); |
| @@ -486,7 +490,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb) | |||
| 486 | 490 | ||
| 487 | static void hfsplus_destroy_inode(struct inode *inode) | 491 | static void hfsplus_destroy_inode(struct inode *inode) |
| 488 | { | 492 | { |
| 489 | kmem_cache_free(hfsplus_inode_cachep, &HFSPLUS_I(inode)); | 493 | kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode)); |
| 490 | } | 494 | } |
| 491 | 495 | ||
| 492 | #define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info) | 496 | #define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info) |
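The hunk above collapses the five near-identical per-CNID branches of hfsplus_system_write_inode() into a single switch that only selects the volume-header fork and, where one exists, the b-tree; the size check, fork write and b-tree write then happen once. A condensed sketch of that shape, using the hfsplus types visible in the hunk (only three of the system files are shown, and a break is added before the default case for clarity):

    /* Sketch of the consolidated pattern: pick the fork and optional
     * b-tree per CNID, then share one write path. */
    static int write_system_inode(struct inode *inode, struct hfsplus_vh *vhdr,
                                  struct hfsplus_sb_info *sbi)
    {
            struct hfsplus_fork_raw *fork;
            struct hfs_btree *tree = NULL;

            switch (inode->i_ino) {
            case HFSPLUS_EXT_CNID:
                    fork = &vhdr->ext_file;
                    tree = sbi->ext_tree;
                    break;
            case HFSPLUS_CAT_CNID:
                    fork = &vhdr->cat_file;
                    tree = sbi->cat_tree;
                    break;
            case HFSPLUS_ALLOC_CNID:
                    fork = &vhdr->alloc_file;
                    break;
            default:
                    return -EIO;    /* not a known system file */
            }

            if (fork->total_size != cpu_to_be64(inode->i_size)) {
                    /* volume header changed: refresh the backup copy on sync */
                    set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
                    inode->i_sb->s_dirt = 1;
            }
            hfsplus_inode_write_fork(inode, fork);
            if (tree)
                    hfs_btree_write(tree);
            return 0;
    }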
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index 628ccf6fa402..b66d67de882c 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c | |||
| @@ -121,7 +121,7 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc) | |||
| 121 | int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p) | 121 | int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p) |
| 122 | { | 122 | { |
| 123 | const hfsplus_unichr *ip; | 123 | const hfsplus_unichr *ip; |
| 124 | struct nls_table *nls = HFSPLUS_SB(sb).nls; | 124 | struct nls_table *nls = HFSPLUS_SB(sb)->nls; |
| 125 | u8 *op; | 125 | u8 *op; |
| 126 | u16 cc, c0, c1; | 126 | u16 cc, c0, c1; |
| 127 | u16 *ce1, *ce2; | 127 | u16 *ce1, *ce2; |
| @@ -132,7 +132,7 @@ int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, c | |||
| 132 | ustrlen = be16_to_cpu(ustr->length); | 132 | ustrlen = be16_to_cpu(ustr->length); |
| 133 | len = *len_p; | 133 | len = *len_p; |
| 134 | ce1 = NULL; | 134 | ce1 = NULL; |
| 135 | compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE); | 135 | compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); |
| 136 | 136 | ||
| 137 | while (ustrlen > 0) { | 137 | while (ustrlen > 0) { |
| 138 | c0 = be16_to_cpu(*ip++); | 138 | c0 = be16_to_cpu(*ip++); |
| @@ -246,7 +246,7 @@ out: | |||
| 246 | static inline int asc2unichar(struct super_block *sb, const char *astr, int len, | 246 | static inline int asc2unichar(struct super_block *sb, const char *astr, int len, |
| 247 | wchar_t *uc) | 247 | wchar_t *uc) |
| 248 | { | 248 | { |
| 249 | int size = HFSPLUS_SB(sb).nls->char2uni(astr, len, uc); | 249 | int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc); |
| 250 | if (size <= 0) { | 250 | if (size <= 0) { |
| 251 | *uc = '?'; | 251 | *uc = '?'; |
| 252 | size = 1; | 252 | size = 1; |
| @@ -293,7 +293,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr, | |||
| 293 | u16 *dstr, outlen = 0; | 293 | u16 *dstr, outlen = 0; |
| 294 | wchar_t c; | 294 | wchar_t c; |
| 295 | 295 | ||
| 296 | decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE); | 296 | decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); |
| 297 | while (outlen < HFSPLUS_MAX_STRLEN && len > 0) { | 297 | while (outlen < HFSPLUS_MAX_STRLEN && len > 0) { |
| 298 | size = asc2unichar(sb, astr, len, &c); | 298 | size = asc2unichar(sb, astr, len, &c); |
| 299 | 299 | ||
| @@ -330,8 +330,8 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str) | |||
| 330 | wchar_t c; | 330 | wchar_t c; |
| 331 | u16 c2; | 331 | u16 c2; |
| 332 | 332 | ||
| 333 | casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD); | 333 | casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); |
| 334 | decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE); | 334 | decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); |
| 335 | hash = init_name_hash(); | 335 | hash = init_name_hash(); |
| 336 | astr = str->name; | 336 | astr = str->name; |
| 337 | len = str->len; | 337 | len = str->len; |
| @@ -373,8 +373,8 @@ int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr * | |||
| 373 | u16 c1, c2; | 373 | u16 c1, c2; |
| 374 | wchar_t c; | 374 | wchar_t c; |
| 375 | 375 | ||
| 376 | casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD); | 376 | casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); |
| 377 | decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE); | 377 | decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); |
| 378 | astr1 = s1->name; | 378 | astr1 = s1->name; |
| 379 | len1 = s1->len; | 379 | len1 = s1->len; |
| 380 | astr2 = s2->name; | 380 | astr2 = s2->name; |
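The unicode.c changes above replace mask tests on HFSPLUS_SB(sb).flags with test_bit() on HFSPLUS_SB(sb)->flags, which presumes the flag constants are now bit numbers on an unsigned long rather than bit masks. A self-contained sketch of that conversion pattern, with generic names rather than the real hfsplus definitions:

    #include <linux/bitops.h>

    /* Before: an int flags word tested with masks such as 0x0002.
     * After: an unsigned long flags word and bit numbers, so the
     * atomic bit helpers can be used. */
    #define EX_SB_NODECOMPOSE       1       /* bit number, not a mask */

    struct example_sb_info {
            unsigned long flags;
    };

    static int decompose_enabled(struct example_sb_info *sbi)
    {
            /* decompose unless the "nodecompose" option bit is set */
            return !test_bit(EX_SB_NODECOMPOSE, &sbi->flags);
    }

    static void mark_nodecompose(struct example_sb_info *sbi)
    {
            set_bit(EX_SB_NODECOMPOSE, &sbi->flags);        /* atomic set */
    }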
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index bed78ac8f6d1..8972c20b3216 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
| @@ -65,8 +65,8 @@ static int hfsplus_get_last_session(struct super_block *sb, | |||
| 65 | *start = 0; | 65 | *start = 0; |
| 66 | *size = sb->s_bdev->bd_inode->i_size >> 9; | 66 | *size = sb->s_bdev->bd_inode->i_size >> 9; |
| 67 | 67 | ||
| 68 | if (HFSPLUS_SB(sb).session >= 0) { | 68 | if (HFSPLUS_SB(sb)->session >= 0) { |
| 69 | te.cdte_track = HFSPLUS_SB(sb).session; | 69 | te.cdte_track = HFSPLUS_SB(sb)->session; |
| 70 | te.cdte_format = CDROM_LBA; | 70 | te.cdte_format = CDROM_LBA; |
| 71 | res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te); | 71 | res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te); |
| 72 | if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) { | 72 | if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) { |
| @@ -87,6 +87,7 @@ static int hfsplus_get_last_session(struct super_block *sb, | |||
| 87 | /* Takes in super block, returns true if good data read */ | 87 | /* Takes in super block, returns true if good data read */ |
| 88 | int hfsplus_read_wrapper(struct super_block *sb) | 88 | int hfsplus_read_wrapper(struct super_block *sb) |
| 89 | { | 89 | { |
| 90 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | ||
| 90 | struct buffer_head *bh; | 91 | struct buffer_head *bh; |
| 91 | struct hfsplus_vh *vhdr; | 92 | struct hfsplus_vh *vhdr; |
| 92 | struct hfsplus_wd wd; | 93 | struct hfsplus_wd wd; |
| @@ -122,7 +123,7 @@ int hfsplus_read_wrapper(struct super_block *sb) | |||
| 122 | if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG)) | 123 | if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG)) |
| 123 | break; | 124 | break; |
| 124 | if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) { | 125 | if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) { |
| 125 | HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX; | 126 | set_bit(HFSPLUS_SB_HFSX, &sbi->flags); |
| 126 | break; | 127 | break; |
| 127 | } | 128 | } |
| 128 | brelse(bh); | 129 | brelse(bh); |
| @@ -143,11 +144,11 @@ int hfsplus_read_wrapper(struct super_block *sb) | |||
| 143 | if (blocksize < HFSPLUS_SECTOR_SIZE || | 144 | if (blocksize < HFSPLUS_SECTOR_SIZE || |
| 144 | ((blocksize - 1) & blocksize)) | 145 | ((blocksize - 1) & blocksize)) |
| 145 | return -EINVAL; | 146 | return -EINVAL; |
| 146 | HFSPLUS_SB(sb).alloc_blksz = blocksize; | 147 | sbi->alloc_blksz = blocksize; |
| 147 | HFSPLUS_SB(sb).alloc_blksz_shift = 0; | 148 | sbi->alloc_blksz_shift = 0; |
| 148 | while ((blocksize >>= 1) != 0) | 149 | while ((blocksize >>= 1) != 0) |
| 149 | HFSPLUS_SB(sb).alloc_blksz_shift++; | 150 | sbi->alloc_blksz_shift++; |
| 150 | blocksize = min(HFSPLUS_SB(sb).alloc_blksz, (u32)PAGE_SIZE); | 151 | blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE); |
| 151 | 152 | ||
| 152 | /* align block size to block offset */ | 153 | /* align block size to block offset */ |
| 153 | while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1)) | 154 | while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1)) |
| @@ -158,23 +159,26 @@ int hfsplus_read_wrapper(struct super_block *sb) | |||
| 158 | return -EINVAL; | 159 | return -EINVAL; |
| 159 | } | 160 | } |
| 160 | 161 | ||
| 161 | HFSPLUS_SB(sb).blockoffset = part_start >> | 162 | sbi->blockoffset = |
| 162 | (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT); | 163 | part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT); |
| 163 | HFSPLUS_SB(sb).sect_count = part_size; | 164 | sbi->sect_count = part_size; |
| 164 | HFSPLUS_SB(sb).fs_shift = HFSPLUS_SB(sb).alloc_blksz_shift - | 165 | sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits; |
| 165 | sb->s_blocksize_bits; | ||
| 166 | 166 | ||
| 167 | bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr); | 167 | bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr); |
| 168 | if (!bh) | 168 | if (!bh) |
| 169 | return -EIO; | 169 | return -EIO; |
| 170 | 170 | ||
| 171 | /* should still be the same... */ | 171 | /* should still be the same... */ |
| 172 | if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ? | 172 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { |
| 173 | cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) : | 173 | if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) |
| 174 | cpu_to_be16(HFSPLUS_VOLHEAD_SIG))) | 174 | goto error; |
| 175 | goto error; | 175 | } else { |
| 176 | HFSPLUS_SB(sb).s_vhbh = bh; | 176 | if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIG)) |
| 177 | HFSPLUS_SB(sb).s_vhdr = vhdr; | 177 | goto error; |
| 178 | } | ||
| 179 | |||
| 180 | sbi->s_vhbh = bh; | ||
| 181 | sbi->s_vhdr = vhdr; | ||
| 178 | 182 | ||
| 179 | return 0; | 183 | return 0; |
| 180 | error: | 184 | error: |
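The wrapper.c hunk derives alloc_blksz_shift by repeated shifting, then computes fs_shift and blockoffset from it. A small illustration of that arithmetic with made-up example values:

    /* Illustration only; the numbers are not from a real volume:
     *
     *   alloc_blksz       = 4096  ->  alloc_blksz_shift = 12
     *   sb->s_blocksize   = 4096  ->  s_blocksize_bits  = 12
     *   fs_shift          = 12 - 12 = 0
     *   part_start        = 64 (512-byte sectors)
     *   blockoffset       = 64 >> (12 - 9) = 8 filesystem blocks
     */
    static inline unsigned int blksz_to_shift(unsigned int blocksize)
    {
            unsigned int shift = 0;

            while ((blocksize >>= 1) != 0)  /* same loop as in the hunk */
                    shift++;
            return shift;                   /* e.g. 4096 -> 12 */
    }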
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index cdfb8c6a4206..c16f8d8331b5 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h | |||
| @@ -196,8 +196,6 @@ fh_lock(struct svc_fh *fhp) | |||
| 196 | static inline void | 196 | static inline void |
| 197 | fh_unlock(struct svc_fh *fhp) | 197 | fh_unlock(struct svc_fh *fhp) |
| 198 | { | 198 | { |
| 199 | BUG_ON(!fhp->fh_dentry); | ||
| 200 | |||
| 201 | if (fhp->fh_locked) { | 199 | if (fhp->fh_locked) { |
| 202 | fill_post_wcc(fhp); | 200 | fill_post_wcc(fhp); |
| 203 | mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); | 201 | mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); |
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig index 22c629eedd82..b388443c3a09 100644 --- a/fs/notify/Kconfig +++ b/fs/notify/Kconfig | |||
| @@ -3,4 +3,4 @@ config FSNOTIFY | |||
| 3 | 3 | ||
| 4 | source "fs/notify/dnotify/Kconfig" | 4 | source "fs/notify/dnotify/Kconfig" |
| 5 | source "fs/notify/inotify/Kconfig" | 5 | source "fs/notify/inotify/Kconfig" |
| 6 | source "fs/notify/fanotify/Kconfig" | 6 | #source "fs/notify/fanotify/Kconfig" |
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 32499d213fc4..9975457c981f 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c | |||
| @@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, | |||
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | /* Fast symlinks can't be large */ | 130 | /* Fast symlinks can't be large */ |
| 131 | len = strlen(target); | 131 | len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); |
| 132 | link = kzalloc(len + 1, GFP_NOFS); | 132 | link = kzalloc(len + 1, GFP_NOFS); |
| 133 | if (!link) { | 133 | if (!link) { |
| 134 | status = -ENOMEM; | 134 | status = -ENOMEM; |
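The ocfs2 change bounds the length scan of a fast symlink target by the space actually available inside the inode, so a target that is not NUL-terminated can no longer be read past the end of the on-disk field. A generic sketch of the same bounded-copy pattern (names are illustrative, not ocfs2 code):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Copy an on-disk string that may lack a terminating NUL: bound the
     * scan by the size of the field instead of trusting strlen(). */
    static char *copy_bounded_string(const char *src, size_t field_size)
    {
            size_t len = strnlen(src, field_size);  /* never reads past field_size */
            char *dst = kzalloc(len + 1, GFP_NOFS);

            if (dst)
                    memcpy(dst, src, len);
            return dst;
    }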
diff --git a/fs/proc/base.c b/fs/proc/base.c index a1c43e7c8a7b..8e4addaa5424 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
| 2675 | INF("auxv", S_IRUSR, proc_pid_auxv), | 2675 | INF("auxv", S_IRUSR, proc_pid_auxv), |
| 2676 | ONE("status", S_IRUGO, proc_pid_status), | 2676 | ONE("status", S_IRUGO, proc_pid_status), |
| 2677 | ONE("personality", S_IRUSR, proc_pid_personality), | 2677 | ONE("personality", S_IRUSR, proc_pid_personality), |
| 2678 | INF("limits", S_IRUSR, proc_pid_limits), | 2678 | INF("limits", S_IRUGO, proc_pid_limits), |
| 2679 | #ifdef CONFIG_SCHED_DEBUG | 2679 | #ifdef CONFIG_SCHED_DEBUG |
| 2680 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), | 2680 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), |
| 2681 | #endif | 2681 | #endif |
| @@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = { | |||
| 3011 | INF("auxv", S_IRUSR, proc_pid_auxv), | 3011 | INF("auxv", S_IRUSR, proc_pid_auxv), |
| 3012 | ONE("status", S_IRUGO, proc_pid_status), | 3012 | ONE("status", S_IRUGO, proc_pid_status), |
| 3013 | ONE("personality", S_IRUSR, proc_pid_personality), | 3013 | ONE("personality", S_IRUSR, proc_pid_personality), |
| 3014 | INF("limits", S_IRUSR, proc_pid_limits), | 3014 | INF("limits", S_IRUGO, proc_pid_limits), |
| 3015 | #ifdef CONFIG_SCHED_DEBUG | 3015 | #ifdef CONFIG_SCHED_DEBUG |
| 3016 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), | 3016 | REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), |
| 3017 | #endif | 3017 | #endif |
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index f53505de0712..5cbb81e134ac 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
| @@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page, | |||
| 170 | int reiserfs_unpack(struct inode *inode, struct file *filp) | 170 | int reiserfs_unpack(struct inode *inode, struct file *filp) |
| 171 | { | 171 | { |
| 172 | int retval = 0; | 172 | int retval = 0; |
| 173 | int depth; | ||
| 173 | int index; | 174 | int index; |
| 174 | struct page *page; | 175 | struct page *page; |
| 175 | struct address_space *mapping; | 176 | struct address_space *mapping; |
| @@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
| 188 | /* we need to make sure nobody is changing the file size beneath | 189 | /* we need to make sure nobody is changing the file size beneath |
| 189 | ** us | 190 | ** us |
| 190 | */ | 191 | */ |
| 191 | mutex_lock(&inode->i_mutex); | 192 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); |
| 192 | reiserfs_write_lock(inode->i_sb); | 193 | depth = reiserfs_write_lock_once(inode->i_sb); |
| 193 | 194 | ||
| 194 | write_from = inode->i_size & (blocksize - 1); | 195 | write_from = inode->i_size & (blocksize - 1); |
| 195 | /* if we are on a block boundary, we are already unpacked. */ | 196 | /* if we are on a block boundary, we are already unpacked. */ |
| @@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
| 224 | 225 | ||
| 225 | out: | 226 | out: |
| 226 | mutex_unlock(&inode->i_mutex); | 227 | mutex_unlock(&inode->i_mutex); |
| 227 | reiserfs_write_unlock(inode->i_sb); | 228 | reiserfs_write_unlock_once(inode->i_sb, depth); |
| 228 | return retval; | 229 | return retval; |
| 229 | } | 230 | } |
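The reiserfs hunk switches to reiserfs_mutex_lock_safe() and the depth-returning reiserfs_write_lock_once()/reiserfs_write_unlock_once() pair, so the write-lock nesting seen on entry is restored on exit instead of being dropped unconditionally. A condensed sketch of the calling pattern, with the filesystem work elided:

    static int example_unpack(struct inode *inode)
    {
            int depth;

            /* take i_mutex first (the helper drops the write lock around it
             * to keep lock ordering), then take the write lock once and
             * remember how deep we already were */
            reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
            depth = reiserfs_write_lock_once(inode->i_sb);

            /* ... work that needs both locks ... */

            mutex_unlock(&inode->i_mutex);
            reiserfs_write_unlock_once(inode->i_sb, depth);
            return 0;
    }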
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index d59c4a65d492..81976ffed7d6 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
| @@ -668,14 +668,11 @@ xfs_inode_set_reclaim_tag( | |||
| 668 | xfs_perag_put(pag); | 668 | xfs_perag_put(pag); |
| 669 | } | 669 | } |
| 670 | 670 | ||
| 671 | void | 671 | STATIC void |
| 672 | __xfs_inode_clear_reclaim_tag( | 672 | __xfs_inode_clear_reclaim( |
| 673 | xfs_mount_t *mp, | ||
| 674 | xfs_perag_t *pag, | 673 | xfs_perag_t *pag, |
| 675 | xfs_inode_t *ip) | 674 | xfs_inode_t *ip) |
| 676 | { | 675 | { |
| 677 | radix_tree_tag_clear(&pag->pag_ici_root, | ||
| 678 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); | ||
| 679 | pag->pag_ici_reclaimable--; | 676 | pag->pag_ici_reclaimable--; |
| 680 | if (!pag->pag_ici_reclaimable) { | 677 | if (!pag->pag_ici_reclaimable) { |
| 681 | /* clear the reclaim tag from the perag radix tree */ | 678 | /* clear the reclaim tag from the perag radix tree */ |
| @@ -689,6 +686,17 @@ __xfs_inode_clear_reclaim_tag( | |||
| 689 | } | 686 | } |
| 690 | } | 687 | } |
| 691 | 688 | ||
| 689 | void | ||
| 690 | __xfs_inode_clear_reclaim_tag( | ||
| 691 | xfs_mount_t *mp, | ||
| 692 | xfs_perag_t *pag, | ||
| 693 | xfs_inode_t *ip) | ||
| 694 | { | ||
| 695 | radix_tree_tag_clear(&pag->pag_ici_root, | ||
| 696 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); | ||
| 697 | __xfs_inode_clear_reclaim(pag, ip); | ||
| 698 | } | ||
| 699 | |||
| 692 | /* | 700 | /* |
| 693 | * Inodes in different states need to be treated differently, and the return | 701 | * Inodes in different states need to be treated differently, and the return |
| 694 | * value of xfs_iflush is not sufficient to get this right. The following table | 702 | * value of xfs_iflush is not sufficient to get this right. The following table |
| @@ -838,6 +846,7 @@ reclaim: | |||
| 838 | if (!radix_tree_delete(&pag->pag_ici_root, | 846 | if (!radix_tree_delete(&pag->pag_ici_root, |
| 839 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) | 847 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) |
| 840 | ASSERT(0); | 848 | ASSERT(0); |
| 849 | __xfs_inode_clear_reclaim(pag, ip); | ||
| 841 | write_unlock(&pag->pag_ici_lock); | 850 | write_unlock(&pag->pag_ici_lock); |
| 842 | 851 | ||
| 843 | /* | 852 | /* |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index ed575fb4b495..7e206fc1fa36 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
| @@ -405,9 +405,15 @@ xlog_cil_push( | |||
| 405 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); | 405 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); |
| 406 | new_ctx->ticket = xlog_cil_ticket_alloc(log); | 406 | new_ctx->ticket = xlog_cil_ticket_alloc(log); |
| 407 | 407 | ||
| 408 | /* lock out transaction commit, but don't block on background push */ | 408 | /* |
| 409 | * Lock out transaction commit, but don't block for background pushes | ||
| 410 | * unless we are well over the CIL space limit. See the definition of | ||
| 411 | * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic | ||
| 412 | * used here. | ||
| 413 | */ | ||
| 409 | if (!down_write_trylock(&cil->xc_ctx_lock)) { | 414 | if (!down_write_trylock(&cil->xc_ctx_lock)) { |
| 410 | if (!push_seq) | 415 | if (!push_seq && |
| 416 | cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log)) | ||
| 411 | goto out_free_ticket; | 417 | goto out_free_ticket; |
| 412 | down_write(&cil->xc_ctx_lock); | 418 | down_write(&cil->xc_ctx_lock); |
| 413 | } | 419 | } |
| @@ -422,7 +428,7 @@ xlog_cil_push( | |||
| 422 | goto out_skip; | 428 | goto out_skip; |
| 423 | 429 | ||
| 424 | /* check for a previously pushed sequence */ | 430 | /* check for a previously pushed sequence */ |
| 425 | if (push_seq < cil->xc_ctx->sequence) | 431 | if (push_seq && push_seq < cil->xc_ctx->sequence) |
| 426 | goto out_skip; | 432 | goto out_skip; |
| 427 | 433 | ||
| 428 | /* | 434 | /* |
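Reassembled from the interleaved rows above, the new locking gate in xlog_cil_push() reads as below; a zero push_seq denotes a background push here, and such a push now only backs off while the CIL is still under the hard space limit:

    /* lock out transaction commit; background pushes give up on contention
     * unless the CIL has grown past the hard limit, in which case they
     * must block so the checkpoint size stays bounded */
    if (!down_write_trylock(&cil->xc_ctx_lock)) {
            if (!push_seq &&
                cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
                    goto out_free_ticket;
            down_write(&cil->xc_ctx_lock);
    }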
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index ced52b98b322..edcdfe01617f 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
| @@ -426,13 +426,13 @@ struct xfs_cil { | |||
| 426 | }; | 426 | }; |
| 427 | 427 | ||
| 428 | /* | 428 | /* |
| 429 | * The amount of log space we should the CIL to aggregate is difficult to size. | 429 | * The amount of log space we allow the CIL to aggregate is difficult to size. |
| 430 | * Whatever we chose we have to make we can get a reservation for the log space | 430 | * Whatever we choose, we have to make sure we can get a reservation for the |
| 431 | * effectively, that it is large enough to capture sufficient relogging to | 431 | * log space effectively, that it is large enough to capture sufficient |
| 432 | * reduce log buffer IO significantly, but it is not too large for the log or | 432 | * relogging to reduce log buffer IO significantly, but it is not too large for |
| 433 | * induces too much latency when writing out through the iclogs. We track both | 433 | * the log or induces too much latency when writing out through the iclogs. We |
| 434 | * space consumed and the number of vectors in the checkpoint context, so we | 434 | * track both space consumed and the number of vectors in the checkpoint |
| 435 | * need to decide which to use for limiting. | 435 | * context, so we need to decide which to use for limiting. |
| 436 | * | 436 | * |
| 437 | * Every log buffer we write out during a push needs a header reserved, which | 437 | * Every log buffer we write out during a push needs a header reserved, which |
| 438 | * is at least one sector and more for v2 logs. Hence we need a reservation of | 438 | * is at least one sector and more for v2 logs. Hence we need a reservation of |
| @@ -459,16 +459,21 @@ struct xfs_cil { | |||
| 459 | * checkpoint transaction ticket is specific to the checkpoint context, rather | 459 | * checkpoint transaction ticket is specific to the checkpoint context, rather |
| 460 | * than the CIL itself. | 460 | * than the CIL itself. |
| 461 | * | 461 | * |
| 462 | * With dynamic reservations, we can basically make up arbitrary limits for the | 462 | * With dynamic reservations, we can effectively make up arbitrary limits for |
| 463 | * checkpoint size so long as they don't violate any other size rules. Hence | 463 | * the checkpoint size so long as they don't violate any other size rules. |
| 464 | * the initial maximum size for the checkpoint transaction will be set to a | 464 | * Recovery imposes a rule that no transaction exceed half the log, so we are |
| 465 | * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit | 465 | * limited by that. Furthermore, the log transaction reservation subsystem |
| 466 | * right now based on the latency of writing out a large amount of data through | 466 | * tries to keep 25% of the log free, so we need to keep below that limit or we |
| 467 | * the circular iclog buffers. | 467 | * risk running out of free log space to start any new transactions. |
| 468 | * | ||
| 469 | * In order to keep background CIL push efficient, we will set a lower | ||
| 470 | * threshold at which background pushing is attempted without blocking current | ||
| 471 | * transaction commits. A separate, higher bound defines when CIL pushes are | ||
| 472 | * enforced to ensure we stay within our maximum checkpoint size bounds. | ||
| 473 | * The background threshold sits well below the hard limit, yet gives us plenty of space for aggregation on large logs. | ||
| 468 | */ | 474 | */ |
| 469 | 475 | #define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3) | |
| 470 | #define XLOG_CIL_SPACE_LIMIT(log) \ | 476 | #define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4)) |
| 471 | (min((log->l_logsize >> 2), (8 * 1024 * 1024))) | ||
| 472 | 477 | ||
| 473 | /* | 478 | /* |
| 474 | * The reservation head lsn is not made up of a cycle number and block number. | 479 | * The reservation head lsn is not made up of a cycle number and block number. |
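Worked numbers for the two limits, purely as an illustration for a 128 MB log:

    /* l_logsize = 134217728 (128 MB)
     *
     *   XLOG_CIL_SPACE_LIMIT(log)      = 134217728 >> 3       = 16 MB
     *   XLOG_CIL_HARD_SPACE_LIMIT(log) = 3 * (134217728 >> 4) = 24 MB
     *
     * Background pushes start at 1/8 of the log and become blocking at
     * 3/16 (18.75%), both comfortably below the 25% free-space target
     * mentioned in the comment above. */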
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index c0786d446a00..984cdc62e30b 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | extern u8 acpi_gbl_permanent_mmap; | 55 | extern u8 acpi_gbl_permanent_mmap; |
| 56 | 56 | ||
| 57 | /* | 57 | /* |
| 58 | * Globals that are publically available, allowing for | 58 | * Globals that are publicly available, allowing for |
| 59 | * run time configuration | 59 | * run time configuration |
| 60 | */ | 60 | */ |
| 61 | extern u32 acpi_dbg_level; | 61 | extern u32 acpi_dbg_level; |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7809d230adee..4c9461a4f9e6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -612,7 +612,7 @@ struct drm_gem_object { | |||
| 612 | struct kref refcount; | 612 | struct kref refcount; |
| 613 | 613 | ||
| 614 | /** Handle count of this object. Each handle also holds a reference */ | 614 | /** Handle count of this object. Each handle also holds a reference */ |
| 615 | struct kref handlecount; | 615 | atomic_t handle_count; /* number of handles on this object */ |
| 616 | 616 | ||
| 617 | /** Related drm device */ | 617 | /** Related drm device */ |
| 618 | struct drm_device *dev; | 618 | struct drm_device *dev; |
| @@ -808,7 +808,6 @@ struct drm_driver { | |||
| 808 | */ | 808 | */ |
| 809 | int (*gem_init_object) (struct drm_gem_object *obj); | 809 | int (*gem_init_object) (struct drm_gem_object *obj); |
| 810 | void (*gem_free_object) (struct drm_gem_object *obj); | 810 | void (*gem_free_object) (struct drm_gem_object *obj); |
| 811 | void (*gem_free_object_unlocked) (struct drm_gem_object *obj); | ||
| 812 | 811 | ||
| 813 | /* vga arb irq handler */ | 812 | /* vga arb irq handler */ |
| 814 | void (*vgaarb_irq)(struct drm_device *dev, bool state); | 813 | void (*vgaarb_irq)(struct drm_device *dev, bool state); |
| @@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp); | |||
| 1175 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); | 1174 | extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
| 1176 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); | 1175 | extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
| 1177 | extern void drm_vm_open_locked(struct vm_area_struct *vma); | 1176 | extern void drm_vm_open_locked(struct vm_area_struct *vma); |
| 1177 | extern void drm_vm_close_locked(struct vm_area_struct *vma); | ||
| 1178 | extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); | 1178 | extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); |
| 1179 | extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); | 1179 | extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); |
| 1180 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | 1180 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
| @@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev); | |||
| 1455 | void drm_gem_destroy(struct drm_device *dev); | 1455 | void drm_gem_destroy(struct drm_device *dev); |
| 1456 | void drm_gem_object_release(struct drm_gem_object *obj); | 1456 | void drm_gem_object_release(struct drm_gem_object *obj); |
| 1457 | void drm_gem_object_free(struct kref *kref); | 1457 | void drm_gem_object_free(struct kref *kref); |
| 1458 | void drm_gem_object_free_unlocked(struct kref *kref); | ||
| 1459 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, | 1458 | struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
| 1460 | size_t size); | 1459 | size_t size); |
| 1461 | int drm_gem_object_init(struct drm_device *dev, | 1460 | int drm_gem_object_init(struct drm_device *dev, |
| 1462 | struct drm_gem_object *obj, size_t size); | 1461 | struct drm_gem_object *obj, size_t size); |
| 1463 | void drm_gem_object_handle_free(struct kref *kref); | 1462 | void drm_gem_object_handle_free(struct drm_gem_object *obj); |
| 1464 | void drm_gem_vm_open(struct vm_area_struct *vma); | 1463 | void drm_gem_vm_open(struct vm_area_struct *vma); |
| 1465 | void drm_gem_vm_close(struct vm_area_struct *vma); | 1464 | void drm_gem_vm_close(struct vm_area_struct *vma); |
| 1466 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); | 1465 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
| @@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj) | |||
| 1483 | static inline void | 1482 | static inline void |
| 1484 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) | 1483 | drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
| 1485 | { | 1484 | { |
| 1486 | if (obj != NULL) | 1485 | if (obj != NULL) { |
| 1487 | kref_put(&obj->refcount, drm_gem_object_free_unlocked); | 1486 | struct drm_device *dev = obj->dev; |
| 1487 | mutex_lock(&dev->struct_mutex); | ||
| 1488 | kref_put(&obj->refcount, drm_gem_object_free); | ||
| 1489 | mutex_unlock(&dev->struct_mutex); | ||
| 1490 | } | ||
| 1488 | } | 1491 | } |
| 1489 | 1492 | ||
| 1490 | int drm_gem_handle_create(struct drm_file *file_priv, | 1493 | int drm_gem_handle_create(struct drm_file *file_priv, |
| @@ -1495,7 +1498,7 @@ static inline void | |||
| 1495 | drm_gem_object_handle_reference(struct drm_gem_object *obj) | 1498 | drm_gem_object_handle_reference(struct drm_gem_object *obj) |
| 1496 | { | 1499 | { |
| 1497 | drm_gem_object_reference(obj); | 1500 | drm_gem_object_reference(obj); |
| 1498 | kref_get(&obj->handlecount); | 1501 | atomic_inc(&obj->handle_count); |
| 1499 | } | 1502 | } |
| 1500 | 1503 | ||
| 1501 | static inline void | 1504 | static inline void |
| @@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj) | |||
| 1504 | if (obj == NULL) | 1507 | if (obj == NULL) |
| 1505 | return; | 1508 | return; |
| 1506 | 1509 | ||
| 1510 | if (atomic_read(&obj->handle_count) == 0) | ||
| 1511 | return; | ||
| 1507 | /* | 1512 | /* |
| 1508 | * Must bump handle count first as this may be the last | 1513 | * Must bump handle count first as this may be the last |
| 1509 | * ref, in which case the object would disappear before we | 1514 | * ref, in which case the object would disappear before we |
| 1510 | * checked for a name | 1515 | * checked for a name |
| 1511 | */ | 1516 | */ |
| 1512 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1517 | if (atomic_dec_and_test(&obj->handle_count)) |
| 1518 | drm_gem_object_handle_free(obj); | ||
| 1513 | drm_gem_object_unreference(obj); | 1519 | drm_gem_object_unreference(obj); |
| 1514 | } | 1520 | } |
| 1515 | 1521 | ||
| @@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) | |||
| 1519 | if (obj == NULL) | 1525 | if (obj == NULL) |
| 1520 | return; | 1526 | return; |
| 1521 | 1527 | ||
| 1528 | if (atomic_read(&obj->handle_count) == 0) | ||
| 1529 | return; | ||
| 1530 | |||
| 1522 | /* | 1531 | /* |
| 1523 | * Must bump handle count first as this may be the last | 1532 | * Must bump handle count first as this may be the last |
| 1524 | * ref, in which case the object would disappear before we | 1533 | * ref, in which case the object would disappear before we |
| 1525 | * checked for a name | 1534 | * checked for a name |
| 1526 | */ | 1535 | */ |
| 1527 | kref_put(&obj->handlecount, drm_gem_object_handle_free); | 1536 | |
| 1537 | if (atomic_dec_and_test(&obj->handle_count)) | ||
| 1538 | drm_gem_object_handle_free(obj); | ||
| 1528 | drm_gem_object_unreference_unlocked(obj); | 1539 | drm_gem_object_unreference_unlocked(obj); |
| 1529 | } | 1540 | } |
| 1530 | 1541 | ||
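The drmP.h changes replace the kref-based handlecount with a plain atomic_t, turning the final-handle cleanup into an explicit atomic_dec_and_test(), and drm_gem_object_unreference_unlocked() now takes dev->struct_mutex itself around the kref_put(). A generic sketch of the dec-and-test release pattern (hypothetical names, not the DRM structures):

    #include <linux/atomic.h>

    struct counted_object {
            atomic_t handle_count;          /* userspace handles on the object */
    };

    static void last_handle_gone(struct counted_object *obj)
    {
            /* drop name/flink state now that no handles remain */
    }

    static void object_handle_put(struct counted_object *obj)
    {
            if (atomic_read(&obj->handle_count) == 0)
                    return;                 /* nothing to drop */

            /* atomic_dec_and_test() is true only for the 1 -> 0 transition,
             * so exactly one caller runs the final cleanup */
            if (atomic_dec_and_test(&obj->handle_count))
                    last_handle_gone(obj);
    }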
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 3a9940ef728b..883c1d439899 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -85,7 +85,6 @@ | |||
| 85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 85 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 86 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 87 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 88 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | ||
| 89 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 88 | {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
| 90 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 89 | {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
| 91 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ | 90 | {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ |
| @@ -103,6 +102,7 @@ | |||
| 103 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 102 | {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 104 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 103 | {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 105 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 104 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 105 | {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ | ||
| 106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ | 106 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ |
| 107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ | 107 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ |
| 108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 108 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 267a86c74e2e..2040e6c4f172 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
| @@ -246,9 +246,11 @@ struct ttm_buffer_object { | |||
| 246 | 246 | ||
| 247 | atomic_t reserved; | 247 | atomic_t reserved; |
| 248 | 248 | ||
| 249 | |||
| 250 | /** | 249 | /** |
| 251 | * Members protected by the bo::lock | 250 | * Members protected by the bo::lock |
| 251 | * In addition, setting sync_obj to anything else | ||
| 252 | * than NULL requires bo::reserved to be held. This allows for | ||
| 253 | * checking NULL while reserved but not holding bo::lock. | ||
| 252 | */ | 254 | */ |
| 253 | 255 | ||
| 254 | void *sync_obj_arg; | 256 | void *sync_obj_arg; |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 626b629429ff..4e8ea8c8ec1e 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
| @@ -118,7 +118,6 @@ header-y += eventpoll.h | |||
| 118 | header-y += ext2_fs.h | 118 | header-y += ext2_fs.h |
| 119 | header-y += fadvise.h | 119 | header-y += fadvise.h |
| 120 | header-y += falloc.h | 120 | header-y += falloc.h |
| 121 | header-y += fanotify.h | ||
| 122 | header-y += fb.h | 121 | header-y += fb.h |
| 123 | header-y += fcntl.h | 122 | header-y += fcntl.h |
| 124 | header-y += fd.h | 123 | header-y += fd.h |
diff --git a/fs/ceph/auth.h b/include/linux/ceph/auth.h index d38a2fb4a137..7fff521d7eb5 100644 --- a/fs/ceph/auth.h +++ b/include/linux/ceph/auth.h | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | #ifndef _FS_CEPH_AUTH_H | 1 | #ifndef _FS_CEPH_AUTH_H |
| 2 | #define _FS_CEPH_AUTH_H | 2 | #define _FS_CEPH_AUTH_H |
| 3 | 3 | ||
| 4 | #include "types.h" | 4 | #include <linux/ceph/types.h> |
| 5 | #include "buffer.h" | 5 | #include <linux/ceph/buffer.h> |
| 6 | 6 | ||
| 7 | /* | 7 | /* |
| 8 | * Abstract interface for communicating with the authentication module. | 8 | * Abstract interface for communicating with the authentication module. |
diff --git a/fs/ceph/buffer.h b/include/linux/ceph/buffer.h index 58d19014068f..58d19014068f 100644 --- a/fs/ceph/buffer.h +++ b/include/linux/ceph/buffer.h | |||
diff --git a/fs/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index 1818c2305610..aa2e19182d99 100644 --- a/fs/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 5 | 5 | ||
| 6 | #ifdef CONFIG_CEPH_FS_PRETTYDEBUG | 6 | #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG |
| 7 | 7 | ||
| 8 | /* | 8 | /* |
| 9 | * wrap pr_debug to include a filename:lineno prefix on each line. | 9 | * wrap pr_debug to include a filename:lineno prefix on each line. |
| @@ -14,7 +14,8 @@ | |||
| 14 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | 14 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) |
| 15 | extern const char *ceph_file_part(const char *s, int len); | 15 | extern const char *ceph_file_part(const char *s, int len); |
| 16 | # define dout(fmt, ...) \ | 16 | # define dout(fmt, ...) \ |
| 17 | pr_debug(" %12.12s:%-4d : " fmt, \ | 17 | pr_debug("%.*s %12.12s:%-4d : " fmt, \ |
| 18 | 8 - (int)sizeof(KBUILD_MODNAME), " ", \ | ||
| 18 | ceph_file_part(__FILE__, sizeof(__FILE__)), \ | 19 | ceph_file_part(__FILE__, sizeof(__FILE__)), \ |
| 19 | __LINE__, ##__VA_ARGS__) | 20 | __LINE__, ##__VA_ARGS__) |
| 20 | # else | 21 | # else |
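The new dout() prefix uses "%.*s" with a width of 8 - (int)sizeof(KBUILD_MODNAME) so that debug lines from ceph modules with names of different lengths stay column-aligned. A stand-alone illustration of the padding arithmetic (ordinary userspace C, not the kernel macro):

    #include <stdio.h>

    int main(void)
    {
            /* sizeof("ceph") == 5    -> prints 3 leading spaces
             * sizeof("libceph") == 8 -> prints 0 leading spaces */
            printf("%.*s%s: message\n", 8 - (int)sizeof("ceph"), "        ", "ceph");
            printf("%.*s%s: message\n", 8 - (int)sizeof("libceph"), "        ", "libceph");
            return 0;
    }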
diff --git a/fs/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h index 5babb8e95352..5babb8e95352 100644 --- a/fs/ceph/ceph_frag.h +++ b/include/linux/ceph/ceph_frag.h | |||
diff --git a/fs/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index d5619ac86711..c3c74aef289d 100644 --- a/fs/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h | |||
| @@ -299,6 +299,7 @@ enum { | |||
| 299 | CEPH_MDS_OP_SETATTR = 0x01108, | 299 | CEPH_MDS_OP_SETATTR = 0x01108, |
| 300 | CEPH_MDS_OP_SETFILELOCK= 0x01109, | 300 | CEPH_MDS_OP_SETFILELOCK= 0x01109, |
| 301 | CEPH_MDS_OP_GETFILELOCK= 0x00110, | 301 | CEPH_MDS_OP_GETFILELOCK= 0x00110, |
| 302 | CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, | ||
| 302 | 303 | ||
| 303 | CEPH_MDS_OP_MKNOD = 0x01201, | 304 | CEPH_MDS_OP_MKNOD = 0x01201, |
| 304 | CEPH_MDS_OP_LINK = 0x01202, | 305 | CEPH_MDS_OP_LINK = 0x01202, |
diff --git a/fs/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h index d099c3f90236..d099c3f90236 100644 --- a/fs/ceph/ceph_hash.h +++ b/include/linux/ceph/ceph_hash.h | |||
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 000000000000..2a79702e092b --- /dev/null +++ b/include/linux/ceph/debugfs.h | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | #ifndef _FS_CEPH_DEBUGFS_H | ||
| 2 | #define _FS_CEPH_DEBUGFS_H | ||
| 3 | |||
| 4 | #include "ceph_debug.h" | ||
| 5 | #include "types.h" | ||
| 6 | |||
| 7 | #define CEPH_DEFINE_SHOW_FUNC(name) \ | ||
| 8 | static int name##_open(struct inode *inode, struct file *file) \ | ||
| 9 | { \ | ||
| 10 | struct seq_file *sf; \ | ||
| 11 | int ret; \ | ||
| 12 | \ | ||
| 13 | ret = single_open(file, name, NULL); \ | ||
| 14 | sf = file->private_data; \ | ||
| 15 | sf->private = inode->i_private; \ | ||
| 16 | return ret; \ | ||
| 17 | } \ | ||
| 18 | \ | ||
| 19 | static const struct file_operations name##_fops = { \ | ||
| 20 | .open = name##_open, \ | ||
| 21 | .read = seq_read, \ | ||
| 22 | .llseek = seq_lseek, \ | ||
| 23 | .release = single_release, \ | ||
| 24 | }; | ||
| 25 | |||
| 26 | /* debugfs.c */ | ||
| 27 | extern int ceph_debugfs_init(void); | ||
| 28 | extern void ceph_debugfs_cleanup(void); | ||
| 29 | extern int ceph_debugfs_client_init(struct ceph_client *client); | ||
| 30 | extern void ceph_debugfs_client_cleanup(struct ceph_client *client); | ||
| 31 | |||
| 32 | #endif | ||
| 33 | |||
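CEPH_DEFINE_SHOW_FUNC() turns a seq_file show routine into the open/read/release file_operations needed for a debugfs file, passing inode->i_private through as the seq_file's private pointer. A usage sketch; the show function, its output and the debugfs file name are made up for illustration:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *s, void *p)
    {
            void *private = s->private;     /* whatever was stored in inode->i_private */

            seq_printf(s, "private=%p\n", private);
            return 0;
    }

    CEPH_DEFINE_SHOW_FUNC(example_show)     /* defines example_show_open and example_show_fops */

    /* hooked up later with something like:
     *   debugfs_create_file("example", 0400, parent_dentry, data, &example_show_fops);
     */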
diff --git a/fs/ceph/decode.h b/include/linux/ceph/decode.h index 3d25415afe63..c5b6939fb32a 100644 --- a/fs/ceph/decode.h +++ b/include/linux/ceph/decode.h | |||
| @@ -191,6 +191,11 @@ static inline void ceph_encode_string(void **p, void *end, | |||
| 191 | ceph_encode_need(p, end, n, bad); \ | 191 | ceph_encode_need(p, end, n, bad); \ |
| 192 | ceph_encode_copy(p, pv, n); \ | 192 | ceph_encode_copy(p, pv, n); \ |
| 193 | } while (0) | 193 | } while (0) |
| 194 | #define ceph_encode_string_safe(p, end, s, n, bad) \ | ||
| 195 | do { \ | ||
| 196 | ceph_encode_need(p, end, n, bad); \ | ||
| 197 | ceph_encode_string(p, end, s, n); \ | ||
| 198 | } while (0) | ||
| 194 | 199 | ||
| 195 | 200 | ||
| 196 | #endif | 201 | #endif |
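Like the other *_safe encode helpers in this header, the new ceph_encode_string_safe() bounds-checks the destination and jumps to a caller-supplied label on overflow. A rough sketch of the calling pattern, with a made-up function and buffer for illustration:

	/* Encode a length-prefixed name into buf; returns bytes used or -ERANGE. */
	static int encode_name(void *buf, size_t buf_len, const char *name)
	{
		void *p = buf;
		void *end = buf + buf_len;
		u32 len = strlen(name);

		/* encodes a u32 length prefix followed by the string bytes */
		ceph_encode_string_safe(&p, end, name, len, bad);
		return p - buf;

	bad:
		return -ERANGE;		/* destination buffer too small */
	}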
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 000000000000..f22b2e941686 --- /dev/null +++ b/include/linux/ceph/libceph.h | |||
| @@ -0,0 +1,249 @@ | |||
| 1 | #ifndef _FS_CEPH_LIBCEPH_H | ||
| 2 | #define _FS_CEPH_LIBCEPH_H | ||
| 3 | |||
| 4 | #include "ceph_debug.h" | ||
| 5 | |||
| 6 | #include <asm/unaligned.h> | ||
| 7 | #include <linux/backing-dev.h> | ||
| 8 | #include <linux/completion.h> | ||
| 9 | #include <linux/exportfs.h> | ||
| 10 | #include <linux/fs.h> | ||
| 11 | #include <linux/mempool.h> | ||
| 12 | #include <linux/pagemap.h> | ||
| 13 | #include <linux/wait.h> | ||
| 14 | #include <linux/writeback.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | |||
| 17 | #include "types.h" | ||
| 18 | #include "messenger.h" | ||
| 19 | #include "msgpool.h" | ||
| 20 | #include "mon_client.h" | ||
| 21 | #include "osd_client.h" | ||
| 22 | #include "ceph_fs.h" | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Supported features | ||
| 26 | */ | ||
| 27 | #define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
| 28 | #define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
| 29 | |||
| 30 | /* | ||
| 31 | * mount options | ||
| 32 | */ | ||
| 33 | #define CEPH_OPT_FSID (1<<0) | ||
| 34 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | ||
| 35 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | ||
| 36 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ | ||
| 37 | |||
| 38 | #define CEPH_OPT_DEFAULT (0) | ||
| 39 | |||
| 40 | #define ceph_set_opt(client, opt) \ | ||
| 41 | (client)->options->flags |= CEPH_OPT_##opt; | ||
| 42 | #define ceph_test_opt(client, opt) \ | ||
| 43 | (!!((client)->options->flags & CEPH_OPT_##opt)) | ||
| 44 | |||
| 45 | struct ceph_options { | ||
| 46 | int flags; | ||
| 47 | struct ceph_fsid fsid; | ||
| 48 | struct ceph_entity_addr my_addr; | ||
| 49 | int mount_timeout; | ||
| 50 | int osd_idle_ttl; | ||
| 51 | int osd_timeout; | ||
| 52 | int osd_keepalive_timeout; | ||
| 53 | |||
| 54 | /* | ||
| 55 | * any type that can't be simply compared or doesn't need | ||
| 56 | * to be compared should go beyond this point, | ||
| 57 | * ceph_compare_options() should be updated accordingly | ||
| 58 | */ | ||
| 59 | |||
| 60 | struct ceph_entity_addr *mon_addr; /* should be the first | ||
| 61 | pointer type of args */ | ||
| 62 | int num_mon; | ||
| 63 | char *name; | ||
| 64 | char *secret; | ||
| 65 | }; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * defaults | ||
| 69 | */ | ||
| 70 | #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 | ||
| 71 | #define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ | ||
| 72 | #define CEPH_OSD_KEEPALIVE_DEFAULT 5 | ||
| 73 | #define CEPH_OSD_IDLE_TTL_DEFAULT 60 | ||
| 74 | #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ | ||
| 75 | |||
| 76 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | ||
| 77 | #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) | ||
| 78 | |||
| 79 | #define CEPH_AUTH_NAME_DEFAULT "guest" | ||
| 80 | |||
| 81 | /* | ||
| 82 | * Delay telling the MDS we no longer want caps, in case we reopen | ||
| 83 | * the file. Delay a minimum amount of time, even if we send a cap | ||
| 84 | * message for some other reason. Otherwise, take the opportunity to | ||
| 85 | * update the mds to avoid sending another message later. | ||
| 86 | */ | ||
| 87 | #define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ | ||
| 88 | #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ | ||
| 89 | |||
| 90 | #define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) | ||
| 91 | |||
| 92 | /* mount state */ | ||
| 93 | enum { | ||
| 94 | CEPH_MOUNT_MOUNTING, | ||
| 95 | CEPH_MOUNT_MOUNTED, | ||
| 96 | CEPH_MOUNT_UNMOUNTING, | ||
| 97 | CEPH_MOUNT_UNMOUNTED, | ||
| 98 | CEPH_MOUNT_SHUTDOWN, | ||
| 99 | }; | ||
| 100 | |||
| 101 | /* | ||
| 102 | * subtract jiffies | ||
| 103 | */ | ||
| 104 | static inline unsigned long time_sub(unsigned long a, unsigned long b) | ||
| 105 | { | ||
| 106 | BUG_ON(time_after(b, a)); | ||
| 107 | return (long)a - (long)b; | ||
| 108 | } | ||
| 109 | |||
| 110 | struct ceph_mds_client; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * per client state | ||
| 114 | * | ||
| 115 | * possibly shared by multiple mount points, if they are | ||
| 116 | * mounting the same ceph filesystem/cluster. | ||
| 117 | */ | ||
| 118 | struct ceph_client { | ||
| 119 | struct ceph_fsid fsid; | ||
| 120 | bool have_fsid; | ||
| 121 | |||
| 122 | void *private; | ||
| 123 | |||
| 124 | struct ceph_options *options; | ||
| 125 | |||
| 126 | struct mutex mount_mutex; /* serialize mount attempts */ | ||
| 127 | wait_queue_head_t auth_wq; | ||
| 128 | int auth_err; | ||
| 129 | |||
| 130 | int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); | ||
| 131 | |||
| 132 | u32 supported_features; | ||
| 133 | u32 required_features; | ||
| 134 | |||
| 135 | struct ceph_messenger *msgr; /* messenger instance */ | ||
| 136 | struct ceph_mon_client monc; | ||
| 137 | struct ceph_osd_client osdc; | ||
| 138 | |||
| 139 | #ifdef CONFIG_DEBUG_FS | ||
| 140 | struct dentry *debugfs_dir; | ||
| 141 | struct dentry *debugfs_monmap; | ||
| 142 | struct dentry *debugfs_osdmap; | ||
| 143 | #endif | ||
| 144 | }; | ||
| 145 | |||
| 146 | |||
| 147 | |||
| 148 | /* | ||
| 149 | * snapshots | ||
| 150 | */ | ||
| 151 | |||
| 152 | /* | ||
| 153 | * A "snap context" is the set of existing snapshots when we | ||
| 154 | * write data. It is used by the OSD to guide its COW behavior. | ||
| 155 | * | ||
| 156 | * The ceph_snap_context is refcounted, and attached to each dirty | ||
| 157 | * page, indicating which context the dirty data belonged to when it was | ||
| 158 | * dirtied. | ||
| 159 | */ | ||
| 160 | struct ceph_snap_context { | ||
| 161 | atomic_t nref; | ||
| 162 | u64 seq; | ||
| 163 | int num_snaps; | ||
| 164 | u64 snaps[]; | ||
| 165 | }; | ||
| 166 | |||
| 167 | static inline struct ceph_snap_context * | ||
| 168 | ceph_get_snap_context(struct ceph_snap_context *sc) | ||
| 169 | { | ||
| 170 | /* | ||
| 171 | printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
| 172 | atomic_read(&sc->nref)+1); | ||
| 173 | */ | ||
| 174 | if (sc) | ||
| 175 | atomic_inc(&sc->nref); | ||
| 176 | return sc; | ||
| 177 | } | ||
| 178 | |||
| 179 | static inline void ceph_put_snap_context(struct ceph_snap_context *sc) | ||
| 180 | { | ||
| 181 | if (!sc) | ||
| 182 | return; | ||
| 183 | /* | ||
| 184 | printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
| 185 | atomic_read(&sc->nref)-1); | ||
| 186 | */ | ||
| 187 | if (atomic_dec_and_test(&sc->nref)) { | ||
| 188 | /*printk(" deleting snap_context %p\n", sc);*/ | ||
| 189 | kfree(sc); | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | /* | ||
| 194 | * calculate the number of pages a given length and offset map onto, | ||
| 195 | * if we align the data. | ||
| 196 | */ | ||
| 197 | static inline int calc_pages_for(u64 off, u64 len) | ||
| 198 | { | ||
| 199 | return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - | ||
| 200 | (off >> PAGE_CACHE_SHIFT); | ||
| 201 | } | ||
| 202 | |||
| 203 | /* ceph_common.c */ | ||
| 204 | extern const char *ceph_msg_type_name(int type); | ||
| 205 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | ||
| 206 | extern struct kmem_cache *ceph_inode_cachep; | ||
| 207 | extern struct kmem_cache *ceph_cap_cachep; | ||
| 208 | extern struct kmem_cache *ceph_dentry_cachep; | ||
| 209 | extern struct kmem_cache *ceph_file_cachep; | ||
| 210 | |||
| 211 | extern int ceph_parse_options(struct ceph_options **popt, char *options, | ||
| 212 | const char *dev_name, const char *dev_name_end, | ||
| 213 | int (*parse_extra_token)(char *c, void *private), | ||
| 214 | void *private); | ||
| 215 | extern void ceph_destroy_options(struct ceph_options *opt); | ||
| 216 | extern int ceph_compare_options(struct ceph_options *new_opt, | ||
| 217 | struct ceph_client *client); | ||
| 218 | extern struct ceph_client *ceph_create_client(struct ceph_options *opt, | ||
| 219 | void *private); | ||
| 220 | extern u64 ceph_client_id(struct ceph_client *client); | ||
| 221 | extern void ceph_destroy_client(struct ceph_client *client); | ||
| 222 | extern int __ceph_open_session(struct ceph_client *client, | ||
| 223 | unsigned long started); | ||
| 224 | extern int ceph_open_session(struct ceph_client *client); | ||
| 225 | |||
| 226 | /* pagevec.c */ | ||
| 227 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
| 228 | |||
| 229 | extern struct page **ceph_get_direct_page_vector(const char __user *data, | ||
| 230 | int num_pages, | ||
| 231 | loff_t off, size_t len); | ||
| 232 | extern void ceph_put_page_vector(struct page **pages, int num_pages); | ||
| 233 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
| 234 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); | ||
| 235 | extern int ceph_copy_user_to_page_vector(struct page **pages, | ||
| 236 | const char __user *data, | ||
| 237 | loff_t off, size_t len); | ||
| 238 | extern int ceph_copy_to_page_vector(struct page **pages, | ||
| 239 | const char *data, | ||
| 240 | loff_t off, size_t len); | ||
| 241 | extern int ceph_copy_from_page_vector(struct page **pages, | ||
| 242 | char *data, | ||
| 243 | loff_t off, size_t len); | ||
| 244 | extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data, | ||
| 245 | loff_t off, size_t len); | ||
| 246 | extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); | ||
| 247 | |||
| 248 | |||
| 249 | #endif /* _FS_CEPH_LIBCEPH_H */ | ||
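Taken together, the exported helpers above form the client bring-up path that fs/ceph and rbd are expected to share. A hedged sketch of how a consumer might use them; the option and device strings are examples only, and ceph_create_client() is assumed to return an ERR_PTR on failure:

	static struct ceph_client *example_client_setup(void)
	{
		/* illustrative option/device strings; real ones come from mount/map */
		char options[] = "name=admin";
		const char *dev = "1.2.3.4:6789:/";
		struct ceph_options *opt;
		struct ceph_client *client;
		int err;

		err = ceph_parse_options(&opt, options, dev, dev + strlen(dev),
					 NULL, NULL);
		if (err)
			return ERR_PTR(err);

		client = ceph_create_client(opt, NULL);
		if (IS_ERR(client)) {
			ceph_destroy_options(opt);
			return client;
		}

		ceph_set_opt(client, NOCRC);		/* flag helpers defined above */
		if (ceph_test_opt(client, NOCRC))
			pr_info("data crcs disabled\n");

		err = ceph_open_session(client);
		if (err) {
			ceph_destroy_client(client);
			return ERR_PTR(err);
		}
		return client;
	}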
diff --git a/fs/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 4c5cb0880bba..4c5cb0880bba 100644 --- a/fs/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h | |||
diff --git a/fs/ceph/messenger.h b/include/linux/ceph/messenger.h index 76fbc957bc13..5956d62c3057 100644 --- a/fs/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -65,6 +65,9 @@ struct ceph_messenger { | |||
| 65 | */ | 65 | */ |
| 66 | u32 global_seq; | 66 | u32 global_seq; |
| 67 | spinlock_t global_seq_lock; | 67 | spinlock_t global_seq_lock; |
| 68 | |||
| 69 | u32 supported_features; | ||
| 70 | u32 required_features; | ||
| 68 | }; | 71 | }; |
| 69 | 72 | ||
| 70 | /* | 73 | /* |
| @@ -82,6 +85,10 @@ struct ceph_msg { | |||
| 82 | struct ceph_pagelist *pagelist; /* instead of pages */ | 85 | struct ceph_pagelist *pagelist; /* instead of pages */ |
| 83 | struct list_head list_head; | 86 | struct list_head list_head; |
| 84 | struct kref kref; | 87 | struct kref kref; |
| 88 | struct bio *bio; /* instead of pages/pagelist */ | ||
| 89 | struct bio *bio_iter; /* bio iterator */ | ||
| 90 | int bio_seg; /* current bio segment */ | ||
| 91 | struct ceph_pagelist *trail; /* the trailing part of the data */ | ||
| 85 | bool front_is_vmalloc; | 92 | bool front_is_vmalloc; |
| 86 | bool more_to_follow; | 93 | bool more_to_follow; |
| 87 | bool needs_out_seq; | 94 | bool needs_out_seq; |
| @@ -205,7 +212,7 @@ struct ceph_connection { | |||
| 205 | }; | 212 | }; |
| 206 | 213 | ||
| 207 | 214 | ||
| 208 | extern const char *pr_addr(const struct sockaddr_storage *ss); | 215 | extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); |
| 209 | extern int ceph_parse_ips(const char *c, const char *end, | 216 | extern int ceph_parse_ips(const char *c, const char *end, |
| 210 | struct ceph_entity_addr *addr, | 217 | struct ceph_entity_addr *addr, |
| 211 | int max_count, int *count); | 218 | int max_count, int *count); |
| @@ -216,7 +223,8 @@ extern void ceph_msgr_exit(void); | |||
| 216 | extern void ceph_msgr_flush(void); | 223 | extern void ceph_msgr_flush(void); |
| 217 | 224 | ||
| 218 | extern struct ceph_messenger *ceph_messenger_create( | 225 | extern struct ceph_messenger *ceph_messenger_create( |
| 219 | struct ceph_entity_addr *myaddr); | 226 | struct ceph_entity_addr *myaddr, |
| 227 | u32 features, u32 required); | ||
| 220 | extern void ceph_messenger_destroy(struct ceph_messenger *); | 228 | extern void ceph_messenger_destroy(struct ceph_messenger *); |
| 221 | 229 | ||
| 222 | extern void ceph_con_init(struct ceph_messenger *msgr, | 230 | extern void ceph_con_init(struct ceph_messenger *msgr, |
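ceph_messenger_create() now takes the supported and required feature masks explicitly instead of assuming them. A one-line usage sketch, assuming the default feature bits defined in libceph.h above and an ERR_PTR return on failure:

	/* sketch: create a messenger advertising the default feature bits */
	struct ceph_messenger *msgr;

	msgr = ceph_messenger_create(&opt->my_addr,
				     CEPH_FEATURE_SUPPORTED_DEFAULT,
				     CEPH_FEATURE_REQUIRED_DEFAULT);
	if (IS_ERR(msgr))
		return PTR_ERR(msgr);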
diff --git a/fs/ceph/mon_client.h b/include/linux/ceph/mon_client.h index 8e396f2c0963..545f85917780 100644 --- a/fs/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h | |||
| @@ -79,6 +79,7 @@ struct ceph_mon_client { | |||
| 79 | u64 last_tid; | 79 | u64 last_tid; |
| 80 | 80 | ||
| 81 | /* mds/osd map */ | 81 | /* mds/osd map */ |
| 82 | int want_mdsmap; | ||
| 82 | int want_next_osdmap; /* 1 = want, 2 = want+asked */ | 83 | int want_next_osdmap; /* 1 = want, 2 = want+asked */ |
| 83 | u32 have_osdmap, have_mdsmap; | 84 | u32 have_osdmap, have_mdsmap; |
| 84 | 85 | ||
diff --git a/fs/ceph/msgpool.h b/include/linux/ceph/msgpool.h index a362605f9368..a362605f9368 100644 --- a/fs/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h | |||
diff --git a/fs/ceph/msgr.h b/include/linux/ceph/msgr.h index 680d3d648cac..680d3d648cac 100644 --- a/fs/ceph/msgr.h +++ b/include/linux/ceph/msgr.h | |||
diff --git a/fs/ceph/osd_client.h b/include/linux/ceph/osd_client.h index ce776989ef6a..6c91fb032c39 100644 --- a/fs/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -15,6 +15,7 @@ struct ceph_snap_context; | |||
| 15 | struct ceph_osd_request; | 15 | struct ceph_osd_request; |
| 16 | struct ceph_osd_client; | 16 | struct ceph_osd_client; |
| 17 | struct ceph_authorizer; | 17 | struct ceph_authorizer; |
| 18 | struct ceph_pagelist; | ||
| 18 | 19 | ||
| 19 | /* | 20 | /* |
| 20 | * completion callback for async writepages | 21 | * completion callback for async writepages |
| @@ -68,6 +69,7 @@ struct ceph_osd_request { | |||
| 68 | struct list_head r_unsafe_item; | 69 | struct list_head r_unsafe_item; |
| 69 | 70 | ||
| 70 | struct inode *r_inode; /* for use by callbacks */ | 71 | struct inode *r_inode; /* for use by callbacks */ |
| 72 | void *r_priv; /* ditto */ | ||
| 71 | 73 | ||
| 72 | char r_oid[40]; /* object name */ | 74 | char r_oid[40]; /* object name */ |
| 73 | int r_oid_len; | 75 | int r_oid_len; |
| @@ -80,6 +82,11 @@ struct ceph_osd_request { | |||
| 80 | struct page **r_pages; /* pages for data payload */ | 82 | struct page **r_pages; /* pages for data payload */ |
| 81 | int r_pages_from_pool; | 83 | int r_pages_from_pool; |
| 82 | int r_own_pages; /* if true, i own page list */ | 84 | int r_own_pages; /* if true, i own page list */ |
| 85 | #ifdef CONFIG_BLOCK | ||
| 86 | struct bio *r_bio; /* instead of pages */ | ||
| 87 | #endif | ||
| 88 | |||
| 89 | struct ceph_pagelist *r_trail; /* trailing part of the data */ | ||
| 83 | }; | 90 | }; |
| 84 | 91 | ||
| 85 | struct ceph_osd_client { | 92 | struct ceph_osd_client { |
| @@ -110,6 +117,42 @@ struct ceph_osd_client { | |||
| 110 | struct ceph_msgpool msgpool_op_reply; | 117 | struct ceph_msgpool msgpool_op_reply; |
| 111 | }; | 118 | }; |
| 112 | 119 | ||
| 120 | struct ceph_osd_req_op { | ||
| 121 | u16 op; /* CEPH_OSD_OP_* */ | ||
| 122 | u32 flags; /* CEPH_OSD_FLAG_* */ | ||
| 123 | union { | ||
| 124 | struct { | ||
| 125 | u64 offset, length; | ||
| 126 | u64 truncate_size; | ||
| 127 | u32 truncate_seq; | ||
| 128 | } extent; | ||
| 129 | struct { | ||
| 130 | const char *name; | ||
| 131 | u32 name_len; | ||
| 132 | const char *val; | ||
| 133 | u32 value_len; | ||
| 134 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
| 135 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
| 136 | } xattr; | ||
| 137 | struct { | ||
| 138 | const char *class_name; | ||
| 139 | __u8 class_len; | ||
| 140 | const char *method_name; | ||
| 141 | __u8 method_len; | ||
| 142 | __u8 argc; | ||
| 143 | const char *indata; | ||
| 144 | u32 indata_len; | ||
| 145 | } cls; | ||
| 146 | struct { | ||
| 147 | u64 cookie, count; | ||
| 148 | } pgls; | ||
| 149 | struct { | ||
| 150 | u64 snapid; | ||
| 151 | } snap; | ||
| 152 | }; | ||
| 153 | u32 payload_len; | ||
| 154 | }; | ||
| 155 | |||
| 113 | extern int ceph_osdc_init(struct ceph_osd_client *osdc, | 156 | extern int ceph_osdc_init(struct ceph_osd_client *osdc, |
| 114 | struct ceph_client *client); | 157 | struct ceph_client *client); |
| 115 | extern void ceph_osdc_stop(struct ceph_osd_client *osdc); | 158 | extern void ceph_osdc_stop(struct ceph_osd_client *osdc); |
| @@ -119,6 +162,30 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, | |||
| 119 | extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, | 162 | extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, |
| 120 | struct ceph_msg *msg); | 163 | struct ceph_msg *msg); |
| 121 | 164 | ||
| 165 | extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc, | ||
| 166 | struct ceph_file_layout *layout, | ||
| 167 | u64 snapid, | ||
| 168 | u64 off, u64 *plen, u64 *bno, | ||
| 169 | struct ceph_osd_request *req, | ||
| 170 | struct ceph_osd_req_op *op); | ||
| 171 | |||
| 172 | extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | ||
| 173 | int flags, | ||
| 174 | struct ceph_snap_context *snapc, | ||
| 175 | struct ceph_osd_req_op *ops, | ||
| 176 | bool use_mempool, | ||
| 177 | gfp_t gfp_flags, | ||
| 178 | struct page **pages, | ||
| 179 | struct bio *bio); | ||
| 180 | |||
| 181 | extern void ceph_osdc_build_request(struct ceph_osd_request *req, | ||
| 182 | u64 off, u64 *plen, | ||
| 183 | struct ceph_osd_req_op *src_ops, | ||
| 184 | struct ceph_snap_context *snapc, | ||
| 185 | struct timespec *mtime, | ||
| 186 | const char *oid, | ||
| 187 | int oid_len); | ||
| 188 | |||
| 122 | extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, | 189 | extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, |
| 123 | struct ceph_file_layout *layout, | 190 | struct ceph_file_layout *layout, |
| 124 | struct ceph_vino vino, | 191 | struct ceph_vino vino, |
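The new ceph_osd_req_op array is how callers now describe the per-request operations that ceph_osdc_alloc_request() and ceph_osdc_build_request() encode. A hedged sketch of filling a single extent read op; the constants are the usual rados.h values, the surrounding request setup is omitted, and the zero-op terminator follows what callers in this series appear to expect:

	struct ceph_osd_req_op ops[2];

	memset(ops, 0, sizeof(ops));
	ops[0].op = CEPH_OSD_OP_READ;		/* from rados.h */
	ops[0].extent.offset = off;
	ops[0].extent.length = len;
	ops[0].extent.truncate_size = 0;
	ops[0].extent.truncate_seq = 0;
	ops[1].op = 0;				/* terminating entry */

	/* then, roughly (flags would include CEPH_OSD_FLAG_READ):
	 *   req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
	 *                                 false, GFP_NOFS, NULL, NULL);
	 *   ceph_osdc_build_request(req, off, &len, ops, snapc, NULL, oid, oid_len);
	 */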
diff --git a/fs/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 970b547e510d..ba4c205cbb01 100644 --- a/fs/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <linux/rbtree.h> | 4 | #include <linux/rbtree.h> |
| 5 | #include "types.h" | 5 | #include "types.h" |
| 6 | #include "ceph_fs.h" | 6 | #include "ceph_fs.h" |
| 7 | #include "crush/crush.h" | 7 | #include <linux/crush/crush.h> |
| 8 | 8 | ||
| 9 | /* | 9 | /* |
| 10 | * The osd map describes the current membership of the osd cluster and | 10 | * The osd map describes the current membership of the osd cluster and |
| @@ -125,4 +125,6 @@ extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
| 125 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, | 125 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, |
| 126 | struct ceph_pg pgid); | 126 | struct ceph_pg pgid); |
| 127 | 127 | ||
| 128 | extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); | ||
| 129 | |||
| 128 | #endif | 130 | #endif |
diff --git a/fs/ceph/pagelist.h b/include/linux/ceph/pagelist.h index e8a4187e1087..9660d6b0a35d 100644 --- a/fs/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h | |||
| @@ -8,6 +8,14 @@ struct ceph_pagelist { | |||
| 8 | void *mapped_tail; | 8 | void *mapped_tail; |
| 9 | size_t length; | 9 | size_t length; |
| 10 | size_t room; | 10 | size_t room; |
| 11 | struct list_head free_list; | ||
| 12 | size_t num_pages_free; | ||
| 13 | }; | ||
| 14 | |||
| 15 | struct ceph_pagelist_cursor { | ||
| 16 | struct ceph_pagelist *pl; /* pagelist, for error checking */ | ||
| 17 | struct list_head *page_lru; /* page in list */ | ||
| 18 | size_t room; /* room remaining to reset to */ | ||
| 11 | }; | 19 | }; |
| 12 | 20 | ||
| 13 | static inline void ceph_pagelist_init(struct ceph_pagelist *pl) | 21 | static inline void ceph_pagelist_init(struct ceph_pagelist *pl) |
| @@ -16,10 +24,23 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl) | |||
| 16 | pl->mapped_tail = NULL; | 24 | pl->mapped_tail = NULL; |
| 17 | pl->length = 0; | 25 | pl->length = 0; |
| 18 | pl->room = 0; | 26 | pl->room = 0; |
| 27 | INIT_LIST_HEAD(&pl->free_list); | ||
| 28 | pl->num_pages_free = 0; | ||
| 19 | } | 29 | } |
| 30 | |||
| 20 | extern int ceph_pagelist_release(struct ceph_pagelist *pl); | 31 | extern int ceph_pagelist_release(struct ceph_pagelist *pl); |
| 21 | 32 | ||
| 22 | extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l); | 33 | extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); |
| 34 | |||
| 35 | extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); | ||
| 36 | |||
| 37 | extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); | ||
| 38 | |||
| 39 | extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, | ||
| 40 | struct ceph_pagelist_cursor *c); | ||
| 41 | |||
| 42 | extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, | ||
| 43 | struct ceph_pagelist_cursor *c); | ||
| 23 | 44 | ||
| 24 | static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) | 45 | static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) |
| 25 | { | 46 | { |
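The reserve/cursor additions let a caller pre-allocate pages, remember a position, and roll back a partial append. A rough sketch of the intended pattern; the encoded value is illustrative:

	struct ceph_pagelist pl;
	struct ceph_pagelist_cursor c;
	u64 some_value = 42;
	int err;

	ceph_pagelist_init(&pl);
	err = ceph_pagelist_reserve(&pl, PAGE_SIZE);	/* grab pages up front */
	if (err)
		return err;

	ceph_pagelist_set_cursor(&pl, &c);		/* remember where we are */
	err = ceph_pagelist_encode_64(&pl, some_value);
	if (err)
		ceph_pagelist_truncate(&pl, &c);	/* undo the partial encode */

	ceph_pagelist_free_reserve(&pl);		/* drop unused reserved pages */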
diff --git a/fs/ceph/rados.h b/include/linux/ceph/rados.h index 6d5247f2e81b..6d5247f2e81b 100644 --- a/fs/ceph/rados.h +++ b/include/linux/ceph/rados.h | |||
diff --git a/fs/ceph/types.h b/include/linux/ceph/types.h index 28b35a005ec2..28b35a005ec2 100644 --- a/fs/ceph/types.h +++ b/include/linux/ceph/types.h | |||
diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 8ba66a9d9022..ba4b85a6d9b8 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h | |||
| @@ -9,37 +9,7 @@ | |||
| 9 | * These are the only things you should do on a core-file: use only these | 9 | * These are the only things you should do on a core-file: use only these |
| 10 | * functions to write out all the necessary info. | 10 | * functions to write out all the necessary info. |
| 11 | */ | 11 | */ |
| 12 | static inline int dump_write(struct file *file, const void *addr, int nr) | 12 | extern int dump_write(struct file *file, const void *addr, int nr); |
| 13 | { | 13 | extern int dump_seek(struct file *file, loff_t off); |
| 14 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | ||
| 15 | } | ||
| 16 | |||
| 17 | static inline int dump_seek(struct file *file, loff_t off) | ||
| 18 | { | ||
| 19 | int ret = 1; | ||
| 20 | |||
| 21 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | ||
| 22 | if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | ||
| 23 | return 0; | ||
| 24 | } else { | ||
| 25 | char *buf = (char *)get_zeroed_page(GFP_KERNEL); | ||
| 26 | |||
| 27 | if (!buf) | ||
| 28 | return 0; | ||
| 29 | while (off > 0) { | ||
| 30 | unsigned long n = off; | ||
| 31 | |||
| 32 | if (n > PAGE_SIZE) | ||
| 33 | n = PAGE_SIZE; | ||
| 34 | if (!dump_write(file, buf, n)) { | ||
| 35 | ret = 0; | ||
| 36 | break; | ||
| 37 | } | ||
| 38 | off -= n; | ||
| 39 | } | ||
| 40 | free_page((unsigned long)buf); | ||
| 41 | } | ||
| 42 | return ret; | ||
| 43 | } | ||
| 44 | 14 | ||
| 45 | #endif /* _LINUX_COREDUMP_H */ | 15 | #endif /* _LINUX_COREDUMP_H */ |
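dump_write() and dump_seek() keep the same calling convention after being moved out of line: both return nonzero on success and 0 on failure, so core-dump writers bail out on a zero return. A small hedged sketch of a caller; the helper name is made up:

	/* sketch: emit a blob at a given offset in the core file */
	static int dump_blob(struct file *file, const void *buf, int len, loff_t skip)
	{
		if (skip && !dump_seek(file, skip))
			return 0;			/* failure, stop dumping */
		return dump_write(file, buf, len);	/* 1 on full write, 0 otherwise */
	}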
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 36ca9721a0c2..1be416bbbb82 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -53,6 +53,7 @@ struct cpuidle_state { | |||
| 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ | 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ |
| 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ | 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ |
| 55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ | 55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ |
| 56 | #define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ | ||
| 56 | 57 | ||
| 57 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | 58 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) |
| 58 | 59 | ||
diff --git a/fs/ceph/crush/crush.h b/include/linux/crush/crush.h index 97e435b191f4..97e435b191f4 100644 --- a/fs/ceph/crush/crush.h +++ b/include/linux/crush/crush.h | |||
diff --git a/fs/ceph/crush/hash.h b/include/linux/crush/hash.h index 91e884230d5d..91e884230d5d 100644 --- a/fs/ceph/crush/hash.h +++ b/include/linux/crush/hash.h | |||
diff --git a/fs/ceph/crush/mapper.h b/include/linux/crush/mapper.h index c46b99c18bb0..c46b99c18bb0 100644 --- a/fs/ceph/crush/mapper.h +++ b/include/linux/crush/mapper.h | |||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c61d4ca27bcc..e2106495cc11 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma) | |||
| 548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; | 548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; |
| 549 | } | 549 | } |
| 550 | 550 | ||
| 551 | static unsigned short dma_dev_to_maxpq(struct dma_device *dma) | 551 | static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) |
| 552 | { | 552 | { |
| 553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; | 553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; |
| 554 | } | 554 | } |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 926b50322a46..4fd978e7eb83 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -93,6 +93,7 @@ struct elevator_queue | |||
| 93 | struct elevator_type *elevator_type; | 93 | struct elevator_type *elevator_type; |
| 94 | struct mutex sysfs_lock; | 94 | struct mutex sysfs_lock; |
| 95 | struct hlist_head *hash; | 95 | struct hlist_head *hash; |
| 96 | unsigned int registered:1; | ||
| 96 | }; | 97 | }; |
| 97 | 98 | ||
| 98 | /* | 99 | /* |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2b0a35e6bc69..1759ba5adce8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -58,7 +58,18 @@ extern const char linux_proc_banner[]; | |||
| 58 | 58 | ||
| 59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
| 60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
| 61 | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) | 61 | #define roundup(x, y) ( \ |
| 62 | { \ | ||
| 63 | typeof(y) __y = y; \ | ||
| 64 | (((x) + (__y - 1)) / __y) * __y; \ | ||
| 65 | } \ | ||
| 66 | ) | ||
| 67 | #define rounddown(x, y) ( \ | ||
| 68 | { \ | ||
| 69 | typeof(x) __x = (x); \ | ||
| 70 | __x - (__x % (y)); \ | ||
| 71 | } \ | ||
| 72 | ) | ||
| 62 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 73 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
| 63 | { \ | 74 | { \ |
| 64 | typeof(divisor) __divisor = divisor; \ | 75 | typeof(divisor) __divisor = divisor; \ |
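The rewritten roundup() evaluates its alignment argument only once via a typeof() temporary, so an argument with side effects is no longer expanded several times, and the new rounddown() is its floor counterpart. A quick sketch of what the macros compute; the final line only illustrates a typical call:

	/* roundup(x, y): smallest multiple of y that is >= x
	 * rounddown(x, y): largest multiple of y that is <= x
	 *
	 *   roundup(10, 4)   == 12
	 *   roundup(12, 4)   == 12
	 *   rounddown(10, 4) == 8
	 *   rounddown(12, 4) == 12
	 */
	size_t aligned_len = roundup(len, SMP_CACHE_BYTES);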
diff --git a/include/linux/module.h b/include/linux/module.h index 8a6b9fdc7ffa..aace066bad8f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -686,17 +686,16 @@ extern int module_sysfs_initialized; | |||
| 686 | 686 | ||
| 687 | 687 | ||
| 688 | #ifdef CONFIG_GENERIC_BUG | 688 | #ifdef CONFIG_GENERIC_BUG |
| 689 | int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, | 689 | void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, |
| 690 | struct module *); | 690 | struct module *); |
| 691 | void module_bug_cleanup(struct module *); | 691 | void module_bug_cleanup(struct module *); |
| 692 | 692 | ||
| 693 | #else /* !CONFIG_GENERIC_BUG */ | 693 | #else /* !CONFIG_GENERIC_BUG */ |
| 694 | 694 | ||
| 695 | static inline int module_bug_finalize(const Elf_Ehdr *hdr, | 695 | static inline void module_bug_finalize(const Elf_Ehdr *hdr, |
| 696 | const Elf_Shdr *sechdrs, | 696 | const Elf_Shdr *sechdrs, |
| 697 | struct module *mod) | 697 | struct module *mod) |
| 698 | { | 698 | { |
| 699 | return 0; | ||
| 700 | } | 699 | } |
| 701 | static inline void module_bug_cleanup(struct module *mod) {} | 700 | static inline void module_bug_cleanup(struct module *mod) {} |
| 702 | #endif /* CONFIG_GENERIC_BUG */ | 701 | #endif /* CONFIG_GENERIC_BUG */ |
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 9ed534c991b9..70cd0603911c 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h | |||
| @@ -39,8 +39,9 @@ enum ctattr_type { | |||
| 39 | CTA_TUPLE_MASTER, | 39 | CTA_TUPLE_MASTER, |
| 40 | CTA_NAT_SEQ_ADJ_ORIG, | 40 | CTA_NAT_SEQ_ADJ_ORIG, |
| 41 | CTA_NAT_SEQ_ADJ_REPLY, | 41 | CTA_NAT_SEQ_ADJ_REPLY, |
| 42 | CTA_SECMARK, | 42 | CTA_SECMARK, /* obsolete */ |
| 43 | CTA_ZONE, | 43 | CTA_ZONE, |
| 44 | CTA_SECCTX, | ||
| 44 | __CTA_MAX | 45 | __CTA_MAX |
| 45 | }; | 46 | }; |
| 46 | #define CTA_MAX (__CTA_MAX - 1) | 47 | #define CTA_MAX (__CTA_MAX - 1) |
| @@ -172,4 +173,11 @@ enum ctattr_help { | |||
| 172 | }; | 173 | }; |
| 173 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) | 174 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) |
| 174 | 175 | ||
| 176 | enum ctattr_secctx { | ||
| 177 | CTA_SECCTX_UNSPEC, | ||
| 178 | CTA_SECCTX_NAME, | ||
| 179 | __CTA_SECCTX_MAX | ||
| 180 | }; | ||
| 181 | #define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) | ||
| 182 | |||
| 175 | #endif /* _IPCONNTRACK_NETLINK_H */ | 183 | #endif /* _IPCONNTRACK_NETLINK_H */ |
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h index 6fcd3448b186..989092bd6274 100644 --- a/include/linux/netfilter/xt_SECMARK.h +++ b/include/linux/netfilter/xt_SECMARK.h | |||
| @@ -11,18 +11,12 @@ | |||
| 11 | * packets are being marked for. | 11 | * packets are being marked for. |
| 12 | */ | 12 | */ |
| 13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ | 13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ |
| 14 | #define SECMARK_SELCTX_MAX 256 | 14 | #define SECMARK_SECCTX_MAX 256 |
| 15 | |||
| 16 | struct xt_secmark_target_selinux_info { | ||
| 17 | __u32 selsid; | ||
| 18 | char selctx[SECMARK_SELCTX_MAX]; | ||
| 19 | }; | ||
| 20 | 15 | ||
| 21 | struct xt_secmark_target_info { | 16 | struct xt_secmark_target_info { |
| 22 | __u8 mode; | 17 | __u8 mode; |
| 23 | union { | 18 | __u32 secid; |
| 24 | struct xt_secmark_target_selinux_info sel; | 19 | char secctx[SECMARK_SECCTX_MAX]; |
| 25 | } u; | ||
| 26 | }; | 20 | }; |
| 27 | 21 | ||
| 28 | #endif /*_XT_SECMARK_H_target */ | 22 | #endif /*_XT_SECMARK_H_target */ |
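With the SELinux-specific union gone, the SECMARK target structure carries a generic security context string plus the secid the kernel resolves for it. A hedged sketch of how a userspace helper might fill it in; the helper name is made up, real parsing lives in the iptables extension, and the kernel side is assumed to resolve secid when the rule is checked:

	#include <string.h>
	#include <linux/netfilter/xt_SECMARK.h>

	static int fill_secmark(struct xt_secmark_target_info *info, const char *ctx)
	{
		if (strlen(ctx) >= SECMARK_SECCTX_MAX)
			return -1;			/* context string too long */

		memset(info, 0, sizeof(*info));
		info->mode = SECMARK_MODE_SEL;
		strcpy(info->secctx, ctx);		/* secid filled in by the kernel later */
		return 0;
	}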
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9fbc54a2585d..83af1f8d8b74 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 454 | * Makes rcu_dereference_check() do the dirty work. | 454 | * Makes rcu_dereference_check() do the dirty work. |
| 455 | */ | 455 | */ |
| 456 | #define rcu_dereference_bh(p) \ | 456 | #define rcu_dereference_bh(p) \ |
| 457 | rcu_dereference_check(p, rcu_read_lock_bh_held()) | 457 | rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) |
| 458 | 458 | ||
| 459 | /** | 459 | /** |
| 460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched | 460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched |
diff --git a/include/linux/security.h b/include/linux/security.h index a22219afff09..b8246a8df7d2 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot, | |||
| 74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); | 74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); |
| 75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | 75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, |
| 76 | unsigned long arg4, unsigned long arg5); | 76 | unsigned long arg4, unsigned long arg5); |
| 77 | extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); | 77 | extern int cap_task_setscheduler(struct task_struct *p); |
| 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); | 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); |
| 79 | extern int cap_task_setnice(struct task_struct *p, int nice); | 79 | extern int cap_task_setnice(struct task_struct *p, int nice); |
| 80 | extern int cap_syslog(int type, bool from_file); | 80 | extern int cap_syslog(int type, bool from_file); |
| @@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
| 959 | * Sets the new child socket's sid to the openreq sid. | 959 | * Sets the new child socket's sid to the openreq sid. |
| 960 | * @inet_conn_established: | 960 | * @inet_conn_established: |
| 961 | * Sets the connection's peersid to the secmark on skb. | 961 | * Sets the connection's peersid to the secmark on skb. |
| 962 | * @secmark_relabel_packet: | ||
| 963 | * Check if the process should be allowed to relabel packets to the given secid. | ||
| 964 | * @secmark_refcount_inc: | ||
| 965 | * Tells the LSM to increment the number of secmark labeling rules loaded. | ||
| 966 | * @secmark_refcount_dec: | ||
| 967 | * Tells the LSM to decrement the number of secmark labeling rules loaded. | ||
| 962 | * @req_classify_flow: | 968 | * @req_classify_flow: |
| 963 | * Sets the flow's sid to the openreq sid. | 969 | * Sets the flow's sid to the openreq sid. |
| 964 | * @tun_dev_create: | 970 | * @tun_dev_create: |
| @@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
| 1279 | * Return 0 if permission is granted. | 1285 | * Return 0 if permission is granted. |
| 1280 | * | 1286 | * |
| 1281 | * @secid_to_secctx: | 1287 | * @secid_to_secctx: |
| 1282 | * Convert secid to security context. | 1288 | * Convert secid to security context. If secdata is NULL the length of |
| 1289 | * the result will be returned in seclen, but no secdata will be returned. | ||
| 1290 | * This does mean that the length could change between calls to check the | ||
| 1291 | * length and the next call which actually allocates and returns the secdata. | ||
| 1283 | * @secid contains the security ID. | 1292 | * @secid contains the security ID. |
| 1284 | * @secdata contains the pointer that stores the converted security context. | 1293 | * @secdata contains the pointer that stores the converted security context. |
| 1294 | * @seclen pointer which contains the length of the data | ||
| 1285 | * @secctx_to_secid: | 1295 | * @secctx_to_secid: |
| 1286 | * Convert security context to secid. | 1296 | * Convert security context to secid. |
| 1287 | * @secid contains the pointer to the generated security ID. | 1297 | * @secid contains the pointer to the generated security ID. |
| @@ -1501,8 +1511,7 @@ struct security_operations { | |||
| 1501 | int (*task_getioprio) (struct task_struct *p); | 1511 | int (*task_getioprio) (struct task_struct *p); |
| 1502 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, | 1512 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, |
| 1503 | struct rlimit *new_rlim); | 1513 | struct rlimit *new_rlim); |
| 1504 | int (*task_setscheduler) (struct task_struct *p, int policy, | 1514 | int (*task_setscheduler) (struct task_struct *p); |
| 1505 | struct sched_param *lp); | ||
| 1506 | int (*task_getscheduler) (struct task_struct *p); | 1515 | int (*task_getscheduler) (struct task_struct *p); |
| 1507 | int (*task_movememory) (struct task_struct *p); | 1516 | int (*task_movememory) (struct task_struct *p); |
| 1508 | int (*task_kill) (struct task_struct *p, | 1517 | int (*task_kill) (struct task_struct *p, |
| @@ -1594,6 +1603,9 @@ struct security_operations { | |||
| 1594 | struct request_sock *req); | 1603 | struct request_sock *req); |
| 1595 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); | 1604 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); |
| 1596 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); | 1605 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); |
| 1606 | int (*secmark_relabel_packet) (u32 secid); | ||
| 1607 | void (*secmark_refcount_inc) (void); | ||
| 1608 | void (*secmark_refcount_dec) (void); | ||
| 1597 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); | 1609 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); |
| 1598 | int (*tun_dev_create)(void); | 1610 | int (*tun_dev_create)(void); |
| 1599 | void (*tun_dev_post_create)(struct sock *sk); | 1611 | void (*tun_dev_post_create)(struct sock *sk); |
| @@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio); | |||
| 1752 | int security_task_getioprio(struct task_struct *p); | 1764 | int security_task_getioprio(struct task_struct *p); |
| 1753 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, | 1765 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, |
| 1754 | struct rlimit *new_rlim); | 1766 | struct rlimit *new_rlim); |
| 1755 | int security_task_setscheduler(struct task_struct *p, | 1767 | int security_task_setscheduler(struct task_struct *p); |
| 1756 | int policy, struct sched_param *lp); | ||
| 1757 | int security_task_getscheduler(struct task_struct *p); | 1768 | int security_task_getscheduler(struct task_struct *p); |
| 1758 | int security_task_movememory(struct task_struct *p); | 1769 | int security_task_movememory(struct task_struct *p); |
| 1759 | int security_task_kill(struct task_struct *p, struct siginfo *info, | 1770 | int security_task_kill(struct task_struct *p, struct siginfo *info, |
| @@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p, | |||
| 2320 | return 0; | 2331 | return 0; |
| 2321 | } | 2332 | } |
| 2322 | 2333 | ||
| 2323 | static inline int security_task_setscheduler(struct task_struct *p, | 2334 | static inline int security_task_setscheduler(struct task_struct *p) |
| 2324 | int policy, | ||
| 2325 | struct sched_param *lp) | ||
| 2326 | { | 2335 | { |
| 2327 | return cap_task_setscheduler(p, policy, lp); | 2336 | return cap_task_setscheduler(p); |
| 2328 | } | 2337 | } |
| 2329 | 2338 | ||
| 2330 | static inline int security_task_getscheduler(struct task_struct *p) | 2339 | static inline int security_task_getscheduler(struct task_struct *p) |
| @@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk, | |||
| 2551 | const struct request_sock *req); | 2560 | const struct request_sock *req); |
| 2552 | void security_inet_conn_established(struct sock *sk, | 2561 | void security_inet_conn_established(struct sock *sk, |
| 2553 | struct sk_buff *skb); | 2562 | struct sk_buff *skb); |
| 2563 | int security_secmark_relabel_packet(u32 secid); | ||
| 2564 | void security_secmark_refcount_inc(void); | ||
| 2565 | void security_secmark_refcount_dec(void); | ||
| 2554 | int security_tun_dev_create(void); | 2566 | int security_tun_dev_create(void); |
| 2555 | void security_tun_dev_post_create(struct sock *sk); | 2567 | void security_tun_dev_post_create(struct sock *sk); |
| 2556 | int security_tun_dev_attach(struct sock *sk); | 2568 | int security_tun_dev_attach(struct sock *sk); |
| @@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk, | |||
| 2705 | { | 2717 | { |
| 2706 | } | 2718 | } |
| 2707 | 2719 | ||
| 2720 | static inline int security_secmark_relabel_packet(u32 secid) | ||
| 2721 | { | ||
| 2722 | return 0; | ||
| 2723 | } | ||
| 2724 | |||
| 2725 | static inline void security_secmark_refcount_inc(void) | ||
| 2726 | { | ||
| 2727 | } | ||
| 2728 | |||
| 2729 | static inline void security_secmark_refcount_dec(void) | ||
| 2730 | { | ||
| 2731 | } | ||
| 2732 | |||
| 2708 | static inline int security_tun_dev_create(void) | 2733 | static inline int security_tun_dev_create(void) |
| 2709 | { | 2734 | { |
| 2710 | return 0; | 2735 | return 0; |
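The updated @secid_to_secctx documentation describes a query-then-fetch pattern: pass a NULL secdata to learn only the length, or let the hook allocate the context and release it afterwards. A hedged sketch of the second, more common form through the wrapper API, with error handling abbreviated:

	char *ctx;
	u32 ctx_len;
	int err;

	err = security_secid_to_secctx(secid, &ctx, &ctx_len);
	if (err)
		return err;

	/* ... copy ctx/ctx_len into a netlink attribute, audit record, etc. ... */

	security_release_secctx(ctx, ctx_len);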
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 82e0f26a1299..44f459612690 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
| @@ -21,74 +21,11 @@ struct kern_ipc_perm; | |||
| 21 | #ifdef CONFIG_SECURITY_SELINUX | 21 | #ifdef CONFIG_SECURITY_SELINUX |
| 22 | 22 | ||
| 23 | /** | 23 | /** |
| 24 | * selinux_string_to_sid - map a security context string to a security ID | ||
| 25 | * @str: the security context string to be mapped | ||
| 26 | * @sid: ID value returned via this. | ||
| 27 | * | ||
| 28 | * Returns 0 if successful, with the SID stored in sid. A value | ||
| 29 | * of zero for sid indicates no SID could be determined (but no error | ||
| 30 | * occurred). | ||
| 31 | */ | ||
| 32 | int selinux_string_to_sid(char *str, u32 *sid); | ||
| 33 | |||
| 34 | /** | ||
| 35 | * selinux_secmark_relabel_packet_permission - secmark permission check | ||
| 36 | * @sid: SECMARK ID value to be applied to network packet | ||
| 37 | * | ||
| 38 | * Returns 0 if the current task is allowed to set the SECMARK label of | ||
| 39 | * packets with the supplied security ID. Note that it is implicit that | ||
| 40 | * the packet is always being relabeled from the default unlabeled value, | ||
| 41 | * and that the access control decision is made in the AVC. | ||
| 42 | */ | ||
| 43 | int selinux_secmark_relabel_packet_permission(u32 sid); | ||
| 44 | |||
| 45 | /** | ||
| 46 | * selinux_secmark_refcount_inc - increments the secmark use counter | ||
| 47 | * | ||
| 48 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
| 49 | * when to apply SECMARK label access checks to network packets. This | ||
| 50 | * function incements this reference count to indicate that a new SECMARK | ||
| 51 | * target has been configured. | ||
| 52 | */ | ||
| 53 | void selinux_secmark_refcount_inc(void); | ||
| 54 | |||
| 55 | /** | ||
| 56 | * selinux_secmark_refcount_dec - decrements the secmark use counter | ||
| 57 | * | ||
| 58 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
| 59 | * when to apply SECMARK label access checks to network packets. This | ||
| 60 | * function decements this reference count to indicate that one of the | ||
| 61 | * existing SECMARK targets has been removed/flushed. | ||
| 62 | */ | ||
| 63 | void selinux_secmark_refcount_dec(void); | ||
| 64 | |||
| 65 | /** | ||
| 66 | * selinux_is_enabled - is SELinux enabled? | 24 | * selinux_is_enabled - is SELinux enabled? |
| 67 | */ | 25 | */ |
| 68 | bool selinux_is_enabled(void); | 26 | bool selinux_is_enabled(void); |
| 69 | #else | 27 | #else |
| 70 | 28 | ||
| 71 | static inline int selinux_string_to_sid(const char *str, u32 *sid) | ||
| 72 | { | ||
| 73 | *sid = 0; | ||
| 74 | return 0; | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int selinux_secmark_relabel_packet_permission(u32 sid) | ||
| 78 | { | ||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline void selinux_secmark_refcount_inc(void) | ||
| 83 | { | ||
| 84 | return; | ||
| 85 | } | ||
| 86 | |||
| 87 | static inline void selinux_secmark_refcount_dec(void) | ||
| 88 | { | ||
| 89 | return; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline bool selinux_is_enabled(void) | 29 | static inline bool selinux_is_enabled(void) |
| 93 | { | 30 | { |
| 94 | return false; | 31 | return false; |
diff --git a/include/linux/types.h b/include/linux/types.h index 01a082f56ef4..357dbc19606f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -121,7 +121,15 @@ typedef __u64 u_int64_t; | |||
| 121 | typedef __s64 int64_t; | 121 | typedef __s64 int64_t; |
| 122 | #endif | 122 | #endif |
| 123 | 123 | ||
| 124 | /* this is a special 64bit data type that is 8-byte aligned */ | 124 | /* |
| 125 | * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid | ||
| 126 | * common 32/64-bit compat problems. | ||
| 127 | * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other | ||
| 128 | * architectures) and to 8-byte boundaries on 64-bit architectures. The new | ||
| 129 | * aligned_u64 type enforces 8-byte alignment so that structs containing | ||
| 130 | * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures. | ||
| 131 | * No conversions are necessary between 32-bit user-space and a 64-bit kernel. | ||
| 132 | */ | ||
| 125 | #define aligned_u64 __u64 __attribute__((aligned(8))) | 133 | #define aligned_u64 __u64 __attribute__((aligned(8))) |
| 126 | #define aligned_be64 __be64 __attribute__((aligned(8))) | 134 | #define aligned_be64 __be64 __attribute__((aligned(8))) |
| 127 | #define aligned_le64 __le64 __attribute__((aligned(8))) | 135 | #define aligned_le64 __le64 __attribute__((aligned(8))) |
| @@ -178,6 +186,11 @@ typedef __u64 __bitwise __be64; | |||
| 178 | typedef __u16 __bitwise __sum16; | 186 | typedef __u16 __bitwise __sum16; |
| 179 | typedef __u32 __bitwise __wsum; | 187 | typedef __u32 __bitwise __wsum; |
| 180 | 188 | ||
| 189 | /* this is a special 64bit data type that is 8-byte aligned */ | ||
| 190 | #define __aligned_u64 __u64 __attribute__((aligned(8))) | ||
| 191 | #define __aligned_be64 __be64 __attribute__((aligned(8))) | ||
| 192 | #define __aligned_le64 __le64 __attribute__((aligned(8))) | ||
| 193 | |||
| 181 | #ifdef __KERNEL__ | 194 | #ifdef __KERNEL__ |
| 182 | typedef unsigned __bitwise__ gfp_t; | 195 | typedef unsigned __bitwise__ gfp_t; |
| 183 | typedef unsigned __bitwise__ fmode_t; | 196 | typedef unsigned __bitwise__ fmode_t; |
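The expanded comment explains why fixed-alignment 64-bit types matter for kernel/userspace ABIs, and __aligned_u64 is the underscore-prefixed variant now available to exported headers. A small sketch of the kind of userspace-visible structure this is meant for; the struct itself is hypothetical:

	/* Without the forced alignment, a 32-bit compiler may place 'offset' on a
	 * 4-byte boundary while a 64-bit kernel expects 8, forcing compat
	 * translation. With __aligned_u64 the layout matches on both. */
	struct example_ioctl_args {
		__u32		flags;
		__u32		pad;		/* keep padding explicit in ABI structs */
		__aligned_u64	offset;
		__aligned_u64	length;
	};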
diff --git a/include/linux/wait.h b/include/linux/wait.h index 0836ccc57121..3efc9f3f43a0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
| 614 | (wait)->private = current; \ | 614 | (wait)->private = current; \ |
| 615 | (wait)->func = autoremove_wake_function; \ | 615 | (wait)->func = autoremove_wake_function; \ |
| 616 | INIT_LIST_HEAD(&(wait)->task_list); \ | 616 | INIT_LIST_HEAD(&(wait)->task_list); \ |
| 617 | (wait)->flags = 0; \ | ||
| 617 | } while (0) | 618 | } while (0) |
| 618 | 619 | ||
| 619 | /** | 620 | /** |
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 97e07f46a0fa..aa4ebb42a565 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h | |||
| @@ -48,6 +48,7 @@ struct videobuf_dmabuf { | |||
| 48 | 48 | ||
| 49 | /* for userland buffer */ | 49 | /* for userland buffer */ |
| 50 | int offset; | 50 | int offset; |
| 51 | size_t size; | ||
| 51 | struct page **pages; | 52 | struct page **pages; |
| 52 | 53 | ||
| 53 | /* for kernel buffers */ | 54 | /* for kernel buffers */ |
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 27a902d9b3a9..30fce0128dd7 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h | |||
| @@ -161,12 +161,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l | |||
| 161 | { | 161 | { |
| 162 | struct sk_buff *skb; | 162 | struct sk_buff *skb; |
| 163 | 163 | ||
| 164 | release_sock(sk); | ||
| 164 | if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { | 165 | if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { |
| 165 | skb_reserve(skb, BT_SKB_RESERVE); | 166 | skb_reserve(skb, BT_SKB_RESERVE); |
| 166 | bt_cb(skb)->incoming = 0; | 167 | bt_cb(skb)->incoming = 0; |
| 167 | } | 168 | } |
| 169 | lock_sock(sk); | ||
| 170 | |||
| 171 | if (!skb && *err) | ||
| 172 | return NULL; | ||
| 173 | |||
| 174 | *err = sock_error(sk); | ||
| 175 | if (*err) | ||
| 176 | goto out; | ||
| 177 | |||
| 178 | if (sk->sk_shutdown) { | ||
| 179 | *err = -ECONNRESET; | ||
| 180 | goto out; | ||
| 181 | } | ||
| 168 | 182 | ||
| 169 | return skb; | 183 | return skb; |
| 184 | |||
| 185 | out: | ||
| 186 | kfree_skb(skb); | ||
| 187 | return NULL; | ||
| 170 | } | 188 | } |
| 171 | 189 | ||
| 172 | int bt_err(__u16 code); | 190 | int bt_err(__u16 code); |
| @@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, | |||
| 743 | { | 743 | { |
| 744 | struct semid_ds out; | 744 | struct semid_ds out; |
| 745 | 745 | ||
| 746 | memset(&out, 0, sizeof(out)); | ||
| 747 | |||
| 746 | ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); | 748 | ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); |
| 747 | 749 | ||
| 748 | out.sem_otime = in->sem_otime; | 750 | out.sem_otime = in->sem_otime; |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index b23c0979bbe7..51b143e2a07a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
| 1397 | if (tsk->flags & PF_THREAD_BOUND) | 1397 | if (tsk->flags & PF_THREAD_BOUND) |
| 1398 | return -EINVAL; | 1398 | return -EINVAL; |
| 1399 | 1399 | ||
| 1400 | ret = security_task_setscheduler(tsk, 0, NULL); | 1400 | ret = security_task_setscheduler(tsk); |
| 1401 | if (ret) | 1401 | if (ret) |
| 1402 | return ret; | 1402 | return ret; |
| 1403 | if (threadgroup) { | 1403 | if (threadgroup) { |
| @@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
| 1405 | 1405 | ||
| 1406 | rcu_read_lock(); | 1406 | rcu_read_lock(); |
| 1407 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | 1407 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 1408 | ret = security_task_setscheduler(c, 0, NULL); | 1408 | ret = security_task_setscheduler(c); |
| 1409 | if (ret) { | 1409 | if (ret) { |
| 1410 | rcu_read_unlock(); | 1410 | rcu_read_unlock(); |
| 1411 | return ret; | 1411 | return ret; |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 1decafbb6b1a..72206cf5c6cf 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -931,6 +931,7 @@ static inline int | |||
| 931 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | 931 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) |
| 932 | { | 932 | { |
| 933 | if (hrtimer_is_queued(timer)) { | 933 | if (hrtimer_is_queued(timer)) { |
| 934 | unsigned long state; | ||
| 934 | int reprogram; | 935 | int reprogram; |
| 935 | 936 | ||
| 936 | /* | 937 | /* |
| @@ -944,8 +945,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
| 944 | debug_deactivate(timer); | 945 | debug_deactivate(timer); |
| 945 | timer_stats_hrtimer_clear_start_info(timer); | 946 | timer_stats_hrtimer_clear_start_info(timer); |
| 946 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 947 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
| 947 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, | 948 | /* |
| 948 | reprogram); | 949 | * We must preserve the CALLBACK state flag here, |
| 950 | * otherwise we could move the timer base in | ||
| 951 | * switch_hrtimer_base. | ||
| 952 | */ | ||
| 953 | state = timer->state & HRTIMER_STATE_CALLBACK; | ||
| 954 | __remove_hrtimer(timer, base, state, reprogram); | ||
| 949 | return 1; | 955 | return 1; |
| 950 | } | 956 | } |
| 951 | return 0; | 957 | return 0; |
| @@ -1231,6 +1237,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
| 1231 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 1237 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
| 1232 | enqueue_hrtimer(timer, base); | 1238 | enqueue_hrtimer(timer, base); |
| 1233 | } | 1239 | } |
| 1240 | |||
| 1241 | WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); | ||
| 1242 | |||
| 1234 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1243 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
| 1235 | } | 1244 | } |
| 1236 | 1245 | ||
diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 6b5580c57644..01a0700e873f 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c | |||
| @@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | |||
| 365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | 365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); |
| 366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | 366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); |
| 367 | 367 | ||
| 368 | if (n) | ||
| 369 | sg_mark_end(sgl + n - 1); | ||
| 370 | return n; | 368 | return n; |
| 371 | } | 369 | } |
| 372 | 370 | ||
diff --git a/kernel/module.c b/kernel/module.c index d0b5f8db11b4..ccd641991842 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod) | |||
| 1537 | { | 1537 | { |
| 1538 | struct module *mod = _mod; | 1538 | struct module *mod = _mod; |
| 1539 | list_del(&mod->list); | 1539 | list_del(&mod->list); |
| 1540 | module_bug_cleanup(mod); | ||
| 1540 | return 0; | 1541 | return 0; |
| 1541 | } | 1542 | } |
| 1542 | 1543 | ||
| @@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod, | |||
| 2625 | if (err < 0) | 2626 | if (err < 0) |
| 2626 | goto ddebug; | 2627 | goto ddebug; |
| 2627 | 2628 | ||
| 2629 | module_bug_finalize(info.hdr, info.sechdrs, mod); | ||
| 2628 | list_add_rcu(&mod->list, &modules); | 2630 | list_add_rcu(&mod->list, &modules); |
| 2629 | mutex_unlock(&module_mutex); | 2631 | mutex_unlock(&module_mutex); |
| 2630 | 2632 | ||
| @@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod, | |||
| 2650 | mutex_lock(&module_mutex); | 2652 | mutex_lock(&module_mutex); |
| 2651 | /* Unlink carefully: kallsyms could be walking list. */ | 2653 | /* Unlink carefully: kallsyms could be walking list. */ |
| 2652 | list_del_rcu(&mod->list); | 2654 | list_del_rcu(&mod->list); |
| 2655 | module_bug_cleanup(mod); | ||
| 2656 | |||
| 2653 | ddebug: | 2657 | ddebug: |
| 2654 | if (!mod->taints) | 2658 | if (!mod->taints) |
| 2655 | dynamic_debug_remove(info.debug); | 2659 | dynamic_debug_remove(info.debug); |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index db5b56064687..b98bed3d8182 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -2202,15 +2202,13 @@ static void perf_event_for_each(struct perf_event *event, | |||
| 2202 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 2202 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
| 2203 | { | 2203 | { |
| 2204 | struct perf_event_context *ctx = event->ctx; | 2204 | struct perf_event_context *ctx = event->ctx; |
| 2205 | unsigned long size; | ||
| 2206 | int ret = 0; | 2205 | int ret = 0; |
| 2207 | u64 value; | 2206 | u64 value; |
| 2208 | 2207 | ||
| 2209 | if (!event->attr.sample_period) | 2208 | if (!event->attr.sample_period) |
| 2210 | return -EINVAL; | 2209 | return -EINVAL; |
| 2211 | 2210 | ||
| 2212 | size = copy_from_user(&value, arg, sizeof(value)); | 2211 | if (copy_from_user(&value, arg, sizeof(value))) |
| 2213 | if (size != sizeof(value)) | ||
| 2214 | return -EFAULT; | 2212 | return -EFAULT; |
| 2215 | 2213 | ||
| 2216 | if (!value) | 2214 | if (!value) |
diff --git a/kernel/sched.c b/kernel/sched.c index dc85ceb90832..df6579d9b4df 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -4645,7 +4645,7 @@ recheck: | |||
| 4645 | } | 4645 | } |
| 4646 | 4646 | ||
| 4647 | if (user) { | 4647 | if (user) { |
| 4648 | retval = security_task_setscheduler(p, policy, param); | 4648 | retval = security_task_setscheduler(p); |
| 4649 | if (retval) | 4649 | if (retval) |
| 4650 | return retval; | 4650 | return retval; |
| 4651 | } | 4651 | } |
| @@ -4887,7 +4887,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |||
| 4887 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 4887 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
| 4888 | goto out_unlock; | 4888 | goto out_unlock; |
| 4889 | 4889 | ||
| 4890 | retval = security_task_setscheduler(p, 0, NULL); | 4890 | retval = security_task_setscheduler(p); |
| 4891 | if (retval) | 4891 | if (retval) |
| 4892 | goto out_unlock; | 4892 | goto out_unlock; |
| 4893 | 4893 | ||
diff --git a/kernel/signal.c b/kernel/signal.c index bded65187780..919562c3d6b7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2215,6 +2215,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
| 2215 | #ifdef __ARCH_SI_TRAPNO | 2215 | #ifdef __ARCH_SI_TRAPNO |
| 2216 | err |= __put_user(from->si_trapno, &to->si_trapno); | 2216 | err |= __put_user(from->si_trapno, &to->si_trapno); |
| 2217 | #endif | 2217 | #endif |
| 2218 | #ifdef BUS_MCEERR_AO | ||
| 2219 | /* | ||
| 2220 | * Other callers might not initialize the si_lsb field, | ||
| 2221 | * so check explicitly for the right codes here. | ||
| 2222 | */ | ||
| 2223 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | ||
| 2224 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | ||
| 2225 | #endif | ||
| 2218 | break; | 2226 | break; |
| 2219 | case __SI_CHLD: | 2227 | case __SI_CHLD: |
| 2220 | err |= __put_user(from->si_pid, &to->si_pid); | 2228 | err |= __put_user(from->si_pid, &to->si_pid); |
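The copy_siginfo_to_user() hunk forwards si_addr_lsb only for the BUS_MCEERR_AR and BUS_MCEERR_AO codes, since other SIGBUS senders never initialize that field. A rough userspace sketch of consuming the field; it assumes a glibc recent enough to expose si_addr_lsb and the BUS_MCEERR_* constants, which is outside the scope of this patch:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <unistd.h>

    static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
    {
            (void)sig; (void)ctx;
            if (info->si_code == BUS_MCEERR_AR || info->si_code == BUS_MCEERR_AO) {
                    /* si_addr_lsb is the log2 of the affected range:
                     * PAGE_SHIFT for a base page, larger for huge pages. */
                    unsigned long len = 1UL << info->si_addr_lsb;
                    (void)len;
                    write(STDERR_FILENO, "hardware memory error\n", 22);
            }
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = sigbus_handler,
                                    .sa_flags = SA_SIGINFO };
            sigaction(SIGBUS, &sa, NULL);
            pause();
            return 0;
    }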
diff --git a/kernel/smp.c b/kernel/smp.c index 75c970c715d3..ed6aacfcb7ef 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -365,9 +365,10 @@ call: | |||
| 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); |
| 366 | 366 | ||
| 367 | /** | 367 | /** |
| 368 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on a specific CPU |
| 369 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
| 370 | * @data: Pre-allocated and setup data structure | 370 | * @data: Pre-allocated and setup data structure |
| 371 | * @wait: If true, wait until function has completed on specified CPU. | ||
| 371 | * | 372 | * |
| 372 | * Like smp_call_function_single(), but allow caller to pass in a | 373 | * Like smp_call_function_single(), but allow caller to pass in a |
| 373 | * pre-allocated data structure. Useful for embedding @data inside | 374 | * pre-allocated data structure. Useful for embedding @data inside |
| @@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any); | |||
| 376 | void __smp_call_function_single(int cpu, struct call_single_data *data, | 377 | void __smp_call_function_single(int cpu, struct call_single_data *data, |
| 377 | int wait) | 378 | int wait) |
| 378 | { | 379 | { |
| 379 | csd_lock(data); | 380 | unsigned int this_cpu; |
| 381 | unsigned long flags; | ||
| 380 | 382 | ||
| 383 | this_cpu = get_cpu(); | ||
| 381 | /* | 384 | /* |
| 382 | * Can deadlock when called with interrupts disabled. | 385 | * Can deadlock when called with interrupts disabled. |
| 383 | * We allow cpu's that are not yet online though, as no one else can | 386 | * We allow cpu's that are not yet online though, as no one else can |
| @@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
| 387 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | 390 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() |
| 388 | && !oops_in_progress); | 391 | && !oops_in_progress); |
| 389 | 392 | ||
| 390 | generic_exec_single(cpu, data, wait); | 393 | if (cpu == this_cpu) { |
| 394 | local_irq_save(flags); | ||
| 395 | data->func(data->info); | ||
| 396 | local_irq_restore(flags); | ||
| 397 | } else { | ||
| 398 | csd_lock(data); | ||
| 399 | generic_exec_single(cpu, data, wait); | ||
| 400 | } | ||
| 401 | put_cpu(); | ||
| 391 | } | 402 | } |
| 392 | 403 | ||
| 393 | /** | 404 | /** |
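The __smp_call_function_single() change runs the callback directly, with interrupts disabled, when the target CPU turns out to be the current one, and only takes the csd lock for the genuine cross-CPU case. A minimal sketch of the same pattern in a hypothetical wrapper (run_on_cpu is an invented name):

    #include <linux/irqflags.h>
    #include <linux/smp.h>

    static void run_on_cpu(int cpu, void (*func)(void *), void *info)
    {
            unsigned long flags;
            int this_cpu = get_cpu();       /* disable preemption, get our CPU */

            if (cpu == this_cpu) {
                    local_irq_save(flags);  /* mimic IPI context: IRQs off */
                    func(info);
                    local_irq_restore(flags);
            } else {
                    smp_call_function_single(cpu, func, info, 1 /* wait */);
            }
            put_cpu();
    }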
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f88552c6d227..3a45c224770f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -2485,7 +2485,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int | |||
| 2485 | kbuf[left] = 0; | 2485 | kbuf[left] = 0; |
| 2486 | } | 2486 | } |
| 2487 | 2487 | ||
| 2488 | for (; left && vleft--; i++, min++, max++, first=0) { | 2488 | for (; left && vleft--; i++, first = 0) { |
| 2489 | unsigned long val; | 2489 | unsigned long val; |
| 2490 | 2490 | ||
| 2491 | if (write) { | 2491 | if (write) { |
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index 04cdcf72c827..10b90d8a03c4 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
| @@ -143,15 +143,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
| 143 | if (!table->maxlen) | 143 | if (!table->maxlen) |
| 144 | set_fail(&fail, table, "No maxlen"); | 144 | set_fail(&fail, table, "No maxlen"); |
| 145 | } | 145 | } |
| 146 | if ((table->proc_handler == proc_doulongvec_minmax) || | ||
| 147 | (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { | ||
| 148 | if (table->maxlen > sizeof (unsigned long)) { | ||
| 149 | if (!table->extra1) | ||
| 150 | set_fail(&fail, table, "No min"); | ||
| 151 | if (!table->extra2) | ||
| 152 | set_fail(&fail, table, "No max"); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | #ifdef CONFIG_PROC_SYSCTL | 146 | #ifdef CONFIG_PROC_SYSCTL |
| 156 | if (table->procname && !table->proc_handler) | 147 | if (table->procname && !table->proc_handler) |
| 157 | set_fail(&fail, table, "No proc_handler"); | 148 | set_fail(&fail, table, "No proc_handler"); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 492197e2f86c..bca96377fd4e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta) | |||
| 405 | #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) | 405 | #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) |
| 406 | 406 | ||
| 407 | /* Max number of timestamps that can fit on a page */ | 407 | /* Max number of timestamps that can fit on a page */ |
| 408 | #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP) | 408 | #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND) |
| 409 | 409 | ||
| 410 | int ring_buffer_print_page_header(struct trace_seq *s) | 410 | int ring_buffer_print_page_header(struct trace_seq *s) |
| 411 | { | 411 | { |
| @@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) | |||
| 72 | return NULL; | 72 | return NULL; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | 75 | void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, |
| 76 | struct module *mod) | 76 | struct module *mod) |
| 77 | { | 77 | { |
| 78 | char *secstrings; | 78 | char *secstrings; |
| 79 | unsigned int i; | 79 | unsigned int i; |
| @@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
| 97 | * could potentially lead to deadlock and thus be counter-productive. | 97 | * could potentially lead to deadlock and thus be counter-productive. |
| 98 | */ | 98 | */ |
| 99 | list_add(&mod->bug_list, &module_bug_list); | 99 | list_add(&mod->bug_list, &module_bug_list); |
| 100 | |||
| 101 | return 0; | ||
| 102 | } | 100 | } |
| 103 | 101 | ||
| 104 | void module_bug_cleanup(struct module *mod) | 102 | void module_bug_cleanup(struct module *mod) |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 4b5cb794c38b..a7616fa3162e 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
| @@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv, | |||
| 70 | * element comparison is needed, so the client's cmp() | 70 | * element comparison is needed, so the client's cmp() |
| 71 | * routine can invoke cond_resched() periodically. | 71 | * routine can invoke cond_resched() periodically. |
| 72 | */ | 72 | */ |
| 73 | (*cmp)(priv, tail, tail); | 73 | (*cmp)(priv, tail->next, tail->next); |
| 74 | 74 | ||
| 75 | tail->next->prev = tail; | 75 | tail->next->prev = tail; |
| 76 | tail = tail->next; | 76 | tail = tail->next; |
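The list_sort fix hands the dummy cmp() call a valid element (tail->next) instead of tail, whose links are being rewritten at that point; the call exists only so the client's cmp() can run cond_resched(). A sketch of a client comparison function written with that contract in mind (struct rec and rec_cmp are illustrative names):

    #include <linux/list.h>
    #include <linux/list_sort.h>
    #include <linux/sched.h>

    struct rec {
            struct list_head list;
            int key;
    };

    /* cmp() may be called with both arguments pointing at the same element
     * purely as a scheduling point; handle that case before dereferencing. */
    static int rec_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            struct rec *ra, *rb;

            cond_resched();
            if (a == b)
                    return 0;
            ra = list_entry(a, struct rec, list);
            rb = list_entry(b, struct rec, list);
            return ra->key - rb->key;
    }
    /* usage: list_sort(NULL, &my_list, rec_cmp); */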
| @@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
| 712 | if (!ptep) | 712 | if (!ptep) |
| 713 | goto out; | 713 | goto out; |
| 714 | 714 | ||
| 715 | if (pte_write(*ptep)) { | 715 | if (pte_write(*ptep) || pte_dirty(*ptep)) { |
| 716 | pte_t entry; | 716 | pte_t entry; |
| 717 | 717 | ||
| 718 | swapped = PageSwapCache(page); | 718 | swapped = PageSwapCache(page); |
| @@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, | |||
| 735 | set_pte_at(mm, addr, ptep, entry); | 735 | set_pte_at(mm, addr, ptep, entry); |
| 736 | goto out_unlock; | 736 | goto out_unlock; |
| 737 | } | 737 | } |
| 738 | entry = pte_wrprotect(entry); | 738 | if (pte_dirty(entry)) |
| 739 | set_page_dirty(page); | ||
| 740 | entry = pte_mkclean(pte_wrprotect(entry)); | ||
| 739 | set_pte_at_notify(mm, addr, ptep, entry); | 741 | set_pte_at_notify(mm, addr, ptep, entry); |
| 740 | } | 742 | } |
| 741 | *orig_pte = *ptep; | 743 | *orig_pte = *ptep; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3eed583895a6..9be3cf8a5da4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -3587,9 +3587,13 @@ unlock: | |||
| 3587 | 3587 | ||
| 3588 | static void mem_cgroup_threshold(struct mem_cgroup *memcg) | 3588 | static void mem_cgroup_threshold(struct mem_cgroup *memcg) |
| 3589 | { | 3589 | { |
| 3590 | __mem_cgroup_threshold(memcg, false); | 3590 | while (memcg) { |
| 3591 | if (do_swap_account) | 3591 | __mem_cgroup_threshold(memcg, false); |
| 3592 | __mem_cgroup_threshold(memcg, true); | 3592 | if (do_swap_account) |
| 3593 | __mem_cgroup_threshold(memcg, true); | ||
| 3594 | |||
| 3595 | memcg = parent_mem_cgroup(memcg); | ||
| 3596 | } | ||
| 3593 | } | 3597 | } |
| 3594 | 3598 | ||
| 3595 | static int compare_thresholds(const void *a, const void *b) | 3599 | static int compare_thresholds(const void *a, const void *b) |
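mem_cgroup_threshold() now walks from the charged memcg up through parent_mem_cgroup(), so thresholds registered on ancestor cgroups fire as well. The same walk-to-root shape as a generic, self-contained sketch (node and propagate_up are invented names, not the memcg types):

    struct node {
            struct node *parent;
    };

    static void check_thresholds(struct node *n)
    {
            /* per-level work would go here */
    }

    /* Visit the charged node and every ancestor up to the root. */
    static void propagate_up(struct node *n)
    {
            while (n) {
                    check_thresholds(n);
                    n = n->parent;
            }
    }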
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9c26eeca1342..757f6b0accfe 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter); | |||
| 183 | * signal. | 183 | * signal. |
| 184 | */ | 184 | */ |
| 185 | static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, | 185 | static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, |
| 186 | unsigned long pfn) | 186 | unsigned long pfn, struct page *page) |
| 187 | { | 187 | { |
| 188 | struct siginfo si; | 188 | struct siginfo si; |
| 189 | int ret; | 189 | int ret; |
| @@ -198,7 +198,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, | |||
| 198 | #ifdef __ARCH_SI_TRAPNO | 198 | #ifdef __ARCH_SI_TRAPNO |
| 199 | si.si_trapno = trapno; | 199 | si.si_trapno = trapno; |
| 200 | #endif | 200 | #endif |
| 201 | si.si_addr_lsb = PAGE_SHIFT; | 201 | si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; |
| 202 | /* | 202 | /* |
| 203 | * Don't use force here, it's convenient if the signal | 203 | * Don't use force here, it's convenient if the signal |
| 204 | * can be temporarily blocked. | 204 | * can be temporarily blocked. |
| @@ -235,7 +235,7 @@ void shake_page(struct page *p, int access) | |||
| 235 | int nr; | 235 | int nr; |
| 236 | do { | 236 | do { |
| 237 | nr = shrink_slab(1000, GFP_KERNEL, 1000); | 237 | nr = shrink_slab(1000, GFP_KERNEL, 1000); |
| 238 | if (page_count(p) == 0) | 238 | if (page_count(p) == 1) |
| 239 | break; | 239 | break; |
| 240 | } while (nr > 10); | 240 | } while (nr > 10); |
| 241 | } | 241 | } |
| @@ -327,7 +327,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, | |||
| 327 | * wrong earlier. | 327 | * wrong earlier. |
| 328 | */ | 328 | */ |
| 329 | static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, | 329 | static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, |
| 330 | int fail, unsigned long pfn) | 330 | int fail, struct page *page, unsigned long pfn) |
| 331 | { | 331 | { |
| 332 | struct to_kill *tk, *next; | 332 | struct to_kill *tk, *next; |
| 333 | 333 | ||
| @@ -352,7 +352,7 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, | |||
| 352 | * process anyways. | 352 | * process anyways. |
| 353 | */ | 353 | */ |
| 354 | else if (kill_proc_ao(tk->tsk, tk->addr, trapno, | 354 | else if (kill_proc_ao(tk->tsk, tk->addr, trapno, |
| 355 | pfn) < 0) | 355 | pfn, page) < 0) |
| 356 | printk(KERN_ERR | 356 | printk(KERN_ERR |
| 357 | "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", | 357 | "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", |
| 358 | pfn, tk->tsk->comm, tk->tsk->pid); | 358 | pfn, tk->tsk->comm, tk->tsk->pid); |
| @@ -928,7 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 928 | * any accesses to the poisoned memory. | 928 | * any accesses to the poisoned memory. |
| 929 | */ | 929 | */ |
| 930 | kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, | 930 | kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, |
| 931 | ret != SWAP_SUCCESS, pfn); | 931 | ret != SWAP_SUCCESS, p, pfn); |
| 932 | 932 | ||
| 933 | return ret; | 933 | return ret; |
| 934 | } | 934 | } |
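kill_proc_ao() now derives si_addr_lsb from the compound page order plus PAGE_SHIFT, so a poisoned huge page reports its real granularity rather than a single base page. A standalone illustration of the arithmetic, assuming 4K base pages and a 2M huge page (the values are examples only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_shift = 12;           /* 4K base page */
            unsigned int order = 9;                 /* 2M compound page */
            unsigned int lsb = order + page_shift;  /* si_addr_lsb == 21 */

            printf("affected range: %lu bytes\n", 1UL << lsb); /* 2097152 */
            return 0;
    }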
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a8cfa9cc6e86..f12ad1836abe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -5182,9 +5182,9 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
| 5182 | if (!table) | 5182 | if (!table) |
| 5183 | panic("Failed to allocate %s hash table\n", tablename); | 5183 | panic("Failed to allocate %s hash table\n", tablename); |
| 5184 | 5184 | ||
| 5185 | printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", | 5185 | printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", |
| 5186 | tablename, | 5186 | tablename, |
| 5187 | (1U << log2qty), | 5187 | (1UL << log2qty), |
| 5188 | ilog2(size) - PAGE_SHIFT, | 5188 | ilog2(size) - PAGE_SHIFT, |
| 5189 | size); | 5189 | size); |
| 5190 | 5190 | ||
| @@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
| 381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | 381 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
| 382 | { | 382 | { |
| 383 | if (PageAnon(page)) { | 383 | if (PageAnon(page)) { |
| 384 | if (vma->anon_vma->root != page_anon_vma(page)->root) | 384 | struct anon_vma *page__anon_vma = page_anon_vma(page); |
| 385 | /* | ||
| 386 | * Note: swapoff's unuse_vma() is more efficient with this | ||
| 387 | * check, and needs it to match anon_vma when KSM is active. | ||
| 388 | */ | ||
| 389 | if (!vma->anon_vma || !page__anon_vma || | ||
| 390 | vma->anon_vma->root != page__anon_vma->root) | ||
| 385 | return -EFAULT; | 391 | return -EFAULT; |
| 386 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { | 392 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { |
| 387 | if (!vma->vm_file || | 393 | if (!vma->vm_file || |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 01ddb0472f86..0eb96f7e44be 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
| @@ -24,8 +24,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
| 24 | 24 | ||
| 25 | if (vlan_dev) | 25 | if (vlan_dev) |
| 26 | skb->dev = vlan_dev; | 26 | skb->dev = vlan_dev; |
| 27 | else if (vlan_id) | 27 | else if (vlan_id) { |
| 28 | goto drop; | 28 | if (!(skb->dev->flags & IFF_PROMISC)) |
| 29 | goto drop; | ||
| 30 | skb->pkt_type = PACKET_OTHERHOST; | ||
| 31 | } | ||
| 29 | 32 | ||
| 30 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | 33 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); |
| 31 | 34 | ||
| @@ -102,8 +105,11 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
| 102 | 105 | ||
| 103 | if (vlan_dev) | 106 | if (vlan_dev) |
| 104 | skb->dev = vlan_dev; | 107 | skb->dev = vlan_dev; |
| 105 | else if (vlan_id) | 108 | else if (vlan_id) { |
| 106 | goto drop; | 109 | if (!(skb->dev->flags & IFF_PROMISC)) |
| 110 | goto drop; | ||
| 111 | skb->pkt_type = PACKET_OTHERHOST; | ||
| 112 | } | ||
| 107 | 113 | ||
| 108 | for (p = napi->gro_list; p; p = p->next) { | 114 | for (p = napi->gro_list; p; p = p->next) { |
| 109 | NAPI_GRO_CB(p)->same_flow = | 115 | NAPI_GRO_CB(p)->same_flow = |
diff --git a/net/Kconfig b/net/Kconfig index e926884c1675..55fd82e9ffd9 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
| @@ -293,6 +293,7 @@ source "net/wimax/Kconfig" | |||
| 293 | source "net/rfkill/Kconfig" | 293 | source "net/rfkill/Kconfig" |
| 294 | source "net/9p/Kconfig" | 294 | source "net/9p/Kconfig" |
| 295 | source "net/caif/Kconfig" | 295 | source "net/caif/Kconfig" |
| 296 | source "net/ceph/Kconfig" | ||
| 296 | 297 | ||
| 297 | 298 | ||
| 298 | endif # if NET | 299 | endif # if NET |
diff --git a/net/Makefile b/net/Makefile index ea60fbce9b1b..6b7bfd7f1416 100644 --- a/net/Makefile +++ b/net/Makefile | |||
| @@ -68,3 +68,4 @@ obj-$(CONFIG_SYSCTL) += sysctl_net.o | |||
| 68 | endif | 68 | endif |
| 69 | obj-$(CONFIG_WIMAX) += wimax/ | 69 | obj-$(CONFIG_WIMAX) += wimax/ |
| 70 | obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ | 70 | obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ |
| 71 | obj-$(CONFIG_CEPH_LIB) += ceph/ | ||
diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 622b471e14e0..74bcc662c3dd 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c | |||
| @@ -778,7 +778,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 778 | eg->packets_rcvd++; | 778 | eg->packets_rcvd++; |
| 779 | mpc->eg_ops->put(eg); | 779 | mpc->eg_ops->put(eg); |
| 780 | 780 | ||
| 781 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); | 781 | memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data)); |
| 782 | netif_rx(new_skb); | 782 | netif_rx(new_skb); |
| 783 | } | 783 | } |
| 784 | 784 | ||
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index fadf26b4ed7c..0b54b7dd8401 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
| @@ -1441,33 +1441,23 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) | |||
| 1441 | 1441 | ||
| 1442 | static void l2cap_streaming_send(struct sock *sk) | 1442 | static void l2cap_streaming_send(struct sock *sk) |
| 1443 | { | 1443 | { |
| 1444 | struct sk_buff *skb, *tx_skb; | 1444 | struct sk_buff *skb; |
| 1445 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 1445 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
| 1446 | u16 control, fcs; | 1446 | u16 control, fcs; |
| 1447 | 1447 | ||
| 1448 | while ((skb = sk->sk_send_head)) { | 1448 | while ((skb = skb_dequeue(TX_QUEUE(sk)))) { |
| 1449 | tx_skb = skb_clone(skb, GFP_ATOMIC); | 1449 | control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); |
| 1450 | |||
| 1451 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | ||
| 1452 | control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; | 1450 | control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; |
| 1453 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | 1451 | put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); |
| 1454 | 1452 | ||
| 1455 | if (pi->fcs == L2CAP_FCS_CRC16) { | 1453 | if (pi->fcs == L2CAP_FCS_CRC16) { |
| 1456 | fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); | 1454 | fcs = crc16(0, (u8 *)skb->data, skb->len - 2); |
| 1457 | put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); | 1455 | put_unaligned_le16(fcs, skb->data + skb->len - 2); |
| 1458 | } | 1456 | } |
| 1459 | 1457 | ||
| 1460 | l2cap_do_send(sk, tx_skb); | 1458 | l2cap_do_send(sk, skb); |
| 1461 | 1459 | ||
| 1462 | pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; | 1460 | pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; |
| 1463 | |||
| 1464 | if (skb_queue_is_last(TX_QUEUE(sk), skb)) | ||
| 1465 | sk->sk_send_head = NULL; | ||
| 1466 | else | ||
| 1467 | sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); | ||
| 1468 | |||
| 1469 | skb = skb_dequeue(TX_QUEUE(sk)); | ||
| 1470 | kfree_skb(skb); | ||
| 1471 | } | 1461 | } |
| 1472 | } | 1462 | } |
| 1473 | 1463 | ||
| @@ -1960,6 +1950,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
| 1960 | 1950 | ||
| 1961 | switch (optname) { | 1951 | switch (optname) { |
| 1962 | case L2CAP_OPTIONS: | 1952 | case L2CAP_OPTIONS: |
| 1953 | if (sk->sk_state == BT_CONNECTED) { | ||
| 1954 | err = -EINVAL; | ||
| 1955 | break; | ||
| 1956 | } | ||
| 1957 | |||
| 1963 | opts.imtu = l2cap_pi(sk)->imtu; | 1958 | opts.imtu = l2cap_pi(sk)->imtu; |
| 1964 | opts.omtu = l2cap_pi(sk)->omtu; | 1959 | opts.omtu = l2cap_pi(sk)->omtu; |
| 1965 | opts.flush_to = l2cap_pi(sk)->flush_to; | 1960 | opts.flush_to = l2cap_pi(sk)->flush_to; |
| @@ -2771,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, | |||
| 2771 | case L2CAP_CONF_MTU: | 2766 | case L2CAP_CONF_MTU: |
| 2772 | if (val < L2CAP_DEFAULT_MIN_MTU) { | 2767 | if (val < L2CAP_DEFAULT_MIN_MTU) { |
| 2773 | *result = L2CAP_CONF_UNACCEPT; | 2768 | *result = L2CAP_CONF_UNACCEPT; |
| 2774 | pi->omtu = L2CAP_DEFAULT_MIN_MTU; | 2769 | pi->imtu = L2CAP_DEFAULT_MIN_MTU; |
| 2775 | } else | 2770 | } else |
| 2776 | pi->omtu = val; | 2771 | pi->imtu = val; |
| 2777 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); | 2772 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); |
| 2778 | break; | 2773 | break; |
| 2779 | 2774 | ||
| 2780 | case L2CAP_CONF_FLUSH_TO: | 2775 | case L2CAP_CONF_FLUSH_TO: |
| @@ -3071,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
| 3071 | return 0; | 3066 | return 0; |
| 3072 | } | 3067 | } |
| 3073 | 3068 | ||
| 3069 | static inline void set_default_fcs(struct l2cap_pinfo *pi) | ||
| 3070 | { | ||
| 3071 | /* FCS is enabled only in ERTM or streaming mode, if one or both | ||
| 3072 | * sides request it. | ||
| 3073 | */ | ||
| 3074 | if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING) | ||
| 3075 | pi->fcs = L2CAP_FCS_NONE; | ||
| 3076 | else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV)) | ||
| 3077 | pi->fcs = L2CAP_FCS_CRC16; | ||
| 3078 | } | ||
| 3079 | |||
| 3074 | static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) | 3080 | static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) |
| 3075 | { | 3081 | { |
| 3076 | struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; | 3082 | struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; |
| @@ -3088,14 +3094,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
| 3088 | if (!sk) | 3094 | if (!sk) |
| 3089 | return -ENOENT; | 3095 | return -ENOENT; |
| 3090 | 3096 | ||
| 3091 | if (sk->sk_state != BT_CONFIG) { | 3097 | if (sk->sk_state == BT_DISCONN) |
| 3092 | struct l2cap_cmd_rej rej; | ||
| 3093 | |||
| 3094 | rej.reason = cpu_to_le16(0x0002); | ||
| 3095 | l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, | ||
| 3096 | sizeof(rej), &rej); | ||
| 3097 | goto unlock; | 3098 | goto unlock; |
| 3098 | } | ||
| 3099 | 3099 | ||
| 3100 | /* Reject if config buffer is too small. */ | 3100 | /* Reject if config buffer is too small. */ |
| 3101 | len = cmd_len - sizeof(*req); | 3101 | len = cmd_len - sizeof(*req); |
| @@ -3135,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
| 3135 | goto unlock; | 3135 | goto unlock; |
| 3136 | 3136 | ||
| 3137 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { | 3137 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { |
| 3138 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || | 3138 | set_default_fcs(l2cap_pi(sk)); |
| 3139 | l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) | ||
| 3140 | l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; | ||
| 3141 | 3139 | ||
| 3142 | sk->sk_state = BT_CONNECTED; | 3140 | sk->sk_state = BT_CONNECTED; |
| 3143 | 3141 | ||
| @@ -3225,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
| 3225 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; | 3223 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; |
| 3226 | 3224 | ||
| 3227 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { | 3225 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { |
| 3228 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || | 3226 | set_default_fcs(l2cap_pi(sk)); |
| 3229 | l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) | ||
| 3230 | l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; | ||
| 3231 | 3227 | ||
| 3232 | sk->sk_state = BT_CONNECTED; | 3228 | sk->sk_state = BT_CONNECTED; |
| 3233 | l2cap_pi(sk)->next_tx_seq = 0; | 3229 | l2cap_pi(sk)->next_tx_seq = 0; |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 44a623275951..194b3a04cfd3 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -82,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) | |||
| 82 | static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) | 82 | static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) |
| 83 | { | 83 | { |
| 84 | struct sock *sk = d->owner, *parent; | 84 | struct sock *sk = d->owner, *parent; |
| 85 | unsigned long flags; | ||
| 86 | |||
| 85 | if (!sk) | 87 | if (!sk) |
| 86 | return; | 88 | return; |
| 87 | 89 | ||
| 88 | BT_DBG("dlc %p state %ld err %d", d, d->state, err); | 90 | BT_DBG("dlc %p state %ld err %d", d, d->state, err); |
| 89 | 91 | ||
| 92 | local_irq_save(flags); | ||
| 90 | bh_lock_sock(sk); | 93 | bh_lock_sock(sk); |
| 91 | 94 | ||
| 92 | if (err) | 95 | if (err) |
| @@ -108,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) | |||
| 108 | } | 111 | } |
| 109 | 112 | ||
| 110 | bh_unlock_sock(sk); | 113 | bh_unlock_sock(sk); |
| 114 | local_irq_restore(flags); | ||
| 111 | 115 | ||
| 112 | if (parent && sock_flag(sk, SOCK_ZAPPED)) { | 116 | if (parent && sock_flag(sk, SOCK_ZAPPED)) { |
| 113 | /* We have to drop DLC lock here, otherwise | 117 | /* We have to drop DLC lock here, otherwise |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 8ce904786116..4bf28f25f368 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
| @@ -827,6 +827,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 827 | long timeo; | 827 | long timeo; |
| 828 | int err; | 828 | int err; |
| 829 | int ifindex, headroom, tailroom; | 829 | int ifindex, headroom, tailroom; |
| 830 | unsigned int mtu; | ||
| 830 | struct net_device *dev; | 831 | struct net_device *dev; |
| 831 | 832 | ||
| 832 | lock_sock(sk); | 833 | lock_sock(sk); |
| @@ -896,15 +897,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 896 | cf_sk->sk.sk_state = CAIF_DISCONNECTED; | 897 | cf_sk->sk.sk_state = CAIF_DISCONNECTED; |
| 897 | goto out; | 898 | goto out; |
| 898 | } | 899 | } |
| 899 | dev = dev_get_by_index(sock_net(sk), ifindex); | 900 | |
| 901 | err = -ENODEV; | ||
| 902 | rcu_read_lock(); | ||
| 903 | dev = dev_get_by_index_rcu(sock_net(sk), ifindex); | ||
| 904 | if (!dev) { | ||
| 905 | rcu_read_unlock(); | ||
| 906 | goto out; | ||
| 907 | } | ||
| 900 | cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); | 908 | cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); |
| 909 | mtu = dev->mtu; | ||
| 910 | rcu_read_unlock(); | ||
| 911 | |||
| 901 | cf_sk->tailroom = tailroom; | 912 | cf_sk->tailroom = tailroom; |
| 902 | cf_sk->maxframe = dev->mtu - (headroom + tailroom); | 913 | cf_sk->maxframe = mtu - (headroom + tailroom); |
| 903 | dev_put(dev); | ||
| 904 | if (cf_sk->maxframe < 1) { | 914 | if (cf_sk->maxframe < 1) { |
| 905 | pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n", | 915 | pr_warning("CAIF: %s(): CAIF Interface MTU too small (%u)\n", |
| 906 | __func__, dev->mtu); | 916 | __func__, mtu); |
| 907 | err = -ENODEV; | ||
| 908 | goto out; | 917 | goto out; |
| 909 | } | 918 | } |
| 910 | 919 | ||
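caif_connect() now looks the device up under rcu_read_lock() with dev_get_by_index_rcu() and copies the MTU before unlocking, instead of taking and dropping a reference with dev_get_by_index()/dev_put(). A minimal sketch of that lookup pattern in a hypothetical helper (get_dev_mtu is an invented name):

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    /* Fetch a device's MTU without taking a reference on the device. */
    static int get_dev_mtu(struct net *net, int ifindex, unsigned int *mtu)
    {
            struct net_device *dev;

            rcu_read_lock();
            dev = dev_get_by_index_rcu(net, ifindex);
            if (!dev) {
                    rcu_read_unlock();
                    return -ENODEV;
            }
            *mtu = dev->mtu;        /* copy what we need while still locked */
            rcu_read_unlock();      /* dev must not be touched past this point */
            return 0;
    }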
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig new file mode 100644 index 000000000000..ad424049b0cf --- /dev/null +++ b/net/ceph/Kconfig | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | config CEPH_LIB | ||
| 2 | tristate "Ceph core library (EXPERIMENTAL)" | ||
| 3 | depends on INET && EXPERIMENTAL | ||
| 4 | select LIBCRC32C | ||
| 5 | select CRYPTO_AES | ||
| 6 | select CRYPTO | ||
| 7 | default n | ||
| 8 | help | ||
| 9 | Choose Y or M here to include cephlib, which provides the | ||
| 10 | common functionality to both the Ceph filesystem and | ||
| 11 | to the rados block device (rbd). | ||
| 12 | |||
| 13 | More information at http://ceph.newdream.net/. | ||
| 14 | |||
| 15 | If unsure, say N. | ||
| 16 | |||
| 17 | config CEPH_LIB_PRETTYDEBUG | ||
| 18 | bool "Include file:line in ceph debug output" | ||
| 19 | depends on CEPH_LIB | ||
| 20 | default n | ||
| 21 | help | ||
| 22 | If you say Y here, debug output will include a filename and | ||
| 23 | line to aid debugging. This increases kernel size and slows | ||
| 24 | execution slightly when debug call sites are enabled (e.g., | ||
| 25 | via CONFIG_DYNAMIC_DEBUG). | ||
| 26 | |||
| 27 | If unsure, say N. | ||
| 28 | |||
diff --git a/net/ceph/Makefile b/net/ceph/Makefile new file mode 100644 index 000000000000..aab1cabb8035 --- /dev/null +++ b/net/ceph/Makefile | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | # | ||
| 2 | # Makefile for CEPH filesystem. | ||
| 3 | # | ||
| 4 | |||
| 5 | ifneq ($(KERNELRELEASE),) | ||
| 6 | |||
| 7 | obj-$(CONFIG_CEPH_LIB) += libceph.o | ||
| 8 | |||
| 9 | libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ | ||
| 10 | mon_client.o \ | ||
| 11 | osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ | ||
| 12 | debugfs.o \ | ||
| 13 | auth.o auth_none.o \ | ||
| 14 | crypto.o armor.o \ | ||
| 15 | auth_x.o \ | ||
| 16 | ceph_fs.o ceph_strings.o ceph_hash.o \ | ||
| 17 | pagevec.o | ||
| 18 | |||
| 19 | else | ||
| 20 | # Otherwise we were called directly from the command | ||
| 21 | # line; invoke the kernel build system. | ||
| 22 | |||
| 23 | KERNELDIR ?= /lib/modules/$(shell uname -r)/build | ||
| 24 | PWD := $(shell pwd) | ||
| 25 | |||
| 26 | default: all | ||
| 27 | |||
| 28 | all: | ||
| 29 | $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules | ||
| 30 | |||
| 31 | modules_install: | ||
| 32 | $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install | ||
| 33 | |||
| 34 | clean: | ||
| 35 | $(MAKE) -C $(KERNELDIR) M=$(PWD) clean | ||
| 36 | |||
| 37 | endif | ||
diff --git a/fs/ceph/armor.c b/net/ceph/armor.c index eb2a666b0be7..eb2a666b0be7 100644 --- a/fs/ceph/armor.c +++ b/net/ceph/armor.c | |||
diff --git a/fs/ceph/auth.c b/net/ceph/auth.c index 6d2e30600627..549c1f43e1d5 100644 --- a/fs/ceph/auth.c +++ b/net/ceph/auth.c | |||
| @@ -1,16 +1,16 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 6 | 6 | ||
| 7 | #include "types.h" | 7 | #include <linux/ceph/types.h> |
| 8 | #include <linux/ceph/decode.h> | ||
| 9 | #include <linux/ceph/libceph.h> | ||
| 10 | #include <linux/ceph/messenger.h> | ||
| 8 | #include "auth_none.h" | 11 | #include "auth_none.h" |
| 9 | #include "auth_x.h" | 12 | #include "auth_x.h" |
| 10 | #include "decode.h" | ||
| 11 | #include "super.h" | ||
| 12 | 13 | ||
| 13 | #include "messenger.h" | ||
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * get protocol handler | 16 | * get protocol handler |
diff --git a/fs/ceph/auth_none.c b/net/ceph/auth_none.c index ad1dc21286c7..214c2bb43d62 100644 --- a/fs/ceph/auth_none.c +++ b/net/ceph/auth_none.c | |||
| @@ -1,14 +1,15 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 6 | #include <linux/random.h> | 6 | #include <linux/random.h> |
| 7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
| 8 | 8 | ||
| 9 | #include <linux/ceph/decode.h> | ||
| 10 | #include <linux/ceph/auth.h> | ||
| 11 | |||
| 9 | #include "auth_none.h" | 12 | #include "auth_none.h" |
| 10 | #include "auth.h" | ||
| 11 | #include "decode.h" | ||
| 12 | 13 | ||
| 13 | static void reset(struct ceph_auth_client *ac) | 14 | static void reset(struct ceph_auth_client *ac) |
| 14 | { | 15 | { |
diff --git a/fs/ceph/auth_none.h b/net/ceph/auth_none.h index 8164df1a08be..ed7d088b1bc9 100644 --- a/fs/ceph/auth_none.h +++ b/net/ceph/auth_none.h | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | #define _FS_CEPH_AUTH_NONE_H | 2 | #define _FS_CEPH_AUTH_NONE_H |
| 3 | 3 | ||
| 4 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
| 5 | 5 | #include <linux/ceph/auth.h> | |
| 6 | #include "auth.h" | ||
| 7 | 6 | ||
| 8 | /* | 7 | /* |
| 9 | * null security mode. | 8 | * null security mode. |
diff --git a/fs/ceph/auth_x.c b/net/ceph/auth_x.c index a2d002cbdec2..7fd5dfcf6e18 100644 --- a/fs/ceph/auth_x.c +++ b/net/ceph/auth_x.c | |||
| @@ -1,16 +1,17 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 6 | #include <linux/random.h> | 6 | #include <linux/random.h> |
| 7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
| 8 | 8 | ||
| 9 | #include <linux/ceph/decode.h> | ||
| 10 | #include <linux/ceph/auth.h> | ||
| 11 | |||
| 12 | #include "crypto.h" | ||
| 9 | #include "auth_x.h" | 13 | #include "auth_x.h" |
| 10 | #include "auth_x_protocol.h" | 14 | #include "auth_x_protocol.h" |
| 11 | #include "crypto.h" | ||
| 12 | #include "auth.h" | ||
| 13 | #include "decode.h" | ||
| 14 | 15 | ||
| 15 | #define TEMP_TICKET_BUF_LEN 256 | 16 | #define TEMP_TICKET_BUF_LEN 256 |
| 16 | 17 | ||
diff --git a/fs/ceph/auth_x.h b/net/ceph/auth_x.h index ff6f8180e681..e02da7a5c5a1 100644 --- a/fs/ceph/auth_x.h +++ b/net/ceph/auth_x.h | |||
| @@ -3,8 +3,9 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/rbtree.h> | 4 | #include <linux/rbtree.h> |
| 5 | 5 | ||
| 6 | #include <linux/ceph/auth.h> | ||
| 7 | |||
| 6 | #include "crypto.h" | 8 | #include "crypto.h" |
| 7 | #include "auth.h" | ||
| 8 | #include "auth_x_protocol.h" | 9 | #include "auth_x_protocol.h" |
| 9 | 10 | ||
| 10 | /* | 11 | /* |
diff --git a/fs/ceph/auth_x_protocol.h b/net/ceph/auth_x_protocol.h index 671d30576c4f..671d30576c4f 100644 --- a/fs/ceph/auth_x_protocol.h +++ b/net/ceph/auth_x_protocol.h | |||
diff --git a/fs/ceph/buffer.c b/net/ceph/buffer.c index cd39f17021de..53d8abfa25d5 100644 --- a/fs/ceph/buffer.c +++ b/net/ceph/buffer.c | |||
| @@ -1,10 +1,11 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/module.h> | ||
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 5 | 6 | ||
| 6 | #include "buffer.h" | 7 | #include <linux/ceph/buffer.h> |
| 7 | #include "decode.h" | 8 | #include <linux/ceph/decode.h> |
| 8 | 9 | ||
| 9 | struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | 10 | struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) |
| 10 | { | 11 | { |
| @@ -32,6 +33,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | |||
| 32 | dout("buffer_new %p\n", b); | 33 | dout("buffer_new %p\n", b); |
| 33 | return b; | 34 | return b; |
| 34 | } | 35 | } |
| 36 | EXPORT_SYMBOL(ceph_buffer_new); | ||
| 35 | 37 | ||
| 36 | void ceph_buffer_release(struct kref *kref) | 38 | void ceph_buffer_release(struct kref *kref) |
| 37 | { | 39 | { |
| @@ -46,6 +48,7 @@ void ceph_buffer_release(struct kref *kref) | |||
| 46 | } | 48 | } |
| 47 | kfree(b); | 49 | kfree(b); |
| 48 | } | 50 | } |
| 51 | EXPORT_SYMBOL(ceph_buffer_release); | ||
| 49 | 52 | ||
| 50 | int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end) | 53 | int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end) |
| 51 | { | 54 | { |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c new file mode 100644 index 000000000000..f3e4a13fea0c --- /dev/null +++ b/net/ceph/ceph_common.c | |||
| @@ -0,0 +1,529 @@ | |||
| 1 | |||
| 2 | #include <linux/ceph/ceph_debug.h> | ||
| 3 | #include <linux/backing-dev.h> | ||
| 4 | #include <linux/ctype.h> | ||
| 5 | #include <linux/fs.h> | ||
| 6 | #include <linux/inet.h> | ||
| 7 | #include <linux/in6.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/mount.h> | ||
| 10 | #include <linux/parser.h> | ||
| 11 | #include <linux/sched.h> | ||
| 12 | #include <linux/seq_file.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/statfs.h> | ||
| 15 | #include <linux/string.h> | ||
| 16 | |||
| 17 | |||
| 18 | #include <linux/ceph/libceph.h> | ||
| 19 | #include <linux/ceph/debugfs.h> | ||
| 20 | #include <linux/ceph/decode.h> | ||
| 21 | #include <linux/ceph/mon_client.h> | ||
| 22 | #include <linux/ceph/auth.h> | ||
| 23 | |||
| 24 | |||
| 25 | |||
| 26 | /* | ||
| 27 | * find filename portion of a path (/foo/bar/baz -> baz) | ||
| 28 | */ | ||
| 29 | const char *ceph_file_part(const char *s, int len) | ||
| 30 | { | ||
| 31 | const char *e = s + len; | ||
| 32 | |||
| 33 | while (e != s && *(e-1) != '/') | ||
| 34 | e--; | ||
| 35 | return e; | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL(ceph_file_part); | ||
| 38 | |||
| 39 | const char *ceph_msg_type_name(int type) | ||
| 40 | { | ||
| 41 | switch (type) { | ||
| 42 | case CEPH_MSG_SHUTDOWN: return "shutdown"; | ||
| 43 | case CEPH_MSG_PING: return "ping"; | ||
| 44 | case CEPH_MSG_AUTH: return "auth"; | ||
| 45 | case CEPH_MSG_AUTH_REPLY: return "auth_reply"; | ||
| 46 | case CEPH_MSG_MON_MAP: return "mon_map"; | ||
| 47 | case CEPH_MSG_MON_GET_MAP: return "mon_get_map"; | ||
| 48 | case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe"; | ||
| 49 | case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack"; | ||
| 50 | case CEPH_MSG_STATFS: return "statfs"; | ||
| 51 | case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; | ||
| 52 | case CEPH_MSG_MDS_MAP: return "mds_map"; | ||
| 53 | case CEPH_MSG_CLIENT_SESSION: return "client_session"; | ||
| 54 | case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect"; | ||
| 55 | case CEPH_MSG_CLIENT_REQUEST: return "client_request"; | ||
| 56 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward"; | ||
| 57 | case CEPH_MSG_CLIENT_REPLY: return "client_reply"; | ||
| 58 | case CEPH_MSG_CLIENT_CAPS: return "client_caps"; | ||
| 59 | case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release"; | ||
| 60 | case CEPH_MSG_CLIENT_SNAP: return "client_snap"; | ||
| 61 | case CEPH_MSG_CLIENT_LEASE: return "client_lease"; | ||
| 62 | case CEPH_MSG_OSD_MAP: return "osd_map"; | ||
| 63 | case CEPH_MSG_OSD_OP: return "osd_op"; | ||
| 64 | case CEPH_MSG_OSD_OPREPLY: return "osd_opreply"; | ||
| 65 | default: return "unknown"; | ||
| 66 | } | ||
| 67 | } | ||
| 68 | EXPORT_SYMBOL(ceph_msg_type_name); | ||
| 69 | |||
| 70 | /* | ||
| 71 | * Initially learn our fsid, or verify an fsid matches. | ||
| 72 | */ | ||
| 73 | int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) | ||
| 74 | { | ||
| 75 | if (client->have_fsid) { | ||
| 76 | if (ceph_fsid_compare(&client->fsid, fsid)) { | ||
| 77 | pr_err("bad fsid, had %pU got %pU", | ||
| 78 | &client->fsid, fsid); | ||
| 79 | return -1; | ||
| 80 | } | ||
| 81 | } else { | ||
| 82 | pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); | ||
| 83 | memcpy(&client->fsid, fsid, sizeof(*fsid)); | ||
| 84 | ceph_debugfs_client_init(client); | ||
| 85 | client->have_fsid = true; | ||
| 86 | } | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | EXPORT_SYMBOL(ceph_check_fsid); | ||
| 90 | |||
| 91 | static int strcmp_null(const char *s1, const char *s2) | ||
| 92 | { | ||
| 93 | if (!s1 && !s2) | ||
| 94 | return 0; | ||
| 95 | if (s1 && !s2) | ||
| 96 | return -1; | ||
| 97 | if (!s1 && s2) | ||
| 98 | return 1; | ||
| 99 | return strcmp(s1, s2); | ||
| 100 | } | ||
| 101 | |||
| 102 | int ceph_compare_options(struct ceph_options *new_opt, | ||
| 103 | struct ceph_client *client) | ||
| 104 | { | ||
| 105 | struct ceph_options *opt1 = new_opt; | ||
| 106 | struct ceph_options *opt2 = client->options; | ||
| 107 | int ofs = offsetof(struct ceph_options, mon_addr); | ||
| 108 | int i; | ||
| 109 | int ret; | ||
| 110 | |||
| 111 | ret = memcmp(opt1, opt2, ofs); | ||
| 112 | if (ret) | ||
| 113 | return ret; | ||
| 114 | |||
| 115 | ret = strcmp_null(opt1->name, opt2->name); | ||
| 116 | if (ret) | ||
| 117 | return ret; | ||
| 118 | |||
| 119 | ret = strcmp_null(opt1->secret, opt2->secret); | ||
| 120 | if (ret) | ||
| 121 | return ret; | ||
| 122 | |||
| 123 | /* any matching mon ip implies a match */ | ||
| 124 | for (i = 0; i < opt1->num_mon; i++) { | ||
| 125 | if (ceph_monmap_contains(client->monc.monmap, | ||
| 126 | &opt1->mon_addr[i])) | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | return -1; | ||
| 130 | } | ||
| 131 | EXPORT_SYMBOL(ceph_compare_options); | ||
| 132 | |||
| 133 | |||
| 134 | static int parse_fsid(const char *str, struct ceph_fsid *fsid) | ||
| 135 | { | ||
| 136 | int i = 0; | ||
| 137 | char tmp[3]; | ||
| 138 | int err = -EINVAL; | ||
| 139 | int d; | ||
| 140 | |||
| 141 | dout("parse_fsid '%s'\n", str); | ||
| 142 | tmp[2] = 0; | ||
| 143 | while (*str && i < 16) { | ||
| 144 | if (ispunct(*str)) { | ||
| 145 | str++; | ||
| 146 | continue; | ||
| 147 | } | ||
| 148 | if (!isxdigit(str[0]) || !isxdigit(str[1])) | ||
| 149 | break; | ||
| 150 | tmp[0] = str[0]; | ||
| 151 | tmp[1] = str[1]; | ||
| 152 | if (sscanf(tmp, "%x", &d) < 1) | ||
| 153 | break; | ||
| 154 | fsid->fsid[i] = d & 0xff; | ||
| 155 | i++; | ||
| 156 | str += 2; | ||
| 157 | } | ||
| 158 | |||
| 159 | if (i == 16) | ||
| 160 | err = 0; | ||
| 161 | dout("parse_fsid ret %d got fsid %pU", err, fsid); | ||
| 162 | return err; | ||
| 163 | } | ||
| 164 | |||
| 165 | /* | ||
| 166 | * ceph options | ||
| 167 | */ | ||
| 168 | enum { | ||
| 169 | Opt_osdtimeout, | ||
| 170 | Opt_osdkeepalivetimeout, | ||
| 171 | Opt_mount_timeout, | ||
| 172 | Opt_osd_idle_ttl, | ||
| 173 | Opt_last_int, | ||
| 174 | /* int args above */ | ||
| 175 | Opt_fsid, | ||
| 176 | Opt_name, | ||
| 177 | Opt_secret, | ||
| 178 | Opt_ip, | ||
| 179 | Opt_last_string, | ||
| 180 | /* string args above */ | ||
| 181 | Opt_noshare, | ||
| 182 | Opt_nocrc, | ||
| 183 | }; | ||
| 184 | |||
| 185 | static match_table_t opt_tokens = { | ||
| 186 | {Opt_osdtimeout, "osdtimeout=%d"}, | ||
| 187 | {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, | ||
| 188 | {Opt_mount_timeout, "mount_timeout=%d"}, | ||
| 189 | {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, | ||
| 190 | /* int args above */ | ||
| 191 | {Opt_fsid, "fsid=%s"}, | ||
| 192 | {Opt_name, "name=%s"}, | ||
| 193 | {Opt_secret, "secret=%s"}, | ||
| 194 | {Opt_ip, "ip=%s"}, | ||
| 195 | /* string args above */ | ||
| 196 | {Opt_noshare, "noshare"}, | ||
| 197 | {Opt_nocrc, "nocrc"}, | ||
| 198 | {-1, NULL} | ||
| 199 | }; | ||
| 200 | |||
| 201 | void ceph_destroy_options(struct ceph_options *opt) | ||
| 202 | { | ||
| 203 | dout("destroy_options %p\n", opt); | ||
| 204 | kfree(opt->name); | ||
| 205 | kfree(opt->secret); | ||
| 206 | kfree(opt); | ||
| 207 | } | ||
| 208 | EXPORT_SYMBOL(ceph_destroy_options); | ||
| 209 | |||
| 210 | int ceph_parse_options(struct ceph_options **popt, char *options, | ||
| 211 | const char *dev_name, const char *dev_name_end, | ||
| 212 | int (*parse_extra_token)(char *c, void *private), | ||
| 213 | void *private) | ||
| 214 | { | ||
| 215 | struct ceph_options *opt; | ||
| 216 | const char *c; | ||
| 217 | int err = -ENOMEM; | ||
| 218 | substring_t argstr[MAX_OPT_ARGS]; | ||
| 219 | |||
| 220 | opt = kzalloc(sizeof(*opt), GFP_KERNEL); | ||
| 221 | if (!opt) | ||
| 222 | return err; | ||
| 223 | opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr), | ||
| 224 | GFP_KERNEL); | ||
| 225 | if (!opt->mon_addr) | ||
| 226 | goto out; | ||
| 227 | |||
| 228 | dout("parse_options %p options '%s' dev_name '%s'\n", opt, options, | ||
| 229 | dev_name); | ||
| 230 | |||
| 231 | /* start with defaults */ | ||
| 232 | opt->flags = CEPH_OPT_DEFAULT; | ||
| 233 | opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; | ||
| 234 | opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; | ||
| 235 | opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ | ||
| 236 | opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ | ||
| 237 | |||
| 238 | /* get mon ip(s) */ | ||
| 239 | /* ip1[:port1][,ip2[:port2]...] */ | ||
| 240 | err = ceph_parse_ips(dev_name, dev_name_end, opt->mon_addr, | ||
| 241 | CEPH_MAX_MON, &opt->num_mon); | ||
| 242 | if (err < 0) | ||
| 243 | goto out; | ||
| 244 | |||
| 245 | /* parse mount options */ | ||
| 246 | while ((c = strsep(&options, ",")) != NULL) { | ||
| 247 | int token, intval, ret; | ||
| 248 | if (!*c) | ||
| 249 | continue; | ||
| 250 | err = -EINVAL; | ||
| 251 | token = match_token((char *)c, opt_tokens, argstr); | ||
| 252 | if (token < 0 && parse_extra_token) { | ||
| 253 | /* extra? */ | ||
| 254 | err = parse_extra_token((char *)c, private); | ||
| 255 | if (err < 0) { | ||
| 256 | pr_err("bad option at '%s'\n", c); | ||
| 257 | goto out; | ||
| 258 | } | ||
| 259 | continue; | ||
| 260 | } | ||
| 261 | if (token < Opt_last_int) { | ||
| 262 | ret = match_int(&argstr[0], &intval); | ||
| 263 | if (ret < 0) { | ||
| 264 | pr_err("bad mount option arg (not int) " | ||
| 265 | "at '%s'\n", c); | ||
| 266 | continue; | ||
| 267 | } | ||
| 268 | dout("got int token %d val %d\n", token, intval); | ||
| 269 | } else if (token > Opt_last_int && token < Opt_last_string) { | ||
| 270 | dout("got string token %d val %s\n", token, | ||
| 271 | argstr[0].from); | ||
| 272 | } else { | ||
| 273 | dout("got token %d\n", token); | ||
| 274 | } | ||
| 275 | switch (token) { | ||
| 276 | case Opt_ip: | ||
| 277 | err = ceph_parse_ips(argstr[0].from, | ||
| 278 | argstr[0].to, | ||
| 279 | &opt->my_addr, | ||
| 280 | 1, NULL); | ||
| 281 | if (err < 0) | ||
| 282 | goto out; | ||
| 283 | opt->flags |= CEPH_OPT_MYIP; | ||
| 284 | break; | ||
| 285 | |||
| 286 | case Opt_fsid: | ||
| 287 | err = parse_fsid(argstr[0].from, &opt->fsid); | ||
| 288 | if (err == 0) | ||
| 289 | opt->flags |= CEPH_OPT_FSID; | ||
| 290 | break; | ||
| 291 | case Opt_name: | ||
| 292 | opt->name = kstrndup(argstr[0].from, | ||
| 293 | argstr[0].to-argstr[0].from, | ||
| 294 | GFP_KERNEL); | ||
| 295 | break; | ||
| 296 | case Opt_secret: | ||
| 297 | opt->secret = kstrndup(argstr[0].from, | ||
| 298 | argstr[0].to-argstr[0].from, | ||
| 299 | GFP_KERNEL); | ||
| 300 | break; | ||
| 301 | |||
| 302 | /* misc */ | ||
| 303 | case Opt_osdtimeout: | ||
| 304 | opt->osd_timeout = intval; | ||
| 305 | break; | ||
| 306 | case Opt_osdkeepalivetimeout: | ||
| 307 | opt->osd_keepalive_timeout = intval; | ||
| 308 | break; | ||
| 309 | case Opt_osd_idle_ttl: | ||
| 310 | opt->osd_idle_ttl = intval; | ||
| 311 | break; | ||
| 312 | case Opt_mount_timeout: | ||
| 313 | opt->mount_timeout = intval; | ||
| 314 | break; | ||
| 315 | |||
| 316 | case Opt_noshare: | ||
| 317 | opt->flags |= CEPH_OPT_NOSHARE; | ||
| 318 | break; | ||
| 319 | |||
| 320 | case Opt_nocrc: | ||
| 321 | opt->flags |= CEPH_OPT_NOCRC; | ||
| 322 | break; | ||
| 323 | |||
| 324 | default: | ||
| 325 | BUG_ON(token); | ||
| 326 | } | ||
| 327 | } | ||
| 328 | |||
| 329 | /* success */ | ||
| 330 | *popt = opt; | ||
| 331 | return 0; | ||
| 332 | |||
| 333 | out: | ||
| 334 | ceph_destroy_options(opt); | ||
| 335 | return err; | ||
| 336 | } | ||
| 337 | EXPORT_SYMBOL(ceph_parse_options); | ||
| 338 | |||
| 339 | u64 ceph_client_id(struct ceph_client *client) | ||
| 340 | { | ||
| 341 | return client->monc.auth->global_id; | ||
| 342 | } | ||
| 343 | EXPORT_SYMBOL(ceph_client_id); | ||
| 344 | |||
| 345 | /* | ||
| 346 | * create a fresh client instance | ||
| 347 | */ | ||
| 348 | struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private) | ||
| 349 | { | ||
| 350 | struct ceph_client *client; | ||
| 351 | int err = -ENOMEM; | ||
| 352 | |||
| 353 | client = kzalloc(sizeof(*client), GFP_KERNEL); | ||
| 354 | if (client == NULL) | ||
| 355 | return ERR_PTR(-ENOMEM); | ||
| 356 | |||
| 357 | client->private = private; | ||
| 358 | client->options = opt; | ||
| 359 | |||
| 360 | mutex_init(&client->mount_mutex); | ||
| 361 | init_waitqueue_head(&client->auth_wq); | ||
| 362 | client->auth_err = 0; | ||
| 363 | |||
| 364 | client->extra_mon_dispatch = NULL; | ||
| 365 | client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT; | ||
| 366 | client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT; | ||
| 367 | |||
| 368 | client->msgr = NULL; | ||
| 369 | |||
| 370 | /* subsystems */ | ||
| 371 | err = ceph_monc_init(&client->monc, client); | ||
| 372 | if (err < 0) | ||
| 373 | goto fail; | ||
| 374 | err = ceph_osdc_init(&client->osdc, client); | ||
| 375 | if (err < 0) | ||
| 376 | goto fail_monc; | ||
| 377 | |||
| 378 | return client; | ||
| 379 | |||
| 380 | fail_monc: | ||
| 381 | ceph_monc_stop(&client->monc); | ||
| 382 | fail: | ||
| 383 | kfree(client); | ||
| 384 | return ERR_PTR(err); | ||
| 385 | } | ||
| 386 | EXPORT_SYMBOL(ceph_create_client); | ||
| 387 | |||
| 388 | void ceph_destroy_client(struct ceph_client *client) | ||
| 389 | { | ||
| 390 | dout("destroy_client %p\n", client); | ||
| 391 | |||
| 392 | /* unmount */ | ||
| 393 | ceph_osdc_stop(&client->osdc); | ||
| 394 | |||
| 395 | /* | ||
| 396 | * make sure mds and osd connections close out before destroying | ||
| 397 | * the auth module, which is needed to free those connections' | ||
| 398 | * ceph_authorizers. | ||
| 399 | */ | ||
| 400 | ceph_msgr_flush(); | ||
| 401 | |||
| 402 | ceph_monc_stop(&client->monc); | ||
| 403 | |||
| 404 | ceph_debugfs_client_cleanup(client); | ||
| 405 | |||
| 406 | if (client->msgr) | ||
| 407 | ceph_messenger_destroy(client->msgr); | ||
| 408 | |||
| 409 | ceph_destroy_options(client->options); | ||
| 410 | |||
| 411 | kfree(client); | ||
| 412 | dout("destroy_client %p done\n", client); | ||
| 413 | } | ||
| 414 | EXPORT_SYMBOL(ceph_destroy_client); | ||
| 415 | |||
| 416 | /* | ||
| 417 | * true if we have the mon map (and have thus joined the cluster) | ||
| 418 | */ | ||
| 419 | static int have_mon_and_osd_map(struct ceph_client *client) | ||
| 420 | { | ||
| 421 | return client->monc.monmap && client->monc.monmap->epoch && | ||
| 422 | client->osdc.osdmap && client->osdc.osdmap->epoch; | ||
| 423 | } | ||
| 424 | |||
| 425 | /* | ||
| 426 | * mount: join the ceph cluster, and open root directory. | ||
| 427 | */ | ||
| 428 | int __ceph_open_session(struct ceph_client *client, unsigned long started) | ||
| 429 | { | ||
| 430 | struct ceph_entity_addr *myaddr = NULL; | ||
| 431 | int err; | ||
| 432 | unsigned long timeout = client->options->mount_timeout * HZ; | ||
| 433 | |||
| 434 | /* initialize the messenger */ | ||
| 435 | if (client->msgr == NULL) { | ||
| 436 | if (ceph_test_opt(client, MYIP)) | ||
| 437 | myaddr = &client->options->my_addr; | ||
| 438 | client->msgr = ceph_messenger_create(myaddr, | ||
| 439 | client->supported_features, | ||
| 440 | client->required_features); | ||
| 441 | if (IS_ERR(client->msgr)) { | ||
| 442 | client->msgr = NULL; | ||
| 443 | return PTR_ERR(client->msgr); | ||
| 444 | } | ||
| 445 | client->msgr->nocrc = ceph_test_opt(client, NOCRC); | ||
| 446 | } | ||
| 447 | |||
| 448 | /* open session, and wait for mon and osd maps */ | ||
| 449 | err = ceph_monc_open_session(&client->monc); | ||
| 450 | if (err < 0) | ||
| 451 | return err; | ||
| 452 | |||
| 453 | while (!have_mon_and_osd_map(client)) { | ||
| 454 | err = -EIO; | ||
| 455 | if (timeout && time_after_eq(jiffies, started + timeout)) | ||
| 456 | return err; | ||
| 457 | |||
| 458 | /* wait */ | ||
| 459 | dout("mount waiting for mon_map\n"); | ||
| 460 | err = wait_event_interruptible_timeout(client->auth_wq, | ||
| 461 | have_mon_and_osd_map(client) || (client->auth_err < 0), | ||
| 462 | timeout); | ||
| 463 | if (err == -EINTR || err == -ERESTARTSYS) | ||
| 464 | return err; | ||
| 465 | if (client->auth_err < 0) | ||
| 466 | return client->auth_err; | ||
| 467 | } | ||
| 468 | |||
| 469 | return 0; | ||
| 470 | } | ||
| 471 | EXPORT_SYMBOL(__ceph_open_session); | ||
| 472 | |||
| 473 | |||
| 474 | int ceph_open_session(struct ceph_client *client) | ||
| 475 | { | ||
| 476 | int ret; | ||
| 477 | unsigned long started = jiffies; /* note the start time */ | ||
| 478 | |||
| 479 | dout("open_session start\n"); | ||
| 480 | mutex_lock(&client->mount_mutex); | ||
| 481 | |||
| 482 | ret = __ceph_open_session(client, started); | ||
| 483 | |||
| 484 | mutex_unlock(&client->mount_mutex); | ||
| 485 | return ret; | ||
| 486 | } | ||
| 487 | EXPORT_SYMBOL(ceph_open_session); | ||
| 488 | |||
| 489 | |||
| 490 | static int __init init_ceph_lib(void) | ||
| 491 | { | ||
| 492 | int ret = 0; | ||
| 493 | |||
| 494 | ret = ceph_debugfs_init(); | ||
| 495 | if (ret < 0) | ||
| 496 | goto out; | ||
| 497 | |||
| 498 | ret = ceph_msgr_init(); | ||
| 499 | if (ret < 0) | ||
| 500 | goto out_debugfs; | ||
| 501 | |||
| 502 | pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n", | ||
| 503 | CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL, | ||
| 504 | CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT, | ||
| 505 | CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT); | ||
| 506 | |||
| 507 | return 0; | ||
| 508 | |||
| 509 | out_debugfs: | ||
| 510 | ceph_debugfs_cleanup(); | ||
| 511 | out: | ||
| 512 | return ret; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void __exit exit_ceph_lib(void) | ||
| 516 | { | ||
| 517 | dout("exit_ceph_lib\n"); | ||
| 518 | ceph_msgr_exit(); | ||
| 519 | ceph_debugfs_cleanup(); | ||
| 520 | } | ||
| 521 | |||
| 522 | module_init(init_ceph_lib); | ||
| 523 | module_exit(exit_ceph_lib); | ||
| 524 | |||
| 525 | MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); | ||
| 526 | MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); | ||
| 527 | MODULE_AUTHOR("Patience Warnick <patience@newdream.net>"); | ||
| 528 | MODULE_DESCRIPTION("Ceph filesystem for Linux"); | ||
| 529 | MODULE_LICENSE("GPL"); | ||
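The new ceph_common.c exports the client lifecycle shared by the Ceph filesystem and rbd: parse the options, create a client, open a session, tear it down. A rough usage sketch based only on the signatures visible above; the monitor address, the option string, and the example_connect() name are made up for illustration:

    #include <linux/ceph/libceph.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static struct ceph_client *example_connect(void)
    {
            struct ceph_options *opt;
            struct ceph_client *client;
            char options[] = "osdtimeout=60,nocrc";     /* example options */
            const char *mons = "1.2.3.4:6789";          /* example monitor */
            int err;

            err = ceph_parse_options(&opt, options, mons, mons + strlen(mons),
                                     NULL, NULL);
            if (err < 0)
                    return ERR_PTR(err);

            client = ceph_create_client(opt, NULL);
            if (IS_ERR(client)) {
                    ceph_destroy_options(opt);   /* not yet owned by the client */
                    return client;
            }

            err = ceph_open_session(client);     /* waits for mon and osd maps */
            if (err < 0) {
                    ceph_destroy_client(client); /* also frees opt */
                    return ERR_PTR(err);
            }
            return client;
    }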
diff --git a/fs/ceph/ceph_fs.c b/net/ceph/ceph_fs.c index 3ac6cc7c1156..a3a3a31d3c37 100644 --- a/fs/ceph/ceph_fs.c +++ b/net/ceph/ceph_fs.c | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Some non-inline ceph helpers | 2 | * Some non-inline ceph helpers |
| 3 | */ | 3 | */ |
| 4 | #include "types.h" | 4 | #include <linux/module.h> |
| 5 | #include <linux/ceph/types.h> | ||
| 5 | 6 | ||
| 6 | /* | 7 | /* |
| 7 | * return true if @layout appears to be valid | 8 | * return true if @layout appears to be valid |
| @@ -52,6 +53,7 @@ int ceph_flags_to_mode(int flags) | |||
| 52 | 53 | ||
| 53 | return mode; | 54 | return mode; |
| 54 | } | 55 | } |
| 56 | EXPORT_SYMBOL(ceph_flags_to_mode); | ||
| 55 | 57 | ||
| 56 | int ceph_caps_for_mode(int mode) | 58 | int ceph_caps_for_mode(int mode) |
| 57 | { | 59 | { |
| @@ -70,3 +72,4 @@ int ceph_caps_for_mode(int mode) | |||
| 70 | 72 | ||
| 71 | return caps; | 73 | return caps; |
| 72 | } | 74 | } |
| 75 | EXPORT_SYMBOL(ceph_caps_for_mode); | ||
diff --git a/fs/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index bd570015d147..815ef8826796 100644 --- a/fs/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | 1 | ||
| 2 | #include "types.h" | 2 | #include <linux/ceph/types.h> |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Robert Jenkin's hash function. | 5 | * Robert Jenkin's hash function. |
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c new file mode 100644 index 000000000000..3fbda04de29c --- /dev/null +++ b/net/ceph/ceph_strings.c | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | /* | ||
| 2 | * Ceph string constants | ||
| 3 | */ | ||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/ceph/types.h> | ||
| 6 | |||
| 7 | const char *ceph_entity_type_name(int type) | ||
| 8 | { | ||
| 9 | switch (type) { | ||
| 10 | case CEPH_ENTITY_TYPE_MDS: return "mds"; | ||
| 11 | case CEPH_ENTITY_TYPE_OSD: return "osd"; | ||
| 12 | case CEPH_ENTITY_TYPE_MON: return "mon"; | ||
| 13 | case CEPH_ENTITY_TYPE_CLIENT: return "client"; | ||
| 14 | case CEPH_ENTITY_TYPE_AUTH: return "auth"; | ||
| 15 | default: return "unknown"; | ||
| 16 | } | ||
| 17 | } | ||
| 18 | |||
| 19 | const char *ceph_osd_op_name(int op) | ||
| 20 | { | ||
| 21 | switch (op) { | ||
| 22 | case CEPH_OSD_OP_READ: return "read"; | ||
| 23 | case CEPH_OSD_OP_STAT: return "stat"; | ||
| 24 | |||
| 25 | case CEPH_OSD_OP_MASKTRUNC: return "masktrunc"; | ||
| 26 | |||
| 27 | case CEPH_OSD_OP_WRITE: return "write"; | ||
| 28 | case CEPH_OSD_OP_DELETE: return "delete"; | ||
| 29 | case CEPH_OSD_OP_TRUNCATE: return "truncate"; | ||
| 30 | case CEPH_OSD_OP_ZERO: return "zero"; | ||
| 31 | case CEPH_OSD_OP_WRITEFULL: return "writefull"; | ||
| 32 | case CEPH_OSD_OP_ROLLBACK: return "rollback"; | ||
| 33 | |||
| 34 | case CEPH_OSD_OP_APPEND: return "append"; | ||
| 35 | case CEPH_OSD_OP_STARTSYNC: return "startsync"; | ||
| 36 | case CEPH_OSD_OP_SETTRUNC: return "settrunc"; | ||
| 37 | case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc"; | ||
| 38 | |||
| 39 | case CEPH_OSD_OP_TMAPUP: return "tmapup"; | ||
| 40 | case CEPH_OSD_OP_TMAPGET: return "tmapget"; | ||
| 41 | case CEPH_OSD_OP_TMAPPUT: return "tmapput"; | ||
| 42 | |||
| 43 | case CEPH_OSD_OP_GETXATTR: return "getxattr"; | ||
| 44 | case CEPH_OSD_OP_GETXATTRS: return "getxattrs"; | ||
| 45 | case CEPH_OSD_OP_SETXATTR: return "setxattr"; | ||
| 46 | case CEPH_OSD_OP_SETXATTRS: return "setxattrs"; | ||
| 47 | case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs"; | ||
| 48 | case CEPH_OSD_OP_RMXATTR: return "rmxattr"; | ||
| 49 | case CEPH_OSD_OP_CMPXATTR: return "cmpxattr"; | ||
| 50 | |||
| 51 | case CEPH_OSD_OP_PULL: return "pull"; | ||
| 52 | case CEPH_OSD_OP_PUSH: return "push"; | ||
| 53 | case CEPH_OSD_OP_BALANCEREADS: return "balance-reads"; | ||
| 54 | case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads"; | ||
| 55 | case CEPH_OSD_OP_SCRUB: return "scrub"; | ||
| 56 | |||
| 57 | case CEPH_OSD_OP_WRLOCK: return "wrlock"; | ||
| 58 | case CEPH_OSD_OP_WRUNLOCK: return "wrunlock"; | ||
| 59 | case CEPH_OSD_OP_RDLOCK: return "rdlock"; | ||
| 60 | case CEPH_OSD_OP_RDUNLOCK: return "rdunlock"; | ||
| 61 | case CEPH_OSD_OP_UPLOCK: return "uplock"; | ||
| 62 | case CEPH_OSD_OP_DNLOCK: return "dnlock"; | ||
| 63 | |||
| 64 | case CEPH_OSD_OP_CALL: return "call"; | ||
| 65 | |||
| 66 | case CEPH_OSD_OP_PGLS: return "pgls"; | ||
| 67 | } | ||
| 68 | return "???"; | ||
| 69 | } | ||
| 70 | |||
| 71 | |||
| 72 | const char *ceph_pool_op_name(int op) | ||
| 73 | { | ||
| 74 | switch (op) { | ||
| 75 | case POOL_OP_CREATE: return "create"; | ||
| 76 | case POOL_OP_DELETE: return "delete"; | ||
| 77 | case POOL_OP_AUID_CHANGE: return "auid change"; | ||
| 78 | case POOL_OP_CREATE_SNAP: return "create snap"; | ||
| 79 | case POOL_OP_DELETE_SNAP: return "delete snap"; | ||
| 80 | case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap"; | ||
| 81 | case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap"; | ||
| 82 | } | ||
| 83 | return "???"; | ||
| 84 | } | ||
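
The new ceph_strings.c is a pure lookup table: it maps wire-protocol constants to human-readable names for log and debugfs output. A rough usage sketch, not part of this patch — the helper and its pr_debug() call are illustrative only, mirroring how osdc_show() further below walks a request's ops array:

	/* Illustrative only: print the op names carried by a request. */
	static void dump_osd_ops(const struct ceph_osd_op *ops, int num_ops)
	{
		int i;

		for (i = 0; i < num_ops; i++)
			pr_debug("op %d: %s\n", i,
				 ceph_osd_op_name(le16_to_cpu(ops[i].op)));
	}
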
diff --git a/fs/ceph/crush/crush.c b/net/ceph/crush/crush.c index fabd302e5779..d6ebb13a18a4 100644 --- a/fs/ceph/crush/crush.c +++ b/net/ceph/crush/crush.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | # define BUG_ON(x) assert(!(x)) | 8 | # define BUG_ON(x) assert(!(x)) |
| 9 | #endif | 9 | #endif |
| 10 | 10 | ||
| 11 | #include "crush.h" | 11 | #include <linux/crush/crush.h> |
| 12 | 12 | ||
| 13 | const char *crush_bucket_alg_name(int alg) | 13 | const char *crush_bucket_alg_name(int alg) |
| 14 | { | 14 | { |
diff --git a/fs/ceph/crush/hash.c b/net/ceph/crush/hash.c index 5873aed694bf..5bb63e37a8a1 100644 --- a/fs/ceph/crush/hash.c +++ b/net/ceph/crush/hash.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | 1 | ||
| 2 | #include <linux/types.h> | 2 | #include <linux/types.h> |
| 3 | #include "hash.h" | 3 | #include <linux/crush/hash.h> |
| 4 | 4 | ||
| 5 | /* | 5 | /* |
| 6 | * Robert Jenkins' function for mixing 32-bit values | 6 | * Robert Jenkins' function for mixing 32-bit values |
diff --git a/fs/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index a4eec133258e..42599e31dcad 100644 --- a/fs/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c | |||
| @@ -18,8 +18,8 @@ | |||
| 18 | # define kfree(x) free(x) | 18 | # define kfree(x) free(x) |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | #include "crush.h" | 21 | #include <linux/crush/crush.h> |
| 22 | #include "hash.h" | 22 | #include <linux/crush/hash.h> |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * Implement the core CRUSH mapping algorithm. | 25 | * Implement the core CRUSH mapping algorithm. |
diff --git a/fs/ceph/crypto.c b/net/ceph/crypto.c index a3e627f63293..7b505b0c983f 100644 --- a/fs/ceph/crypto.c +++ b/net/ceph/crypto.c | |||
| @@ -1,13 +1,13 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 5 | #include <linux/scatterlist.h> | 5 | #include <linux/scatterlist.h> |
| 6 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
| 7 | #include <crypto/hash.h> | 7 | #include <crypto/hash.h> |
| 8 | 8 | ||
| 9 | #include <linux/ceph/decode.h> | ||
| 9 | #include "crypto.h" | 10 | #include "crypto.h" |
| 10 | #include "decode.h" | ||
| 11 | 11 | ||
| 12 | int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) | 12 | int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) |
| 13 | { | 13 | { |
diff --git a/fs/ceph/crypto.h b/net/ceph/crypto.h index bdf38607323c..f9eccace592b 100644 --- a/fs/ceph/crypto.h +++ b/net/ceph/crypto.h | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | #ifndef _FS_CEPH_CRYPTO_H | 1 | #ifndef _FS_CEPH_CRYPTO_H |
| 2 | #define _FS_CEPH_CRYPTO_H | 2 | #define _FS_CEPH_CRYPTO_H |
| 3 | 3 | ||
| 4 | #include "types.h" | 4 | #include <linux/ceph/types.h> |
| 5 | #include "buffer.h" | 5 | #include <linux/ceph/buffer.h> |
| 6 | 6 | ||
| 7 | /* | 7 | /* |
| 8 | * cryptographic secret | 8 | * cryptographic secret |
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c new file mode 100644 index 000000000000..27d4ea315d12 --- /dev/null +++ b/net/ceph/debugfs.c | |||
| @@ -0,0 +1,267 @@ | |||
| 1 | #include <linux/ceph/ceph_debug.h> | ||
| 2 | |||
| 3 | #include <linux/device.h> | ||
| 4 | #include <linux/slab.h> | ||
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/ctype.h> | ||
| 7 | #include <linux/debugfs.h> | ||
| 8 | #include <linux/seq_file.h> | ||
| 9 | |||
| 10 | #include <linux/ceph/libceph.h> | ||
| 11 | #include <linux/ceph/mon_client.h> | ||
| 12 | #include <linux/ceph/auth.h> | ||
| 13 | #include <linux/ceph/debugfs.h> | ||
| 14 | |||
| 15 | #ifdef CONFIG_DEBUG_FS | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Implement /sys/kernel/debug/ceph fun | ||
| 19 | * | ||
| 20 | * /sys/kernel/debug/ceph/client* - an instance of the ceph client | ||
| 21 | * .../osdmap - current osdmap | ||
| 22 | * .../monmap - current monmap | ||
| 23 | * .../osdc - active osd requests | ||
| 24 | * .../monc - mon client state | ||
| 25 | * .../dentry_lru - dump contents of dentry lru | ||
| 26 | * .../caps - expose cap (reservation) stats | ||
| 27 | * .../bdi - symlink to ../../bdi/something | ||
| 28 | */ | ||
| 29 | |||
| 30 | static struct dentry *ceph_debugfs_dir; | ||
| 31 | |||
| 32 | static int monmap_show(struct seq_file *s, void *p) | ||
| 33 | { | ||
| 34 | int i; | ||
| 35 | struct ceph_client *client = s->private; | ||
| 36 | |||
| 37 | if (client->monc.monmap == NULL) | ||
| 38 | return 0; | ||
| 39 | |||
| 40 | seq_printf(s, "epoch %d\n", client->monc.monmap->epoch); | ||
| 41 | for (i = 0; i < client->monc.monmap->num_mon; i++) { | ||
| 42 | struct ceph_entity_inst *inst = | ||
| 43 | &client->monc.monmap->mon_inst[i]; | ||
| 44 | |||
| 45 | seq_printf(s, "\t%s%lld\t%s\n", | ||
| 46 | ENTITY_NAME(inst->name), | ||
| 47 | ceph_pr_addr(&inst->addr.in_addr)); | ||
| 48 | } | ||
| 49 | return 0; | ||
| 50 | } | ||
| 51 | |||
| 52 | static int osdmap_show(struct seq_file *s, void *p) | ||
| 53 | { | ||
| 54 | int i; | ||
| 55 | struct ceph_client *client = s->private; | ||
| 56 | struct rb_node *n; | ||
| 57 | |||
| 58 | if (client->osdc.osdmap == NULL) | ||
| 59 | return 0; | ||
| 60 | seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch); | ||
| 61 | seq_printf(s, "flags%s%s\n", | ||
| 62 | (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ? | ||
| 63 | " NEARFULL" : "", | ||
| 64 | (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? | ||
| 65 | " FULL" : ""); | ||
| 66 | for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { | ||
| 67 | struct ceph_pg_pool_info *pool = | ||
| 68 | rb_entry(n, struct ceph_pg_pool_info, node); | ||
| 69 | seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", | ||
| 70 | pool->id, pool->v.pg_num, pool->pg_num_mask, | ||
| 71 | pool->v.lpg_num, pool->lpg_num_mask); | ||
| 72 | } | ||
| 73 | for (i = 0; i < client->osdc.osdmap->max_osd; i++) { | ||
| 74 | struct ceph_entity_addr *addr = | ||
| 75 | &client->osdc.osdmap->osd_addr[i]; | ||
| 76 | int state = client->osdc.osdmap->osd_state[i]; | ||
| 77 | char sb[64]; | ||
| 78 | |||
| 79 | seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n", | ||
| 80 | i, ceph_pr_addr(&addr->in_addr), | ||
| 81 | ((client->osdc.osdmap->osd_weight[i]*100) >> 16), | ||
| 82 | ceph_osdmap_state_str(sb, sizeof(sb), state)); | ||
| 83 | } | ||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | static int monc_show(struct seq_file *s, void *p) | ||
| 88 | { | ||
| 89 | struct ceph_client *client = s->private; | ||
| 90 | struct ceph_mon_generic_request *req; | ||
| 91 | struct ceph_mon_client *monc = &client->monc; | ||
| 92 | struct rb_node *rp; | ||
| 93 | |||
| 94 | mutex_lock(&monc->mutex); | ||
| 95 | |||
| 96 | if (monc->have_mdsmap) | ||
| 97 | seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); | ||
| 98 | if (monc->have_osdmap) | ||
| 99 | seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); | ||
| 100 | if (monc->want_next_osdmap) | ||
| 101 | seq_printf(s, "want next osdmap\n"); | ||
| 102 | |||
| 103 | for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { | ||
| 104 | __u16 op; | ||
| 105 | req = rb_entry(rp, struct ceph_mon_generic_request, node); | ||
| 106 | op = le16_to_cpu(req->request->hdr.type); | ||
| 107 | if (op == CEPH_MSG_STATFS) | ||
| 108 | seq_printf(s, "%lld statfs\n", req->tid); | ||
| 109 | else | ||
| 110 | seq_printf(s, "%lld unknown\n", req->tid); | ||
| 111 | } | ||
| 112 | |||
| 113 | mutex_unlock(&monc->mutex); | ||
| 114 | return 0; | ||
| 115 | } | ||
| 116 | |||
| 117 | static int osdc_show(struct seq_file *s, void *pp) | ||
| 118 | { | ||
| 119 | struct ceph_client *client = s->private; | ||
| 120 | struct ceph_osd_client *osdc = &client->osdc; | ||
| 121 | struct rb_node *p; | ||
| 122 | |||
| 123 | mutex_lock(&osdc->request_mutex); | ||
| 124 | for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { | ||
| 125 | struct ceph_osd_request *req; | ||
| 126 | struct ceph_osd_request_head *head; | ||
| 127 | struct ceph_osd_op *op; | ||
| 128 | int num_ops; | ||
| 129 | int opcode, olen; | ||
| 130 | int i; | ||
| 131 | |||
| 132 | req = rb_entry(p, struct ceph_osd_request, r_node); | ||
| 133 | |||
| 134 | seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, | ||
| 135 | req->r_osd ? req->r_osd->o_osd : -1, | ||
| 136 | le32_to_cpu(req->r_pgid.pool), | ||
| 137 | le16_to_cpu(req->r_pgid.ps)); | ||
| 138 | |||
| 139 | head = req->r_request->front.iov_base; | ||
| 140 | op = (void *)(head + 1); | ||
| 141 | |||
| 142 | num_ops = le16_to_cpu(head->num_ops); | ||
| 143 | olen = le32_to_cpu(head->object_len); | ||
| 144 | seq_printf(s, "%.*s", olen, | ||
| 145 | (const char *)(head->ops + num_ops)); | ||
| 146 | |||
| 147 | if (req->r_reassert_version.epoch) | ||
| 148 | seq_printf(s, "\t%u'%llu", | ||
| 149 | (unsigned)le32_to_cpu(req->r_reassert_version.epoch), | ||
| 150 | le64_to_cpu(req->r_reassert_version.version)); | ||
| 151 | else | ||
| 152 | seq_printf(s, "\t"); | ||
| 153 | |||
| 154 | for (i = 0; i < num_ops; i++) { | ||
| 155 | opcode = le16_to_cpu(op->op); | ||
| 156 | seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); | ||
| 157 | op++; | ||
| 158 | } | ||
| 159 | |||
| 160 | seq_printf(s, "\n"); | ||
| 161 | } | ||
| 162 | mutex_unlock(&osdc->request_mutex); | ||
| 163 | return 0; | ||
| 164 | } | ||
| 165 | |||
| 166 | CEPH_DEFINE_SHOW_FUNC(monmap_show) | ||
| 167 | CEPH_DEFINE_SHOW_FUNC(osdmap_show) | ||
| 168 | CEPH_DEFINE_SHOW_FUNC(monc_show) | ||
| 169 | CEPH_DEFINE_SHOW_FUNC(osdc_show) | ||
| 170 | |||
| 171 | int ceph_debugfs_init(void) | ||
| 172 | { | ||
| 173 | ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); | ||
| 174 | if (!ceph_debugfs_dir) | ||
| 175 | return -ENOMEM; | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | void ceph_debugfs_cleanup(void) | ||
| 180 | { | ||
| 181 | debugfs_remove(ceph_debugfs_dir); | ||
| 182 | } | ||
| 183 | |||
| 184 | int ceph_debugfs_client_init(struct ceph_client *client) | ||
| 185 | { | ||
| 186 | int ret = -ENOMEM; | ||
| 187 | char name[80]; | ||
| 188 | |||
| 189 | snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, | ||
| 190 | client->monc.auth->global_id); | ||
| 191 | |||
| 192 | client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); | ||
| 193 | if (!client->debugfs_dir) | ||
| 194 | goto out; | ||
| 195 | |||
| 196 | client->monc.debugfs_file = debugfs_create_file("monc", | ||
| 197 | 0600, | ||
| 198 | client->debugfs_dir, | ||
| 199 | client, | ||
| 200 | &monc_show_fops); | ||
| 201 | if (!client->monc.debugfs_file) | ||
| 202 | goto out; | ||
| 203 | |||
| 204 | client->osdc.debugfs_file = debugfs_create_file("osdc", | ||
| 205 | 0600, | ||
| 206 | client->debugfs_dir, | ||
| 207 | client, | ||
| 208 | &osdc_show_fops); | ||
| 209 | if (!client->osdc.debugfs_file) | ||
| 210 | goto out; | ||
| 211 | |||
| 212 | client->debugfs_monmap = debugfs_create_file("monmap", | ||
| 213 | 0600, | ||
| 214 | client->debugfs_dir, | ||
| 215 | client, | ||
| 216 | &monmap_show_fops); | ||
| 217 | if (!client->debugfs_monmap) | ||
| 218 | goto out; | ||
| 219 | |||
| 220 | client->debugfs_osdmap = debugfs_create_file("osdmap", | ||
| 221 | 0600, | ||
| 222 | client->debugfs_dir, | ||
| 223 | client, | ||
| 224 | &osdmap_show_fops); | ||
| 225 | if (!client->debugfs_osdmap) | ||
| 226 | goto out; | ||
| 227 | |||
| 228 | return 0; | ||
| 229 | |||
| 230 | out: | ||
| 231 | ceph_debugfs_client_cleanup(client); | ||
| 232 | return ret; | ||
| 233 | } | ||
| 234 | |||
| 235 | void ceph_debugfs_client_cleanup(struct ceph_client *client) | ||
| 236 | { | ||
| 237 | debugfs_remove(client->debugfs_osdmap); | ||
| 238 | debugfs_remove(client->debugfs_monmap); | ||
| 239 | debugfs_remove(client->osdc.debugfs_file); | ||
| 240 | debugfs_remove(client->monc.debugfs_file); | ||
| 241 | debugfs_remove(client->debugfs_dir); | ||
| 242 | } | ||
| 243 | |||
| 244 | #else /* CONFIG_DEBUG_FS */ | ||
| 245 | |||
| 246 | int ceph_debugfs_init(void) | ||
| 247 | { | ||
| 248 | return 0; | ||
| 249 | } | ||
| 250 | |||
| 251 | void ceph_debugfs_cleanup(void) | ||
| 252 | { | ||
| 253 | } | ||
| 254 | |||
| 255 | int ceph_debugfs_client_init(struct ceph_client *client) | ||
| 256 | { | ||
| 257 | return 0; | ||
| 258 | } | ||
| 259 | |||
| 260 | void ceph_debugfs_client_cleanup(struct ceph_client *client) | ||
| 261 | { | ||
| 262 | } | ||
| 263 | |||
| 264 | #endif /* CONFIG_DEBUG_FS */ | ||
| 265 | |||
| 266 | EXPORT_SYMBOL(ceph_debugfs_init); | ||
| 267 | EXPORT_SYMBOL(ceph_debugfs_cleanup); | ||
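
Each *_show() routine above becomes a read-only debugfs file via CEPH_DEFINE_SHOW_FUNC, which generates the monc_show_fops/osdc_show_fops/... structures handed to debugfs_create_file(). The macro itself lives in the new <linux/ceph/debugfs.h> header, which this hunk does not show; conventionally it is a thin single_open() wrapper along these lines (a sketch of the expected expansion under that assumption, not the header's verbatim text):

	#define CEPH_DEFINE_SHOW_FUNC(name)					\
	static int name##_open(struct inode *inode, struct file *file)		\
	{									\
		/* i_private is the client passed to debugfs_create_file() */	\
		return single_open(file, name, inode->i_private);		\
	}									\
										\
	static const struct file_operations name##_fops = {			\
		.open		= name##_open,					\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	};
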
diff --git a/fs/ceph/messenger.c b/net/ceph/messenger.c index 2502d76fcec1..0e8157ee5d43 100644 --- a/fs/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/crc32c.h> | 3 | #include <linux/crc32c.h> |
| 4 | #include <linux/ctype.h> | 4 | #include <linux/ctype.h> |
| @@ -9,12 +9,14 @@ | |||
| 9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
| 10 | #include <linux/socket.h> | 10 | #include <linux/socket.h> |
| 11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 12 | #include <linux/bio.h> | ||
| 13 | #include <linux/blkdev.h> | ||
| 12 | #include <net/tcp.h> | 14 | #include <net/tcp.h> |
| 13 | 15 | ||
| 14 | #include "super.h" | 16 | #include <linux/ceph/libceph.h> |
| 15 | #include "messenger.h" | 17 | #include <linux/ceph/messenger.h> |
| 16 | #include "decode.h" | 18 | #include <linux/ceph/decode.h> |
| 17 | #include "pagelist.h" | 19 | #include <linux/ceph/pagelist.h> |
| 18 | 20 | ||
| 19 | /* | 21 | /* |
| 20 | * Ceph uses the messenger to exchange ceph_msg messages with other | 22 | * Ceph uses the messenger to exchange ceph_msg messages with other |
| @@ -48,7 +50,7 @@ static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN]; | |||
| 48 | static DEFINE_SPINLOCK(addr_str_lock); | 50 | static DEFINE_SPINLOCK(addr_str_lock); |
| 49 | static int last_addr_str; | 51 | static int last_addr_str; |
| 50 | 52 | ||
| 51 | const char *pr_addr(const struct sockaddr_storage *ss) | 53 | const char *ceph_pr_addr(const struct sockaddr_storage *ss) |
| 52 | { | 54 | { |
| 53 | int i; | 55 | int i; |
| 54 | char *s; | 56 | char *s; |
| @@ -79,6 +81,7 @@ const char *pr_addr(const struct sockaddr_storage *ss) | |||
| 79 | 81 | ||
| 80 | return s; | 82 | return s; |
| 81 | } | 83 | } |
| 84 | EXPORT_SYMBOL(ceph_pr_addr); | ||
| 82 | 85 | ||
| 83 | static void encode_my_addr(struct ceph_messenger *msgr) | 86 | static void encode_my_addr(struct ceph_messenger *msgr) |
| 84 | { | 87 | { |
| @@ -91,7 +94,7 @@ static void encode_my_addr(struct ceph_messenger *msgr) | |||
| 91 | */ | 94 | */ |
| 92 | struct workqueue_struct *ceph_msgr_wq; | 95 | struct workqueue_struct *ceph_msgr_wq; |
| 93 | 96 | ||
| 94 | int __init ceph_msgr_init(void) | 97 | int ceph_msgr_init(void) |
| 95 | { | 98 | { |
| 96 | ceph_msgr_wq = create_workqueue("ceph-msgr"); | 99 | ceph_msgr_wq = create_workqueue("ceph-msgr"); |
| 97 | if (IS_ERR(ceph_msgr_wq)) { | 100 | if (IS_ERR(ceph_msgr_wq)) { |
| @@ -102,16 +105,19 @@ int __init ceph_msgr_init(void) | |||
| 102 | } | 105 | } |
| 103 | return 0; | 106 | return 0; |
| 104 | } | 107 | } |
| 108 | EXPORT_SYMBOL(ceph_msgr_init); | ||
| 105 | 109 | ||
| 106 | void ceph_msgr_exit(void) | 110 | void ceph_msgr_exit(void) |
| 107 | { | 111 | { |
| 108 | destroy_workqueue(ceph_msgr_wq); | 112 | destroy_workqueue(ceph_msgr_wq); |
| 109 | } | 113 | } |
| 114 | EXPORT_SYMBOL(ceph_msgr_exit); | ||
| 110 | 115 | ||
| 111 | void ceph_msgr_flush(void) | 116 | void ceph_msgr_flush(void) |
| 112 | { | 117 | { |
| 113 | flush_workqueue(ceph_msgr_wq); | 118 | flush_workqueue(ceph_msgr_wq); |
| 114 | } | 119 | } |
| 120 | EXPORT_SYMBOL(ceph_msgr_flush); | ||
| 115 | 121 | ||
| 116 | 122 | ||
| 117 | /* | 123 | /* |
| @@ -221,19 +227,19 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) | |||
| 221 | 227 | ||
| 222 | set_sock_callbacks(sock, con); | 228 | set_sock_callbacks(sock, con); |
| 223 | 229 | ||
| 224 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); | 230 | dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); |
| 225 | 231 | ||
| 226 | ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), | 232 | ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), |
| 227 | O_NONBLOCK); | 233 | O_NONBLOCK); |
| 228 | if (ret == -EINPROGRESS) { | 234 | if (ret == -EINPROGRESS) { |
| 229 | dout("connect %s EINPROGRESS sk_state = %u\n", | 235 | dout("connect %s EINPROGRESS sk_state = %u\n", |
| 230 | pr_addr(&con->peer_addr.in_addr), | 236 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 231 | sock->sk->sk_state); | 237 | sock->sk->sk_state); |
| 232 | ret = 0; | 238 | ret = 0; |
| 233 | } | 239 | } |
| 234 | if (ret < 0) { | 240 | if (ret < 0) { |
| 235 | pr_err("connect %s error %d\n", | 241 | pr_err("connect %s error %d\n", |
| 236 | pr_addr(&con->peer_addr.in_addr), ret); | 242 | ceph_pr_addr(&con->peer_addr.in_addr), ret); |
| 237 | sock_release(sock); | 243 | sock_release(sock); |
| 238 | con->sock = NULL; | 244 | con->sock = NULL; |
| 239 | con->error_msg = "connect error"; | 245 | con->error_msg = "connect error"; |
| @@ -334,7 +340,8 @@ static void reset_connection(struct ceph_connection *con) | |||
| 334 | */ | 340 | */ |
| 335 | void ceph_con_close(struct ceph_connection *con) | 341 | void ceph_con_close(struct ceph_connection *con) |
| 336 | { | 342 | { |
| 337 | dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr)); | 343 | dout("con_close %p peer %s\n", con, |
| 344 | ceph_pr_addr(&con->peer_addr.in_addr)); | ||
| 338 | set_bit(CLOSED, &con->state); /* in case there's queued work */ | 345 | set_bit(CLOSED, &con->state); /* in case there's queued work */ |
| 339 | clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ | 346 | clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ |
| 340 | clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ | 347 | clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ |
| @@ -347,19 +354,21 @@ void ceph_con_close(struct ceph_connection *con) | |||
| 347 | mutex_unlock(&con->mutex); | 354 | mutex_unlock(&con->mutex); |
| 348 | queue_con(con); | 355 | queue_con(con); |
| 349 | } | 356 | } |
| 357 | EXPORT_SYMBOL(ceph_con_close); | ||
| 350 | 358 | ||
| 351 | /* | 359 | /* |
| 352 | * Reopen a closed connection, with a new peer address. | 360 | * Reopen a closed connection, with a new peer address. |
| 353 | */ | 361 | */ |
| 354 | void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) | 362 | void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) |
| 355 | { | 363 | { |
| 356 | dout("con_open %p %s\n", con, pr_addr(&addr->in_addr)); | 364 | dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); |
| 357 | set_bit(OPENING, &con->state); | 365 | set_bit(OPENING, &con->state); |
| 358 | clear_bit(CLOSED, &con->state); | 366 | clear_bit(CLOSED, &con->state); |
| 359 | memcpy(&con->peer_addr, addr, sizeof(*addr)); | 367 | memcpy(&con->peer_addr, addr, sizeof(*addr)); |
| 360 | con->delay = 0; /* reset backoff memory */ | 368 | con->delay = 0; /* reset backoff memory */ |
| 361 | queue_con(con); | 369 | queue_con(con); |
| 362 | } | 370 | } |
| 371 | EXPORT_SYMBOL(ceph_con_open); | ||
| 363 | 372 | ||
| 364 | /* | 373 | /* |
| 365 | * return true if this connection ever successfully opened | 374 | * return true if this connection ever successfully opened |
| @@ -406,6 +415,7 @@ void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) | |||
| 406 | INIT_LIST_HEAD(&con->out_sent); | 415 | INIT_LIST_HEAD(&con->out_sent); |
| 407 | INIT_DELAYED_WORK(&con->work, con_work); | 416 | INIT_DELAYED_WORK(&con->work, con_work); |
| 408 | } | 417 | } |
| 418 | EXPORT_SYMBOL(ceph_con_init); | ||
| 409 | 419 | ||
| 410 | 420 | ||
| 411 | /* | 421 | /* |
| @@ -529,8 +539,11 @@ static void prepare_write_message(struct ceph_connection *con) | |||
| 529 | if (le32_to_cpu(m->hdr.data_len) > 0) { | 539 | if (le32_to_cpu(m->hdr.data_len) > 0) { |
| 530 | /* initialize page iterator */ | 540 | /* initialize page iterator */ |
| 531 | con->out_msg_pos.page = 0; | 541 | con->out_msg_pos.page = 0; |
| 532 | con->out_msg_pos.page_pos = | 542 | if (m->pages) |
| 533 | le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK; | 543 | con->out_msg_pos.page_pos = |
| 544 | le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK; | ||
| 545 | else | ||
| 546 | con->out_msg_pos.page_pos = 0; | ||
| 534 | con->out_msg_pos.data_pos = 0; | 547 | con->out_msg_pos.data_pos = 0; |
| 535 | con->out_msg_pos.did_page_crc = 0; | 548 | con->out_msg_pos.did_page_crc = 0; |
| 536 | con->out_more = 1; /* data + footer will follow */ | 549 | con->out_more = 1; /* data + footer will follow */ |
| @@ -647,7 +660,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr, | |||
| 647 | dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, | 660 | dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, |
| 648 | con->connect_seq, global_seq, proto); | 661 | con->connect_seq, global_seq, proto); |
| 649 | 662 | ||
| 650 | con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED); | 663 | con->out_connect.features = cpu_to_le64(msgr->supported_features); |
| 651 | con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); | 664 | con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); |
| 652 | con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); | 665 | con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); |
| 653 | con->out_connect.global_seq = cpu_to_le32(global_seq); | 666 | con->out_connect.global_seq = cpu_to_le32(global_seq); |
| @@ -712,6 +725,31 @@ out: | |||
| 712 | return ret; /* done! */ | 725 | return ret; /* done! */ |
| 713 | } | 726 | } |
| 714 | 727 | ||
| 728 | #ifdef CONFIG_BLOCK | ||
| 729 | static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) | ||
| 730 | { | ||
| 731 | if (!bio) { | ||
| 732 | *iter = NULL; | ||
| 733 | *seg = 0; | ||
| 734 | return; | ||
| 735 | } | ||
| 736 | *iter = bio; | ||
| 737 | *seg = bio->bi_idx; | ||
| 738 | } | ||
| 739 | |||
| 740 | static void iter_bio_next(struct bio **bio_iter, int *seg) | ||
| 741 | { | ||
| 742 | if (*bio_iter == NULL) | ||
| 743 | return; | ||
| 744 | |||
| 745 | BUG_ON(*seg >= (*bio_iter)->bi_vcnt); | ||
| 746 | |||
| 747 | (*seg)++; | ||
| 748 | if (*seg == (*bio_iter)->bi_vcnt) | ||
| 749 | init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); | ||
| 750 | } | ||
| 751 | #endif | ||
| 752 | |||
| 715 | /* | 753 | /* |
| 716 | * Write as much message data payload as we can. If we finish, queue | 754 | * Write as much message data payload as we can. If we finish, queue |
| 717 | * up the footer. | 755 | * up the footer. |
| @@ -726,21 +764,46 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
| 726 | size_t len; | 764 | size_t len; |
| 727 | int crc = con->msgr->nocrc; | 765 | int crc = con->msgr->nocrc; |
| 728 | int ret; | 766 | int ret; |
| 767 | int total_max_write; | ||
| 768 | int in_trail = 0; | ||
| 769 | size_t trail_len = (msg->trail ? msg->trail->length : 0); | ||
| 729 | 770 | ||
| 730 | dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", | 771 | dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", |
| 731 | con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, | 772 | con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, |
| 732 | con->out_msg_pos.page_pos); | 773 | con->out_msg_pos.page_pos); |
| 733 | 774 | ||
| 734 | while (con->out_msg_pos.page < con->out_msg->nr_pages) { | 775 | #ifdef CONFIG_BLOCK |
| 776 | if (msg->bio && !msg->bio_iter) | ||
| 777 | init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); | ||
| 778 | #endif | ||
| 779 | |||
| 780 | while (data_len > con->out_msg_pos.data_pos) { | ||
| 735 | struct page *page = NULL; | 781 | struct page *page = NULL; |
| 736 | void *kaddr = NULL; | 782 | void *kaddr = NULL; |
| 783 | int max_write = PAGE_SIZE; | ||
| 784 | int page_shift = 0; | ||
| 785 | |||
| 786 | total_max_write = data_len - trail_len - | ||
| 787 | con->out_msg_pos.data_pos; | ||
| 737 | 788 | ||
| 738 | /* | 789 | /* |
| 739 | * if we are calculating the data crc (the default), we need | 790 | * if we are calculating the data crc (the default), we need |
| 740 | * to map the page. if our pages[] has been revoked, use the | 791 | * to map the page. if our pages[] has been revoked, use the |
| 741 | * zero page. | 792 | * zero page. |
| 742 | */ | 793 | */ |
| 743 | if (msg->pages) { | 794 | |
| 795 | /* have we reached the trail part of the data? */ | ||
| 796 | if (con->out_msg_pos.data_pos >= data_len - trail_len) { | ||
| 797 | in_trail = 1; | ||
| 798 | |||
| 799 | total_max_write = data_len - con->out_msg_pos.data_pos; | ||
| 800 | |||
| 801 | page = list_first_entry(&msg->trail->head, | ||
| 802 | struct page, lru); | ||
| 803 | if (crc) | ||
| 804 | kaddr = kmap(page); | ||
| 805 | max_write = PAGE_SIZE; | ||
| 806 | } else if (msg->pages) { | ||
| 744 | page = msg->pages[con->out_msg_pos.page]; | 807 | page = msg->pages[con->out_msg_pos.page]; |
| 745 | if (crc) | 808 | if (crc) |
| 746 | kaddr = kmap(page); | 809 | kaddr = kmap(page); |
| @@ -749,13 +812,25 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
| 749 | struct page, lru); | 812 | struct page, lru); |
| 750 | if (crc) | 813 | if (crc) |
| 751 | kaddr = kmap(page); | 814 | kaddr = kmap(page); |
| 815 | #ifdef CONFIG_BLOCK | ||
| 816 | } else if (msg->bio) { | ||
| 817 | struct bio_vec *bv; | ||
| 818 | |||
| 819 | bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); | ||
| 820 | page = bv->bv_page; | ||
| 821 | page_shift = bv->bv_offset; | ||
| 822 | if (crc) | ||
| 823 | kaddr = kmap(page) + page_shift; | ||
| 824 | max_write = bv->bv_len; | ||
| 825 | #endif | ||
| 752 | } else { | 826 | } else { |
| 753 | page = con->msgr->zero_page; | 827 | page = con->msgr->zero_page; |
| 754 | if (crc) | 828 | if (crc) |
| 755 | kaddr = page_address(con->msgr->zero_page); | 829 | kaddr = page_address(con->msgr->zero_page); |
| 756 | } | 830 | } |
| 757 | len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos), | 831 | len = min_t(int, max_write - con->out_msg_pos.page_pos, |
| 758 | (int)(data_len - con->out_msg_pos.data_pos)); | 832 | total_max_write); |
| 833 | |||
| 759 | if (crc && !con->out_msg_pos.did_page_crc) { | 834 | if (crc && !con->out_msg_pos.did_page_crc) { |
| 760 | void *base = kaddr + con->out_msg_pos.page_pos; | 835 | void *base = kaddr + con->out_msg_pos.page_pos; |
| 761 | u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc); | 836 | u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc); |
| @@ -765,13 +840,14 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
| 765 | cpu_to_le32(crc32c(tmpcrc, base, len)); | 840 | cpu_to_le32(crc32c(tmpcrc, base, len)); |
| 766 | con->out_msg_pos.did_page_crc = 1; | 841 | con->out_msg_pos.did_page_crc = 1; |
| 767 | } | 842 | } |
| 768 | |||
| 769 | ret = kernel_sendpage(con->sock, page, | 843 | ret = kernel_sendpage(con->sock, page, |
| 770 | con->out_msg_pos.page_pos, len, | 844 | con->out_msg_pos.page_pos + page_shift, |
| 845 | len, | ||
| 771 | MSG_DONTWAIT | MSG_NOSIGNAL | | 846 | MSG_DONTWAIT | MSG_NOSIGNAL | |
| 772 | MSG_MORE); | 847 | MSG_MORE); |
| 773 | 848 | ||
| 774 | if (crc && (msg->pages || msg->pagelist)) | 849 | if (crc && |
| 850 | (msg->pages || msg->pagelist || msg->bio || in_trail)) | ||
| 775 | kunmap(page); | 851 | kunmap(page); |
| 776 | 852 | ||
| 777 | if (ret <= 0) | 853 | if (ret <= 0) |
| @@ -783,9 +859,16 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
| 783 | con->out_msg_pos.page_pos = 0; | 859 | con->out_msg_pos.page_pos = 0; |
| 784 | con->out_msg_pos.page++; | 860 | con->out_msg_pos.page++; |
| 785 | con->out_msg_pos.did_page_crc = 0; | 861 | con->out_msg_pos.did_page_crc = 0; |
| 786 | if (msg->pagelist) | 862 | if (in_trail) |
| 863 | list_move_tail(&page->lru, | ||
| 864 | &msg->trail->head); | ||
| 865 | else if (msg->pagelist) | ||
| 787 | list_move_tail(&page->lru, | 866 | list_move_tail(&page->lru, |
| 788 | &msg->pagelist->head); | 867 | &msg->pagelist->head); |
| 868 | #ifdef CONFIG_BLOCK | ||
| 869 | else if (msg->bio) | ||
| 870 | iter_bio_next(&msg->bio_iter, &msg->bio_seg); | ||
| 871 | #endif | ||
| 789 | } | 872 | } |
| 790 | } | 873 | } |
| 791 | 874 | ||
| @@ -938,7 +1021,7 @@ static int verify_hello(struct ceph_connection *con) | |||
| 938 | { | 1021 | { |
| 939 | if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { | 1022 | if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { |
| 940 | pr_err("connect to %s got bad banner\n", | 1023 | pr_err("connect to %s got bad banner\n", |
| 941 | pr_addr(&con->peer_addr.in_addr)); | 1024 | ceph_pr_addr(&con->peer_addr.in_addr)); |
| 942 | con->error_msg = "protocol error, bad banner"; | 1025 | con->error_msg = "protocol error, bad banner"; |
| 943 | return -1; | 1026 | return -1; |
| 944 | } | 1027 | } |
| @@ -1041,7 +1124,7 @@ int ceph_parse_ips(const char *c, const char *end, | |||
| 1041 | 1124 | ||
| 1042 | addr_set_port(ss, port); | 1125 | addr_set_port(ss, port); |
| 1043 | 1126 | ||
| 1044 | dout("parse_ips got %s\n", pr_addr(ss)); | 1127 | dout("parse_ips got %s\n", ceph_pr_addr(ss)); |
| 1045 | 1128 | ||
| 1046 | if (p == end) | 1129 | if (p == end) |
| 1047 | break; | 1130 | break; |
| @@ -1061,6 +1144,7 @@ bad: | |||
| 1061 | pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); | 1144 | pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); |
| 1062 | return -EINVAL; | 1145 | return -EINVAL; |
| 1063 | } | 1146 | } |
| 1147 | EXPORT_SYMBOL(ceph_parse_ips); | ||
| 1064 | 1148 | ||
| 1065 | static int process_banner(struct ceph_connection *con) | 1149 | static int process_banner(struct ceph_connection *con) |
| 1066 | { | 1150 | { |
| @@ -1082,9 +1166,9 @@ static int process_banner(struct ceph_connection *con) | |||
| 1082 | !(addr_is_blank(&con->actual_peer_addr.in_addr) && | 1166 | !(addr_is_blank(&con->actual_peer_addr.in_addr) && |
| 1083 | con->actual_peer_addr.nonce == con->peer_addr.nonce)) { | 1167 | con->actual_peer_addr.nonce == con->peer_addr.nonce)) { |
| 1084 | pr_warning("wrong peer, want %s/%d, got %s/%d\n", | 1168 | pr_warning("wrong peer, want %s/%d, got %s/%d\n", |
| 1085 | pr_addr(&con->peer_addr.in_addr), | 1169 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 1086 | (int)le32_to_cpu(con->peer_addr.nonce), | 1170 | (int)le32_to_cpu(con->peer_addr.nonce), |
| 1087 | pr_addr(&con->actual_peer_addr.in_addr), | 1171 | ceph_pr_addr(&con->actual_peer_addr.in_addr), |
| 1088 | (int)le32_to_cpu(con->actual_peer_addr.nonce)); | 1172 | (int)le32_to_cpu(con->actual_peer_addr.nonce)); |
| 1089 | con->error_msg = "wrong peer at address"; | 1173 | con->error_msg = "wrong peer at address"; |
| 1090 | return -1; | 1174 | return -1; |
| @@ -1102,7 +1186,7 @@ static int process_banner(struct ceph_connection *con) | |||
| 1102 | addr_set_port(&con->msgr->inst.addr.in_addr, port); | 1186 | addr_set_port(&con->msgr->inst.addr.in_addr, port); |
| 1103 | encode_my_addr(con->msgr); | 1187 | encode_my_addr(con->msgr); |
| 1104 | dout("process_banner learned my addr is %s\n", | 1188 | dout("process_banner learned my addr is %s\n", |
| 1105 | pr_addr(&con->msgr->inst.addr.in_addr)); | 1189 | ceph_pr_addr(&con->msgr->inst.addr.in_addr)); |
| 1106 | } | 1190 | } |
| 1107 | 1191 | ||
| 1108 | set_bit(NEGOTIATING, &con->state); | 1192 | set_bit(NEGOTIATING, &con->state); |
| @@ -1123,8 +1207,8 @@ static void fail_protocol(struct ceph_connection *con) | |||
| 1123 | 1207 | ||
| 1124 | static int process_connect(struct ceph_connection *con) | 1208 | static int process_connect(struct ceph_connection *con) |
| 1125 | { | 1209 | { |
| 1126 | u64 sup_feat = CEPH_FEATURE_SUPPORTED; | 1210 | u64 sup_feat = con->msgr->supported_features; |
| 1127 | u64 req_feat = CEPH_FEATURE_REQUIRED; | 1211 | u64 req_feat = con->msgr->required_features; |
| 1128 | u64 server_feat = le64_to_cpu(con->in_reply.features); | 1212 | u64 server_feat = le64_to_cpu(con->in_reply.features); |
| 1129 | 1213 | ||
| 1130 | dout("process_connect on %p tag %d\n", con, (int)con->in_tag); | 1214 | dout("process_connect on %p tag %d\n", con, (int)con->in_tag); |
| @@ -1134,7 +1218,7 @@ static int process_connect(struct ceph_connection *con) | |||
| 1134 | pr_err("%s%lld %s feature set mismatch," | 1218 | pr_err("%s%lld %s feature set mismatch," |
| 1135 | " my %llx < server's %llx, missing %llx\n", | 1219 | " my %llx < server's %llx, missing %llx\n", |
| 1136 | ENTITY_NAME(con->peer_name), | 1220 | ENTITY_NAME(con->peer_name), |
| 1137 | pr_addr(&con->peer_addr.in_addr), | 1221 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 1138 | sup_feat, server_feat, server_feat & ~sup_feat); | 1222 | sup_feat, server_feat, server_feat & ~sup_feat); |
| 1139 | con->error_msg = "missing required protocol features"; | 1223 | con->error_msg = "missing required protocol features"; |
| 1140 | fail_protocol(con); | 1224 | fail_protocol(con); |
| @@ -1144,7 +1228,7 @@ static int process_connect(struct ceph_connection *con) | |||
| 1144 | pr_err("%s%lld %s protocol version mismatch," | 1228 | pr_err("%s%lld %s protocol version mismatch," |
| 1145 | " my %d != server's %d\n", | 1229 | " my %d != server's %d\n", |
| 1146 | ENTITY_NAME(con->peer_name), | 1230 | ENTITY_NAME(con->peer_name), |
| 1147 | pr_addr(&con->peer_addr.in_addr), | 1231 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 1148 | le32_to_cpu(con->out_connect.protocol_version), | 1232 | le32_to_cpu(con->out_connect.protocol_version), |
| 1149 | le32_to_cpu(con->in_reply.protocol_version)); | 1233 | le32_to_cpu(con->in_reply.protocol_version)); |
| 1150 | con->error_msg = "protocol version mismatch"; | 1234 | con->error_msg = "protocol version mismatch"; |
| @@ -1178,7 +1262,7 @@ static int process_connect(struct ceph_connection *con) | |||
| 1178 | le32_to_cpu(con->in_connect.connect_seq)); | 1262 | le32_to_cpu(con->in_connect.connect_seq)); |
| 1179 | pr_err("%s%lld %s connection reset\n", | 1263 | pr_err("%s%lld %s connection reset\n", |
| 1180 | ENTITY_NAME(con->peer_name), | 1264 | ENTITY_NAME(con->peer_name), |
| 1181 | pr_addr(&con->peer_addr.in_addr)); | 1265 | ceph_pr_addr(&con->peer_addr.in_addr)); |
| 1182 | reset_connection(con); | 1266 | reset_connection(con); |
| 1183 | prepare_write_connect(con->msgr, con, 0); | 1267 | prepare_write_connect(con->msgr, con, 0); |
| 1184 | prepare_read_connect(con); | 1268 | prepare_read_connect(con); |
| @@ -1223,7 +1307,7 @@ static int process_connect(struct ceph_connection *con) | |||
| 1223 | pr_err("%s%lld %s protocol feature mismatch," | 1307 | pr_err("%s%lld %s protocol feature mismatch," |
| 1224 | " my required %llx > server's %llx, need %llx\n", | 1308 | " my required %llx > server's %llx, need %llx\n", |
| 1225 | ENTITY_NAME(con->peer_name), | 1309 | ENTITY_NAME(con->peer_name), |
| 1226 | pr_addr(&con->peer_addr.in_addr), | 1310 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 1227 | req_feat, server_feat, req_feat & ~server_feat); | 1311 | req_feat, server_feat, req_feat & ~server_feat); |
| 1228 | con->error_msg = "missing required protocol features"; | 1312 | con->error_msg = "missing required protocol features"; |
| 1229 | fail_protocol(con); | 1313 | fail_protocol(con); |
| @@ -1305,8 +1389,7 @@ static int read_partial_message_section(struct ceph_connection *con, | |||
| 1305 | struct kvec *section, | 1389 | struct kvec *section, |
| 1306 | unsigned int sec_len, u32 *crc) | 1390 | unsigned int sec_len, u32 *crc) |
| 1307 | { | 1391 | { |
| 1308 | int left; | 1392 | int ret, left; |
| 1309 | int ret; | ||
| 1310 | 1393 | ||
| 1311 | BUG_ON(!section); | 1394 | BUG_ON(!section); |
| 1312 | 1395 | ||
| @@ -1329,13 +1412,83 @@ static int read_partial_message_section(struct ceph_connection *con, | |||
| 1329 | static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, | 1412 | static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, |
| 1330 | struct ceph_msg_header *hdr, | 1413 | struct ceph_msg_header *hdr, |
| 1331 | int *skip); | 1414 | int *skip); |
| 1415 | |||
| 1416 | |||
| 1417 | static int read_partial_message_pages(struct ceph_connection *con, | ||
| 1418 | struct page **pages, | ||
| 1419 | unsigned data_len, int datacrc) | ||
| 1420 | { | ||
| 1421 | void *p; | ||
| 1422 | int ret; | ||
| 1423 | int left; | ||
| 1424 | |||
| 1425 | left = min((int)(data_len - con->in_msg_pos.data_pos), | ||
| 1426 | (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); | ||
| 1427 | /* (page) data */ | ||
| 1428 | BUG_ON(pages == NULL); | ||
| 1429 | p = kmap(pages[con->in_msg_pos.page]); | ||
| 1430 | ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, | ||
| 1431 | left); | ||
| 1432 | if (ret > 0 && datacrc) | ||
| 1433 | con->in_data_crc = | ||
| 1434 | crc32c(con->in_data_crc, | ||
| 1435 | p + con->in_msg_pos.page_pos, ret); | ||
| 1436 | kunmap(pages[con->in_msg_pos.page]); | ||
| 1437 | if (ret <= 0) | ||
| 1438 | return ret; | ||
| 1439 | con->in_msg_pos.data_pos += ret; | ||
| 1440 | con->in_msg_pos.page_pos += ret; | ||
| 1441 | if (con->in_msg_pos.page_pos == PAGE_SIZE) { | ||
| 1442 | con->in_msg_pos.page_pos = 0; | ||
| 1443 | con->in_msg_pos.page++; | ||
| 1444 | } | ||
| 1445 | |||
| 1446 | return ret; | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | #ifdef CONFIG_BLOCK | ||
| 1450 | static int read_partial_message_bio(struct ceph_connection *con, | ||
| 1451 | struct bio **bio_iter, int *bio_seg, | ||
| 1452 | unsigned data_len, int datacrc) | ||
| 1453 | { | ||
| 1454 | struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); | ||
| 1455 | void *p; | ||
| 1456 | int ret, left; | ||
| 1457 | |||
| 1458 | if (IS_ERR(bv)) | ||
| 1459 | return PTR_ERR(bv); | ||
| 1460 | |||
| 1461 | left = min((int)(data_len - con->in_msg_pos.data_pos), | ||
| 1462 | (int)(bv->bv_len - con->in_msg_pos.page_pos)); | ||
| 1463 | |||
| 1464 | p = kmap(bv->bv_page) + bv->bv_offset; | ||
| 1465 | |||
| 1466 | ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, | ||
| 1467 | left); | ||
| 1468 | if (ret > 0 && datacrc) | ||
| 1469 | con->in_data_crc = | ||
| 1470 | crc32c(con->in_data_crc, | ||
| 1471 | p + con->in_msg_pos.page_pos, ret); | ||
| 1472 | kunmap(bv->bv_page); | ||
| 1473 | if (ret <= 0) | ||
| 1474 | return ret; | ||
| 1475 | con->in_msg_pos.data_pos += ret; | ||
| 1476 | con->in_msg_pos.page_pos += ret; | ||
| 1477 | if (con->in_msg_pos.page_pos == bv->bv_len) { | ||
| 1478 | con->in_msg_pos.page_pos = 0; | ||
| 1479 | iter_bio_next(bio_iter, bio_seg); | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | return ret; | ||
| 1483 | } | ||
| 1484 | #endif | ||
| 1485 | |||
| 1332 | /* | 1486 | /* |
| 1333 | * read (part of) a message. | 1487 | * read (part of) a message. |
| 1334 | */ | 1488 | */ |
| 1335 | static int read_partial_message(struct ceph_connection *con) | 1489 | static int read_partial_message(struct ceph_connection *con) |
| 1336 | { | 1490 | { |
| 1337 | struct ceph_msg *m = con->in_msg; | 1491 | struct ceph_msg *m = con->in_msg; |
| 1338 | void *p; | ||
| 1339 | int ret; | 1492 | int ret; |
| 1340 | int to, left; | 1493 | int to, left; |
| 1341 | unsigned front_len, middle_len, data_len, data_off; | 1494 | unsigned front_len, middle_len, data_len, data_off; |
| @@ -1381,7 +1534,7 @@ static int read_partial_message(struct ceph_connection *con) | |||
| 1381 | if ((s64)seq - (s64)con->in_seq < 1) { | 1534 | if ((s64)seq - (s64)con->in_seq < 1) { |
| 1382 | pr_info("skipping %s%lld %s seq %lld, expected %lld\n", | 1535 | pr_info("skipping %s%lld %s seq %lld, expected %lld\n", |
| 1383 | ENTITY_NAME(con->peer_name), | 1536 | ENTITY_NAME(con->peer_name), |
| 1384 | pr_addr(&con->peer_addr.in_addr), | 1537 | ceph_pr_addr(&con->peer_addr.in_addr), |
| 1385 | seq, con->in_seq + 1); | 1538 | seq, con->in_seq + 1); |
| 1386 | con->in_base_pos = -front_len - middle_len - data_len - | 1539 | con->in_base_pos = -front_len - middle_len - data_len - |
| 1387 | sizeof(m->footer); | 1540 | sizeof(m->footer); |
| @@ -1422,7 +1575,10 @@ static int read_partial_message(struct ceph_connection *con) | |||
| 1422 | m->middle->vec.iov_len = 0; | 1575 | m->middle->vec.iov_len = 0; |
| 1423 | 1576 | ||
| 1424 | con->in_msg_pos.page = 0; | 1577 | con->in_msg_pos.page = 0; |
| 1425 | con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; | 1578 | if (m->pages) |
| 1579 | con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; | ||
| 1580 | else | ||
| 1581 | con->in_msg_pos.page_pos = 0; | ||
| 1426 | con->in_msg_pos.data_pos = 0; | 1582 | con->in_msg_pos.data_pos = 0; |
| 1427 | } | 1583 | } |
| 1428 | 1584 | ||
| @@ -1440,27 +1596,29 @@ static int read_partial_message(struct ceph_connection *con) | |||
| 1440 | if (ret <= 0) | 1596 | if (ret <= 0) |
| 1441 | return ret; | 1597 | return ret; |
| 1442 | } | 1598 | } |
| 1599 | #ifdef CONFIG_BLOCK | ||
| 1600 | if (m->bio && !m->bio_iter) | ||
| 1601 | init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); | ||
| 1602 | #endif | ||
| 1443 | 1603 | ||
| 1444 | /* (page) data */ | 1604 | /* (page) data */ |
| 1445 | while (con->in_msg_pos.data_pos < data_len) { | 1605 | while (con->in_msg_pos.data_pos < data_len) { |
| 1446 | left = min((int)(data_len - con->in_msg_pos.data_pos), | 1606 | if (m->pages) { |
| 1447 | (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); | 1607 | ret = read_partial_message_pages(con, m->pages, |
| 1448 | BUG_ON(m->pages == NULL); | 1608 | data_len, datacrc); |
| 1449 | p = kmap(m->pages[con->in_msg_pos.page]); | 1609 | if (ret <= 0) |
| 1450 | ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, | 1610 | return ret; |
| 1451 | left); | 1611 | #ifdef CONFIG_BLOCK |
| 1452 | if (ret > 0 && datacrc) | 1612 | } else if (m->bio) { |
| 1453 | con->in_data_crc = | 1613 | |
| 1454 | crc32c(con->in_data_crc, | 1614 | ret = read_partial_message_bio(con, |
| 1455 | p + con->in_msg_pos.page_pos, ret); | 1615 | &m->bio_iter, &m->bio_seg, |
| 1456 | kunmap(m->pages[con->in_msg_pos.page]); | 1616 | data_len, datacrc); |
| 1457 | if (ret <= 0) | 1617 | if (ret <= 0) |
| 1458 | return ret; | 1618 | return ret; |
| 1459 | con->in_msg_pos.data_pos += ret; | 1619 | #endif |
| 1460 | con->in_msg_pos.page_pos += ret; | 1620 | } else { |
| 1461 | if (con->in_msg_pos.page_pos == PAGE_SIZE) { | 1621 | BUG_ON(1); |
| 1462 | con->in_msg_pos.page_pos = 0; | ||
| 1463 | con->in_msg_pos.page++; | ||
| 1464 | } | 1622 | } |
| 1465 | } | 1623 | } |
| 1466 | 1624 | ||
| @@ -1874,9 +2032,9 @@ out: | |||
| 1874 | static void ceph_fault(struct ceph_connection *con) | 2032 | static void ceph_fault(struct ceph_connection *con) |
| 1875 | { | 2033 | { |
| 1876 | pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), | 2034 | pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), |
| 1877 | pr_addr(&con->peer_addr.in_addr), con->error_msg); | 2035 | ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); |
| 1878 | dout("fault %p state %lu to peer %s\n", | 2036 | dout("fault %p state %lu to peer %s\n", |
| 1879 | con, con->state, pr_addr(&con->peer_addr.in_addr)); | 2037 | con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); |
| 1880 | 2038 | ||
| 1881 | if (test_bit(LOSSYTX, &con->state)) { | 2039 | if (test_bit(LOSSYTX, &con->state)) { |
| 1882 | dout("fault on LOSSYTX channel\n"); | 2040 | dout("fault on LOSSYTX channel\n"); |
| @@ -1936,7 +2094,9 @@ out: | |||
| 1936 | /* | 2094 | /* |
| 1937 | * create a new messenger instance | 2095 | * create a new messenger instance |
| 1938 | */ | 2096 | */ |
| 1939 | struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) | 2097 | struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr, |
| 2098 | u32 supported_features, | ||
| 2099 | u32 required_features) | ||
| 1940 | { | 2100 | { |
| 1941 | struct ceph_messenger *msgr; | 2101 | struct ceph_messenger *msgr; |
| 1942 | 2102 | ||
| @@ -1944,6 +2104,9 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) | |||
| 1944 | if (msgr == NULL) | 2104 | if (msgr == NULL) |
| 1945 | return ERR_PTR(-ENOMEM); | 2105 | return ERR_PTR(-ENOMEM); |
| 1946 | 2106 | ||
| 2107 | msgr->supported_features = supported_features; | ||
| 2108 | msgr->required_features = required_features; | ||
| 2109 | |||
| 1947 | spin_lock_init(&msgr->global_seq_lock); | 2110 | spin_lock_init(&msgr->global_seq_lock); |
| 1948 | 2111 | ||
| 1949 | /* the zero page is needed if a request is "canceled" while the message | 2112 | /* the zero page is needed if a request is "canceled" while the message |
| @@ -1966,6 +2129,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) | |||
| 1966 | dout("messenger_create %p\n", msgr); | 2129 | dout("messenger_create %p\n", msgr); |
| 1967 | return msgr; | 2130 | return msgr; |
| 1968 | } | 2131 | } |
| 2132 | EXPORT_SYMBOL(ceph_messenger_create); | ||
| 1969 | 2133 | ||
| 1970 | void ceph_messenger_destroy(struct ceph_messenger *msgr) | 2134 | void ceph_messenger_destroy(struct ceph_messenger *msgr) |
| 1971 | { | 2135 | { |
| @@ -1975,6 +2139,7 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr) | |||
| 1975 | kfree(msgr); | 2139 | kfree(msgr); |
| 1976 | dout("destroyed messenger %p\n", msgr); | 2140 | dout("destroyed messenger %p\n", msgr); |
| 1977 | } | 2141 | } |
| 2142 | EXPORT_SYMBOL(ceph_messenger_destroy); | ||
| 1978 | 2143 | ||
| 1979 | /* | 2144 | /* |
| 1980 | * Queue up an outgoing message on the given connection. | 2145 | * Queue up an outgoing message on the given connection. |
| @@ -2011,6 +2176,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 2011 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2176 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
| 2012 | queue_con(con); | 2177 | queue_con(con); |
| 2013 | } | 2178 | } |
| 2179 | EXPORT_SYMBOL(ceph_con_send); | ||
| 2014 | 2180 | ||
| 2015 | /* | 2181 | /* |
| 2016 | * Revoke a message that was previously queued for send | 2182 | * Revoke a message that was previously queued for send |
| @@ -2076,6 +2242,7 @@ void ceph_con_keepalive(struct ceph_connection *con) | |||
| 2076 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2242 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
| 2077 | queue_con(con); | 2243 | queue_con(con); |
| 2078 | } | 2244 | } |
| 2245 | EXPORT_SYMBOL(ceph_con_keepalive); | ||
| 2079 | 2246 | ||
| 2080 | 2247 | ||
| 2081 | /* | 2248 | /* |
| @@ -2136,6 +2303,10 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) | |||
| 2136 | m->nr_pages = 0; | 2303 | m->nr_pages = 0; |
| 2137 | m->pages = NULL; | 2304 | m->pages = NULL; |
| 2138 | m->pagelist = NULL; | 2305 | m->pagelist = NULL; |
| 2306 | m->bio = NULL; | ||
| 2307 | m->bio_iter = NULL; | ||
| 2308 | m->bio_seg = 0; | ||
| 2309 | m->trail = NULL; | ||
| 2139 | 2310 | ||
| 2140 | dout("ceph_msg_new %p front %d\n", m, front_len); | 2311 | dout("ceph_msg_new %p front %d\n", m, front_len); |
| 2141 | return m; | 2312 | return m; |
| @@ -2146,6 +2317,7 @@ out: | |||
| 2146 | pr_err("msg_new can't create type %d front %d\n", type, front_len); | 2317 | pr_err("msg_new can't create type %d front %d\n", type, front_len); |
| 2147 | return NULL; | 2318 | return NULL; |
| 2148 | } | 2319 | } |
| 2320 | EXPORT_SYMBOL(ceph_msg_new); | ||
| 2149 | 2321 | ||
| 2150 | /* | 2322 | /* |
| 2151 | * Allocate "middle" portion of a message, if it is needed and wasn't | 2323 | * Allocate "middle" portion of a message, if it is needed and wasn't |
| @@ -2250,11 +2422,14 @@ void ceph_msg_last_put(struct kref *kref) | |||
| 2250 | m->pagelist = NULL; | 2422 | m->pagelist = NULL; |
| 2251 | } | 2423 | } |
| 2252 | 2424 | ||
| 2425 | m->trail = NULL; | ||
| 2426 | |||
| 2253 | if (m->pool) | 2427 | if (m->pool) |
| 2254 | ceph_msgpool_put(m->pool, m); | 2428 | ceph_msgpool_put(m->pool, m); |
| 2255 | else | 2429 | else |
| 2256 | ceph_msg_kfree(m); | 2430 | ceph_msg_kfree(m); |
| 2257 | } | 2431 | } |
| 2432 | EXPORT_SYMBOL(ceph_msg_last_put); | ||
| 2258 | 2433 | ||
| 2259 | void ceph_msg_dump(struct ceph_msg *msg) | 2434 | void ceph_msg_dump(struct ceph_msg *msg) |
| 2260 | { | 2435 | { |
| @@ -2275,3 +2450,4 @@ void ceph_msg_dump(struct ceph_msg *msg) | |||
| 2275 | DUMP_PREFIX_OFFSET, 16, 1, | 2450 | DUMP_PREFIX_OFFSET, 16, 1, |
| 2276 | &msg->footer, sizeof(msg->footer), true); | 2451 | &msg->footer, sizeof(msg->footer), true); |
| 2277 | } | 2452 | } |
| 2453 | EXPORT_SYMBOL(ceph_msg_dump); | ||
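
Beyond the pr_addr() to ceph_pr_addr() rename and the EXPORT_SYMBOL annotations, the messenger now accepts two extra data sources on an outgoing message: a block-layer bio chain (msg->bio, walked with init_bio_iter()/iter_bio_next()) and a "trail" pagelist that write_partial_msg_pages() always sends last. No caller in this patch sets them yet; a hedged sketch of how a block client might attach them before ceph_con_send() — the helper name and its parameters are illustrative only:

	/* Illustrative only: hand a bio chain plus a trailing pagelist to
	 * the messenger.  data_len must cover the bio bytes plus
	 * trail->length; the trail pages are transmitted after the bio. */
	static void attach_block_payload(struct ceph_msg *msg, struct bio *bio,
					 struct ceph_pagelist *trail,
					 u32 data_len)
	{
		msg->bio = bio;
		msg->trail = trail;
		msg->hdr.data_len = cpu_to_le32(data_len);
		msg->hdr.data_off = cpu_to_le16(0);
	}
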
diff --git a/fs/ceph/mon_client.c b/net/ceph/mon_client.c index b2a5a3e4a671..8a079399174a 100644 --- a/fs/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
| @@ -1,14 +1,16 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/module.h> | ||
| 3 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 5 | #include <linux/random.h> | 6 | #include <linux/random.h> |
| 6 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
| 7 | 8 | ||
| 8 | #include "mon_client.h" | 9 | #include <linux/ceph/mon_client.h> |
| 9 | #include "super.h" | 10 | #include <linux/ceph/libceph.h> |
| 10 | #include "auth.h" | 11 | #include <linux/ceph/decode.h> |
| 11 | #include "decode.h" | 12 | |
| 13 | #include <linux/ceph/auth.h> | ||
| 12 | 14 | ||
| 13 | /* | 15 | /* |
| 14 | * Interact with Ceph monitor cluster. Handle requests for new map | 16 | * Interact with Ceph monitor cluster. Handle requests for new map |
| @@ -74,7 +76,7 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) | |||
| 74 | m->num_mon); | 76 | m->num_mon); |
| 75 | for (i = 0; i < m->num_mon; i++) | 77 | for (i = 0; i < m->num_mon; i++) |
| 76 | dout("monmap_decode mon%d is %s\n", i, | 78 | dout("monmap_decode mon%d is %s\n", i, |
| 77 | pr_addr(&m->mon_inst[i].addr.in_addr)); | 79 | ceph_pr_addr(&m->mon_inst[i].addr.in_addr)); |
| 78 | return m; | 80 | return m; |
| 79 | 81 | ||
| 80 | bad: | 82 | bad: |
| @@ -191,30 +193,33 @@ static void __send_subscribe(struct ceph_mon_client *monc) | |||
| 191 | struct ceph_msg *msg = monc->m_subscribe; | 193 | struct ceph_msg *msg = monc->m_subscribe; |
| 192 | struct ceph_mon_subscribe_item *i; | 194 | struct ceph_mon_subscribe_item *i; |
| 193 | void *p, *end; | 195 | void *p, *end; |
| 196 | int num; | ||
| 194 | 197 | ||
| 195 | p = msg->front.iov_base; | 198 | p = msg->front.iov_base; |
| 196 | end = p + msg->front_max; | 199 | end = p + msg->front_max; |
| 197 | 200 | ||
| 198 | dout("__send_subscribe to 'mdsmap' %u+\n", | 201 | num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; |
| 199 | (unsigned)monc->have_mdsmap); | 202 | ceph_encode_32(&p, num); |
| 203 | |||
| 200 | if (monc->want_next_osdmap) { | 204 | if (monc->want_next_osdmap) { |
| 201 | dout("__send_subscribe to 'osdmap' %u\n", | 205 | dout("__send_subscribe to 'osdmap' %u\n", |
| 202 | (unsigned)monc->have_osdmap); | 206 | (unsigned)monc->have_osdmap); |
| 203 | ceph_encode_32(&p, 3); | ||
| 204 | ceph_encode_string(&p, end, "osdmap", 6); | 207 | ceph_encode_string(&p, end, "osdmap", 6); |
| 205 | i = p; | 208 | i = p; |
| 206 | i->have = cpu_to_le64(monc->have_osdmap); | 209 | i->have = cpu_to_le64(monc->have_osdmap); |
| 207 | i->onetime = 1; | 210 | i->onetime = 1; |
| 208 | p += sizeof(*i); | 211 | p += sizeof(*i); |
| 209 | monc->want_next_osdmap = 2; /* requested */ | 212 | monc->want_next_osdmap = 2; /* requested */ |
| 210 | } else { | ||
| 211 | ceph_encode_32(&p, 2); | ||
| 212 | } | 213 | } |
| 213 | ceph_encode_string(&p, end, "mdsmap", 6); | 214 | if (monc->want_mdsmap) { |
| 214 | i = p; | 215 | dout("__send_subscribe to 'mdsmap' %u+\n", |
| 215 | i->have = cpu_to_le64(monc->have_mdsmap); | 216 | (unsigned)monc->have_mdsmap); |
| 216 | i->onetime = 0; | 217 | ceph_encode_string(&p, end, "mdsmap", 6); |
| 217 | p += sizeof(*i); | 218 | i = p; |
| 219 | i->have = cpu_to_le64(monc->have_mdsmap); | ||
| 220 | i->onetime = 0; | ||
| 221 | p += sizeof(*i); | ||
| 222 | } | ||
| 218 | ceph_encode_string(&p, end, "monmap", 6); | 223 | ceph_encode_string(&p, end, "monmap", 6); |
| 219 | i = p; | 224 | i = p; |
| 220 | i->have = 0; | 225 | i->have = 0; |
| @@ -243,7 +248,8 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, | |||
| 243 | mutex_lock(&monc->mutex); | 248 | mutex_lock(&monc->mutex); |
| 244 | if (monc->hunting) { | 249 | if (monc->hunting) { |
| 245 | pr_info("mon%d %s session established\n", | 250 | pr_info("mon%d %s session established\n", |
| 246 | monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr)); | 251 | monc->cur_mon, |
| 252 | ceph_pr_addr(&monc->con->peer_addr.in_addr)); | ||
| 247 | monc->hunting = false; | 253 | monc->hunting = false; |
| 248 | } | 254 | } |
| 249 | dout("handle_subscribe_ack after %d seconds\n", seconds); | 255 | dout("handle_subscribe_ack after %d seconds\n", seconds); |
| @@ -266,6 +272,7 @@ int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) | |||
| 266 | mutex_unlock(&monc->mutex); | 272 | mutex_unlock(&monc->mutex); |
| 267 | return 0; | 273 | return 0; |
| 268 | } | 274 | } |
| 275 | EXPORT_SYMBOL(ceph_monc_got_mdsmap); | ||
| 269 | 276 | ||
| 270 | int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) | 277 | int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) |
| 271 | { | 278 | { |
| @@ -310,6 +317,7 @@ int ceph_monc_open_session(struct ceph_mon_client *monc) | |||
| 310 | mutex_unlock(&monc->mutex); | 317 | mutex_unlock(&monc->mutex); |
| 311 | return 0; | 318 | return 0; |
| 312 | } | 319 | } |
| 320 | EXPORT_SYMBOL(ceph_monc_open_session); | ||
| 313 | 321 | ||
| 314 | /* | 322 | /* |
| 315 | * The monitor responds with mount ack indicate mount success. The | 323 | * The monitor responds with mount ack indicate mount success. The |
| @@ -540,6 +548,7 @@ out: | |||
| 540 | kref_put(&req->kref, release_generic_request); | 548 | kref_put(&req->kref, release_generic_request); |
| 541 | return err; | 549 | return err; |
| 542 | } | 550 | } |
| 551 | EXPORT_SYMBOL(ceph_monc_do_statfs); | ||
| 543 | 552 | ||
| 544 | /* | 553 | /* |
| 545 | * pool ops | 554 | * pool ops |
| @@ -651,6 +660,7 @@ int ceph_monc_create_snapid(struct ceph_mon_client *monc, | |||
| 651 | pool, 0, (char *)snapid, sizeof(*snapid)); | 660 | pool, 0, (char *)snapid, sizeof(*snapid)); |
| 652 | 661 | ||
| 653 | } | 662 | } |
| 663 | EXPORT_SYMBOL(ceph_monc_create_snapid); | ||
| 654 | 664 | ||
| 655 | int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | 665 | int ceph_monc_delete_snapid(struct ceph_mon_client *monc, |
| 656 | u32 pool, u64 snapid) | 666 | u32 pool, u64 snapid) |
| @@ -708,9 +718,9 @@ static void delayed_work(struct work_struct *work) | |||
| 708 | */ | 718 | */ |
| 709 | static int build_initial_monmap(struct ceph_mon_client *monc) | 719 | static int build_initial_monmap(struct ceph_mon_client *monc) |
| 710 | { | 720 | { |
| 711 | struct ceph_mount_args *args = monc->client->mount_args; | 721 | struct ceph_options *opt = monc->client->options; |
| 712 | struct ceph_entity_addr *mon_addr = args->mon_addr; | 722 | struct ceph_entity_addr *mon_addr = opt->mon_addr; |
| 713 | int num_mon = args->num_mon; | 723 | int num_mon = opt->num_mon; |
| 714 | int i; | 724 | int i; |
| 715 | 725 | ||
| 716 | /* build initial monmap */ | 726 | /* build initial monmap */ |
| @@ -728,11 +738,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc) | |||
| 728 | } | 738 | } |
| 729 | monc->monmap->num_mon = num_mon; | 739 | monc->monmap->num_mon = num_mon; |
| 730 | monc->have_fsid = false; | 740 | monc->have_fsid = false; |
| 731 | |||
| 732 | /* release addr memory */ | ||
| 733 | kfree(args->mon_addr); | ||
| 734 | args->mon_addr = NULL; | ||
| 735 | args->num_mon = 0; | ||
| 736 | return 0; | 741 | return 0; |
| 737 | } | 742 | } |
| 738 | 743 | ||
| @@ -753,8 +758,8 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) | |||
| 753 | monc->con = NULL; | 758 | monc->con = NULL; |
| 754 | 759 | ||
| 755 | /* authentication */ | 760 | /* authentication */ |
| 756 | monc->auth = ceph_auth_init(cl->mount_args->name, | 761 | monc->auth = ceph_auth_init(cl->options->name, |
| 757 | cl->mount_args->secret); | 762 | cl->options->secret); |
| 758 | if (IS_ERR(monc->auth)) | 763 | if (IS_ERR(monc->auth)) |
| 759 | return PTR_ERR(monc->auth); | 764 | return PTR_ERR(monc->auth); |
| 760 | monc->auth->want_keys = | 765 | monc->auth->want_keys = |
| @@ -808,6 +813,7 @@ out_monmap: | |||
| 808 | out: | 813 | out: |
| 809 | return err; | 814 | return err; |
| 810 | } | 815 | } |
| 816 | EXPORT_SYMBOL(ceph_monc_init); | ||
| 811 | 817 | ||
| 812 | void ceph_monc_stop(struct ceph_mon_client *monc) | 818 | void ceph_monc_stop(struct ceph_mon_client *monc) |
| 813 | { | 819 | { |
| @@ -832,6 +838,7 @@ void ceph_monc_stop(struct ceph_mon_client *monc) | |||
| 832 | 838 | ||
| 833 | kfree(monc->monmap); | 839 | kfree(monc->monmap); |
| 834 | } | 840 | } |
| 841 | EXPORT_SYMBOL(ceph_monc_stop); | ||
| 835 | 842 | ||
| 836 | static void handle_auth_reply(struct ceph_mon_client *monc, | 843 | static void handle_auth_reply(struct ceph_mon_client *monc, |
| 837 | struct ceph_msg *msg) | 844 | struct ceph_msg *msg) |
| @@ -889,6 +896,7 @@ int ceph_monc_validate_auth(struct ceph_mon_client *monc) | |||
| 889 | mutex_unlock(&monc->mutex); | 896 | mutex_unlock(&monc->mutex); |
| 890 | return ret; | 897 | return ret; |
| 891 | } | 898 | } |
| 899 | EXPORT_SYMBOL(ceph_monc_validate_auth); | ||
| 892 | 900 | ||
| 893 | /* | 901 | /* |
| 894 | * handle incoming message | 902 | * handle incoming message |
| @@ -922,15 +930,16 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 922 | ceph_monc_handle_map(monc, msg); | 930 | ceph_monc_handle_map(monc, msg); |
| 923 | break; | 931 | break; |
| 924 | 932 | ||
| 925 | case CEPH_MSG_MDS_MAP: | ||
| 926 | ceph_mdsc_handle_map(&monc->client->mdsc, msg); | ||
| 927 | break; | ||
| 928 | |||
| 929 | case CEPH_MSG_OSD_MAP: | 933 | case CEPH_MSG_OSD_MAP: |
| 930 | ceph_osdc_handle_map(&monc->client->osdc, msg); | 934 | ceph_osdc_handle_map(&monc->client->osdc, msg); |
| 931 | break; | 935 | break; |
| 932 | 936 | ||
| 933 | default: | 937 | default: |
| 938 | /* can the chained handler handle it? */ | ||
| 939 | if (monc->client->extra_mon_dispatch && | ||
| 940 | monc->client->extra_mon_dispatch(monc->client, msg) == 0) | ||
| 941 | break; | ||
| 942 | |||
| 934 | pr_err("received unknown message type %d %s\n", type, | 943 | pr_err("received unknown message type %d %s\n", type, |
| 935 | ceph_msg_type_name(type)); | 944 | ceph_msg_type_name(type)); |
| 936 | } | 945 | } |
| @@ -994,7 +1003,7 @@ static void mon_fault(struct ceph_connection *con) | |||
| 994 | if (monc->con && !monc->hunting) | 1003 | if (monc->con && !monc->hunting) |
| 995 | pr_info("mon%d %s session lost, " | 1004 | pr_info("mon%d %s session lost, " |
| 996 | "hunting for new mon\n", monc->cur_mon, | 1005 | "hunting for new mon\n", monc->cur_mon, |
| 997 | pr_addr(&monc->con->peer_addr.in_addr)); | 1006 | ceph_pr_addr(&monc->con->peer_addr.in_addr)); |
| 998 | 1007 | ||
| 999 | __close_session(monc); | 1008 | __close_session(monc); |
| 1000 | if (!monc->hunting) { | 1009 | if (!monc->hunting) { |
diff --git a/fs/ceph/msgpool.c b/net/ceph/msgpool.c index dd65a6438131..d5f2d97ac05c 100644 --- a/fs/ceph/msgpool.c +++ b/net/ceph/msgpool.c | |||
| @@ -1,11 +1,11 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/err.h> | 3 | #include <linux/err.h> |
| 4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
| 6 | #include <linux/vmalloc.h> | 6 | #include <linux/vmalloc.h> |
| 7 | 7 | ||
| 8 | #include "msgpool.h" | 8 | #include <linux/ceph/msgpool.h> |
| 9 | 9 | ||
| 10 | static void *alloc_fn(gfp_t gfp_mask, void *arg) | 10 | static void *alloc_fn(gfp_t gfp_mask, void *arg) |
| 11 | { | 11 | { |
diff --git a/fs/ceph/osd_client.c b/net/ceph/osd_client.c index dfced1dacbcd..79391994b3ed 100644 --- a/fs/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -1,17 +1,22 @@ | |||
| 1 | #include "ceph_debug.h" | 1 | #include <linux/ceph/ceph_debug.h> |
| 2 | 2 | ||
| 3 | #include <linux/module.h> | ||
| 3 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 4 | #include <linux/highmem.h> | 5 | #include <linux/highmem.h> |
| 5 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
| 6 | #include <linux/pagemap.h> | 7 | #include <linux/pagemap.h> |
| 7 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 8 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
| 10 | #ifdef CONFIG_BLOCK | ||
| 11 | #include <linux/bio.h> | ||
| 12 | #endif | ||
| 9 | 13 | ||
| 10 | #include "super.h" | 14 | #include <linux/ceph/libceph.h> |
| 11 | #include "osd_client.h" | 15 | #include <linux/ceph/osd_client.h> |
| 12 | #include "messenger.h" | 16 | #include <linux/ceph/messenger.h> |
| 13 | #include "decode.h" | 17 | #include <linux/ceph/decode.h> |
| 14 | #include "auth.h" | 18 | #include <linux/ceph/auth.h> |
| 19 | #include <linux/ceph/pagelist.h> | ||
| 15 | 20 | ||
| 16 | #define OSD_OP_FRONT_LEN 4096 | 21 | #define OSD_OP_FRONT_LEN 4096 |
| 17 | #define OSD_OPREPLY_FRONT_LEN 512 | 22 | #define OSD_OPREPLY_FRONT_LEN 512 |
| @@ -22,6 +27,59 @@ static int __kick_requests(struct ceph_osd_client *osdc, | |||
| 22 | 27 | ||
| 23 | static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); | 28 | static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); |
| 24 | 29 | ||
| 30 | static int op_needs_trail(int op) | ||
| 31 | { | ||
| 32 | switch (op) { | ||
| 33 | case CEPH_OSD_OP_GETXATTR: | ||
| 34 | case CEPH_OSD_OP_SETXATTR: | ||
| 35 | case CEPH_OSD_OP_CMPXATTR: | ||
| 36 | case CEPH_OSD_OP_CALL: | ||
| 37 | return 1; | ||
| 38 | default: | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | } | ||
| 42 | |||
| 43 | static int op_has_extent(int op) | ||
| 44 | { | ||
| 45 | return (op == CEPH_OSD_OP_READ || | ||
| 46 | op == CEPH_OSD_OP_WRITE); | ||
| 47 | } | ||
| 48 | |||
| 49 | void ceph_calc_raw_layout(struct ceph_osd_client *osdc, | ||
| 50 | struct ceph_file_layout *layout, | ||
| 51 | u64 snapid, | ||
| 52 | u64 off, u64 *plen, u64 *bno, | ||
| 53 | struct ceph_osd_request *req, | ||
| 54 | struct ceph_osd_req_op *op) | ||
| 55 | { | ||
| 56 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; | ||
| 57 | u64 orig_len = *plen; | ||
| 58 | u64 objoff, objlen; /* extent in object */ | ||
| 59 | |||
| 60 | reqhead->snapid = cpu_to_le64(snapid); | ||
| 61 | |||
| 62 | /* object extent? */ | ||
| 63 | ceph_calc_file_object_mapping(layout, off, plen, bno, | ||
| 64 | &objoff, &objlen); | ||
| 65 | if (*plen < orig_len) | ||
| 66 | dout(" skipping last %llu, final file extent %llu~%llu\n", | ||
| 67 | orig_len - *plen, off, *plen); | ||
| 68 | |||
| 69 | if (op_has_extent(op->op)) { | ||
| 70 | op->extent.offset = objoff; | ||
| 71 | op->extent.length = objlen; | ||
| 72 | } | ||
| 73 | req->r_num_pages = calc_pages_for(off, *plen); | ||
| 74 | if (op->op == CEPH_OSD_OP_WRITE) | ||
| 75 | op->payload_len = *plen; | ||
| 76 | |||
| 77 | dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", | ||
| 78 | *bno, objoff, objlen, req->r_num_pages); | ||
| 79 | |||
| 80 | } | ||
| 81 | EXPORT_SYMBOL(ceph_calc_raw_layout); | ||
| 82 | |||
| 25 | /* | 83 | /* |
| 26 | * Implement client access to distributed object storage cluster. | 84 | * Implement client access to distributed object storage cluster. |
| 27 | * | 85 | * |
| @@ -48,34 +106,19 @@ static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); | |||
| 48 | * fill osd op in request message. | 106 | * fill osd op in request message. |
| 49 | */ | 107 | */ |
| 50 | static void calc_layout(struct ceph_osd_client *osdc, | 108 | static void calc_layout(struct ceph_osd_client *osdc, |
| 51 | struct ceph_vino vino, struct ceph_file_layout *layout, | 109 | struct ceph_vino vino, |
| 110 | struct ceph_file_layout *layout, | ||
| 52 | u64 off, u64 *plen, | 111 | u64 off, u64 *plen, |
| 53 | struct ceph_osd_request *req) | 112 | struct ceph_osd_request *req, |
| 113 | struct ceph_osd_req_op *op) | ||
| 54 | { | 114 | { |
| 55 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; | ||
| 56 | struct ceph_osd_op *op = (void *)(reqhead + 1); | ||
| 57 | u64 orig_len = *plen; | ||
| 58 | u64 objoff, objlen; /* extent in object */ | ||
| 59 | u64 bno; | 115 | u64 bno; |
| 60 | 116 | ||
| 61 | reqhead->snapid = cpu_to_le64(vino.snap); | 117 | ceph_calc_raw_layout(osdc, layout, vino.snap, off, |
| 62 | 118 | plen, &bno, req, op); | |
| 63 | /* object extent? */ | ||
| 64 | ceph_calc_file_object_mapping(layout, off, plen, &bno, | ||
| 65 | &objoff, &objlen); | ||
| 66 | if (*plen < orig_len) | ||
| 67 | dout(" skipping last %llu, final file extent %llu~%llu\n", | ||
| 68 | orig_len - *plen, off, *plen); | ||
| 69 | 119 | ||
| 70 | sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno); | 120 | sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno); |
| 71 | req->r_oid_len = strlen(req->r_oid); | 121 | req->r_oid_len = strlen(req->r_oid); |
| 72 | |||
| 73 | op->extent.offset = cpu_to_le64(objoff); | ||
| 74 | op->extent.length = cpu_to_le64(objlen); | ||
| 75 | req->r_num_pages = calc_pages_for(off, *plen); | ||
| 76 | |||
| 77 | dout("calc_layout %s (%d) %llu~%llu (%d pages)\n", | ||
| 78 | req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages); | ||
| 79 | } | 122 | } |
| 80 | 123 | ||
| 81 | /* | 124 | /* |
| @@ -101,56 +144,66 @@ void ceph_osdc_release_request(struct kref *kref) | |||
| 101 | if (req->r_own_pages) | 144 | if (req->r_own_pages) |
| 102 | ceph_release_page_vector(req->r_pages, | 145 | ceph_release_page_vector(req->r_pages, |
| 103 | req->r_num_pages); | 146 | req->r_num_pages); |
| 147 | #ifdef CONFIG_BLOCK | ||
| 148 | if (req->r_bio) | ||
| 149 | bio_put(req->r_bio); | ||
| 150 | #endif | ||
| 104 | ceph_put_snap_context(req->r_snapc); | 151 | ceph_put_snap_context(req->r_snapc); |
| 152 | if (req->r_trail) { | ||
| 153 | ceph_pagelist_release(req->r_trail); | ||
| 154 | kfree(req->r_trail); | ||
| 155 | } | ||
| 105 | if (req->r_mempool) | 156 | if (req->r_mempool) |
| 106 | mempool_free(req, req->r_osdc->req_mempool); | 157 | mempool_free(req, req->r_osdc->req_mempool); |
| 107 | else | 158 | else |
| 108 | kfree(req); | 159 | kfree(req); |
| 109 | } | 160 | } |
| 161 | EXPORT_SYMBOL(ceph_osdc_release_request); | ||
| 110 | 162 | ||
| 111 | /* | 163 | static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail) |
| 112 | * build new request AND message, calculate layout, and adjust file | 164 | { |
| 113 | * extent as needed. | 165 | int i = 0; |
| 114 | * | 166 | |
| 115 | * if the file was recently truncated, we include information about its | 167 | if (needs_trail) |
| 116 | * old and new size so that the object can be updated appropriately. (we | 168 | *needs_trail = 0; |
| 117 | * avoid synchronously deleting truncated objects because it's slow.) | 169 | while (ops[i].op) { |
| 118 | * | 170 | if (needs_trail && op_needs_trail(ops[i].op)) |
| 119 | * if @do_sync, include a 'startsync' command so that the osd will flush | 171 | *needs_trail = 1; |
| 120 | * data quickly. | 172 | i++; |
| 121 | */ | 173 | } |
| 122 | struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | 174 | |
| 123 | struct ceph_file_layout *layout, | 175 | return i; |
| 124 | struct ceph_vino vino, | 176 | } |
| 125 | u64 off, u64 *plen, | 177 | |
| 126 | int opcode, int flags, | 178 | struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, |
| 179 | int flags, | ||
| 127 | struct ceph_snap_context *snapc, | 180 | struct ceph_snap_context *snapc, |
| 128 | int do_sync, | 181 | struct ceph_osd_req_op *ops, |
| 129 | u32 truncate_seq, | 182 | bool use_mempool, |
| 130 | u64 truncate_size, | 183 | gfp_t gfp_flags, |
| 131 | struct timespec *mtime, | 184 | struct page **pages, |
| 132 | bool use_mempool, int num_reply) | 185 | struct bio *bio) |
| 133 | { | 186 | { |
| 134 | struct ceph_osd_request *req; | 187 | struct ceph_osd_request *req; |
| 135 | struct ceph_msg *msg; | 188 | struct ceph_msg *msg; |
| 136 | struct ceph_osd_request_head *head; | 189 | int needs_trail; |
| 137 | struct ceph_osd_op *op; | 190 | int num_op = get_num_ops(ops, &needs_trail); |
| 138 | void *p; | 191 | size_t msg_size = sizeof(struct ceph_osd_request_head); |
| 139 | int num_op = 1 + do_sync; | 192 | |
| 140 | size_t msg_size = sizeof(*head) + num_op*sizeof(*op); | 193 | msg_size += num_op*sizeof(struct ceph_osd_op); |
| 141 | int i; | ||
| 142 | 194 | ||
| 143 | if (use_mempool) { | 195 | if (use_mempool) { |
| 144 | req = mempool_alloc(osdc->req_mempool, GFP_NOFS); | 196 | req = mempool_alloc(osdc->req_mempool, gfp_flags); |
| 145 | memset(req, 0, sizeof(*req)); | 197 | memset(req, 0, sizeof(*req)); |
| 146 | } else { | 198 | } else { |
| 147 | req = kzalloc(sizeof(*req), GFP_NOFS); | 199 | req = kzalloc(sizeof(*req), gfp_flags); |
| 148 | } | 200 | } |
| 149 | if (req == NULL) | 201 | if (req == NULL) |
| 150 | return NULL; | 202 | return NULL; |
| 151 | 203 | ||
| 152 | req->r_osdc = osdc; | 204 | req->r_osdc = osdc; |
| 153 | req->r_mempool = use_mempool; | 205 | req->r_mempool = use_mempool; |
| 206 | |||
| 154 | kref_init(&req->r_kref); | 207 | kref_init(&req->r_kref); |
| 155 | init_completion(&req->r_completion); | 208 | init_completion(&req->r_completion); |
| 156 | init_completion(&req->r_safe_completion); | 209 | init_completion(&req->r_safe_completion); |
| @@ -164,13 +217,22 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 164 | msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); | 217 | msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); |
| 165 | else | 218 | else |
| 166 | msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, | 219 | msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, |
| 167 | OSD_OPREPLY_FRONT_LEN, GFP_NOFS); | 220 | OSD_OPREPLY_FRONT_LEN, gfp_flags); |
| 168 | if (!msg) { | 221 | if (!msg) { |
| 169 | ceph_osdc_put_request(req); | 222 | ceph_osdc_put_request(req); |
| 170 | return NULL; | 223 | return NULL; |
| 171 | } | 224 | } |
| 172 | req->r_reply = msg; | 225 | req->r_reply = msg; |
| 173 | 226 | ||
| 227 | /* allocate space for the trailing data */ | ||
| 228 | if (needs_trail) { | ||
| 229 | req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags); | ||
| 230 | if (!req->r_trail) { | ||
| 231 | ceph_osdc_put_request(req); | ||
| 232 | return NULL; | ||
| 233 | } | ||
| 234 | ceph_pagelist_init(req->r_trail); | ||
| 235 | } | ||
| 174 | /* create request message; allow space for oid */ | 236 | /* create request message; allow space for oid */ |
| 175 | msg_size += 40; | 237 | msg_size += 40; |
| 176 | if (snapc) | 238 | if (snapc) |
| @@ -178,18 +240,115 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 178 | if (use_mempool) | 240 | if (use_mempool) |
| 179 | msg = ceph_msgpool_get(&osdc->msgpool_op, 0); | 241 | msg = ceph_msgpool_get(&osdc->msgpool_op, 0); |
| 180 | else | 242 | else |
| 181 | msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, GFP_NOFS); | 243 | msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags); |
| 182 | if (!msg) { | 244 | if (!msg) { |
| 183 | ceph_osdc_put_request(req); | 245 | ceph_osdc_put_request(req); |
| 184 | return NULL; | 246 | return NULL; |
| 185 | } | 247 | } |
| 248 | |||
| 186 | msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); | 249 | msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); |
| 187 | memset(msg->front.iov_base, 0, msg->front.iov_len); | 250 | memset(msg->front.iov_base, 0, msg->front.iov_len); |
| 251 | |||
| 252 | req->r_request = msg; | ||
| 253 | req->r_pages = pages; | ||
| 254 | #ifdef CONFIG_BLOCK | ||
| 255 | if (bio) { | ||
| 256 | req->r_bio = bio; | ||
| 257 | bio_get(req->r_bio); | ||
| 258 | } | ||
| 259 | #endif | ||
| 260 | |||
| 261 | return req; | ||
| 262 | } | ||
| 263 | EXPORT_SYMBOL(ceph_osdc_alloc_request); | ||
| 264 | |||
| 265 | static void osd_req_encode_op(struct ceph_osd_request *req, | ||
| 266 | struct ceph_osd_op *dst, | ||
| 267 | struct ceph_osd_req_op *src) | ||
| 268 | { | ||
| 269 | dst->op = cpu_to_le16(src->op); | ||
| 270 | |||
| 271 | switch (dst->op) { | ||
| 272 | case CEPH_OSD_OP_READ: | ||
| 273 | case CEPH_OSD_OP_WRITE: | ||
| 274 | dst->extent.offset = | ||
| 275 | cpu_to_le64(src->extent.offset); | ||
| 276 | dst->extent.length = | ||
| 277 | cpu_to_le64(src->extent.length); | ||
| 278 | dst->extent.truncate_size = | ||
| 279 | cpu_to_le64(src->extent.truncate_size); | ||
| 280 | dst->extent.truncate_seq = | ||
| 281 | cpu_to_le32(src->extent.truncate_seq); | ||
| 282 | break; | ||
| 283 | |||
| 284 | case CEPH_OSD_OP_GETXATTR: | ||
| 285 | case CEPH_OSD_OP_SETXATTR: | ||
| 286 | case CEPH_OSD_OP_CMPXATTR: | ||
| 287 | BUG_ON(!req->r_trail); | ||
| 288 | |||
| 289 | dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); | ||
| 290 | dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); | ||
| 291 | dst->xattr.cmp_op = src->xattr.cmp_op; | ||
| 292 | dst->xattr.cmp_mode = src->xattr.cmp_mode; | ||
| 293 | ceph_pagelist_append(req->r_trail, src->xattr.name, | ||
| 294 | src->xattr.name_len); | ||
| 295 | ceph_pagelist_append(req->r_trail, src->xattr.val, | ||
| 296 | src->xattr.value_len); | ||
| 297 | break; | ||
| 298 | case CEPH_OSD_OP_CALL: | ||
| 299 | BUG_ON(!req->r_trail); | ||
| 300 | |||
| 301 | dst->cls.class_len = src->cls.class_len; | ||
| 302 | dst->cls.method_len = src->cls.method_len; | ||
| 303 | dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); | ||
| 304 | |||
| 305 | ceph_pagelist_append(req->r_trail, src->cls.class_name, | ||
| 306 | src->cls.class_len); | ||
| 307 | ceph_pagelist_append(req->r_trail, src->cls.method_name, | ||
| 308 | src->cls.method_len); | ||
| 309 | ceph_pagelist_append(req->r_trail, src->cls.indata, | ||
| 310 | src->cls.indata_len); | ||
| 311 | break; | ||
| 312 | case CEPH_OSD_OP_ROLLBACK: | ||
| 313 | dst->snap.snapid = cpu_to_le64(src->snap.snapid); | ||
| 314 | break; | ||
| 315 | case CEPH_OSD_OP_STARTSYNC: | ||
| 316 | break; | ||
| 317 | default: | ||
| 318 | pr_err("unrecognized osd opcode %d\n", dst->op); | ||
| 319 | WARN_ON(1); | ||
| 320 | break; | ||
| 321 | } | ||
| 322 | dst->payload_len = cpu_to_le32(src->payload_len); | ||
| 323 | } | ||
| 324 | |||
| 325 | /* | ||
| 326 | * build new request AND message | ||
| 327 | * | ||
| 328 | */ | ||
| 329 | void ceph_osdc_build_request(struct ceph_osd_request *req, | ||
| 330 | u64 off, u64 *plen, | ||
| 331 | struct ceph_osd_req_op *src_ops, | ||
| 332 | struct ceph_snap_context *snapc, | ||
| 333 | struct timespec *mtime, | ||
| 334 | const char *oid, | ||
| 335 | int oid_len) | ||
| 336 | { | ||
| 337 | struct ceph_msg *msg = req->r_request; | ||
| 338 | struct ceph_osd_request_head *head; | ||
| 339 | struct ceph_osd_req_op *src_op; | ||
| 340 | struct ceph_osd_op *op; | ||
| 341 | void *p; | ||
| 342 | int num_op = get_num_ops(src_ops, NULL); | ||
| 343 | size_t msg_size = sizeof(*head) + num_op*sizeof(*op); | ||
| 344 | int flags = req->r_flags; | ||
| 345 | u64 data_len = 0; | ||
| 346 | int i; | ||
| 347 | |||
| 188 | head = msg->front.iov_base; | 348 | head = msg->front.iov_base; |
| 189 | op = (void *)(head + 1); | 349 | op = (void *)(head + 1); |
| 190 | p = (void *)(op + num_op); | 350 | p = (void *)(op + num_op); |
| 191 | 351 | ||
| 192 | req->r_request = msg; | ||
| 193 | req->r_snapc = ceph_get_snap_context(snapc); | 352 | req->r_snapc = ceph_get_snap_context(snapc); |
| 194 | 353 | ||
| 195 | head->client_inc = cpu_to_le32(1); /* always, for now. */ | 354 | head->client_inc = cpu_to_le32(1); /* always, for now. */ |
| @@ -197,29 +356,23 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 197 | if (flags & CEPH_OSD_FLAG_WRITE) | 356 | if (flags & CEPH_OSD_FLAG_WRITE) |
| 198 | ceph_encode_timespec(&head->mtime, mtime); | 357 | ceph_encode_timespec(&head->mtime, mtime); |
| 199 | head->num_ops = cpu_to_le16(num_op); | 358 | head->num_ops = cpu_to_le16(num_op); |
| 200 | op->op = cpu_to_le16(opcode); | ||
| 201 | 359 | ||
| 202 | /* calculate max write size */ | ||
| 203 | calc_layout(osdc, vino, layout, off, plen, req); | ||
| 204 | req->r_file_layout = *layout; /* keep a copy */ | ||
| 205 | |||
| 206 | if (flags & CEPH_OSD_FLAG_WRITE) { | ||
| 207 | req->r_request->hdr.data_off = cpu_to_le16(off); | ||
| 208 | req->r_request->hdr.data_len = cpu_to_le32(*plen); | ||
| 209 | op->payload_len = cpu_to_le32(*plen); | ||
| 210 | } | ||
| 211 | op->extent.truncate_size = cpu_to_le64(truncate_size); | ||
| 212 | op->extent.truncate_seq = cpu_to_le32(truncate_seq); | ||
| 213 | 360 | ||
| 214 | /* fill in oid */ | 361 | /* fill in oid */ |
| 215 | head->object_len = cpu_to_le32(req->r_oid_len); | 362 | head->object_len = cpu_to_le32(oid_len); |
| 216 | memcpy(p, req->r_oid, req->r_oid_len); | 363 | memcpy(p, oid, oid_len); |
| 217 | p += req->r_oid_len; | 364 | p += oid_len; |
| 218 | 365 | ||
| 219 | if (do_sync) { | 366 | src_op = src_ops; |
| 367 | while (src_op->op) { | ||
| 368 | osd_req_encode_op(req, op, src_op); | ||
| 369 | src_op++; | ||
| 220 | op++; | 370 | op++; |
| 221 | op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC); | ||
| 222 | } | 371 | } |
| 372 | |||
| 373 | if (req->r_trail) | ||
| 374 | data_len += req->r_trail->length; | ||
| 375 | |||
| 223 | if (snapc) { | 376 | if (snapc) { |
| 224 | head->snap_seq = cpu_to_le64(snapc->seq); | 377 | head->snap_seq = cpu_to_le64(snapc->seq); |
| 225 | head->num_snaps = cpu_to_le32(snapc->num_snaps); | 378 | head->num_snaps = cpu_to_le32(snapc->num_snaps); |
| @@ -229,12 +382,79 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
| 229 | } | 382 | } |
| 230 | } | 383 | } |
| 231 | 384 | ||
| 385 | if (flags & CEPH_OSD_FLAG_WRITE) { | ||
| 386 | req->r_request->hdr.data_off = cpu_to_le16(off); | ||
| 387 | req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len); | ||
| 388 | } else if (data_len) { | ||
| 389 | req->r_request->hdr.data_off = 0; | ||
| 390 | req->r_request->hdr.data_len = cpu_to_le32(data_len); | ||
| 391 | } | ||
| 392 | |||
| 232 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); | 393 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); |
| 233 | msg_size = p - msg->front.iov_base; | 394 | msg_size = p - msg->front.iov_base; |
| 234 | msg->front.iov_len = msg_size; | 395 | msg->front.iov_len = msg_size; |
| 235 | msg->hdr.front_len = cpu_to_le32(msg_size); | 396 | msg->hdr.front_len = cpu_to_le32(msg_size); |
| 397 | return; | ||
| 398 | } | ||
| 399 | EXPORT_SYMBOL(ceph_osdc_build_request); | ||
| 400 | |||
| 401 | /* | ||
| 402 | * build new request AND message, calculate layout, and adjust file | ||
| 403 | * extent as needed. | ||
| 404 | * | ||
| 405 | * if the file was recently truncated, we include information about its | ||
| 406 | * old and new size so that the object can be updated appropriately. (we | ||
| 407 | * avoid synchronously deleting truncated objects because it's slow.) | ||
| 408 | * | ||
| 409 | * if @do_sync, include a 'startsync' command so that the osd will flush | ||
| 410 | * data quickly. | ||
| 411 | */ | ||
| 412 | struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | ||
| 413 | struct ceph_file_layout *layout, | ||
| 414 | struct ceph_vino vino, | ||
| 415 | u64 off, u64 *plen, | ||
| 416 | int opcode, int flags, | ||
| 417 | struct ceph_snap_context *snapc, | ||
| 418 | int do_sync, | ||
| 419 | u32 truncate_seq, | ||
| 420 | u64 truncate_size, | ||
| 421 | struct timespec *mtime, | ||
| 422 | bool use_mempool, int num_reply) | ||
| 423 | { | ||
| 424 | struct ceph_osd_req_op ops[3]; | ||
| 425 | struct ceph_osd_request *req; | ||
| 426 | |||
| 427 | ops[0].op = opcode; | ||
| 428 | ops[0].extent.truncate_seq = truncate_seq; | ||
| 429 | ops[0].extent.truncate_size = truncate_size; | ||
| 430 | ops[0].payload_len = 0; | ||
| 431 | |||
| 432 | if (do_sync) { | ||
| 433 | ops[1].op = CEPH_OSD_OP_STARTSYNC; | ||
| 434 | ops[1].payload_len = 0; | ||
| 435 | ops[2].op = 0; | ||
| 436 | } else | ||
| 437 | ops[1].op = 0; | ||
| 438 | |||
| 439 | req = ceph_osdc_alloc_request(osdc, flags, | ||
| 440 | snapc, ops, | ||
| 441 | use_mempool, | ||
| 442 | GFP_NOFS, NULL, NULL); | ||
| 443 | if (IS_ERR(req)) | ||
| 444 | return req; | ||
| 445 | |||
| 446 | /* calculate max write size */ | ||
| 447 | calc_layout(osdc, vino, layout, off, plen, req, ops); | ||
| 448 | req->r_file_layout = *layout; /* keep a copy */ | ||
| 449 | |||
| 450 | ceph_osdc_build_request(req, off, plen, ops, | ||
| 451 | snapc, | ||
| 452 | mtime, | ||
| 453 | req->r_oid, req->r_oid_len); | ||
| 454 | |||
| 236 | return req; | 455 | return req; |
| 237 | } | 456 | } |
| 457 | EXPORT_SYMBOL(ceph_osdc_new_request); | ||
| 238 | 458 | ||
| 239 | /* | 459 | /* |
| 240 | * We keep osd requests in an rbtree, sorted by ->r_tid. | 460 | * We keep osd requests in an rbtree, sorted by ->r_tid. |
| @@ -389,7 +609,7 @@ static void __move_osd_to_lru(struct ceph_osd_client *osdc, | |||
| 389 | dout("__move_osd_to_lru %p\n", osd); | 609 | dout("__move_osd_to_lru %p\n", osd); |
| 390 | BUG_ON(!list_empty(&osd->o_osd_lru)); | 610 | BUG_ON(!list_empty(&osd->o_osd_lru)); |
| 391 | list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); | 611 | list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); |
| 392 | osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ; | 612 | osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; |
| 393 | } | 613 | } |
| 394 | 614 | ||
| 395 | static void __remove_osd_from_lru(struct ceph_osd *osd) | 615 | static void __remove_osd_from_lru(struct ceph_osd *osd) |
| @@ -483,7 +703,7 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) | |||
| 483 | static void __schedule_osd_timeout(struct ceph_osd_client *osdc) | 703 | static void __schedule_osd_timeout(struct ceph_osd_client *osdc) |
| 484 | { | 704 | { |
| 485 | schedule_delayed_work(&osdc->timeout_work, | 705 | schedule_delayed_work(&osdc->timeout_work, |
| 486 | osdc->client->mount_args->osd_keepalive_timeout * HZ); | 706 | osdc->client->options->osd_keepalive_timeout * HZ); |
| 487 | } | 707 | } |
| 488 | 708 | ||
| 489 | static void __cancel_osd_timeout(struct ceph_osd_client *osdc) | 709 | static void __cancel_osd_timeout(struct ceph_osd_client *osdc) |
| @@ -549,7 +769,7 @@ static void __unregister_request(struct ceph_osd_client *osdc, | |||
| 549 | */ | 769 | */ |
| 550 | static void __cancel_request(struct ceph_osd_request *req) | 770 | static void __cancel_request(struct ceph_osd_request *req) |
| 551 | { | 771 | { |
| 552 | if (req->r_sent) { | 772 | if (req->r_sent && req->r_osd) { |
| 553 | ceph_con_revoke(&req->r_osd->o_con, req->r_request); | 773 | ceph_con_revoke(&req->r_osd->o_con, req->r_request); |
| 554 | req->r_sent = 0; | 774 | req->r_sent = 0; |
| 555 | } | 775 | } |
| @@ -684,9 +904,9 @@ static void handle_timeout(struct work_struct *work) | |||
| 684 | container_of(work, struct ceph_osd_client, timeout_work.work); | 904 | container_of(work, struct ceph_osd_client, timeout_work.work); |
| 685 | struct ceph_osd_request *req, *last_req = NULL; | 905 | struct ceph_osd_request *req, *last_req = NULL; |
| 686 | struct ceph_osd *osd; | 906 | struct ceph_osd *osd; |
| 687 | unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ; | 907 | unsigned long timeout = osdc->client->options->osd_timeout * HZ; |
| 688 | unsigned long keepalive = | 908 | unsigned long keepalive = |
| 689 | osdc->client->mount_args->osd_keepalive_timeout * HZ; | 909 | osdc->client->options->osd_keepalive_timeout * HZ; |
| 690 | unsigned long last_stamp = 0; | 910 | unsigned long last_stamp = 0; |
| 691 | struct rb_node *p; | 911 | struct rb_node *p; |
| 692 | struct list_head slow_osds; | 912 | struct list_head slow_osds; |
| @@ -773,7 +993,7 @@ static void handle_osds_timeout(struct work_struct *work) | |||
| 773 | container_of(work, struct ceph_osd_client, | 993 | container_of(work, struct ceph_osd_client, |
| 774 | osds_timeout_work.work); | 994 | osds_timeout_work.work); |
| 775 | unsigned long delay = | 995 | unsigned long delay = |
| 776 | osdc->client->mount_args->osd_idle_ttl * HZ >> 2; | 996 | osdc->client->options->osd_idle_ttl * HZ >> 2; |
| 777 | 997 | ||
| 778 | dout("osds timeout\n"); | 998 | dout("osds timeout\n"); |
| 779 | down_read(&osdc->map_sem); | 999 | down_read(&osdc->map_sem); |
| @@ -1104,6 +1324,10 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |||
| 1104 | 1324 | ||
| 1105 | req->r_request->pages = req->r_pages; | 1325 | req->r_request->pages = req->r_pages; |
| 1106 | req->r_request->nr_pages = req->r_num_pages; | 1326 | req->r_request->nr_pages = req->r_num_pages; |
| 1327 | #ifdef CONFIG_BLOCK | ||
| 1328 | req->r_request->bio = req->r_bio; | ||
| 1329 | #endif | ||
| 1330 | req->r_request->trail = req->r_trail; | ||
| 1107 | 1331 | ||
| 1108 | register_request(osdc, req); | 1332 | register_request(osdc, req); |
| 1109 | 1333 | ||
| @@ -1131,6 +1355,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |||
| 1131 | up_read(&osdc->map_sem); | 1355 | up_read(&osdc->map_sem); |
| 1132 | return rc; | 1356 | return rc; |
| 1133 | } | 1357 | } |
| 1358 | EXPORT_SYMBOL(ceph_osdc_start_request); | ||
| 1134 | 1359 | ||
| 1135 | /* | 1360 | /* |
| 1136 | * wait for a request to complete | 1361 | * wait for a request to complete |
| @@ -1153,6 +1378,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc, | |||
| 1153 | dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); | 1378 | dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); |
| 1154 | return req->r_result; | 1379 | return req->r_result; |
| 1155 | } | 1380 | } |
| 1381 | EXPORT_SYMBOL(ceph_osdc_wait_request); | ||
| 1156 | 1382 | ||
| 1157 | /* | 1383 | /* |
| 1158 | * sync - wait for all in-flight requests to flush. avoid starvation. | 1384 | * sync - wait for all in-flight requests to flush. avoid starvation. |
| @@ -1186,6 +1412,7 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc) | |||
| 1186 | mutex_unlock(&osdc->request_mutex); | 1412 | mutex_unlock(&osdc->request_mutex); |
| 1187 | dout("sync done (thru tid %llu)\n", last_tid); | 1413 | dout("sync done (thru tid %llu)\n", last_tid); |
| 1188 | } | 1414 | } |
| 1415 | EXPORT_SYMBOL(ceph_osdc_sync); | ||
| 1189 | 1416 | ||
| 1190 | /* | 1417 | /* |
| 1191 | * init, shutdown | 1418 | * init, shutdown |
| @@ -1211,7 +1438,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) | |||
| 1211 | INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); | 1438 | INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); |
| 1212 | 1439 | ||
| 1213 | schedule_delayed_work(&osdc->osds_timeout_work, | 1440 | schedule_delayed_work(&osdc->osds_timeout_work, |
| 1214 | round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ)); | 1441 | round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); |
| 1215 | 1442 | ||
| 1216 | err = -ENOMEM; | 1443 | err = -ENOMEM; |
| 1217 | osdc->req_mempool = mempool_create_kmalloc_pool(10, | 1444 | osdc->req_mempool = mempool_create_kmalloc_pool(10, |
| @@ -1237,6 +1464,7 @@ out_mempool: | |||
| 1237 | out: | 1464 | out: |
| 1238 | return err; | 1465 | return err; |
| 1239 | } | 1466 | } |
| 1467 | EXPORT_SYMBOL(ceph_osdc_init); | ||
| 1240 | 1468 | ||
| 1241 | void ceph_osdc_stop(struct ceph_osd_client *osdc) | 1469 | void ceph_osdc_stop(struct ceph_osd_client *osdc) |
| 1242 | { | 1470 | { |
| @@ -1251,6 +1479,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) | |||
| 1251 | ceph_msgpool_destroy(&osdc->msgpool_op); | 1479 | ceph_msgpool_destroy(&osdc->msgpool_op); |
| 1252 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); | 1480 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); |
| 1253 | } | 1481 | } |
| 1482 | EXPORT_SYMBOL(ceph_osdc_stop); | ||
| 1254 | 1483 | ||
| 1255 | /* | 1484 | /* |
| 1256 | * Read some contiguous pages. If we cross a stripe boundary, shorten | 1485 | * Read some contiguous pages. If we cross a stripe boundary, shorten |
| @@ -1288,6 +1517,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |||
| 1288 | dout("readpages result %d\n", rc); | 1517 | dout("readpages result %d\n", rc); |
| 1289 | return rc; | 1518 | return rc; |
| 1290 | } | 1519 | } |
| 1520 | EXPORT_SYMBOL(ceph_osdc_readpages); | ||
| 1291 | 1521 | ||
| 1292 | /* | 1522 | /* |
| 1293 | * do a synchronous write on N pages | 1523 | * do a synchronous write on N pages |
| @@ -1330,6 +1560,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
| 1330 | dout("writepages result %d\n", rc); | 1560 | dout("writepages result %d\n", rc); |
| 1331 | return rc; | 1561 | return rc; |
| 1332 | } | 1562 | } |
| 1563 | EXPORT_SYMBOL(ceph_osdc_writepages); | ||
| 1333 | 1564 | ||
| 1334 | /* | 1565 | /* |
| 1335 | * handle incoming message | 1566 | * handle incoming message |
| @@ -1420,6 +1651,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
| 1420 | } | 1651 | } |
| 1421 | m->pages = req->r_pages; | 1652 | m->pages = req->r_pages; |
| 1422 | m->nr_pages = req->r_num_pages; | 1653 | m->nr_pages = req->r_num_pages; |
| 1654 | #ifdef CONFIG_BLOCK | ||
| 1655 | m->bio = req->r_bio; | ||
| 1656 | #endif | ||
| 1423 | } | 1657 | } |
| 1424 | *skip = 0; | 1658 | *skip = 0; |
| 1425 | req->r_con_filling_msg = ceph_con_get(con); | 1659 | req->r_con_filling_msg = ceph_con_get(con); |
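Taken together, the exported alloc/build/start/wait helpers replace the single monolithic ceph_osdc_new_request() path with an op-array interface terminated by an entry whose op field is 0. A rough sketch of a single-op read issued through that interface, assuming an osdc, an object name oid, and a pre-allocated pages vector; the argument order follows the definitions above, and this is illustrative rather than the actual rbd or fs/ceph call site:

struct ceph_osd_req_op ops[2];
struct ceph_osd_request *req;
u64 len = PAGE_SIZE;
int ret;

memset(ops, 0, sizeof(ops));
ops[0].op = CEPH_OSD_OP_READ;
ops[0].extent.offset = 0;
ops[0].extent.length = len;
ops[1].op = 0;                                  /* terminator */

req = ceph_osdc_alloc_request(osdc, CEPH_OSD_FLAG_READ, NULL, ops,
                              false, GFP_NOFS, pages, NULL);
if (!req)
        return -ENOMEM;
req->r_num_pages = calc_pages_for(0, len);      /* normally set by calc_layout() */

ceph_osdc_build_request(req, 0, &len, ops, NULL, NULL, oid, strlen(oid));

ret = ceph_osdc_start_request(osdc, req, false); /* nofail=false assumed */
if (!ret)
        ret = ceph_osdc_wait_request(osdc, req);
ceph_osdc_put_request(req);
return ret;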
diff --git a/fs/ceph/osdmap.c b/net/ceph/osdmap.c index e31f118f1392..d73f3f6efa36 100644 --- a/fs/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
| @@ -1,14 +1,15 @@ | |||
| 1 | 1 | ||
| 2 | #include "ceph_debug.h" | 2 | #include <linux/ceph/ceph_debug.h> |
| 3 | 3 | ||
| 4 | #include <linux/module.h> | ||
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 5 | #include <asm/div64.h> | 6 | #include <asm/div64.h> |
| 6 | 7 | ||
| 7 | #include "super.h" | 8 | #include <linux/ceph/libceph.h> |
| 8 | #include "osdmap.h" | 9 | #include <linux/ceph/osdmap.h> |
| 9 | #include "crush/hash.h" | 10 | #include <linux/ceph/decode.h> |
| 10 | #include "crush/mapper.h" | 11 | #include <linux/crush/hash.h> |
| 11 | #include "decode.h" | 12 | #include <linux/crush/mapper.h> |
| 12 | 13 | ||
| 13 | char *ceph_osdmap_state_str(char *str, int len, int state) | 14 | char *ceph_osdmap_state_str(char *str, int len, int state) |
| 14 | { | 15 | { |
| @@ -417,6 +418,20 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) | |||
| 417 | return NULL; | 418 | return NULL; |
| 418 | } | 419 | } |
| 419 | 420 | ||
| 421 | int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) | ||
| 422 | { | ||
| 423 | struct rb_node *rbp; | ||
| 424 | |||
| 425 | for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { | ||
| 426 | struct ceph_pg_pool_info *pi = | ||
| 427 | rb_entry(rbp, struct ceph_pg_pool_info, node); | ||
| 428 | if (pi->name && strcmp(pi->name, name) == 0) | ||
| 429 | return pi->id; | ||
| 430 | } | ||
| 431 | return -ENOENT; | ||
| 432 | } | ||
| 433 | EXPORT_SYMBOL(ceph_pg_poolid_by_name); | ||
| 434 | |||
| 420 | static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) | 435 | static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) |
| 421 | { | 436 | { |
| 422 | rb_erase(&pi->node, root); | 437 | rb_erase(&pi->node, root); |
| @@ -966,6 +981,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | |||
| 966 | 981 | ||
| 967 | dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); | 982 | dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); |
| 968 | } | 983 | } |
| 984 | EXPORT_SYMBOL(ceph_calc_file_object_mapping); | ||
| 969 | 985 | ||
| 970 | /* | 986 | /* |
| 971 | * calculate an object layout (i.e. pgid) from an oid, | 987 | * calculate an object layout (i.e. pgid) from an oid, |
| @@ -1011,6 +1027,7 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, | |||
| 1011 | ol->ol_stripe_unit = fl->fl_object_stripe_unit; | 1027 | ol->ol_stripe_unit = fl->fl_object_stripe_unit; |
| 1012 | return 0; | 1028 | return 0; |
| 1013 | } | 1029 | } |
| 1030 | EXPORT_SYMBOL(ceph_calc_object_layout); | ||
| 1014 | 1031 | ||
| 1015 | /* | 1032 | /* |
| 1016 | * Calculate raw osd vector for the given pgid. Return pointer to osd | 1033 | * Calculate raw osd vector for the given pgid. Return pointer to osd |
| @@ -1108,3 +1125,4 @@ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) | |||
| 1108 | return osds[i]; | 1125 | return osds[i]; |
| 1109 | return -1; | 1126 | return -1; |
| 1110 | } | 1127 | } |
| 1128 | EXPORT_SYMBOL(ceph_calc_pg_primary); | ||
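The newly exported ceph_pg_poolid_by_name() lets callers outside fs/ceph resolve a pool name against the current osdmap. A hedged usage sketch, assuming osdc->osdmap is valid while map_sem is held for read:

int poolid;

down_read(&osdc->map_sem);
poolid = ceph_pg_poolid_by_name(osdc->osdmap, "rbd");
up_read(&osdc->map_sem);
if (poolid < 0)
        return poolid;          /* -ENOENT: no pool with that name */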
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c new file mode 100644 index 000000000000..13cb409a7bba --- /dev/null +++ b/net/ceph/pagelist.c | |||
| @@ -0,0 +1,154 @@ | |||
| 1 | |||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/gfp.h> | ||
| 4 | #include <linux/pagemap.h> | ||
| 5 | #include <linux/highmem.h> | ||
| 6 | #include <linux/ceph/pagelist.h> | ||
| 7 | |||
| 8 | static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) | ||
| 9 | { | ||
| 10 | if (pl->mapped_tail) { | ||
| 11 | struct page *page = list_entry(pl->head.prev, struct page, lru); | ||
| 12 | kunmap(page); | ||
| 13 | pl->mapped_tail = NULL; | ||
| 14 | } | ||
| 15 | } | ||
| 16 | |||
| 17 | int ceph_pagelist_release(struct ceph_pagelist *pl) | ||
| 18 | { | ||
| 19 | ceph_pagelist_unmap_tail(pl); | ||
| 20 | while (!list_empty(&pl->head)) { | ||
| 21 | struct page *page = list_first_entry(&pl->head, struct page, | ||
| 22 | lru); | ||
| 23 | list_del(&page->lru); | ||
| 24 | __free_page(page); | ||
| 25 | } | ||
| 26 | ceph_pagelist_free_reserve(pl); | ||
| 27 | return 0; | ||
| 28 | } | ||
| 29 | EXPORT_SYMBOL(ceph_pagelist_release); | ||
| 30 | |||
| 31 | static int ceph_pagelist_addpage(struct ceph_pagelist *pl) | ||
| 32 | { | ||
| 33 | struct page *page; | ||
| 34 | |||
| 35 | if (!pl->num_pages_free) { | ||
| 36 | page = __page_cache_alloc(GFP_NOFS); | ||
| 37 | } else { | ||
| 38 | page = list_first_entry(&pl->free_list, struct page, lru); | ||
| 39 | list_del(&page->lru); | ||
| 40 | --pl->num_pages_free; | ||
| 41 | } | ||
| 42 | if (!page) | ||
| 43 | return -ENOMEM; | ||
| 44 | pl->room += PAGE_SIZE; | ||
| 45 | ceph_pagelist_unmap_tail(pl); | ||
| 46 | list_add_tail(&page->lru, &pl->head); | ||
| 47 | pl->mapped_tail = kmap(page); | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) | ||
| 52 | { | ||
| 53 | while (pl->room < len) { | ||
| 54 | size_t bit = pl->room; | ||
| 55 | int ret; | ||
| 56 | |||
| 57 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), | ||
| 58 | buf, bit); | ||
| 59 | pl->length += bit; | ||
| 60 | pl->room -= bit; | ||
| 61 | buf += bit; | ||
| 62 | len -= bit; | ||
| 63 | ret = ceph_pagelist_addpage(pl); | ||
| 64 | if (ret) | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); | ||
| 69 | pl->length += len; | ||
| 70 | pl->room -= len; | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL(ceph_pagelist_append); | ||
| 74 | |||
| 75 | /** | ||
| 76 | * Allocate enough pages for a pagelist to append the given amount | ||
| 77 | * of data without allocating. | ||
| 78 | * Returns: 0 on success, -ENOMEM on error. | ||
| 79 | */ | ||
| 80 | int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space) | ||
| 81 | { | ||
| 82 | if (space <= pl->room) | ||
| 83 | return 0; | ||
| 84 | space -= pl->room; | ||
| 85 | space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */ | ||
| 86 | |||
| 87 | while (space > pl->num_pages_free) { | ||
| 88 | struct page *page = __page_cache_alloc(GFP_NOFS); | ||
| 89 | if (!page) | ||
| 90 | return -ENOMEM; | ||
| 91 | list_add_tail(&page->lru, &pl->free_list); | ||
| 92 | ++pl->num_pages_free; | ||
| 93 | } | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | EXPORT_SYMBOL(ceph_pagelist_reserve); | ||
| 97 | |||
| 98 | /** | ||
| 99 | * Free any pages that have been preallocated. | ||
| 100 | */ | ||
| 101 | int ceph_pagelist_free_reserve(struct ceph_pagelist *pl) | ||
| 102 | { | ||
| 103 | while (!list_empty(&pl->free_list)) { | ||
| 104 | struct page *page = list_first_entry(&pl->free_list, | ||
| 105 | struct page, lru); | ||
| 106 | list_del(&page->lru); | ||
| 107 | __free_page(page); | ||
| 108 | --pl->num_pages_free; | ||
| 109 | } | ||
| 110 | BUG_ON(pl->num_pages_free); | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | EXPORT_SYMBOL(ceph_pagelist_free_reserve); | ||
| 114 | |||
| 115 | /** | ||
| 116 | * Create a truncation point. | ||
| 117 | */ | ||
| 118 | void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, | ||
| 119 | struct ceph_pagelist_cursor *c) | ||
| 120 | { | ||
| 121 | c->pl = pl; | ||
| 122 | c->page_lru = pl->head.prev; | ||
| 123 | c->room = pl->room; | ||
| 124 | } | ||
| 125 | EXPORT_SYMBOL(ceph_pagelist_set_cursor); | ||
| 126 | |||
| 127 | /** | ||
| 128 | * Truncate a pagelist to the given point. Move extra pages to reserve. | ||
| 129 | * This won't sleep. | ||
| 130 | * Returns: 0 on success, | ||
| 131 | * -EINVAL if the pagelist doesn't match the trunc point pagelist | ||
| 132 | */ | ||
| 133 | int ceph_pagelist_truncate(struct ceph_pagelist *pl, | ||
| 134 | struct ceph_pagelist_cursor *c) | ||
| 135 | { | ||
| 136 | struct page *page; | ||
| 137 | |||
| 138 | if (pl != c->pl) | ||
| 139 | return -EINVAL; | ||
| 140 | ceph_pagelist_unmap_tail(pl); | ||
| 141 | while (pl->head.prev != c->page_lru) { | ||
| 142 | page = list_entry(pl->head.prev, struct page, lru); | ||
| 143 | list_del(&page->lru); /* remove from pagelist */ | ||
| 144 | list_add_tail(&page->lru, &pl->free_list); /* add to reserve */ | ||
| 145 | ++pl->num_pages_free; | ||
| 146 | } | ||
| 147 | pl->room = c->room; | ||
| 148 | if (!list_empty(&pl->head)) { | ||
| 149 | page = list_entry(pl->head.prev, struct page, lru); | ||
| 150 | pl->mapped_tail = kmap(page); | ||
| 151 | } | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL(ceph_pagelist_truncate); | ||
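The pagelist is the backing store for the request "trail" used by osd_client.c above; the reserve/cursor pair lets callers pre-allocate pages and roll back partial appends. A minimal sketch, assuming ceph_pagelist_init() from <linux/ceph/pagelist.h> (as called in ceph_osdc_alloc_request()); the rollback branch is shown only to illustrate the cursor API:

static int build_trail_example(const void *val, size_t val_len)
{
        struct ceph_pagelist pl;
        struct ceph_pagelist_cursor c;
        int err;

        ceph_pagelist_init(&pl);

        /* reserve up front so the appends below cannot fail with -ENOMEM */
        err = ceph_pagelist_reserve(&pl, 8 + val_len);
        if (err)
                goto out;

        ceph_pagelist_set_cursor(&pl, &c);       /* remember a truncation point */
        ceph_pagelist_append(&pl, "example", 7);
        err = ceph_pagelist_append(&pl, val, val_len);
        if (err)
                ceph_pagelist_truncate(&pl, &c); /* drop both appends */
out:
        ceph_pagelist_release(&pl);              /* frees data pages and any reserve */
        return err;
}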
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c new file mode 100644 index 000000000000..54caf0687155 --- /dev/null +++ b/net/ceph/pagevec.c | |||
| @@ -0,0 +1,223 @@ | |||
| 1 | #include <linux/ceph/ceph_debug.h> | ||
| 2 | |||
| 3 | #include <linux/module.h> | ||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/slab.h> | ||
| 6 | #include <linux/file.h> | ||
| 7 | #include <linux/namei.h> | ||
| 8 | #include <linux/writeback.h> | ||
| 9 | |||
| 10 | #include <linux/ceph/libceph.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * build a vector of user pages | ||
| 14 | */ | ||
| 15 | struct page **ceph_get_direct_page_vector(const char __user *data, | ||
| 16 | int num_pages, | ||
| 17 | loff_t off, size_t len) | ||
| 18 | { | ||
| 19 | struct page **pages; | ||
| 20 | int rc; | ||
| 21 | |||
| 22 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); | ||
| 23 | if (!pages) | ||
| 24 | return ERR_PTR(-ENOMEM); | ||
| 25 | |||
| 26 | down_read(¤t->mm->mmap_sem); | ||
| 27 | rc = get_user_pages(current, current->mm, (unsigned long)data, | ||
| 28 | num_pages, 0, 0, pages, NULL); | ||
| 29 | up_read(¤t->mm->mmap_sem); | ||
| 30 | if (rc < 0) | ||
| 31 | goto fail; | ||
| 32 | return pages; | ||
| 33 | |||
| 34 | fail: | ||
| 35 | kfree(pages); | ||
| 36 | return ERR_PTR(rc); | ||
| 37 | } | ||
| 38 | EXPORT_SYMBOL(ceph_get_direct_page_vector); | ||
| 39 | |||
| 40 | void ceph_put_page_vector(struct page **pages, int num_pages) | ||
| 41 | { | ||
| 42 | int i; | ||
| 43 | |||
| 44 | for (i = 0; i < num_pages; i++) | ||
| 45 | put_page(pages[i]); | ||
| 46 | kfree(pages); | ||
| 47 | } | ||
| 48 | EXPORT_SYMBOL(ceph_put_page_vector); | ||
| 49 | |||
| 50 | void ceph_release_page_vector(struct page **pages, int num_pages) | ||
| 51 | { | ||
| 52 | int i; | ||
| 53 | |||
| 54 | for (i = 0; i < num_pages; i++) | ||
| 55 | __free_pages(pages[i], 0); | ||
| 56 | kfree(pages); | ||
| 57 | } | ||
| 58 | EXPORT_SYMBOL(ceph_release_page_vector); | ||
| 59 | |||
| 60 | /* | ||
| 61 | * allocate a vector new pages | ||
| 62 | */ | ||
| 63 | struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags) | ||
| 64 | { | ||
| 65 | struct page **pages; | ||
| 66 | int i; | ||
| 67 | |||
| 68 | pages = kmalloc(sizeof(*pages) * num_pages, flags); | ||
| 69 | if (!pages) | ||
| 70 | return ERR_PTR(-ENOMEM); | ||
| 71 | for (i = 0; i < num_pages; i++) { | ||
| 72 | pages[i] = __page_cache_alloc(flags); | ||
| 73 | if (pages[i] == NULL) { | ||
| 74 | ceph_release_page_vector(pages, i); | ||
| 75 | return ERR_PTR(-ENOMEM); | ||
| 76 | } | ||
| 77 | } | ||
| 78 | return pages; | ||
| 79 | } | ||
| 80 | EXPORT_SYMBOL(ceph_alloc_page_vector); | ||
| 81 | |||
| 82 | /* | ||
| 83 | * copy user data into a page vector | ||
| 84 | */ | ||
| 85 | int ceph_copy_user_to_page_vector(struct page **pages, | ||
| 86 | const char __user *data, | ||
| 87 | loff_t off, size_t len) | ||
| 88 | { | ||
| 89 | int i = 0; | ||
| 90 | int po = off & ~PAGE_CACHE_MASK; | ||
| 91 | int left = len; | ||
| 92 | int l, bad; | ||
| 93 | |||
| 94 | while (left > 0) { | ||
| 95 | l = min_t(int, PAGE_CACHE_SIZE-po, left); | ||
| 96 | bad = copy_from_user(page_address(pages[i]) + po, data, l); | ||
| 97 | if (bad == l) | ||
| 98 | return -EFAULT; | ||
| 99 | data += l - bad; | ||
| 100 | left -= l - bad; | ||
| 101 | po += l - bad; | ||
| 102 | if (po == PAGE_CACHE_SIZE) { | ||
| 103 | po = 0; | ||
| 104 | i++; | ||
| 105 | } | ||
| 106 | } | ||
| 107 | return len; | ||
| 108 | } | ||
| 109 | EXPORT_SYMBOL(ceph_copy_user_to_page_vector); | ||
| 110 | |||
| 111 | int ceph_copy_to_page_vector(struct page **pages, | ||
| 112 | const char *data, | ||
| 113 | loff_t off, size_t len) | ||
| 114 | { | ||
| 115 | int i = 0; | ||
| 116 | size_t po = off & ~PAGE_CACHE_MASK; | ||
| 117 | size_t left = len; | ||
| 118 | size_t l; | ||
| 119 | |||
| 120 | while (left > 0) { | ||
| 121 | l = min_t(size_t, PAGE_CACHE_SIZE-po, left); | ||
| 122 | memcpy(page_address(pages[i]) + po, data, l); | ||
| 123 | data += l; | ||
| 124 | left -= l; | ||
| 125 | po += l; | ||
| 126 | if (po == PAGE_CACHE_SIZE) { | ||
| 127 | po = 0; | ||
| 128 | i++; | ||
| 129 | } | ||
| 130 | } | ||
| 131 | return len; | ||
| 132 | } | ||
| 133 | EXPORT_SYMBOL(ceph_copy_to_page_vector); | ||
| 134 | |||
| 135 | int ceph_copy_from_page_vector(struct page **pages, | ||
| 136 | char *data, | ||
| 137 | loff_t off, size_t len) | ||
| 138 | { | ||
| 139 | int i = 0; | ||
| 140 | size_t po = off & ~PAGE_CACHE_MASK; | ||
| 141 | size_t left = len; | ||
| 142 | size_t l; | ||
| 143 | |||
| 144 | while (left > 0) { | ||
| 145 | l = min_t(size_t, PAGE_CACHE_SIZE-po, left); | ||
| 146 | memcpy(data, page_address(pages[i]) + po, l); | ||
| 147 | data += l; | ||
| 148 | left -= l; | ||
| 149 | po += l; | ||
| 150 | if (po == PAGE_CACHE_SIZE) { | ||
| 151 | po = 0; | ||
| 152 | i++; | ||
| 153 | } | ||
| 154 | } | ||
| 155 | return len; | ||
| 156 | } | ||
| 157 | EXPORT_SYMBOL(ceph_copy_from_page_vector); | ||
| 158 | |||
| 159 | /* | ||
| 160 | * copy user data from a page vector into a user pointer | ||
| 161 | */ | ||
| 162 | int ceph_copy_page_vector_to_user(struct page **pages, | ||
| 163 | char __user *data, | ||
| 164 | loff_t off, size_t len) | ||
| 165 | { | ||
| 166 | int i = 0; | ||
| 167 | int po = off & ~PAGE_CACHE_MASK; | ||
| 168 | int left = len; | ||
| 169 | int l, bad; | ||
| 170 | |||
| 171 | while (left > 0) { | ||
| 172 | l = min_t(int, left, PAGE_CACHE_SIZE-po); | ||
| 173 | bad = copy_to_user(data, page_address(pages[i]) + po, l); | ||
| 174 | if (bad == l) | ||
| 175 | return -EFAULT; | ||
| 176 | data += l - bad; | ||
| 177 | left -= l - bad; | ||
| 178 | if (po) { | ||
| 179 | po += l - bad; | ||
| 180 | if (po == PAGE_CACHE_SIZE) | ||
| 181 | po = 0; | ||
| 182 | } | ||
| 183 | i++; | ||
| 184 | } | ||
| 185 | return len; | ||
| 186 | } | ||
| 187 | EXPORT_SYMBOL(ceph_copy_page_vector_to_user); | ||
| 188 | |||
| 189 | /* | ||
| 190 | * Zero an extent within a page vector. Offset is relative to the | ||
| 191 | * start of the first page. | ||
| 192 | */ | ||
| 193 | void ceph_zero_page_vector_range(int off, int len, struct page **pages) | ||
| 194 | { | ||
| 195 | int i = off >> PAGE_CACHE_SHIFT; | ||
| 196 | |||
| 197 | off &= ~PAGE_CACHE_MASK; | ||
| 198 | |||
| 199 | dout("zero_page_vector_page %u~%u\n", off, len); | ||
| 200 | |||
| 201 | /* leading partial page? */ | ||
| 202 | if (off) { | ||
| 203 | int end = min((int)PAGE_CACHE_SIZE, off + len); | ||
| 204 | dout("zeroing %d %p head from %d\n", i, pages[i], | ||
| 205 | (int)off); | ||
| 206 | zero_user_segment(pages[i], off, end); | ||
| 207 | len -= (end - off); | ||
| 208 | i++; | ||
| 209 | } | ||
| 210 | while (len >= PAGE_CACHE_SIZE) { | ||
| 211 | dout("zeroing %d %p len=%d\n", i, pages[i], len); | ||
| 212 | zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); | ||
| 213 | len -= PAGE_CACHE_SIZE; | ||
| 214 | i++; | ||
| 215 | } | ||
| 216 | /* trailing partial page? */ | ||
| 217 | if (len) { | ||
| 218 | dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len); | ||
| 219 | zero_user_segment(pages[i], 0, len); | ||
| 220 | } | ||
| 221 | } | ||
| 222 | EXPORT_SYMBOL(ceph_zero_page_vector_range); | ||
| 223 | |||
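These page-vector helpers are now the generic counterparts of what fs/ceph used internally. A small kernel-buffer round-trip sketch, assuming a context where GFP_NOFS is appropriate and using calc_pages_for() from libceph.h as osd_client.c does:

static int pagevec_roundtrip(const char *src, char *dst, size_t len)
{
        int num_pages = calc_pages_for(0, len);
        struct page **pages;
        int copied;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ceph_copy_to_page_vector(pages, src, 0, len);             /* kernel buffer in */
        copied = ceph_copy_from_page_vector(pages, dst, 0, len);  /* and back out */

        ceph_release_page_vector(pages, num_pages);               /* frees pages + array */
        return copied == (int)len ? 0 : -EIO;
}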
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 7a85367b3c2f..8451ab481095 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -348,7 +348,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, | |||
| 348 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { | 348 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { |
| 349 | if (info.rule_cnt > 0) { | 349 | if (info.rule_cnt > 0) { |
| 350 | if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) | 350 | if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) |
| 351 | rule_buf = kmalloc(info.rule_cnt * sizeof(u32), | 351 | rule_buf = kzalloc(info.rule_cnt * sizeof(u32), |
| 352 | GFP_USER); | 352 | GFP_USER); |
| 353 | if (!rule_buf) | 353 | if (!rule_buf) |
| 354 | return -ENOMEM; | 354 | return -ENOMEM; |
| @@ -397,7 +397,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, | |||
| 397 | (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) | 397 | (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) |
| 398 | return -ENOMEM; | 398 | return -ENOMEM; |
| 399 | full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; | 399 | full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; |
| 400 | indir = kmalloc(full_size, GFP_USER); | 400 | indir = kzalloc(full_size, GFP_USER); |
| 401 | if (!indir) | 401 | if (!indir) |
| 402 | return -ENOMEM; | 402 | return -ENOMEM; |
| 403 | 403 | ||
| @@ -538,7 +538,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
| 538 | 538 | ||
| 539 | gstrings.len = ret; | 539 | gstrings.len = ret; |
| 540 | 540 | ||
| 541 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); | 541 | data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); |
| 542 | if (!data) | 542 | if (!data) |
| 543 | return -ENOMEM; | 543 | return -ENOMEM; |
| 544 | 544 | ||
| @@ -775,7 +775,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | |||
| 775 | if (regs.len > reglen) | 775 | if (regs.len > reglen) |
| 776 | regs.len = reglen; | 776 | regs.len = reglen; |
| 777 | 777 | ||
| 778 | regbuf = kmalloc(reglen, GFP_USER); | 778 | regbuf = kzalloc(reglen, GFP_USER); |
| 779 | if (!regbuf) | 779 | if (!regbuf) |
| 780 | return -ENOMEM; | 780 | return -ENOMEM; |
| 781 | 781 | ||
diff --git a/net/core/stream.c b/net/core/stream.c index d959e0f41528..f5df85dcd20b 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
| @@ -141,10 +141,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
| 141 | 141 | ||
| 142 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 142 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
| 143 | sk->sk_write_pending++; | 143 | sk->sk_write_pending++; |
| 144 | sk_wait_event(sk, ¤t_timeo, !sk->sk_err && | 144 | sk_wait_event(sk, ¤t_timeo, sk->sk_err || |
| 145 | !(sk->sk_shutdown & SEND_SHUTDOWN) && | 145 | (sk->sk_shutdown & SEND_SHUTDOWN) || |
| 146 | sk_stream_memory_free(sk) && | 146 | (sk_stream_memory_free(sk) && |
| 147 | vm_wait); | 147 | !vm_wait)); |
| 148 | sk->sk_write_pending--; | 148 | sk->sk_write_pending--; |
| 149 | 149 | ||
| 150 | if (vm_wait) { | 150 | if (vm_wait) { |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 571f8950ed06..7cd7760144f7 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
| @@ -217,6 +217,7 @@ config NET_IPIP | |||
| 217 | 217 | ||
| 218 | config NET_IPGRE | 218 | config NET_IPGRE |
| 219 | tristate "IP: GRE tunnels over IP" | 219 | tristate "IP: GRE tunnels over IP" |
| 220 | depends on IPV6 || IPV6=n | ||
| 220 | help | 221 | help |
| 221 | Tunneling means encapsulating data of one protocol type within | 222 | Tunneling means encapsulating data of one protocol type within |
| 222 | another protocol and sending it over a channel that understands the | 223 | another protocol and sending it over a channel that understands the |
| @@ -412,7 +413,7 @@ config INET_XFRM_MODE_BEET | |||
| 412 | If unsure, say Y. | 413 | If unsure, say Y. |
| 413 | 414 | ||
| 414 | config INET_LRO | 415 | config INET_LRO |
| 415 | bool "Large Receive Offload (ipv4/tcp)" | 416 | tristate "Large Receive Offload (ipv4/tcp)" |
| 416 | default y | 417 | default y |
| 417 | ---help--- | 418 | ---help--- |
| 418 | Support for Large Receive Offload (ipv4/tcp). | 419 | Support for Large Receive Offload (ipv4/tcp). |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1fdcacd36ce7..2a4bb76f2132 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
| @@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
| 834 | int mark = 0; | 834 | int mark = 0; |
| 835 | 835 | ||
| 836 | 836 | ||
| 837 | if (len == 8 || IGMP_V2_SEEN(in_dev)) { | 837 | if (len == 8) { |
| 838 | if (ih->code == 0) { | 838 | if (ih->code == 0) { |
| 839 | /* Alas, old v1 router presents here. */ | 839 | /* Alas, old v1 router presents here. */ |
| 840 | 840 | ||
| @@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
| 856 | igmpv3_clear_delrec(in_dev); | 856 | igmpv3_clear_delrec(in_dev); |
| 857 | } else if (len < 12) { | 857 | } else if (len < 12) { |
| 858 | return; /* ignore bogus packet; freed by caller */ | 858 | return; /* ignore bogus packet; freed by caller */ |
| 859 | } else if (IGMP_V1_SEEN(in_dev)) { | ||
| 860 | /* This is a v3 query with v1 queriers present */ | ||
| 861 | max_delay = IGMP_Query_Response_Interval; | ||
| 862 | group = 0; | ||
| 863 | } else if (IGMP_V2_SEEN(in_dev)) { | ||
| 864 | /* this is a v3 query with v2 queriers present; | ||
| 865 | * Interpretation of the max_delay code is problematic here. | ||
| 866 | * A real v2 host would use ih_code directly, while v3 has a | ||
| 867 | * different encoding. We use the v3 encoding as more likely | ||
| 868 | * to be intended in a v3 query. | ||
| 869 | */ | ||
| 870 | max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); | ||
| 859 | } else { /* v3 */ | 871 | } else { /* v3 */ |
| 860 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) | 872 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) |
| 861 | return; | 873 | return; |
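The new IGMP_V2_SEEN() branch above reuses the IGMPv3 Max Resp Code decoding (IGMPV3_MRC) to derive max_delay from a v3 query. The standalone sketch below reproduces the RFC 3376-style decoding purely for illustration, assuming the usual encoding (codes below 128 are literal tenths of a second; larger codes carry a 4-bit mantissa and a 3-bit exponent); it is not the kernel macro itself.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative decode of an IGMPv3 Max Resp Code (RFC 3376, section 4.1.1). */
    static unsigned int igmpv3_mrc(uint8_t code)
    {
            if (code < 128)
                    return code;                     /* literal value, in 1/10 s units */
            unsigned int mant = code & 0x0f;         /* low 4 bits */
            unsigned int exp = (code >> 4) & 0x07;   /* next 3 bits */
            return (mant | 0x10) << (exp + 3);
    }

    int main(void)
    {
            printf("code 0x64 -> %u tenths of a second\n", igmpv3_mrc(0x64));
            printf("code 0x8f -> %u tenths of a second\n", igmpv3_mrc(0x8f));
            return 0;
    }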
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 244f7cb08d68..37f8adb68c79 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
| 12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
| 13 | #include <linux/percpu.h> | 13 | #include <linux/percpu.h> |
| 14 | #include <linux/security.h> | ||
| 14 | #include <net/net_namespace.h> | 15 | #include <net/net_namespace.h> |
| 15 | 16 | ||
| 16 | #include <linux/netfilter.h> | 17 | #include <linux/netfilter.h> |
| @@ -87,6 +88,29 @@ static void ct_seq_stop(struct seq_file *s, void *v) | |||
| 87 | rcu_read_unlock(); | 88 | rcu_read_unlock(); |
| 88 | } | 89 | } |
| 89 | 90 | ||
| 91 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | ||
| 92 | static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | ||
| 93 | { | ||
| 94 | int ret; | ||
| 95 | u32 len; | ||
| 96 | char *secctx; | ||
| 97 | |||
| 98 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); | ||
| 99 | if (ret) | ||
| 100 | return ret; | ||
| 101 | |||
| 102 | ret = seq_printf(s, "secctx=%s ", secctx); | ||
| 103 | |||
| 104 | security_release_secctx(secctx, len); | ||
| 105 | return ret; | ||
| 106 | } | ||
| 107 | #else | ||
| 108 | static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | ||
| 109 | { | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | #endif | ||
| 113 | |||
| 90 | static int ct_seq_show(struct seq_file *s, void *v) | 114 | static int ct_seq_show(struct seq_file *s, void *v) |
| 91 | { | 115 | { |
| 92 | struct nf_conntrack_tuple_hash *hash = v; | 116 | struct nf_conntrack_tuple_hash *hash = v; |
| @@ -148,10 +172,8 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
| 148 | goto release; | 172 | goto release; |
| 149 | #endif | 173 | #endif |
| 150 | 174 | ||
| 151 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 175 | if (ct_show_secctx(s, ct)) |
| 152 | if (seq_printf(s, "secmark=%u ", ct->secmark)) | ||
| 153 | goto release; | 176 | goto release; |
| 154 | #endif | ||
| 155 | 177 | ||
| 156 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) | 178 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) |
| 157 | goto release; | 179 | goto release; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c35b469e851c..74c54b30600f 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) | |||
| 135 | 135 | ||
| 136 | /* This function calculates a "timeout" which is equivalent to the timeout of a | 136 | /* This function calculates a "timeout" which is equivalent to the timeout of a |
| 137 | * TCP connection after "boundary" unsuccessful, exponentially backed-off | 137 | * TCP connection after "boundary" unsuccessful, exponentially backed-off |
| 138 | * retransmissions with an initial RTO of TCP_RTO_MIN. | 138 | * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if |
| 139 | * syn_set flag is set. | ||
| 139 | */ | 140 | */ |
| 140 | static bool retransmits_timed_out(struct sock *sk, | 141 | static bool retransmits_timed_out(struct sock *sk, |
| 141 | unsigned int boundary) | 142 | unsigned int boundary, |
| 143 | bool syn_set) | ||
| 142 | { | 144 | { |
| 143 | unsigned int timeout, linear_backoff_thresh; | 145 | unsigned int timeout, linear_backoff_thresh; |
| 144 | unsigned int start_ts; | 146 | unsigned int start_ts; |
| 147 | unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN; | ||
| 145 | 148 | ||
| 146 | if (!inet_csk(sk)->icsk_retransmits) | 149 | if (!inet_csk(sk)->icsk_retransmits) |
| 147 | return false; | 150 | return false; |
| @@ -151,12 +154,12 @@ static bool retransmits_timed_out(struct sock *sk, | |||
| 151 | else | 154 | else |
| 152 | start_ts = tcp_sk(sk)->retrans_stamp; | 155 | start_ts = tcp_sk(sk)->retrans_stamp; |
| 153 | 156 | ||
| 154 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); | 157 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); |
| 155 | 158 | ||
| 156 | if (boundary <= linear_backoff_thresh) | 159 | if (boundary <= linear_backoff_thresh) |
| 157 | timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; | 160 | timeout = ((2 << boundary) - 1) * rto_base; |
| 158 | else | 161 | else |
| 159 | timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + | 162 | timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + |
| 160 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; | 163 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; |
| 161 | 164 | ||
| 162 | return (tcp_time_stamp - start_ts) >= timeout; | 165 | return (tcp_time_stamp - start_ts) >= timeout; |
| @@ -167,14 +170,15 @@ static int tcp_write_timeout(struct sock *sk) | |||
| 167 | { | 170 | { |
| 168 | struct inet_connection_sock *icsk = inet_csk(sk); | 171 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 169 | int retry_until; | 172 | int retry_until; |
| 170 | bool do_reset; | 173 | bool do_reset, syn_set = 0; |
| 171 | 174 | ||
| 172 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 175 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
| 173 | if (icsk->icsk_retransmits) | 176 | if (icsk->icsk_retransmits) |
| 174 | dst_negative_advice(sk); | 177 | dst_negative_advice(sk); |
| 175 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 178 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
| 179 | syn_set = 1; | ||
| 176 | } else { | 180 | } else { |
| 177 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 181 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { |
| 178 | /* Black hole detection */ | 182 | /* Black hole detection */ |
| 179 | tcp_mtu_probing(icsk, sk); | 183 | tcp_mtu_probing(icsk, sk); |
| 180 | 184 | ||
| @@ -187,14 +191,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
| 187 | 191 | ||
| 188 | retry_until = tcp_orphan_retries(sk, alive); | 192 | retry_until = tcp_orphan_retries(sk, alive); |
| 189 | do_reset = alive || | 193 | do_reset = alive || |
| 190 | !retransmits_timed_out(sk, retry_until); | 194 | !retransmits_timed_out(sk, retry_until, 0); |
| 191 | 195 | ||
| 192 | if (tcp_out_of_resources(sk, do_reset)) | 196 | if (tcp_out_of_resources(sk, do_reset)) |
| 193 | return 1; | 197 | return 1; |
| 194 | } | 198 | } |
| 195 | } | 199 | } |
| 196 | 200 | ||
| 197 | if (retransmits_timed_out(sk, retry_until)) { | 201 | if (retransmits_timed_out(sk, retry_until, syn_set)) { |
| 198 | /* Has it gone just too far? */ | 202 | /* Has it gone just too far? */ |
| 199 | tcp_write_err(sk); | 203 | tcp_write_err(sk); |
| 200 | return 1; | 204 | return 1; |
| @@ -436,7 +440,7 @@ out_reset_timer: | |||
| 436 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); | 440 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); |
| 437 | } | 441 | } |
| 438 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); | 442 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); |
| 439 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) | 443 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) |
| 440 | __sk_dst_reset(sk); | 444 | __sk_dst_reset(sk); |
| 441 | 445 | ||
| 442 | out:; | 446 | out:; |
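With syn_set, the same backoff formula runs with TCP_TIMEOUT_INIT as the base RTO for SYN retransmissions instead of TCP_RTO_MIN, which is what the hunks above thread through. The standalone sketch below reproduces only the arithmetic, assuming defaults of roughly 0.2 s for TCP_RTO_MIN, 3 s for TCP_TIMEOUT_INIT and 120 s for TCP_RTO_MAX (all expressed here in milliseconds); the helper names and values are illustrative.

    #include <stdio.h>

    static unsigned int ilog2_u(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    /* Same shape as retransmits_timed_out(): exponential doubling up to a
     * threshold, then one TCP_RTO_MAX per additional retransmission. */
    static unsigned int timeout_ms(unsigned int boundary, unsigned int rto_base,
                                   unsigned int rto_max)
    {
            unsigned int thresh = ilog2_u(rto_max / rto_base);

            if (boundary <= thresh)
                    return ((2u << boundary) - 1) * rto_base;
            return ((2u << thresh) - 1) * rto_base +
                   (boundary - thresh) * rto_max;
    }

    int main(void)
    {
            printf("data path, boundary 3: %u ms\n", timeout_ms(3, 200, 120000));
            printf("SYN path,  boundary 5: %u ms\n", timeout_ms(5, 3000, 120000));
            return 0;
    }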
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8323136bdc54..a275c6e1e25c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1556,14 +1556,13 @@ out: | |||
| 1556 | * i.e. Path MTU discovery | 1556 | * i.e. Path MTU discovery |
| 1557 | */ | 1557 | */ |
| 1558 | 1558 | ||
| 1559 | void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | 1559 | static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr, |
| 1560 | struct net_device *dev, u32 pmtu) | 1560 | struct net *net, u32 pmtu, int ifindex) |
| 1561 | { | 1561 | { |
| 1562 | struct rt6_info *rt, *nrt; | 1562 | struct rt6_info *rt, *nrt; |
| 1563 | struct net *net = dev_net(dev); | ||
| 1564 | int allfrag = 0; | 1563 | int allfrag = 0; |
| 1565 | 1564 | ||
| 1566 | rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0); | 1565 | rt = rt6_lookup(net, daddr, saddr, ifindex, 0); |
| 1567 | if (rt == NULL) | 1566 | if (rt == NULL) |
| 1568 | return; | 1567 | return; |
| 1569 | 1568 | ||
| @@ -1631,6 +1630,27 @@ out: | |||
| 1631 | dst_release(&rt->dst); | 1630 | dst_release(&rt->dst); |
| 1632 | } | 1631 | } |
| 1633 | 1632 | ||
| 1633 | void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | ||
| 1634 | struct net_device *dev, u32 pmtu) | ||
| 1635 | { | ||
| 1636 | struct net *net = dev_net(dev); | ||
| 1637 | |||
| 1638 | /* | ||
| 1639 | * RFC 1981 states that a node "MUST reduce the size of the packets it | ||
| 1640 | * is sending along the path" that caused the Packet Too Big message. | ||
| 1641 | * Since it's not possible in the general case to determine which | ||
| 1642 | * interface was used to send the original packet, we update the MTU | ||
| 1643 | * on the interface that will be used to send future packets. We also | ||
| 1644 | * update the MTU on the interface that received the Packet Too Big in | ||
| 1645 | * case the original packet was forced out that interface with | ||
| 1646 | * SO_BINDTODEVICE or similar. This is the next best thing to the | ||
| 1647 | * correct behaviour, which would be to update the MTU on all | ||
| 1648 | * interfaces. | ||
| 1649 | */ | ||
| 1650 | rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0); | ||
| 1651 | rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex); | ||
| 1652 | } | ||
| 1653 | |||
| 1634 | /* | 1654 | /* |
| 1635 | * Misc support functions | 1655 | * Misc support functions |
| 1636 | */ | 1656 | */ |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index c893f236acea..8f23401832b7 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
| @@ -175,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
| 175 | 175 | ||
| 176 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | 176 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
| 177 | 177 | ||
| 178 | del_timer_sync(&tid_tx->addba_resp_timer); | ||
| 179 | |||
| 178 | /* | 180 | /* |
| 179 | * After this packets are no longer handed right through | 181 | * After this packets are no longer handed right through |
| 180 | * to the driver but are put onto tid_tx->pending instead, | 182 | * to the driver but are put onto tid_tx->pending instead, |
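The added del_timer_sync() matters for ordering: once the session is marked STOPPING, the addba response timer must neither fire later nor still be running on another CPU, and del_timer_sync() both cancels the timer and waits for a concurrent handler to return. A minimal kernel-style sketch of that teardown ordering, with hypothetical type and bit names standing in for the mac80211 ones:

    #include <linux/timer.h>
    #include <linux/bitops.h>

    #define BA_STATE_STOPPING 0              /* hypothetical stand-in for HT_AGG_STATE_STOPPING */

    struct ba_session {                      /* hypothetical stand-in for the TX aggregation state */
            unsigned long state;
            struct timer_list addba_resp_timer;
    };

    static void stop_ba_session(struct ba_session *s)
    {
            set_bit(BA_STATE_STOPPING, &s->state);
            del_timer_sync(&s->addba_resp_timer);   /* cancel and wait for a running handler */
            /* teardown can continue; the timer can no longer touch the session */
    }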
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fa0f37e4afe4..28624282c5f3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -2199,9 +2199,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
| 2199 | struct net_device *prev_dev = NULL; | 2199 | struct net_device *prev_dev = NULL; |
| 2200 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2200 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
| 2201 | 2201 | ||
| 2202 | if (status->flag & RX_FLAG_INTERNAL_CMTR) | ||
| 2203 | goto out_free_skb; | ||
| 2204 | |||
| 2205 | if (skb_headroom(skb) < sizeof(*rthdr) && | 2202 | if (skb_headroom(skb) < sizeof(*rthdr) && |
| 2206 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) | 2203 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) |
| 2207 | goto out_free_skb; | 2204 | goto out_free_skb; |
| @@ -2260,7 +2257,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
| 2260 | } else | 2257 | } else |
| 2261 | goto out_free_skb; | 2258 | goto out_free_skb; |
| 2262 | 2259 | ||
| 2263 | status->flag |= RX_FLAG_INTERNAL_CMTR; | ||
| 2264 | return; | 2260 | return; |
| 2265 | 2261 | ||
| 2266 | out_free_skb: | 2262 | out_free_skb: |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 10caec5ea8fa..34da67995d94 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
| @@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 377 | skb2 = skb_clone(skb, GFP_ATOMIC); | 377 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 378 | if (skb2) { | 378 | if (skb2) { |
| 379 | skb2->dev = prev_dev; | 379 | skb2->dev = prev_dev; |
| 380 | netif_receive_skb(skb2); | 380 | netif_rx(skb2); |
| 381 | } | 381 | } |
| 382 | } | 382 | } |
| 383 | 383 | ||
| @@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 386 | } | 386 | } |
| 387 | if (prev_dev) { | 387 | if (prev_dev) { |
| 388 | skb->dev = prev_dev; | 388 | skb->dev = prev_dev; |
| 389 | netif_receive_skb(skb); | 389 | netif_rx(skb); |
| 390 | skb = NULL; | 390 | skb = NULL; |
| 391 | } | 391 | } |
| 392 | rcu_read_unlock(); | 392 | rcu_read_unlock(); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 5bae1cd15eea..146476c6441a 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/rculist_nulls.h> | 22 | #include <linux/rculist_nulls.h> |
| 23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 24 | #include <linux/timer.h> | 24 | #include <linux/timer.h> |
| 25 | #include <linux/security.h> | ||
| 25 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
| 26 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
| 27 | #include <linux/netlink.h> | 28 | #include <linux/netlink.h> |
| @@ -245,16 +246,31 @@ nla_put_failure: | |||
| 245 | 246 | ||
| 246 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 247 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
| 247 | static inline int | 248 | static inline int |
| 248 | ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct) | 249 | ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) |
| 249 | { | 250 | { |
| 250 | NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark)); | 251 | struct nlattr *nest_secctx; |
| 251 | return 0; | 252 | int len, ret; |
| 253 | char *secctx; | ||
| 254 | |||
| 255 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); | ||
| 256 | if (ret) | ||
| 257 | return ret; | ||
| 258 | |||
| 259 | ret = -1; | ||
| 260 | nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED); | ||
| 261 | if (!nest_secctx) | ||
| 262 | goto nla_put_failure; | ||
| 263 | |||
| 264 | NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); | ||
| 265 | nla_nest_end(skb, nest_secctx); | ||
| 252 | 266 | ||
| 267 | ret = 0; | ||
| 253 | nla_put_failure: | 268 | nla_put_failure: |
| 254 | return -1; | 269 | security_release_secctx(secctx, len); |
| 270 | return ret; | ||
| 255 | } | 271 | } |
| 256 | #else | 272 | #else |
| 257 | #define ctnetlink_dump_secmark(a, b) (0) | 273 | #define ctnetlink_dump_secctx(a, b) (0) |
| 258 | #endif | 274 | #endif |
| 259 | 275 | ||
| 260 | #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) | 276 | #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) |
| @@ -391,7 +407,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 391 | ctnetlink_dump_protoinfo(skb, ct) < 0 || | 407 | ctnetlink_dump_protoinfo(skb, ct) < 0 || |
| 392 | ctnetlink_dump_helpinfo(skb, ct) < 0 || | 408 | ctnetlink_dump_helpinfo(skb, ct) < 0 || |
| 393 | ctnetlink_dump_mark(skb, ct) < 0 || | 409 | ctnetlink_dump_mark(skb, ct) < 0 || |
| 394 | ctnetlink_dump_secmark(skb, ct) < 0 || | 410 | ctnetlink_dump_secctx(skb, ct) < 0 || |
| 395 | ctnetlink_dump_id(skb, ct) < 0 || | 411 | ctnetlink_dump_id(skb, ct) < 0 || |
| 396 | ctnetlink_dump_use(skb, ct) < 0 || | 412 | ctnetlink_dump_use(skb, ct) < 0 || |
| 397 | ctnetlink_dump_master(skb, ct) < 0 || | 413 | ctnetlink_dump_master(skb, ct) < 0 || |
| @@ -437,6 +453,17 @@ ctnetlink_counters_size(const struct nf_conn *ct) | |||
| 437 | ; | 453 | ; |
| 438 | } | 454 | } |
| 439 | 455 | ||
| 456 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | ||
| 457 | static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct) | ||
| 458 | { | ||
| 459 | int len; | ||
| 460 | |||
| 461 | security_secid_to_secctx(ct->secmark, NULL, &len); | ||
| 462 | |||
| 463 | return sizeof(char) * len; | ||
| 464 | } | ||
| 465 | #endif | ||
| 466 | |||
| 440 | static inline size_t | 467 | static inline size_t |
| 441 | ctnetlink_nlmsg_size(const struct nf_conn *ct) | 468 | ctnetlink_nlmsg_size(const struct nf_conn *ct) |
| 442 | { | 469 | { |
| @@ -453,7 +480,8 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) | |||
| 453 | + nla_total_size(0) /* CTA_HELP */ | 480 | + nla_total_size(0) /* CTA_HELP */ |
| 454 | + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ | 481 | + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ |
| 455 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 482 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
| 456 | + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */ | 483 | + nla_total_size(0) /* CTA_SECCTX */ |
| 484 | + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */ | ||
| 457 | #endif | 485 | #endif |
| 458 | #ifdef CONFIG_NF_NAT_NEEDED | 486 | #ifdef CONFIG_NF_NAT_NEEDED |
| 459 | + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ | 487 | + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ |
| @@ -556,7 +584,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) | |||
| 556 | 584 | ||
| 557 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 585 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
| 558 | if ((events & (1 << IPCT_SECMARK) || ct->secmark) | 586 | if ((events & (1 << IPCT_SECMARK) || ct->secmark) |
| 559 | && ctnetlink_dump_secmark(skb, ct) < 0) | 587 | && ctnetlink_dump_secctx(skb, ct) < 0) |
| 560 | goto nla_put_failure; | 588 | goto nla_put_failure; |
| 561 | #endif | 589 | #endif |
| 562 | 590 | ||
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index eb973fcd67ab..0fb65705b44b 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
| 16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
| 17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
| 18 | #include <linux/security.h> | ||
| 18 | #include <net/net_namespace.h> | 19 | #include <net/net_namespace.h> |
| 19 | #ifdef CONFIG_SYSCTL | 20 | #ifdef CONFIG_SYSCTL |
| 20 | #include <linux/sysctl.h> | 21 | #include <linux/sysctl.h> |
| @@ -108,6 +109,29 @@ static void ct_seq_stop(struct seq_file *s, void *v) | |||
| 108 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
| 109 | } | 110 | } |
| 110 | 111 | ||
| 112 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | ||
| 113 | static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | ||
| 114 | { | ||
| 115 | int ret; | ||
| 116 | u32 len; | ||
| 117 | char *secctx; | ||
| 118 | |||
| 119 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); | ||
| 120 | if (ret) | ||
| 121 | return ret; | ||
| 122 | |||
| 123 | ret = seq_printf(s, "secctx=%s ", secctx); | ||
| 124 | |||
| 125 | security_release_secctx(secctx, len); | ||
| 126 | return ret; | ||
| 127 | } | ||
| 128 | #else | ||
| 129 | static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | ||
| 130 | { | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | #endif | ||
| 134 | |||
| 111 | /* return 0 on success, 1 in case of error */ | 135 | /* return 0 on success, 1 in case of error */ |
| 112 | static int ct_seq_show(struct seq_file *s, void *v) | 136 | static int ct_seq_show(struct seq_file *s, void *v) |
| 113 | { | 137 | { |
| @@ -168,10 +192,8 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
| 168 | goto release; | 192 | goto release; |
| 169 | #endif | 193 | #endif |
| 170 | 194 | ||
| 171 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 195 | if (ct_show_secctx(s, ct)) |
| 172 | if (seq_printf(s, "secmark=%u ", ct->secmark)) | ||
| 173 | goto release; | 196 | goto release; |
| 174 | #endif | ||
| 175 | 197 | ||
| 176 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 198 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
| 177 | if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) | 199 | if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) |
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 0cb6053f02fd..782e51986a6f 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
| 11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
| 12 | #include <linux/selinux.h> | ||
| 13 | #include <linux/netfilter_ipv4/ip_tables.h> | 12 | #include <linux/netfilter_ipv4/ip_tables.h> |
| 14 | #include <linux/netfilter_ipv6/ip6_tables.h> | 13 | #include <linux/netfilter_ipv6/ip6_tables.h> |
| 15 | #include <linux/netfilter/x_tables.h> | 14 | #include <linux/netfilter/x_tables.h> |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 23b2d6c486b5..9faf5e050b79 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
| @@ -14,8 +14,8 @@ | |||
| 14 | */ | 14 | */ |
| 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <linux/security.h> | ||
| 17 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 18 | #include <linux/selinux.h> | ||
| 19 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
| 20 | #include <linux/netfilter/xt_SECMARK.h> | 20 | #include <linux/netfilter/xt_SECMARK.h> |
| 21 | 21 | ||
| @@ -39,9 +39,8 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 39 | 39 | ||
| 40 | switch (mode) { | 40 | switch (mode) { |
| 41 | case SECMARK_MODE_SEL: | 41 | case SECMARK_MODE_SEL: |
| 42 | secmark = info->u.sel.selsid; | 42 | secmark = info->secid; |
| 43 | break; | 43 | break; |
| 44 | |||
| 45 | default: | 44 | default: |
| 46 | BUG(); | 45 | BUG(); |
| 47 | } | 46 | } |
| @@ -50,33 +49,33 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 50 | return XT_CONTINUE; | 49 | return XT_CONTINUE; |
| 51 | } | 50 | } |
| 52 | 51 | ||
| 53 | static int checkentry_selinux(struct xt_secmark_target_info *info) | 52 | static int checkentry_lsm(struct xt_secmark_target_info *info) |
| 54 | { | 53 | { |
| 55 | int err; | 54 | int err; |
| 56 | struct xt_secmark_target_selinux_info *sel = &info->u.sel; | ||
| 57 | 55 | ||
| 58 | sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; | 56 | info->secctx[SECMARK_SECCTX_MAX - 1] = '\0'; |
| 57 | info->secid = 0; | ||
| 59 | 58 | ||
| 60 | err = selinux_string_to_sid(sel->selctx, &sel->selsid); | 59 | err = security_secctx_to_secid(info->secctx, strlen(info->secctx), |
| 60 | &info->secid); | ||
| 61 | if (err) { | 61 | if (err) { |
| 62 | if (err == -EINVAL) | 62 | if (err == -EINVAL) |
| 63 | pr_info("invalid SELinux context \'%s\'\n", | 63 | pr_info("invalid security context \'%s\'\n", info->secctx); |
| 64 | sel->selctx); | ||
| 65 | return err; | 64 | return err; |
| 66 | } | 65 | } |
| 67 | 66 | ||
| 68 | if (!sel->selsid) { | 67 | if (!info->secid) { |
| 69 | pr_info("unable to map SELinux context \'%s\'\n", sel->selctx); | 68 | pr_info("unable to map security context \'%s\'\n", info->secctx); |
| 70 | return -ENOENT; | 69 | return -ENOENT; |
| 71 | } | 70 | } |
| 72 | 71 | ||
| 73 | err = selinux_secmark_relabel_packet_permission(sel->selsid); | 72 | err = security_secmark_relabel_packet(info->secid); |
| 74 | if (err) { | 73 | if (err) { |
| 75 | pr_info("unable to obtain relabeling permission\n"); | 74 | pr_info("unable to obtain relabeling permission\n"); |
| 76 | return err; | 75 | return err; |
| 77 | } | 76 | } |
| 78 | 77 | ||
| 79 | selinux_secmark_refcount_inc(); | 78 | security_secmark_refcount_inc(); |
| 80 | return 0; | 79 | return 0; |
| 81 | } | 80 | } |
| 82 | 81 | ||
| @@ -100,16 +99,16 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) | |||
| 100 | 99 | ||
| 101 | switch (info->mode) { | 100 | switch (info->mode) { |
| 102 | case SECMARK_MODE_SEL: | 101 | case SECMARK_MODE_SEL: |
| 103 | err = checkentry_selinux(info); | ||
| 104 | if (err <= 0) | ||
| 105 | return err; | ||
| 106 | break; | 102 | break; |
| 107 | |||
| 108 | default: | 103 | default: |
| 109 | pr_info("invalid mode: %hu\n", info->mode); | 104 | pr_info("invalid mode: %hu\n", info->mode); |
| 110 | return -EINVAL; | 105 | return -EINVAL; |
| 111 | } | 106 | } |
| 112 | 107 | ||
| 108 | err = checkentry_lsm(info); | ||
| 109 | if (err) | ||
| 110 | return err; | ||
| 111 | |||
| 113 | if (!mode) | 112 | if (!mode) |
| 114 | mode = info->mode; | 113 | mode = info->mode; |
| 115 | return 0; | 114 | return 0; |
| @@ -119,7 +118,7 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par) | |||
| 119 | { | 118 | { |
| 120 | switch (mode) { | 119 | switch (mode) { |
| 121 | case SECMARK_MODE_SEL: | 120 | case SECMARK_MODE_SEL: |
| 122 | selinux_secmark_refcount_dec(); | 121 | security_secmark_refcount_dec(); |
| 123 | } | 122 | } |
| 124 | } | 123 | } |
| 125 | 124 | ||
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index b2a3ae6cad78..15003021f4f0 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
| @@ -225,12 +225,13 @@ static void pipe_grant_credits(struct sock *sk) | |||
| 225 | static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) | 225 | static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) |
| 226 | { | 226 | { |
| 227 | struct pep_sock *pn = pep_sk(sk); | 227 | struct pep_sock *pn = pep_sk(sk); |
| 228 | struct pnpipehdr *hdr = pnp_hdr(skb); | 228 | struct pnpipehdr *hdr; |
| 229 | int wake = 0; | 229 | int wake = 0; |
| 230 | 230 | ||
| 231 | if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) | 231 | if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) |
| 232 | return -EINVAL; | 232 | return -EINVAL; |
| 233 | 233 | ||
| 234 | hdr = pnp_hdr(skb); | ||
| 234 | if (hdr->data[0] != PN_PEP_TYPE_COMMON) { | 235 | if (hdr->data[0] != PN_PEP_TYPE_COMMON) { |
| 235 | LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", | 236 | LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", |
| 236 | (unsigned)hdr->data[0]); | 237 | (unsigned)hdr->data[0]); |
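The pipe_rcv_status() change is a stale-pointer fix: pskb_may_pull() may reallocate the skb head, so a pnp_hdr() pointer taken beforehand can end up dangling. The userspace sketch below shows the same bug class with realloc(); grow() and the buffer contents are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* May move the allocation, just as pskb_may_pull() may move the skb head. */
    static char *grow(char *buf, size_t *len, size_t need)
    {
            if (*len >= need)
                    return buf;
            *len = need;
            return realloc(buf, need);
    }

    int main(void)
    {
            size_t len = 4;
            char *buf = malloc(len);

            strcpy(buf, "hdr");
            char *hdr = buf;                 /* BAD: taken before the possible move */
            buf = grow(buf, &len, 4096);
            hdr = buf;                       /* the fix: derive the pointer afterwards */
            printf("%s\n", hdr);
            free(buf);
            return 0;
    }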
diff --git a/net/rds/page.c b/net/rds/page.c index 595a952d4b17..1dfbfea12e9b 100644 --- a/net/rds/page.c +++ b/net/rds/page.c | |||
| @@ -57,30 +57,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset, | |||
| 57 | unsigned long ret; | 57 | unsigned long ret; |
| 58 | void *addr; | 58 | void *addr; |
| 59 | 59 | ||
| 60 | if (to_user) | 60 | addr = kmap(page); |
| 61 | if (to_user) { | ||
| 61 | rds_stats_add(s_copy_to_user, bytes); | 62 | rds_stats_add(s_copy_to_user, bytes); |
| 62 | else | 63 | ret = copy_to_user(ptr, addr + offset, bytes); |
| 64 | } else { | ||
| 63 | rds_stats_add(s_copy_from_user, bytes); | 65 | rds_stats_add(s_copy_from_user, bytes); |
| 64 | 66 | ret = copy_from_user(addr + offset, ptr, bytes); | |
| 65 | addr = kmap_atomic(page, KM_USER0); | ||
| 66 | if (to_user) | ||
| 67 | ret = __copy_to_user_inatomic(ptr, addr + offset, bytes); | ||
| 68 | else | ||
| 69 | ret = __copy_from_user_inatomic(addr + offset, ptr, bytes); | ||
| 70 | kunmap_atomic(addr, KM_USER0); | ||
| 71 | |||
| 72 | if (ret) { | ||
| 73 | addr = kmap(page); | ||
| 74 | if (to_user) | ||
| 75 | ret = copy_to_user(ptr, addr + offset, bytes); | ||
| 76 | else | ||
| 77 | ret = copy_from_user(addr + offset, ptr, bytes); | ||
| 78 | kunmap(page); | ||
| 79 | if (ret) | ||
| 80 | return -EFAULT; | ||
| 81 | } | 67 | } |
| 68 | kunmap(page); | ||
| 82 | 69 | ||
| 83 | return 0; | 70 | return ret ? -EFAULT : 0; |
| 84 | } | 71 | } |
| 85 | EXPORT_SYMBOL_GPL(rds_page_copy_user); | 72 | EXPORT_SYMBOL_GPL(rds_page_copy_user); |
| 86 | 73 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 7416a5c73b2a..b0c2a82178af 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -137,7 +137,7 @@ next_knode: | |||
| 137 | int toff = off + key->off + (off2 & key->offmask); | 137 | int toff = off + key->off + (off2 & key->offmask); |
| 138 | __be32 *data, _data; | 138 | __be32 *data, _data; |
| 139 | 139 | ||
| 140 | if (skb_headroom(skb) + toff < 0) | 140 | if (skb_headroom(skb) + toff > INT_MAX) |
| 141 | goto out; | 141 | goto out; |
| 142 | 142 | ||
| 143 | data = skb_header_pointer(skb, toff, 4, &_data); | 143 | data = skb_header_pointer(skb, toff, 4, &_data); |
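The cls_u32 change is an integer-promotion fix: skb_headroom() returns an unsigned value, so "unsigned + int < 0" can never be true and a negative toff slipped through; comparing the (wrapped) unsigned sum against INT_MAX does catch it. A standalone demonstration with stand-in values:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            unsigned int headroom = 64;      /* stands in for skb_headroom(skb) */
            int toff = -200;                 /* bogus negative offset */

            /* old check: the sum is computed as unsigned, so this is never true */
            printf("headroom + toff < 0       -> %d\n", headroom + toff < 0);

            /* new check: the wrapped unsigned sum exceeds INT_MAX, so this fires */
            printf("headroom + toff > INT_MAX -> %d\n", headroom + toff > INT_MAX);
            return 0;
    }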
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 86366390038a..ddbbf7c81fa1 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) | |||
| 543 | id = ntohs(hmacs->hmac_ids[i]); | 543 | id = ntohs(hmacs->hmac_ids[i]); |
| 544 | 544 | ||
| 545 | /* Check the id is in the supported range */ | 545 | /* Check the id is in the supported range */ |
| 546 | if (id > SCTP_AUTH_HMAC_ID_MAX) | 546 | if (id > SCTP_AUTH_HMAC_ID_MAX) { |
| 547 | id = 0; | ||
| 547 | continue; | 548 | continue; |
| 549 | } | ||
| 548 | 550 | ||
| 549 | /* See if we support the id. Supported IDs have name and | 551 | /* See if we support the id. Supported IDs have name and |
| 550 | * length fields set, so that we can allocate and use | 552 | * length fields set, so that we can allocate and use |
| 551 | * them. We can safely just check for name, for without the | 553 | * them. We can safely just check for name, for without the |
| 552 | * name, we can't allocate the TFM. | 554 | * name, we can't allocate the TFM. |
| 553 | */ | 555 | */ |
| 554 | if (!sctp_hmac_list[id].hmac_name) | 556 | if (!sctp_hmac_list[id].hmac_name) { |
| 557 | id = 0; | ||
| 555 | continue; | 558 | continue; |
| 559 | } | ||
| 556 | 560 | ||
| 557 | break; | 561 | break; |
| 558 | } | 562 | } |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ca44917872d2..fbb70770ad05 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -916,6 +916,11 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, | |||
| 916 | /* Walk through the addrs buffer and count the number of addresses. */ | 916 | /* Walk through the addrs buffer and count the number of addresses. */ |
| 917 | addr_buf = kaddrs; | 917 | addr_buf = kaddrs; |
| 918 | while (walk_size < addrs_size) { | 918 | while (walk_size < addrs_size) { |
| 919 | if (walk_size + sizeof(sa_family_t) > addrs_size) { | ||
| 920 | kfree(kaddrs); | ||
| 921 | return -EINVAL; | ||
| 922 | } | ||
| 923 | |||
| 919 | sa_addr = (struct sockaddr *)addr_buf; | 924 | sa_addr = (struct sockaddr *)addr_buf; |
| 920 | af = sctp_get_af_specific(sa_addr->sa_family); | 925 | af = sctp_get_af_specific(sa_addr->sa_family); |
| 921 | 926 | ||
| @@ -1002,9 +1007,13 @@ static int __sctp_connect(struct sock* sk, | |||
| 1002 | /* Walk through the addrs buffer and count the number of addresses. */ | 1007 | /* Walk through the addrs buffer and count the number of addresses. */ |
| 1003 | addr_buf = kaddrs; | 1008 | addr_buf = kaddrs; |
| 1004 | while (walk_size < addrs_size) { | 1009 | while (walk_size < addrs_size) { |
| 1010 | if (walk_size + sizeof(sa_family_t) > addrs_size) { | ||
| 1011 | err = -EINVAL; | ||
| 1012 | goto out_free; | ||
| 1013 | } | ||
| 1014 | |||
| 1005 | sa_addr = (union sctp_addr *)addr_buf; | 1015 | sa_addr = (union sctp_addr *)addr_buf; |
| 1006 | af = sctp_get_af_specific(sa_addr->sa.sa_family); | 1016 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
| 1007 | port = ntohs(sa_addr->v4.sin_port); | ||
| 1008 | 1017 | ||
| 1009 | /* If the address family is not supported or if this address | 1018 | /* If the address family is not supported or if this address |
| 1010 | * causes the address buffer to overflow return EINVAL. | 1019 | * causes the address buffer to overflow return EINVAL. |
| @@ -1014,6 +1023,8 @@ static int __sctp_connect(struct sock* sk, | |||
| 1014 | goto out_free; | 1023 | goto out_free; |
| 1015 | } | 1024 | } |
| 1016 | 1025 | ||
| 1026 | port = ntohs(sa_addr->v4.sin_port); | ||
| 1027 | |||
| 1017 | /* Save current address so we can work with it */ | 1028 | /* Save current address so we can work with it */ |
| 1018 | memcpy(&to, sa_addr, af->sockaddr_len); | 1029 | memcpy(&to, sa_addr, af->sockaddr_len); |
| 1019 | 1030 | ||
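Both SCTP hunks add the same guard: before reading sa_family from the next record in a user-supplied address blob, at least sizeof(sa_family_t) bytes must remain, otherwise a short trailing fragment is read past the end of the copied buffer. A userspace sketch of that walk, with hypothetical names and only two address families handled:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Hypothetical walk over a packed blob of sockaddrs, mirroring the added check. */
    static int walk_addrs(const char *buf, size_t addrs_size)
    {
            size_t walk = 0;

            while (walk < addrs_size) {
                    sa_family_t family;
                    size_t len;

                    if (walk + sizeof(sa_family_t) > addrs_size)
                            return -1;       /* truncated trailing record */

                    memcpy(&family, buf + walk, sizeof(family));
                    len = (family == AF_INET) ? sizeof(struct sockaddr_in)
                                              : sizeof(struct sockaddr_in6);
                    if (walk + len > addrs_size)
                            return -1;       /* record overflows the blob */

                    walk += len;
            }
            return 0;
    }

    int main(void)
    {
            struct sockaddr_in a = { .sin_family = AF_INET };
            char blob[sizeof(a) + 1];        /* one full record plus one stray byte */

            memcpy(blob, &a, sizeof(a));
            blob[sizeof(a)] = AF_INET;       /* looks like the start of another record */
            printf("walk result: %d\n", walk_addrs(blob, sizeof(blob)));
            return 0;
    }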
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c index ee03a4f0b64f..06473791c08a 100644 --- a/samples/kfifo/dma-example.c +++ b/samples/kfifo/dma-example.c | |||
| @@ -24,6 +24,7 @@ static int __init example_init(void) | |||
| 24 | { | 24 | { |
| 25 | int i; | 25 | int i; |
| 26 | unsigned int ret; | 26 | unsigned int ret; |
| 27 | unsigned int nents; | ||
| 27 | struct scatterlist sg[10]; | 28 | struct scatterlist sg[10]; |
| 28 | 29 | ||
| 29 | printk(KERN_INFO "DMA fifo test start\n"); | 30 | printk(KERN_INFO "DMA fifo test start\n"); |
| @@ -61,9 +62,9 @@ static int __init example_init(void) | |||
| 61 | * byte at the beginning, after the kfifo_skip(). | 62 | * byte at the beginning, after the kfifo_skip(). |
| 62 | */ | 63 | */ |
| 63 | sg_init_table(sg, ARRAY_SIZE(sg)); | 64 | sg_init_table(sg, ARRAY_SIZE(sg)); |
| 64 | ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); | 65 | nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); |
| 65 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 66 | printk(KERN_INFO "DMA sgl entries: %d\n", nents); |
| 66 | if (!ret) { | 67 | if (!nents) { |
| 67 | /* fifo is full and no sgl was created */ | 68 | /* fifo is full and no sgl was created */ |
| 68 | printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); | 69 | printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); |
| 69 | return -EIO; | 70 | return -EIO; |
| @@ -71,7 +72,7 @@ static int __init example_init(void) | |||
| 71 | 72 | ||
| 72 | /* receive data */ | 73 | /* receive data */ |
| 73 | printk(KERN_INFO "scatterlist for receive:\n"); | 74 | printk(KERN_INFO "scatterlist for receive:\n"); |
| 74 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 75 | for (i = 0; i < nents; i++) { |
| 75 | printk(KERN_INFO | 76 | printk(KERN_INFO |
| 76 | "sg[%d] -> " | 77 | "sg[%d] -> " |
| 77 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | 78 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
| @@ -91,16 +92,16 @@ static int __init example_init(void) | |||
| 91 | kfifo_dma_in_finish(&fifo, ret); | 92 | kfifo_dma_in_finish(&fifo, ret); |
| 92 | 93 | ||
| 93 | /* Prepare to transmit data, example: 8 bytes */ | 94 | /* Prepare to transmit data, example: 8 bytes */ |
| 94 | ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); | 95 | nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); |
| 95 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 96 | printk(KERN_INFO "DMA sgl entries: %d\n", nents); |
| 96 | if (!ret) { | 97 | if (!nents) { |
| 97 | /* no data was available and no sgl was created */ | 98 | /* no data was available and no sgl was created */ |
| 98 | printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); | 99 | printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); |
| 99 | return -EIO; | 100 | return -EIO; |
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | printk(KERN_INFO "scatterlist for transmit:\n"); | 103 | printk(KERN_INFO "scatterlist for transmit:\n"); |
| 103 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 104 | for (i = 0; i < nents; i++) { |
| 104 | printk(KERN_INFO | 105 | printk(KERN_INFO |
| 105 | "sg[%d] -> " | 106 | "sg[%d] -> " |
| 106 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | 107 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 5b7c86ea43a1..7ef429cd5cb3 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c | |||
| @@ -427,7 +427,7 @@ static void check_conf(struct menu *menu) | |||
| 427 | if (sym->name && !sym_is_choice_value(sym)) { | 427 | if (sym->name && !sym_is_choice_value(sym)) { |
| 428 | printf("CONFIG_%s\n", sym->name); | 428 | printf("CONFIG_%s\n", sym->name); |
| 429 | } | 429 | } |
| 430 | } else { | 430 | } else if (input_mode != oldnoconfig) { |
| 431 | if (!conf_cnt++) | 431 | if (!conf_cnt++) |
| 432 | printf(_("*\n* Restart config...\n*\n")); | 432 | printf(_("*\n* Restart config...\n*\n")); |
| 433 | rootEntry = menu_get_parent_menu(menu); | 433 | rootEntry = menu_get_parent_menu(menu); |
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index 6ee2e4fb1481..170459c224a1 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h | |||
| @@ -165,7 +165,6 @@ struct menu { | |||
| 165 | struct symbol *sym; | 165 | struct symbol *sym; |
| 166 | struct property *prompt; | 166 | struct property *prompt; |
| 167 | struct expr *dep; | 167 | struct expr *dep; |
| 168 | struct expr *dir_dep; | ||
| 169 | unsigned int flags; | 168 | unsigned int flags; |
| 170 | char *help; | 169 | char *help; |
| 171 | struct file *file; | 170 | struct file *file; |
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index 4fb590247f33..edda8b49619d 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c | |||
| @@ -107,7 +107,6 @@ static struct expr *menu_check_dep(struct expr *e) | |||
| 107 | void menu_add_dep(struct expr *dep) | 107 | void menu_add_dep(struct expr *dep) |
| 108 | { | 108 | { |
| 109 | current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep)); | 109 | current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep)); |
| 110 | current_entry->dir_dep = current_entry->dep; | ||
| 111 | } | 110 | } |
| 112 | 111 | ||
| 113 | void menu_set_type(int type) | 112 | void menu_set_type(int type) |
| @@ -291,10 +290,6 @@ void menu_finalize(struct menu *parent) | |||
| 291 | for (menu = parent->list; menu; menu = menu->next) | 290 | for (menu = parent->list; menu; menu = menu->next) |
| 292 | menu_finalize(menu); | 291 | menu_finalize(menu); |
| 293 | } else if (sym) { | 292 | } else if (sym) { |
| 294 | /* ignore inherited dependencies for dir_dep */ | ||
| 295 | sym->dir_dep.expr = expr_transform(expr_copy(parent->dir_dep)); | ||
| 296 | sym->dir_dep.expr = expr_eliminate_dups(sym->dir_dep.expr); | ||
| 297 | |||
| 298 | basedep = parent->prompt ? parent->prompt->visible.expr : NULL; | 293 | basedep = parent->prompt ? parent->prompt->visible.expr : NULL; |
| 299 | basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); | 294 | basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); |
| 300 | basedep = expr_eliminate_dups(expr_transform(basedep)); | 295 | basedep = expr_eliminate_dups(expr_transform(basedep)); |
| @@ -325,6 +320,8 @@ void menu_finalize(struct menu *parent) | |||
| 325 | parent->next = last_menu->next; | 320 | parent->next = last_menu->next; |
| 326 | last_menu->next = NULL; | 321 | last_menu->next = NULL; |
| 327 | } | 322 | } |
| 323 | |||
| 324 | sym->dir_dep.expr = parent->dep; | ||
| 328 | } | 325 | } |
| 329 | for (menu = parent->list; menu; menu = menu->next) { | 326 | for (menu = parent->list; menu; menu = menu->next) { |
| 330 | if (sym && sym_is_choice(sym) && | 327 | if (sym && sym_is_choice(sym) && |
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index 943712ca6c0a..1f8b305449db 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c | |||
| @@ -350,6 +350,7 @@ void sym_calc_value(struct symbol *sym) | |||
| 350 | } | 350 | } |
| 351 | } | 351 | } |
| 352 | calc_newval: | 352 | calc_newval: |
| 353 | #if 0 | ||
| 353 | if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { | 354 | if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { |
| 354 | fprintf(stderr, "warning: ("); | 355 | fprintf(stderr, "warning: ("); |
| 355 | expr_fprint(sym->rev_dep.expr, stderr); | 356 | expr_fprint(sym->rev_dep.expr, stderr); |
| @@ -358,6 +359,7 @@ void sym_calc_value(struct symbol *sym) | |||
| 358 | expr_fprint(sym->dir_dep.expr, stderr); | 359 | expr_fprint(sym->dir_dep.expr, stderr); |
| 359 | fprintf(stderr, ")\n"); | 360 | fprintf(stderr, ")\n"); |
| 360 | } | 361 | } |
| 362 | #endif | ||
| 361 | newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); | 363 | newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); |
| 362 | } | 364 | } |
| 363 | if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) | 365 | if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) |
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore index 0a0a99f3b083..4d995aeaebc0 100644 --- a/security/apparmor/.gitignore +++ b/security/apparmor/.gitignore | |||
| @@ -3,3 +3,4 @@ | |||
| 3 | # | 3 | # |
| 4 | af_names.h | 4 | af_names.h |
| 5 | capability_names.h | 5 | capability_names.h |
| 6 | rlim_names.h | ||
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 7320331b44ab..544ff5837cb6 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | * aa_simple_write_to_buffer - common routine for getting policy from user | 29 | * aa_simple_write_to_buffer - common routine for getting policy from user |
| 30 | * @op: operation doing the user buffer copy | 30 | * @op: operation doing the user buffer copy |
| 31 | * @userbuf: user buffer to copy data from (NOT NULL) | 31 | * @userbuf: user buffer to copy data from (NOT NULL) |
| 32 | * @alloc_size: size of user buffer | 32 | * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size) |
| 33 | * @copy_size: size of data to copy from user buffer | 33 | * @copy_size: size of data to copy from user buffer |
| 34 | * @pos: position write is at in the file (NOT NULL) | 34 | * @pos: position write is at in the file (NOT NULL) |
| 35 | * | 35 | * |
| @@ -42,6 +42,8 @@ static char *aa_simple_write_to_buffer(int op, const char __user *userbuf, | |||
| 42 | { | 42 | { |
| 43 | char *data; | 43 | char *data; |
| 44 | 44 | ||
| 45 | BUG_ON(copy_size > alloc_size); | ||
| 46 | |||
| 45 | if (*pos != 0) | 47 | if (*pos != 0) |
| 46 | /* only writes from pos 0, that is complete writes */ | 48 | /* only writes from pos 0, that is complete writes */ |
| 47 | return ERR_PTR(-ESPIPE); | 49 | return ERR_PTR(-ESPIPE); |
diff --git a/security/capability.c b/security/capability.c index 95a6599a37bb..30ae00fbecd5 100644 --- a/security/capability.c +++ b/security/capability.c | |||
| @@ -677,7 +677,18 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb) | |||
| 677 | { | 677 | { |
| 678 | } | 678 | } |
| 679 | 679 | ||
| 680 | static int cap_secmark_relabel_packet(u32 secid) | ||
| 681 | { | ||
| 682 | return 0; | ||
| 683 | } | ||
| 680 | 684 | ||
| 685 | static void cap_secmark_refcount_inc(void) | ||
| 686 | { | ||
| 687 | } | ||
| 688 | |||
| 689 | static void cap_secmark_refcount_dec(void) | ||
| 690 | { | ||
| 691 | } | ||
| 681 | 692 | ||
| 682 | static void cap_req_classify_flow(const struct request_sock *req, | 693 | static void cap_req_classify_flow(const struct request_sock *req, |
| 683 | struct flowi *fl) | 694 | struct flowi *fl) |
| @@ -777,7 +788,8 @@ static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
| 777 | 788 | ||
| 778 | static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) | 789 | static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) |
| 779 | { | 790 | { |
| 780 | return -EOPNOTSUPP; | 791 | *secid = 0; |
| 792 | return 0; | ||
| 781 | } | 793 | } |
| 782 | 794 | ||
| 783 | static void cap_release_secctx(char *secdata, u32 seclen) | 795 | static void cap_release_secctx(char *secdata, u32 seclen) |
| @@ -1018,6 +1030,9 @@ void __init security_fixup_ops(struct security_operations *ops) | |||
| 1018 | set_to_cap_if_null(ops, inet_conn_request); | 1030 | set_to_cap_if_null(ops, inet_conn_request); |
| 1019 | set_to_cap_if_null(ops, inet_csk_clone); | 1031 | set_to_cap_if_null(ops, inet_csk_clone); |
| 1020 | set_to_cap_if_null(ops, inet_conn_established); | 1032 | set_to_cap_if_null(ops, inet_conn_established); |
| 1033 | set_to_cap_if_null(ops, secmark_relabel_packet); | ||
| 1034 | set_to_cap_if_null(ops, secmark_refcount_inc); | ||
| 1035 | set_to_cap_if_null(ops, secmark_refcount_dec); | ||
| 1021 | set_to_cap_if_null(ops, req_classify_flow); | 1036 | set_to_cap_if_null(ops, req_classify_flow); |
| 1022 | set_to_cap_if_null(ops, tun_dev_create); | 1037 | set_to_cap_if_null(ops, tun_dev_create); |
| 1023 | set_to_cap_if_null(ops, tun_dev_post_create); | 1038 | set_to_cap_if_null(ops, tun_dev_post_create); |
diff --git a/security/commoncap.c b/security/commoncap.c index 9d172e6e330c..5e632b4857e4 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
| @@ -719,14 +719,11 @@ static int cap_safe_nice(struct task_struct *p) | |||
| 719 | /** | 719 | /** |
| 720 | * cap_task_setscheduler - Determine if scheduler policy change is permitted | 720 | * cap_task_setscheduler - Determine if scheduler policy change is permitted |
| 721 | * @p: The task to affect | 721 | * @p: The task to affect |
| 722 | * @policy: The policy to effect | ||
| 723 | * @lp: The parameters to the scheduling policy | ||
| 724 | * | 722 | * |
| 725 | * Determine if the requested scheduler policy change is permitted for the | 723 | * Determine if the requested scheduler policy change is permitted for the |
| 726 | * specified task, returning 0 if permission is granted, -ve if denied. | 724 | * specified task, returning 0 if permission is granted, -ve if denied. |
| 727 | */ | 725 | */ |
| 728 | int cap_task_setscheduler(struct task_struct *p, int policy, | 726 | int cap_task_setscheduler(struct task_struct *p) |
| 729 | struct sched_param *lp) | ||
| 730 | { | 727 | { |
| 731 | return cap_safe_nice(p); | 728 | return cap_safe_nice(p); |
| 732 | } | 729 | } |
diff --git a/security/security.c b/security/security.c index c53949f17d9e..b50f472061a4 100644 --- a/security/security.c +++ b/security/security.c | |||
| @@ -89,20 +89,12 @@ __setup("security=", choose_lsm); | |||
| 89 | * Return true if: | 89 | * Return true if: |
| 90 | * -The passed LSM is the one chosen by user at boot time, | 90 | * -The passed LSM is the one chosen by user at boot time, |
| 91 | * -or the passed LSM is configured as the default and the user did not | 91 | * -or the passed LSM is configured as the default and the user did not |
| 92 | * choose an alternate LSM at boot time, | 92 | * choose an alternate LSM at boot time. |
| 93 | * -or there is no default LSM set and the user didn't specify a | ||
| 94 | * specific LSM and we're the first to ask for registration permission, | ||
| 95 | * -or the passed LSM is currently loaded. | ||
| 96 | * Otherwise, return false. | 93 | * Otherwise, return false. |
| 97 | */ | 94 | */ |
| 98 | int __init security_module_enable(struct security_operations *ops) | 95 | int __init security_module_enable(struct security_operations *ops) |
| 99 | { | 96 | { |
| 100 | if (!*chosen_lsm) | 97 | return !strcmp(ops->name, chosen_lsm); |
| 101 | strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX); | ||
| 102 | else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX)) | ||
| 103 | return 0; | ||
| 104 | |||
| 105 | return 1; | ||
| 106 | } | 98 | } |
| 107 | 99 | ||
| 108 | /** | 100 | /** |
| @@ -786,10 +778,9 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource, | |||
| 786 | return security_ops->task_setrlimit(p, resource, new_rlim); | 778 | return security_ops->task_setrlimit(p, resource, new_rlim); |
| 787 | } | 779 | } |
| 788 | 780 | ||
| 789 | int security_task_setscheduler(struct task_struct *p, | 781 | int security_task_setscheduler(struct task_struct *p) |
| 790 | int policy, struct sched_param *lp) | ||
| 791 | { | 782 | { |
| 792 | return security_ops->task_setscheduler(p, policy, lp); | 783 | return security_ops->task_setscheduler(p); |
| 793 | } | 784 | } |
| 794 | 785 | ||
| 795 | int security_task_getscheduler(struct task_struct *p) | 786 | int security_task_getscheduler(struct task_struct *p) |
| @@ -1145,6 +1136,24 @@ void security_inet_conn_established(struct sock *sk, | |||
| 1145 | security_ops->inet_conn_established(sk, skb); | 1136 | security_ops->inet_conn_established(sk, skb); |
| 1146 | } | 1137 | } |
| 1147 | 1138 | ||
| 1139 | int security_secmark_relabel_packet(u32 secid) | ||
| 1140 | { | ||
| 1141 | return security_ops->secmark_relabel_packet(secid); | ||
| 1142 | } | ||
| 1143 | EXPORT_SYMBOL(security_secmark_relabel_packet); | ||
| 1144 | |||
| 1145 | void security_secmark_refcount_inc(void) | ||
| 1146 | { | ||
| 1147 | security_ops->secmark_refcount_inc(); | ||
| 1148 | } | ||
| 1149 | EXPORT_SYMBOL(security_secmark_refcount_inc); | ||
| 1150 | |||
| 1151 | void security_secmark_refcount_dec(void) | ||
| 1152 | { | ||
| 1153 | security_ops->secmark_refcount_dec(); | ||
| 1154 | } | ||
| 1155 | EXPORT_SYMBOL(security_secmark_refcount_dec); | ||
| 1156 | |||
| 1148 | int security_tun_dev_create(void) | 1157 | int security_tun_dev_create(void) |
| 1149 | { | 1158 | { |
| 1150 | return security_ops->tun_dev_create(); | 1159 | return security_ops->tun_dev_create(); |
diff --git a/security/selinux/Makefile b/security/selinux/Makefile index 58d80f3bd6f6..ad5cd76ec231 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile | |||
| @@ -2,25 +2,20 @@ | |||
| 2 | # Makefile for building the SELinux module as part of the kernel tree. | 2 | # Makefile for building the SELinux module as part of the kernel tree. |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/ | 5 | obj-$(CONFIG_SECURITY_SELINUX) := selinux.o |
| 6 | 6 | ||
| 7 | selinux-y := avc.o \ | 7 | selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \ |
| 8 | hooks.o \ | 8 | netnode.o netport.o exports.o \ |
| 9 | selinuxfs.o \ | 9 | ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \ |
| 10 | netlink.o \ | 10 | ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o |
| 11 | nlmsgtab.o \ | ||
| 12 | netif.o \ | ||
| 13 | netnode.o \ | ||
| 14 | netport.o \ | ||
| 15 | exports.o | ||
| 16 | 11 | ||
| 17 | selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o | 12 | selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o |
| 18 | 13 | ||
| 19 | selinux-$(CONFIG_NETLABEL) += netlabel.o | 14 | selinux-$(CONFIG_NETLABEL) += netlabel.o |
| 20 | 15 | ||
| 21 | EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include | 16 | ccflags-y := -Isecurity/selinux -Isecurity/selinux/include |
| 22 | 17 | ||
| 23 | $(obj)/avc.o: $(obj)/flask.h | 18 | $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h |
| 24 | 19 | ||
| 25 | quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h | 20 | quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h |
| 26 | cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h | 21 | cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h |
diff --git a/security/selinux/exports.c b/security/selinux/exports.c index c0a454aee1e0..90664385dead 100644 --- a/security/selinux/exports.c +++ b/security/selinux/exports.c | |||
| @@ -11,58 +11,9 @@ | |||
| 11 | * it under the terms of the GNU General Public License version 2, | 11 | * it under the terms of the GNU General Public License version 2, |
| 12 | * as published by the Free Software Foundation. | 12 | * as published by the Free Software Foundation. |
| 13 | */ | 13 | */ |
| 14 | #include <linux/types.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 17 | #include <linux/selinux.h> | ||
| 18 | #include <linux/fs.h> | ||
| 19 | #include <linux/ipc.h> | ||
| 20 | #include <asm/atomic.h> | ||
| 21 | 15 | ||
| 22 | #include "security.h" | 16 | #include "security.h" |
| 23 | #include "objsec.h" | ||
| 24 | |||
| 25 | /* SECMARK reference count */ | ||
| 26 | extern atomic_t selinux_secmark_refcount; | ||
| 27 | |||
| 28 | int selinux_string_to_sid(char *str, u32 *sid) | ||
| 29 | { | ||
| 30 | if (selinux_enabled) | ||
| 31 | return security_context_to_sid(str, strlen(str), sid); | ||
| 32 | else { | ||
| 33 | *sid = 0; | ||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL_GPL(selinux_string_to_sid); | ||
| 38 | |||
| 39 | int selinux_secmark_relabel_packet_permission(u32 sid) | ||
| 40 | { | ||
| 41 | if (selinux_enabled) { | ||
| 42 | const struct task_security_struct *__tsec; | ||
| 43 | u32 tsid; | ||
| 44 | |||
| 45 | __tsec = current_security(); | ||
| 46 | tsid = __tsec->sid; | ||
| 47 | |||
| 48 | return avc_has_perm(tsid, sid, SECCLASS_PACKET, | ||
| 49 | PACKET__RELABELTO, NULL); | ||
| 50 | } | ||
| 51 | return 0; | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission); | ||
| 54 | |||
| 55 | void selinux_secmark_refcount_inc(void) | ||
| 56 | { | ||
| 57 | atomic_inc(&selinux_secmark_refcount); | ||
| 58 | } | ||
| 59 | EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc); | ||
| 60 | |||
| 61 | void selinux_secmark_refcount_dec(void) | ||
| 62 | { | ||
| 63 | atomic_dec(&selinux_secmark_refcount); | ||
| 64 | } | ||
| 65 | EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec); | ||
| 66 | 17 | ||
| 67 | bool selinux_is_enabled(void) | 18 | bool selinux_is_enabled(void) |
| 68 | { | 19 | { |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4796ddd4e721..d9154cf90ae1 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
| @@ -3354,11 +3354,11 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource, | |||
| 3354 | return 0; | 3354 | return 0; |
| 3355 | } | 3355 | } |
| 3356 | 3356 | ||
| 3357 | static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp) | 3357 | static int selinux_task_setscheduler(struct task_struct *p) |
| 3358 | { | 3358 | { |
| 3359 | int rc; | 3359 | int rc; |
| 3360 | 3360 | ||
| 3361 | rc = cap_task_setscheduler(p, policy, lp); | 3361 | rc = cap_task_setscheduler(p); |
| 3362 | if (rc) | 3362 | if (rc) |
| 3363 | return rc; | 3363 | return rc; |
| 3364 | 3364 | ||
| @@ -4279,6 +4279,27 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) | |||
| 4279 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); | 4279 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); |
| 4280 | } | 4280 | } |
| 4281 | 4281 | ||
| 4282 | static int selinux_secmark_relabel_packet(u32 sid) | ||
| 4283 | { | ||
| 4284 | const struct task_security_struct *__tsec; | ||
| 4285 | u32 tsid; | ||
| 4286 | |||
| 4287 | __tsec = current_security(); | ||
| 4288 | tsid = __tsec->sid; | ||
| 4289 | |||
| 4290 | return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL); | ||
| 4291 | } | ||
| 4292 | |||
| 4293 | static void selinux_secmark_refcount_inc(void) | ||
| 4294 | { | ||
| 4295 | atomic_inc(&selinux_secmark_refcount); | ||
| 4296 | } | ||
| 4297 | |||
| 4298 | static void selinux_secmark_refcount_dec(void) | ||
| 4299 | { | ||
| 4300 | atomic_dec(&selinux_secmark_refcount); | ||
| 4301 | } | ||
| 4302 | |||
| 4282 | static void selinux_req_classify_flow(const struct request_sock *req, | 4303 | static void selinux_req_classify_flow(const struct request_sock *req, |
| 4283 | struct flowi *fl) | 4304 | struct flowi *fl) |
| 4284 | { | 4305 | { |
| @@ -5533,6 +5554,9 @@ static struct security_operations selinux_ops = { | |||
| 5533 | .inet_conn_request = selinux_inet_conn_request, | 5554 | .inet_conn_request = selinux_inet_conn_request, |
| 5534 | .inet_csk_clone = selinux_inet_csk_clone, | 5555 | .inet_csk_clone = selinux_inet_csk_clone, |
| 5535 | .inet_conn_established = selinux_inet_conn_established, | 5556 | .inet_conn_established = selinux_inet_conn_established, |
| 5557 | .secmark_relabel_packet = selinux_secmark_relabel_packet, | ||
| 5558 | .secmark_refcount_inc = selinux_secmark_refcount_inc, | ||
| 5559 | .secmark_refcount_dec = selinux_secmark_refcount_dec, | ||
| 5536 | .req_classify_flow = selinux_req_classify_flow, | 5560 | .req_classify_flow = selinux_req_classify_flow, |
| 5537 | .tun_dev_create = selinux_tun_dev_create, | 5561 | .tun_dev_create = selinux_tun_dev_create, |
| 5538 | .tun_dev_post_create = selinux_tun_dev_post_create, | 5562 | .tun_dev_post_create = selinux_tun_dev_post_create, |
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index b4c9eb4bd6f9..8858d2b2d4b6 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h | |||
| @@ -17,7 +17,7 @@ struct security_class_mapping secclass_map[] = { | |||
| 17 | { "compute_av", "compute_create", "compute_member", | 17 | { "compute_av", "compute_create", "compute_member", |
| 18 | "check_context", "load_policy", "compute_relabel", | 18 | "check_context", "load_policy", "compute_relabel", |
| 19 | "compute_user", "setenforce", "setbool", "setsecparam", | 19 | "compute_user", "setenforce", "setbool", "setsecparam", |
| 20 | "setcheckreqprot", NULL } }, | 20 | "setcheckreqprot", "read_policy", NULL } }, |
| 21 | { "process", | 21 | { "process", |
| 22 | { "fork", "transition", "sigchld", "sigkill", | 22 | { "fork", "transition", "sigchld", "sigkill", |
| 23 | "sigstop", "signull", "signal", "ptrace", "getsched", "setsched", | 23 | "sigstop", "signull", "signal", "ptrace", "getsched", "setsched", |
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 1f7c2491d3dc..671273eb1115 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #define _SELINUX_SECURITY_H_ | 9 | #define _SELINUX_SECURITY_H_ |
| 10 | 10 | ||
| 11 | #include <linux/magic.h> | 11 | #include <linux/magic.h> |
| 12 | #include <linux/types.h> | ||
| 12 | #include "flask.h" | 13 | #include "flask.h" |
| 13 | 14 | ||
| 14 | #define SECSID_NULL 0x00000000 /* unspecified SID */ | 15 | #define SECSID_NULL 0x00000000 /* unspecified SID */ |
| @@ -82,6 +83,8 @@ extern int selinux_policycap_openperm; | |||
| 82 | int security_mls_enabled(void); | 83 | int security_mls_enabled(void); |
| 83 | 84 | ||
| 84 | int security_load_policy(void *data, size_t len); | 85 | int security_load_policy(void *data, size_t len); |
| 86 | int security_read_policy(void **data, ssize_t *len); | ||
| 87 | size_t security_policydb_len(void); | ||
| 85 | 88 | ||
| 86 | int security_policycap_supported(unsigned int req_cap); | 89 | int security_policycap_supported(unsigned int req_cap); |
| 87 | 90 | ||
| @@ -191,5 +194,25 @@ static inline int security_netlbl_sid_to_secattr(u32 sid, | |||
| 191 | 194 | ||
| 192 | const char *security_get_initial_sid_context(u32 sid); | 195 | const char *security_get_initial_sid_context(u32 sid); |
| 193 | 196 | ||
| 197 | /* | ||
| 198 | * status notifier using mmap interface | ||
| 199 | */ | ||
| 200 | extern struct page *selinux_kernel_status_page(void); | ||
| 201 | |||
| 202 | #define SELINUX_KERNEL_STATUS_VERSION 1 | ||
| 203 | struct selinux_kernel_status { | ||
| 204 | u32 version; /* version number of this structure */ | ||
| 205 | u32 sequence; /* sequence number of seqlock logic */ | ||
| 206 | u32 enforcing; /* current setting of enforcing mode */ | ||
| 207 | u32 policyload; /* times of policy reloaded */ | ||
| 208 | u32 deny_unknown; /* current setting of deny_unknown */ | ||
| 209 | /* | ||
| 210 | * The version > 0 supports above members. | ||
| 211 | * The members above are valid for any version > 0. | ||
| 212 | } __attribute__((packed)); | ||
| 213 | |||
| 214 | extern void selinux_status_update_setenforce(int enforcing); | ||
| 215 | extern void selinux_status_update_policyload(int seqno); | ||
| 216 | |||
| 194 | #endif /* _SELINUX_SECURITY_H_ */ | 217 | #endif /* _SELINUX_SECURITY_H_ */ |
| 195 | 218 | ||
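
The selinux_kernel_status page declared above is meant to be mapped read-only into userspace so that enforcing-mode and policy-reload changes can be polled without a system call. Below is a minimal, hypothetical userspace reader; it assumes selinuxfs is mounted at /selinux and that the kernel treats the sequence field as a seqlock-style counter that is odd while an update is in progress.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* userspace mirror of the kernel structure shown above */
struct selinux_kernel_status {
	uint32_t version;
	uint32_t sequence;
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
} __attribute__((packed));

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/selinux/status", O_RDONLY);
	const volatile struct selinux_kernel_status *st;
	void *map;
	uint32_t seq, enforcing, policyload;

	if (fd < 0)
		return 1;
	map = mmap(NULL, pagesz, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	st = map;

	do {
		/* retry while the kernel is mid-update */
		seq = st->sequence;
		enforcing = st->enforcing;
		policyload = st->policyload;
	} while ((seq & 1) || seq != st->sequence);

	printf("enforcing=%u policyload=%u\n",
	       (unsigned int)enforcing, (unsigned int)policyload);
	munmap(map, pagesz);
	close(fd);
	return 0;
}
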
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 79a1bb635662..87e0556bae70 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
| @@ -68,6 +68,8 @@ static int *bool_pending_values; | |||
| 68 | static struct dentry *class_dir; | 68 | static struct dentry *class_dir; |
| 69 | static unsigned long last_class_ino; | 69 | static unsigned long last_class_ino; |
| 70 | 70 | ||
| 71 | static char policy_opened; | ||
| 72 | |||
| 71 | /* global data for policy capabilities */ | 73 | /* global data for policy capabilities */ |
| 72 | static struct dentry *policycap_dir; | 74 | static struct dentry *policycap_dir; |
| 73 | 75 | ||
| @@ -110,6 +112,8 @@ enum sel_inos { | |||
| 110 | SEL_COMPAT_NET, /* whether to use old compat network packet controls */ | 112 | SEL_COMPAT_NET, /* whether to use old compat network packet controls */ |
| 111 | SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */ | 113 | SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */ |
| 112 | SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */ | 114 | SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */ |
| 115 | SEL_STATUS, /* export current status using mmap() */ | ||
| 116 | SEL_POLICY, /* allow userspace to read the in kernel policy */ | ||
| 113 | SEL_INO_NEXT, /* The next inode number to use */ | 117 | SEL_INO_NEXT, /* The next inode number to use */ |
| 114 | }; | 118 | }; |
| 115 | 119 | ||
| @@ -171,6 +175,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, | |||
| 171 | if (selinux_enforcing) | 175 | if (selinux_enforcing) |
| 172 | avc_ss_reset(0); | 176 | avc_ss_reset(0); |
| 173 | selnl_notify_setenforce(selinux_enforcing); | 177 | selnl_notify_setenforce(selinux_enforcing); |
| 178 | selinux_status_update_setenforce(selinux_enforcing); | ||
| 174 | } | 179 | } |
| 175 | length = count; | 180 | length = count; |
| 176 | out: | 181 | out: |
| @@ -205,6 +210,59 @@ static const struct file_operations sel_handle_unknown_ops = { | |||
| 205 | .llseek = generic_file_llseek, | 210 | .llseek = generic_file_llseek, |
| 206 | }; | 211 | }; |
| 207 | 212 | ||
| 213 | static int sel_open_handle_status(struct inode *inode, struct file *filp) | ||
| 214 | { | ||
| 215 | struct page *status = selinux_kernel_status_page(); | ||
| 216 | |||
| 217 | if (!status) | ||
| 218 | return -ENOMEM; | ||
| 219 | |||
| 220 | filp->private_data = status; | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | static ssize_t sel_read_handle_status(struct file *filp, char __user *buf, | ||
| 226 | size_t count, loff_t *ppos) | ||
| 227 | { | ||
| 228 | struct page *status = filp->private_data; | ||
| 229 | |||
| 230 | BUG_ON(!status); | ||
| 231 | |||
| 232 | return simple_read_from_buffer(buf, count, ppos, | ||
| 233 | page_address(status), | ||
| 234 | sizeof(struct selinux_kernel_status)); | ||
| 235 | } | ||
| 236 | |||
| 237 | static int sel_mmap_handle_status(struct file *filp, | ||
| 238 | struct vm_area_struct *vma) | ||
| 239 | { | ||
| 240 | struct page *status = filp->private_data; | ||
| 241 | unsigned long size = vma->vm_end - vma->vm_start; | ||
| 242 | |||
| 243 | BUG_ON(!status); | ||
| 244 | |||
| 245 | /* only allows one page from the head */ | ||
| 246 | if (vma->vm_pgoff > 0 || size != PAGE_SIZE) | ||
| 247 | return -EIO; | ||
| 248 | /* disallow writable mapping */ | ||
| 249 | if (vma->vm_flags & VM_WRITE) | ||
| 250 | return -EPERM; | ||
| 251 | /* disallow mprotect() from turning it writable */ | ||
| 252 | vma->vm_flags &= ~VM_MAYWRITE; | ||
| 253 | |||
| 254 | return remap_pfn_range(vma, vma->vm_start, | ||
| 255 | page_to_pfn(status), | ||
| 256 | size, vma->vm_page_prot); | ||
| 257 | } | ||
| 258 | |||
| 259 | static const struct file_operations sel_handle_status_ops = { | ||
| 260 | .open = sel_open_handle_status, | ||
| 261 | .read = sel_read_handle_status, | ||
| 262 | .mmap = sel_mmap_handle_status, | ||
| 263 | .llseek = generic_file_llseek, | ||
| 264 | }; | ||
| 265 | |||
| 208 | #ifdef CONFIG_SECURITY_SELINUX_DISABLE | 266 | #ifdef CONFIG_SECURITY_SELINUX_DISABLE |
| 209 | static ssize_t sel_write_disable(struct file *file, const char __user *buf, | 267 | static ssize_t sel_write_disable(struct file *file, const char __user *buf, |
| 210 | size_t count, loff_t *ppos) | 268 | size_t count, loff_t *ppos) |
| @@ -296,6 +354,141 @@ static const struct file_operations sel_mls_ops = { | |||
| 296 | .llseek = generic_file_llseek, | 354 | .llseek = generic_file_llseek, |
| 297 | }; | 355 | }; |
| 298 | 356 | ||
| 357 | struct policy_load_memory { | ||
| 358 | size_t len; | ||
| 359 | void *data; | ||
| 360 | }; | ||
| 361 | |||
| 362 | static int sel_open_policy(struct inode *inode, struct file *filp) | ||
| 363 | { | ||
| 364 | struct policy_load_memory *plm = NULL; | ||
| 365 | int rc; | ||
| 366 | |||
| 367 | BUG_ON(filp->private_data); | ||
| 368 | |||
| 369 | mutex_lock(&sel_mutex); | ||
| 370 | |||
| 371 | rc = task_has_security(current, SECURITY__READ_POLICY); | ||
| 372 | if (rc) | ||
| 373 | goto err; | ||
| 374 | |||
| 375 | rc = -EBUSY; | ||
| 376 | if (policy_opened) | ||
| 377 | goto err; | ||
| 378 | |||
| 379 | rc = -ENOMEM; | ||
| 380 | plm = kzalloc(sizeof(*plm), GFP_KERNEL); | ||
| 381 | if (!plm) | ||
| 382 | goto err; | ||
| 383 | |||
| 384 | if (i_size_read(inode) != security_policydb_len()) { | ||
| 385 | mutex_lock(&inode->i_mutex); | ||
| 386 | i_size_write(inode, security_policydb_len()); | ||
| 387 | mutex_unlock(&inode->i_mutex); | ||
| 388 | } | ||
| 389 | |||
| 390 | rc = security_read_policy(&plm->data, &plm->len); | ||
| 391 | if (rc) | ||
| 392 | goto err; | ||
| 393 | |||
| 394 | policy_opened = 1; | ||
| 395 | |||
| 396 | filp->private_data = plm; | ||
| 397 | |||
| 398 | mutex_unlock(&sel_mutex); | ||
| 399 | |||
| 400 | return 0; | ||
| 401 | err: | ||
| 402 | mutex_unlock(&sel_mutex); | ||
| 403 | |||
| 404 | if (plm) | ||
| 405 | vfree(plm->data); | ||
| 406 | kfree(plm); | ||
| 407 | return rc; | ||
| 408 | } | ||
| 409 | |||
| 410 | static int sel_release_policy(struct inode *inode, struct file *filp) | ||
| 411 | { | ||
| 412 | struct policy_load_memory *plm = filp->private_data; | ||
| 413 | |||
| 414 | BUG_ON(!plm); | ||
| 415 | |||
| 416 | policy_opened = 0; | ||
| 417 | |||
| 418 | vfree(plm->data); | ||
| 419 | kfree(plm); | ||
| 420 | |||
| 421 | return 0; | ||
| 422 | } | ||
| 423 | |||
| 424 | static ssize_t sel_read_policy(struct file *filp, char __user *buf, | ||
| 425 | size_t count, loff_t *ppos) | ||
| 426 | { | ||
| 427 | struct policy_load_memory *plm = filp->private_data; | ||
| 428 | int ret; | ||
| 429 | |||
| 430 | mutex_lock(&sel_mutex); | ||
| 431 | |||
| 432 | ret = task_has_security(current, SECURITY__READ_POLICY); | ||
| 433 | if (ret) | ||
| 434 | goto out; | ||
| 435 | |||
| 436 | ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); | ||
| 437 | out: | ||
| 438 | mutex_unlock(&sel_mutex); | ||
| 439 | return ret; | ||
| 440 | } | ||
| 441 | |||
| 442 | static int sel_mmap_policy_fault(struct vm_area_struct *vma, | ||
| 443 | struct vm_fault *vmf) | ||
| 444 | { | ||
| 445 | struct policy_load_memory *plm = vma->vm_file->private_data; | ||
| 446 | unsigned long offset; | ||
| 447 | struct page *page; | ||
| 448 | |||
| 449 | if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE)) | ||
| 450 | return VM_FAULT_SIGBUS; | ||
| 451 | |||
| 452 | offset = vmf->pgoff << PAGE_SHIFT; | ||
| 453 | if (offset >= roundup(plm->len, PAGE_SIZE)) | ||
| 454 | return VM_FAULT_SIGBUS; | ||
| 455 | |||
| 456 | page = vmalloc_to_page(plm->data + offset); | ||
| 457 | get_page(page); | ||
| 458 | |||
| 459 | vmf->page = page; | ||
| 460 | |||
| 461 | return 0; | ||
| 462 | } | ||
| 463 | |||
| 464 | static struct vm_operations_struct sel_mmap_policy_ops = { | ||
| 465 | .fault = sel_mmap_policy_fault, | ||
| 466 | .page_mkwrite = sel_mmap_policy_fault, | ||
| 467 | }; | ||
| 468 | |||
| 469 | int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) | ||
| 470 | { | ||
| 471 | if (vma->vm_flags & VM_SHARED) { | ||
| 472 | /* do not allow mprotect to make mapping writable */ | ||
| 473 | vma->vm_flags &= ~VM_MAYWRITE; | ||
| 474 | |||
| 475 | if (vma->vm_flags & VM_WRITE) | ||
| 476 | return -EACCES; | ||
| 477 | } | ||
| 478 | |||
| 479 | vma->vm_flags |= VM_RESERVED; | ||
| 480 | vma->vm_ops = &sel_mmap_policy_ops; | ||
| 481 | |||
| 482 | return 0; | ||
| 483 | } | ||
| 484 | |||
| 485 | static const struct file_operations sel_policy_ops = { | ||
| 486 | .open = sel_open_policy, | ||
| 487 | .read = sel_read_policy, | ||
| 488 | .mmap = sel_mmap_policy, | ||
| 489 | .release = sel_release_policy, | ||
| 490 | }; | ||
| 491 | |||
| 299 | static ssize_t sel_write_load(struct file *file, const char __user *buf, | 492 | static ssize_t sel_write_load(struct file *file, const char __user *buf, |
| 300 | size_t count, loff_t *ppos) | 493 | size_t count, loff_t *ppos) |
| 301 | 494 | ||
| @@ -1612,6 +1805,8 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent) | |||
| 1612 | [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR}, | 1805 | [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR}, |
| 1613 | [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO}, | 1806 | [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO}, |
| 1614 | [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO}, | 1807 | [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO}, |
| 1808 | [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO}, | ||
| 1809 | [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR}, | ||
| 1615 | /* last one */ {""} | 1810 | /* last one */ {""} |
| 1616 | }; | 1811 | }; |
| 1617 | ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); | 1812 | ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); |
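
With the table above, the new nodes show up as /selinux/status (world-readable, mmap-able) and /selinux/policy (readable by root only). A hypothetical helper that copies the kernel's view of the loaded policy out through the new node could look like the sketch below; it assumes selinuxfs is mounted at /selinux and that the caller is granted security { read_policy }. Note from sel_open_policy() above that a second concurrent open returns -EBUSY.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* copy /selinux/policy into a regular file; returns 0 on success */
int dump_policy(const char *out)
{
	char buf[4096];
	ssize_t n;
	int rc = 0;
	int in = open("/selinux/policy", O_RDONLY);
	int dst;

	if (in < 0)
		return -1;
	dst = open(out, O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (dst < 0) {
		close(in);
		return -1;
	}

	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(dst, buf, n) != n) {
			rc = -1;
			break;
		}
	}
	if (n < 0)
		rc = -1;

	close(in);
	close(dst);
	return rc;
}
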
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile deleted file mode 100644 index 15d4e62917de..000000000000 --- a/security/selinux/ss/Makefile +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Makefile for building the SELinux security server as part of the kernel tree. | ||
| 3 | # | ||
| 4 | |||
| 5 | EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include | ||
| 6 | obj-y := ss.o | ||
| 7 | |||
| 8 | ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o | ||
| 9 | |||
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c index 929480c6c430..a3dd9faa19c0 100644 --- a/security/selinux/ss/avtab.c +++ b/security/selinux/ss/avtab.c | |||
| @@ -266,8 +266,8 @@ int avtab_alloc(struct avtab *h, u32 nrules) | |||
| 266 | if (shift > 2) | 266 | if (shift > 2) |
| 267 | shift = shift - 2; | 267 | shift = shift - 2; |
| 268 | nslot = 1 << shift; | 268 | nslot = 1 << shift; |
| 269 | if (nslot > MAX_AVTAB_SIZE) | 269 | if (nslot > MAX_AVTAB_HASH_BUCKETS) |
| 270 | nslot = MAX_AVTAB_SIZE; | 270 | nslot = MAX_AVTAB_HASH_BUCKETS; |
| 271 | mask = nslot - 1; | 271 | mask = nslot - 1; |
| 272 | 272 | ||
| 273 | h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL); | 273 | h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL); |
| @@ -501,6 +501,48 @@ bad: | |||
| 501 | goto out; | 501 | goto out; |
| 502 | } | 502 | } |
| 503 | 503 | ||
| 504 | int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp) | ||
| 505 | { | ||
| 506 | __le16 buf16[4]; | ||
| 507 | __le32 buf32[1]; | ||
| 508 | int rc; | ||
| 509 | |||
| 510 | buf16[0] = cpu_to_le16(cur->key.source_type); | ||
| 511 | buf16[1] = cpu_to_le16(cur->key.target_type); | ||
| 512 | buf16[2] = cpu_to_le16(cur->key.target_class); | ||
| 513 | buf16[3] = cpu_to_le16(cur->key.specified); | ||
| 514 | rc = put_entry(buf16, sizeof(u16), 4, fp); | ||
| 515 | if (rc) | ||
| 516 | return rc; | ||
| 517 | buf32[0] = cpu_to_le32(cur->datum.data); | ||
| 518 | rc = put_entry(buf32, sizeof(u32), 1, fp); | ||
| 519 | if (rc) | ||
| 520 | return rc; | ||
| 521 | return 0; | ||
| 522 | } | ||
| 523 | |||
| 524 | int avtab_write(struct policydb *p, struct avtab *a, void *fp) | ||
| 525 | { | ||
| 526 | unsigned int i; | ||
| 527 | int rc = 0; | ||
| 528 | struct avtab_node *cur; | ||
| 529 | __le32 buf[1]; | ||
| 530 | |||
| 531 | buf[0] = cpu_to_le32(a->nel); | ||
| 532 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 533 | if (rc) | ||
| 534 | return rc; | ||
| 535 | |||
| 536 | for (i = 0; i < a->nslot; i++) { | ||
| 537 | for (cur = a->htable[i]; cur; cur = cur->next) { | ||
| 538 | rc = avtab_write_item(p, cur, fp); | ||
| 539 | if (rc) | ||
| 540 | return rc; | ||
| 541 | } | ||
| 542 | } | ||
| 543 | |||
| 544 | return rc; | ||
| 545 | } | ||
| 504 | void avtab_cache_init(void) | 546 | void avtab_cache_init(void) |
| 505 | { | 547 | { |
| 506 | avtab_node_cachep = kmem_cache_create("avtab_node", | 548 | avtab_node_cachep = kmem_cache_create("avtab_node", |
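
Each record that avtab_write_item() emits above occupies 12 bytes: four little-endian 16-bit key fields followed by one 32-bit datum, and avtab_write() prefixes the stream with a 32-bit element count before walking every hash bucket. The struct below is only an illustrative view of that on-disk record; the kernel writes the fields individually rather than through such a struct.

/* illustrative on-disk layout of one record written by avtab_write_item() */
struct avtab_disk_entry {
	__le16 source_type;
	__le16 target_type;
	__le16 target_class;
	__le16 specified;	/* AVTAB_ALLOWED, AVTAB_TRANSITION, ... */
	__le32 data;		/* access vector or default type */
} __attribute__((packed));	/* 12 bytes; the table is prefixed by a __le32 count */
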
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index cd4f734e2749..dff0c75345c1 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h | |||
| @@ -71,6 +71,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol, | |||
| 71 | void *p); | 71 | void *p); |
| 72 | 72 | ||
| 73 | int avtab_read(struct avtab *a, void *fp, struct policydb *pol); | 73 | int avtab_read(struct avtab *a, void *fp, struct policydb *pol); |
| 74 | int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp); | ||
| 75 | int avtab_write(struct policydb *p, struct avtab *a, void *fp); | ||
| 74 | 76 | ||
| 75 | struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, | 77 | struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, |
| 76 | struct avtab_datum *datum); | 78 | struct avtab_datum *datum); |
| @@ -85,7 +87,6 @@ void avtab_cache_destroy(void); | |||
| 85 | #define MAX_AVTAB_HASH_BITS 11 | 87 | #define MAX_AVTAB_HASH_BITS 11 |
| 86 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) | 88 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) |
| 87 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) | 89 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) |
| 88 | #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS | ||
| 89 | 90 | ||
| 90 | #endif /* _SS_AVTAB_H_ */ | 91 | #endif /* _SS_AVTAB_H_ */ |
| 91 | 92 | ||
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index c91e150c3087..655fe1c6cc69 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c | |||
| @@ -490,6 +490,129 @@ err: | |||
| 490 | return rc; | 490 | return rc; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | int cond_write_bool(void *vkey, void *datum, void *ptr) | ||
| 494 | { | ||
| 495 | char *key = vkey; | ||
| 496 | struct cond_bool_datum *booldatum = datum; | ||
| 497 | struct policy_data *pd = ptr; | ||
| 498 | void *fp = pd->fp; | ||
| 499 | __le32 buf[3]; | ||
| 500 | u32 len; | ||
| 501 | int rc; | ||
| 502 | |||
| 503 | len = strlen(key); | ||
| 504 | buf[0] = cpu_to_le32(booldatum->value); | ||
| 505 | buf[1] = cpu_to_le32(booldatum->state); | ||
| 506 | buf[2] = cpu_to_le32(len); | ||
| 507 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 508 | if (rc) | ||
| 509 | return rc; | ||
| 510 | rc = put_entry(key, 1, len, fp); | ||
| 511 | if (rc) | ||
| 512 | return rc; | ||
| 513 | return 0; | ||
| 514 | } | ||
| 515 | |||
| 516 | /* | ||
| 517 | * cond_write_av_list doesn't write out the av_list nodes. | ||
| 518 | * Instead it writes out the key/value pairs from the avtab. This | ||
| 519 | * is necessary because there is no way to uniquely identify rules | ||
| 520 | * in the avtab so it is not possible to associate individual rules | ||
| 521 | * in the avtab with a conditional without saving them as part of | ||
| 522 | * the conditional. This means that the avtab with the conditional | ||
| 523 | * rules will not be saved but will be rebuilt on policy load. | ||
| 524 | */ | ||
| 525 | static int cond_write_av_list(struct policydb *p, | ||
| 526 | struct cond_av_list *list, struct policy_file *fp) | ||
| 527 | { | ||
| 528 | __le32 buf[1]; | ||
| 529 | struct cond_av_list *cur_list; | ||
| 530 | u32 len; | ||
| 531 | int rc; | ||
| 532 | |||
| 533 | len = 0; | ||
| 534 | for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) | ||
| 535 | len++; | ||
| 536 | |||
| 537 | buf[0] = cpu_to_le32(len); | ||
| 538 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 539 | if (rc) | ||
| 540 | return rc; | ||
| 541 | |||
| 542 | if (len == 0) | ||
| 543 | return 0; | ||
| 544 | |||
| 545 | for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) { | ||
| 546 | rc = avtab_write_item(p, cur_list->node, fp); | ||
| 547 | if (rc) | ||
| 548 | return rc; | ||
| 549 | } | ||
| 550 | |||
| 551 | return 0; | ||
| 552 | } | ||
| 553 | |||
| 554 | int cond_write_node(struct policydb *p, struct cond_node *node, | ||
| 555 | struct policy_file *fp) | ||
| 556 | { | ||
| 557 | struct cond_expr *cur_expr; | ||
| 558 | __le32 buf[2]; | ||
| 559 | int rc; | ||
| 560 | u32 len = 0; | ||
| 561 | |||
| 562 | buf[0] = cpu_to_le32(node->cur_state); | ||
| 563 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 564 | if (rc) | ||
| 565 | return rc; | ||
| 566 | |||
| 567 | for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) | ||
| 568 | len++; | ||
| 569 | |||
| 570 | buf[0] = cpu_to_le32(len); | ||
| 571 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 572 | if (rc) | ||
| 573 | return rc; | ||
| 574 | |||
| 575 | for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) { | ||
| 576 | buf[0] = cpu_to_le32(cur_expr->expr_type); | ||
| 577 | buf[1] = cpu_to_le32(cur_expr->bool); | ||
| 578 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 579 | if (rc) | ||
| 580 | return rc; | ||
| 581 | } | ||
| 582 | |||
| 583 | rc = cond_write_av_list(p, node->true_list, fp); | ||
| 584 | if (rc) | ||
| 585 | return rc; | ||
| 586 | rc = cond_write_av_list(p, node->false_list, fp); | ||
| 587 | if (rc) | ||
| 588 | return rc; | ||
| 589 | |||
| 590 | return 0; | ||
| 591 | } | ||
| 592 | |||
| 593 | int cond_write_list(struct policydb *p, struct cond_node *list, void *fp) | ||
| 594 | { | ||
| 595 | struct cond_node *cur; | ||
| 596 | u32 len; | ||
| 597 | __le32 buf[1]; | ||
| 598 | int rc; | ||
| 599 | |||
| 600 | len = 0; | ||
| 601 | for (cur = list; cur != NULL; cur = cur->next) | ||
| 602 | len++; | ||
| 603 | buf[0] = cpu_to_le32(len); | ||
| 604 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 605 | if (rc) | ||
| 606 | return rc; | ||
| 607 | |||
| 608 | for (cur = list; cur != NULL; cur = cur->next) { | ||
| 609 | rc = cond_write_node(p, cur, fp); | ||
| 610 | if (rc) | ||
| 611 | return rc; | ||
| 612 | } | ||
| 613 | |||
| 614 | return 0; | ||
| 615 | } | ||
| 493 | /* Determine whether additional permissions are granted by the conditional | 616 | /* Determine whether additional permissions are granted by the conditional |
| 494 | * av table, and if so, add them to the result | 617 | * av table, and if so, add them to the result |
| 495 | */ | 618 | */ |
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h index 53ddb013ae57..3f209c635295 100644 --- a/security/selinux/ss/conditional.h +++ b/security/selinux/ss/conditional.h | |||
| @@ -69,6 +69,8 @@ int cond_index_bool(void *key, void *datum, void *datap); | |||
| 69 | 69 | ||
| 70 | int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp); | 70 | int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp); |
| 71 | int cond_read_list(struct policydb *p, void *fp); | 71 | int cond_read_list(struct policydb *p, void *fp); |
| 72 | int cond_write_bool(void *key, void *datum, void *ptr); | ||
| 73 | int cond_write_list(struct policydb *p, struct cond_node *list, void *fp); | ||
| 72 | 74 | ||
| 73 | void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd); | 75 | void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd); |
| 74 | 76 | ||
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 04b6145d767f..d42951fcbe87 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #include "ebitmap.h" | 22 | #include "ebitmap.h" |
| 23 | #include "policydb.h" | 23 | #include "policydb.h" |
| 24 | 24 | ||
| 25 | #define BITS_PER_U64 (sizeof(u64) * 8) | ||
| 26 | |||
| 25 | int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) | 27 | int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) |
| 26 | { | 28 | { |
| 27 | struct ebitmap_node *n1, *n2; | 29 | struct ebitmap_node *n1, *n2; |
| @@ -363,10 +365,10 @@ int ebitmap_read(struct ebitmap *e, void *fp) | |||
| 363 | e->highbit = le32_to_cpu(buf[1]); | 365 | e->highbit = le32_to_cpu(buf[1]); |
| 364 | count = le32_to_cpu(buf[2]); | 366 | count = le32_to_cpu(buf[2]); |
| 365 | 367 | ||
| 366 | if (mapunit != sizeof(u64) * 8) { | 368 | if (mapunit != BITS_PER_U64) { |
| 367 | printk(KERN_ERR "SELinux: ebitmap: map size %u does not " | 369 | printk(KERN_ERR "SELinux: ebitmap: map size %u does not " |
| 368 | "match my size %Zd (high bit was %d)\n", | 370 | "match my size %Zd (high bit was %d)\n", |
| 369 | mapunit, sizeof(u64) * 8, e->highbit); | 371 | mapunit, BITS_PER_U64, e->highbit); |
| 370 | goto bad; | 372 | goto bad; |
| 371 | } | 373 | } |
| 372 | 374 | ||
| @@ -446,3 +448,78 @@ bad: | |||
| 446 | ebitmap_destroy(e); | 448 | ebitmap_destroy(e); |
| 447 | goto out; | 449 | goto out; |
| 448 | } | 450 | } |
| 451 | |||
| 452 | int ebitmap_write(struct ebitmap *e, void *fp) | ||
| 453 | { | ||
| 454 | struct ebitmap_node *n; | ||
| 455 | u32 count; | ||
| 456 | __le32 buf[3]; | ||
| 457 | u64 map; | ||
| 458 | int bit, last_bit, last_startbit, rc; | ||
| 459 | |||
| 460 | buf[0] = cpu_to_le32(BITS_PER_U64); | ||
| 461 | |||
| 462 | count = 0; | ||
| 463 | last_bit = 0; | ||
| 464 | last_startbit = -1; | ||
| 465 | ebitmap_for_each_positive_bit(e, n, bit) { | ||
| 466 | if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { | ||
| 467 | count++; | ||
| 468 | last_startbit = rounddown(bit, BITS_PER_U64); | ||
| 469 | } | ||
| 470 | last_bit = roundup(bit + 1, BITS_PER_U64); | ||
| 471 | } | ||
| 472 | buf[1] = cpu_to_le32(last_bit); | ||
| 473 | buf[2] = cpu_to_le32(count); | ||
| 474 | |||
| 475 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 476 | if (rc) | ||
| 477 | return rc; | ||
| 478 | |||
| 479 | map = 0; | ||
| 480 | last_startbit = INT_MIN; | ||
| 481 | ebitmap_for_each_positive_bit(e, n, bit) { | ||
| 482 | if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { | ||
| 483 | __le64 buf64[1]; | ||
| 484 | |||
| 485 | /* this is the very first bit */ | ||
| 486 | if (!map) { | ||
| 487 | last_startbit = rounddown(bit, BITS_PER_U64); | ||
| 488 | map = (u64)1 << (bit - last_startbit); | ||
| 489 | continue; | ||
| 490 | } | ||
| 491 | |||
| 492 | /* write the last node */ | ||
| 493 | buf[0] = cpu_to_le32(last_startbit); | ||
| 494 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 495 | if (rc) | ||
| 496 | return rc; | ||
| 497 | |||
| 498 | buf64[0] = cpu_to_le64(map); | ||
| 499 | rc = put_entry(buf64, sizeof(u64), 1, fp); | ||
| 500 | if (rc) | ||
| 501 | return rc; | ||
| 502 | |||
| 503 | /* set up for the next node */ | ||
| 504 | map = 0; | ||
| 505 | last_startbit = rounddown(bit, BITS_PER_U64); | ||
| 506 | } | ||
| 507 | map |= (u64)1 << (bit - last_startbit); | ||
| 508 | } | ||
| 509 | /* write the last node */ | ||
| 510 | if (map) { | ||
| 511 | __le64 buf64[1]; | ||
| 512 | |||
| 513 | /* write the last node */ | ||
| 514 | buf[0] = cpu_to_le32(last_startbit); | ||
| 515 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 516 | if (rc) | ||
| 517 | return rc; | ||
| 518 | |||
| 519 | buf64[0] = cpu_to_le64(map); | ||
| 520 | rc = put_entry(buf64, sizeof(u64), 1, fp); | ||
| 521 | if (rc) | ||
| 522 | return rc; | ||
| 523 | } | ||
| 524 | return 0; | ||
| 525 | } | ||
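
ebitmap_write() above makes two passes over the bitmap: the first computes the header (map unit, high bit, node count), the second emits one startbit plus 64-bit map pair for every 64-bit-aligned chunk that contains set bits. A worked example for a bitmap with only bits 3 and 70 set:

	header : mapunit = 64, highbit = 128, count = 2
	node 0 : startbit = 0,  map = 0x0000000000000008  (bit 3)
	node 1 : startbit = 64, map = 0x0000000000000040  (bit 70, i.e. bit 6 of its unit)
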
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h index f283b4367f54..1f4e93c2ae86 100644 --- a/security/selinux/ss/ebitmap.h +++ b/security/selinux/ss/ebitmap.h | |||
| @@ -123,6 +123,7 @@ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit); | |||
| 123 | int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); | 123 | int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); |
| 124 | void ebitmap_destroy(struct ebitmap *e); | 124 | void ebitmap_destroy(struct ebitmap *e); |
| 125 | int ebitmap_read(struct ebitmap *e, void *fp); | 125 | int ebitmap_read(struct ebitmap *e, void *fp); |
| 126 | int ebitmap_write(struct ebitmap *e, void *fp); | ||
| 126 | 127 | ||
| 127 | #ifdef CONFIG_NETLABEL | 128 | #ifdef CONFIG_NETLABEL |
| 128 | int ebitmap_netlbl_export(struct ebitmap *ebmap, | 129 | int ebitmap_netlbl_export(struct ebitmap *ebmap, |
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 3a29704be8ce..94f630d93a5c 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "policydb.h" | 37 | #include "policydb.h" |
| 38 | #include "conditional.h" | 38 | #include "conditional.h" |
| 39 | #include "mls.h" | 39 | #include "mls.h" |
| 40 | #include "services.h" | ||
| 40 | 41 | ||
| 41 | #define _DEBUG_HASHES | 42 | #define _DEBUG_HASHES |
| 42 | 43 | ||
| @@ -185,9 +186,19 @@ static u32 rangetr_hash(struct hashtab *h, const void *k) | |||
| 185 | static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2) | 186 | static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2) |
| 186 | { | 187 | { |
| 187 | const struct range_trans *key1 = k1, *key2 = k2; | 188 | const struct range_trans *key1 = k1, *key2 = k2; |
| 188 | return (key1->source_type != key2->source_type || | 189 | int v; |
| 189 | key1->target_type != key2->target_type || | 190 | |
| 190 | key1->target_class != key2->target_class); | 191 | v = key1->source_type - key2->source_type; |
| 192 | if (v) | ||
| 193 | return v; | ||
| 194 | |||
| 195 | v = key1->target_type - key2->target_type; | ||
| 196 | if (v) | ||
| 197 | return v; | ||
| 198 | |||
| 199 | v = key1->target_class - key2->target_class; | ||
| 200 | |||
| 201 | return v; | ||
| 191 | } | 202 | } |
| 192 | 203 | ||
| 193 | /* | 204 | /* |
| @@ -1624,11 +1635,11 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap) | |||
| 1624 | 1635 | ||
| 1625 | static int type_bounds_sanity_check(void *key, void *datum, void *datap) | 1636 | static int type_bounds_sanity_check(void *key, void *datum, void *datap) |
| 1626 | { | 1637 | { |
| 1627 | struct type_datum *upper, *type; | 1638 | struct type_datum *upper; |
| 1628 | struct policydb *p = datap; | 1639 | struct policydb *p = datap; |
| 1629 | int depth = 0; | 1640 | int depth = 0; |
| 1630 | 1641 | ||
| 1631 | upper = type = datum; | 1642 | upper = datum; |
| 1632 | while (upper->bounds) { | 1643 | while (upper->bounds) { |
| 1633 | if (++depth == POLICYDB_BOUNDS_MAXDEPTH) { | 1644 | if (++depth == POLICYDB_BOUNDS_MAXDEPTH) { |
| 1634 | printk(KERN_ERR "SELinux: type %s: " | 1645 | printk(KERN_ERR "SELinux: type %s: " |
| @@ -2306,3 +2317,843 @@ bad: | |||
| 2306 | policydb_destroy(p); | 2317 | policydb_destroy(p); |
| 2307 | goto out; | 2318 | goto out; |
| 2308 | } | 2319 | } |
| 2320 | |||
| 2321 | /* | ||
| 2322 | * Write an MLS level structure to a policydb binary | ||
| 2323 | * representation file. | ||
| 2324 | */ | ||
| 2325 | static int mls_write_level(struct mls_level *l, void *fp) | ||
| 2326 | { | ||
| 2327 | __le32 buf[1]; | ||
| 2328 | int rc; | ||
| 2329 | |||
| 2330 | buf[0] = cpu_to_le32(l->sens); | ||
| 2331 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2332 | if (rc) | ||
| 2333 | return rc; | ||
| 2334 | |||
| 2335 | rc = ebitmap_write(&l->cat, fp); | ||
| 2336 | if (rc) | ||
| 2337 | return rc; | ||
| 2338 | |||
| 2339 | return 0; | ||
| 2340 | } | ||
| 2341 | |||
| 2342 | /* | ||
| 2343 | * Write an MLS range structure to a policydb binary | ||
| 2344 | * representation file. | ||
| 2345 | */ | ||
| 2346 | static int mls_write_range_helper(struct mls_range *r, void *fp) | ||
| 2347 | { | ||
| 2348 | __le32 buf[3]; | ||
| 2349 | size_t items; | ||
| 2350 | int rc, eq; | ||
| 2351 | |||
| 2352 | eq = mls_level_eq(&r->level[1], &r->level[0]); | ||
| 2353 | |||
| 2354 | if (eq) | ||
| 2355 | items = 2; | ||
| 2356 | else | ||
| 2357 | items = 3; | ||
| 2358 | buf[0] = cpu_to_le32(items-1); | ||
| 2359 | buf[1] = cpu_to_le32(r->level[0].sens); | ||
| 2360 | if (!eq) | ||
| 2361 | buf[2] = cpu_to_le32(r->level[1].sens); | ||
| 2362 | |||
| 2363 | BUG_ON(items > (sizeof(buf)/sizeof(buf[0]))); | ||
| 2364 | |||
| 2365 | rc = put_entry(buf, sizeof(u32), items, fp); | ||
| 2366 | if (rc) | ||
| 2367 | return rc; | ||
| 2368 | |||
| 2369 | rc = ebitmap_write(&r->level[0].cat, fp); | ||
| 2370 | if (rc) | ||
| 2371 | return rc; | ||
| 2372 | if (!eq) { | ||
| 2373 | rc = ebitmap_write(&r->level[1].cat, fp); | ||
| 2374 | if (rc) | ||
| 2375 | return rc; | ||
| 2376 | } | ||
| 2377 | |||
| 2378 | return 0; | ||
| 2379 | } | ||
| 2380 | |||
| 2381 | static int sens_write(void *vkey, void *datum, void *ptr) | ||
| 2382 | { | ||
| 2383 | char *key = vkey; | ||
| 2384 | struct level_datum *levdatum = datum; | ||
| 2385 | struct policy_data *pd = ptr; | ||
| 2386 | void *fp = pd->fp; | ||
| 2387 | __le32 buf[2]; | ||
| 2388 | size_t len; | ||
| 2389 | int rc; | ||
| 2390 | |||
| 2391 | len = strlen(key); | ||
| 2392 | buf[0] = cpu_to_le32(len); | ||
| 2393 | buf[1] = cpu_to_le32(levdatum->isalias); | ||
| 2394 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2395 | if (rc) | ||
| 2396 | return rc; | ||
| 2397 | |||
| 2398 | rc = put_entry(key, 1, len, fp); | ||
| 2399 | if (rc) | ||
| 2400 | return rc; | ||
| 2401 | |||
| 2402 | rc = mls_write_level(levdatum->level, fp); | ||
| 2403 | if (rc) | ||
| 2404 | return rc; | ||
| 2405 | |||
| 2406 | return 0; | ||
| 2407 | } | ||
| 2408 | |||
| 2409 | static int cat_write(void *vkey, void *datum, void *ptr) | ||
| 2410 | { | ||
| 2411 | char *key = vkey; | ||
| 2412 | struct cat_datum *catdatum = datum; | ||
| 2413 | struct policy_data *pd = ptr; | ||
| 2414 | void *fp = pd->fp; | ||
| 2415 | __le32 buf[3]; | ||
| 2416 | size_t len; | ||
| 2417 | int rc; | ||
| 2418 | |||
| 2419 | len = strlen(key); | ||
| 2420 | buf[0] = cpu_to_le32(len); | ||
| 2421 | buf[1] = cpu_to_le32(catdatum->value); | ||
| 2422 | buf[2] = cpu_to_le32(catdatum->isalias); | ||
| 2423 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 2424 | if (rc) | ||
| 2425 | return rc; | ||
| 2426 | |||
| 2427 | rc = put_entry(key, 1, len, fp); | ||
| 2428 | if (rc) | ||
| 2429 | return rc; | ||
| 2430 | |||
| 2431 | return 0; | ||
| 2432 | } | ||
| 2433 | |||
| 2434 | static int role_trans_write(struct role_trans *r, void *fp) | ||
| 2435 | { | ||
| 2436 | struct role_trans *tr; | ||
| 2437 | u32 buf[3]; | ||
| 2438 | size_t nel; | ||
| 2439 | int rc; | ||
| 2440 | |||
| 2441 | nel = 0; | ||
| 2442 | for (tr = r; tr; tr = tr->next) | ||
| 2443 | nel++; | ||
| 2444 | buf[0] = cpu_to_le32(nel); | ||
| 2445 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2446 | if (rc) | ||
| 2447 | return rc; | ||
| 2448 | for (tr = r; tr; tr = tr->next) { | ||
| 2449 | buf[0] = cpu_to_le32(tr->role); | ||
| 2450 | buf[1] = cpu_to_le32(tr->type); | ||
| 2451 | buf[2] = cpu_to_le32(tr->new_role); | ||
| 2452 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 2453 | if (rc) | ||
| 2454 | return rc; | ||
| 2455 | } | ||
| 2456 | |||
| 2457 | return 0; | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | static int role_allow_write(struct role_allow *r, void *fp) | ||
| 2461 | { | ||
| 2462 | struct role_allow *ra; | ||
| 2463 | u32 buf[2]; | ||
| 2464 | size_t nel; | ||
| 2465 | int rc; | ||
| 2466 | |||
| 2467 | nel = 0; | ||
| 2468 | for (ra = r; ra; ra = ra->next) | ||
| 2469 | nel++; | ||
| 2470 | buf[0] = cpu_to_le32(nel); | ||
| 2471 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2472 | if (rc) | ||
| 2473 | return rc; | ||
| 2474 | for (ra = r; ra; ra = ra->next) { | ||
| 2475 | buf[0] = cpu_to_le32(ra->role); | ||
| 2476 | buf[1] = cpu_to_le32(ra->new_role); | ||
| 2477 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2478 | if (rc) | ||
| 2479 | return rc; | ||
| 2480 | } | ||
| 2481 | return 0; | ||
| 2482 | } | ||
| 2483 | |||
| 2484 | /* | ||
| 2485 | * Write a security context structure | ||
| 2486 | * to a policydb binary representation file. | ||
| 2487 | */ | ||
| 2488 | static int context_write(struct policydb *p, struct context *c, | ||
| 2489 | void *fp) | ||
| 2490 | { | ||
| 2491 | int rc; | ||
| 2492 | __le32 buf[3]; | ||
| 2493 | |||
| 2494 | buf[0] = cpu_to_le32(c->user); | ||
| 2495 | buf[1] = cpu_to_le32(c->role); | ||
| 2496 | buf[2] = cpu_to_le32(c->type); | ||
| 2497 | |||
| 2498 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 2499 | if (rc) | ||
| 2500 | return rc; | ||
| 2501 | |||
| 2502 | rc = mls_write_range_helper(&c->range, fp); | ||
| 2503 | if (rc) | ||
| 2504 | return rc; | ||
| 2505 | |||
| 2506 | return 0; | ||
| 2507 | } | ||
| 2508 | |||
| 2509 | /* | ||
| 2510 | * The following *_write functions are used to | ||
| 2511 | * write the symbol data to a policy database | ||
| 2512 | * binary representation file. | ||
| 2513 | */ | ||
| 2514 | |||
| 2515 | static int perm_write(void *vkey, void *datum, void *fp) | ||
| 2516 | { | ||
| 2517 | char *key = vkey; | ||
| 2518 | struct perm_datum *perdatum = datum; | ||
| 2519 | __le32 buf[2]; | ||
| 2520 | size_t len; | ||
| 2521 | int rc; | ||
| 2522 | |||
| 2523 | len = strlen(key); | ||
| 2524 | buf[0] = cpu_to_le32(len); | ||
| 2525 | buf[1] = cpu_to_le32(perdatum->value); | ||
| 2526 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2527 | if (rc) | ||
| 2528 | return rc; | ||
| 2529 | |||
| 2530 | rc = put_entry(key, 1, len, fp); | ||
| 2531 | if (rc) | ||
| 2532 | return rc; | ||
| 2533 | |||
| 2534 | return 0; | ||
| 2535 | } | ||
| 2536 | |||
| 2537 | static int common_write(void *vkey, void *datum, void *ptr) | ||
| 2538 | { | ||
| 2539 | char *key = vkey; | ||
| 2540 | struct common_datum *comdatum = datum; | ||
| 2541 | struct policy_data *pd = ptr; | ||
| 2542 | void *fp = pd->fp; | ||
| 2543 | __le32 buf[4]; | ||
| 2544 | size_t len; | ||
| 2545 | int rc; | ||
| 2546 | |||
| 2547 | len = strlen(key); | ||
| 2548 | buf[0] = cpu_to_le32(len); | ||
| 2549 | buf[1] = cpu_to_le32(comdatum->value); | ||
| 2550 | buf[2] = cpu_to_le32(comdatum->permissions.nprim); | ||
| 2551 | buf[3] = cpu_to_le32(comdatum->permissions.table->nel); | ||
| 2552 | rc = put_entry(buf, sizeof(u32), 4, fp); | ||
| 2553 | if (rc) | ||
| 2554 | return rc; | ||
| 2555 | |||
| 2556 | rc = put_entry(key, 1, len, fp); | ||
| 2557 | if (rc) | ||
| 2558 | return rc; | ||
| 2559 | |||
| 2560 | rc = hashtab_map(comdatum->permissions.table, perm_write, fp); | ||
| 2561 | if (rc) | ||
| 2562 | return rc; | ||
| 2563 | |||
| 2564 | return 0; | ||
| 2565 | } | ||
| 2566 | |||
| 2567 | static int write_cons_helper(struct policydb *p, struct constraint_node *node, | ||
| 2568 | void *fp) | ||
| 2569 | { | ||
| 2570 | struct constraint_node *c; | ||
| 2571 | struct constraint_expr *e; | ||
| 2572 | __le32 buf[3]; | ||
| 2573 | u32 nel; | ||
| 2574 | int rc; | ||
| 2575 | |||
| 2576 | for (c = node; c; c = c->next) { | ||
| 2577 | nel = 0; | ||
| 2578 | for (e = c->expr; e; e = e->next) | ||
| 2579 | nel++; | ||
| 2580 | buf[0] = cpu_to_le32(c->permissions); | ||
| 2581 | buf[1] = cpu_to_le32(nel); | ||
| 2582 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2583 | if (rc) | ||
| 2584 | return rc; | ||
| 2585 | for (e = c->expr; e; e = e->next) { | ||
| 2586 | buf[0] = cpu_to_le32(e->expr_type); | ||
| 2587 | buf[1] = cpu_to_le32(e->attr); | ||
| 2588 | buf[2] = cpu_to_le32(e->op); | ||
| 2589 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 2590 | if (rc) | ||
| 2591 | return rc; | ||
| 2592 | |||
| 2593 | switch (e->expr_type) { | ||
| 2594 | case CEXPR_NAMES: | ||
| 2595 | rc = ebitmap_write(&e->names, fp); | ||
| 2596 | if (rc) | ||
| 2597 | return rc; | ||
| 2598 | break; | ||
| 2599 | default: | ||
| 2600 | break; | ||
| 2601 | } | ||
| 2602 | } | ||
| 2603 | } | ||
| 2604 | |||
| 2605 | return 0; | ||
| 2606 | } | ||
| 2607 | |||
| 2608 | static int class_write(void *vkey, void *datum, void *ptr) | ||
| 2609 | { | ||
| 2610 | char *key = vkey; | ||
| 2611 | struct class_datum *cladatum = datum; | ||
| 2612 | struct policy_data *pd = ptr; | ||
| 2613 | void *fp = pd->fp; | ||
| 2614 | struct policydb *p = pd->p; | ||
| 2615 | struct constraint_node *c; | ||
| 2616 | __le32 buf[6]; | ||
| 2617 | u32 ncons; | ||
| 2618 | size_t len, len2; | ||
| 2619 | int rc; | ||
| 2620 | |||
| 2621 | len = strlen(key); | ||
| 2622 | if (cladatum->comkey) | ||
| 2623 | len2 = strlen(cladatum->comkey); | ||
| 2624 | else | ||
| 2625 | len2 = 0; | ||
| 2626 | |||
| 2627 | ncons = 0; | ||
| 2628 | for (c = cladatum->constraints; c; c = c->next) | ||
| 2629 | ncons++; | ||
| 2630 | |||
| 2631 | buf[0] = cpu_to_le32(len); | ||
| 2632 | buf[1] = cpu_to_le32(len2); | ||
| 2633 | buf[2] = cpu_to_le32(cladatum->value); | ||
| 2634 | buf[3] = cpu_to_le32(cladatum->permissions.nprim); | ||
| 2635 | if (cladatum->permissions.table) | ||
| 2636 | buf[4] = cpu_to_le32(cladatum->permissions.table->nel); | ||
| 2637 | else | ||
| 2638 | buf[4] = 0; | ||
| 2639 | buf[5] = cpu_to_le32(ncons); | ||
| 2640 | rc = put_entry(buf, sizeof(u32), 6, fp); | ||
| 2641 | if (rc) | ||
| 2642 | return rc; | ||
| 2643 | |||
| 2644 | rc = put_entry(key, 1, len, fp); | ||
| 2645 | if (rc) | ||
| 2646 | return rc; | ||
| 2647 | |||
| 2648 | if (cladatum->comkey) { | ||
| 2649 | rc = put_entry(cladatum->comkey, 1, len2, fp); | ||
| 2650 | if (rc) | ||
| 2651 | return rc; | ||
| 2652 | } | ||
| 2653 | |||
| 2654 | rc = hashtab_map(cladatum->permissions.table, perm_write, fp); | ||
| 2655 | if (rc) | ||
| 2656 | return rc; | ||
| 2657 | |||
| 2658 | rc = write_cons_helper(p, cladatum->constraints, fp); | ||
| 2659 | if (rc) | ||
| 2660 | return rc; | ||
| 2661 | |||
| 2662 | /* write out the validatetrans rule */ | ||
| 2663 | ncons = 0; | ||
| 2664 | for (c = cladatum->validatetrans; c; c = c->next) | ||
| 2665 | ncons++; | ||
| 2666 | |||
| 2667 | buf[0] = cpu_to_le32(ncons); | ||
| 2668 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2669 | if (rc) | ||
| 2670 | return rc; | ||
| 2671 | |||
| 2672 | rc = write_cons_helper(p, cladatum->validatetrans, fp); | ||
| 2673 | if (rc) | ||
| 2674 | return rc; | ||
| 2675 | |||
| 2676 | return 0; | ||
| 2677 | } | ||
| 2678 | |||
| 2679 | static int role_write(void *vkey, void *datum, void *ptr) | ||
| 2680 | { | ||
| 2681 | char *key = vkey; | ||
| 2682 | struct role_datum *role = datum; | ||
| 2683 | struct policy_data *pd = ptr; | ||
| 2684 | void *fp = pd->fp; | ||
| 2685 | struct policydb *p = pd->p; | ||
| 2686 | __le32 buf[3]; | ||
| 2687 | size_t items, len; | ||
| 2688 | int rc; | ||
| 2689 | |||
| 2690 | len = strlen(key); | ||
| 2691 | items = 0; | ||
| 2692 | buf[items++] = cpu_to_le32(len); | ||
| 2693 | buf[items++] = cpu_to_le32(role->value); | ||
| 2694 | if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) | ||
| 2695 | buf[items++] = cpu_to_le32(role->bounds); | ||
| 2696 | |||
| 2697 | BUG_ON(items > (sizeof(buf)/sizeof(buf[0]))); | ||
| 2698 | |||
| 2699 | rc = put_entry(buf, sizeof(u32), items, fp); | ||
| 2700 | if (rc) | ||
| 2701 | return rc; | ||
| 2702 | |||
| 2703 | rc = put_entry(key, 1, len, fp); | ||
| 2704 | if (rc) | ||
| 2705 | return rc; | ||
| 2706 | |||
| 2707 | rc = ebitmap_write(&role->dominates, fp); | ||
| 2708 | if (rc) | ||
| 2709 | return rc; | ||
| 2710 | |||
| 2711 | rc = ebitmap_write(&role->types, fp); | ||
| 2712 | if (rc) | ||
| 2713 | return rc; | ||
| 2714 | |||
| 2715 | return 0; | ||
| 2716 | } | ||
| 2717 | |||
| 2718 | static int type_write(void *vkey, void *datum, void *ptr) | ||
| 2719 | { | ||
| 2720 | char *key = vkey; | ||
| 2721 | struct type_datum *typdatum = datum; | ||
| 2722 | struct policy_data *pd = ptr; | ||
| 2723 | struct policydb *p = pd->p; | ||
| 2724 | void *fp = pd->fp; | ||
| 2725 | __le32 buf[4]; | ||
| 2726 | int rc; | ||
| 2727 | size_t items, len; | ||
| 2728 | |||
| 2729 | len = strlen(key); | ||
| 2730 | items = 0; | ||
| 2731 | buf[items++] = cpu_to_le32(len); | ||
| 2732 | buf[items++] = cpu_to_le32(typdatum->value); | ||
| 2733 | if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) { | ||
| 2734 | u32 properties = 0; | ||
| 2735 | |||
| 2736 | if (typdatum->primary) | ||
| 2737 | properties |= TYPEDATUM_PROPERTY_PRIMARY; | ||
| 2738 | |||
| 2739 | if (typdatum->attribute) | ||
| 2740 | properties |= TYPEDATUM_PROPERTY_ATTRIBUTE; | ||
| 2741 | |||
| 2742 | buf[items++] = cpu_to_le32(properties); | ||
| 2743 | buf[items++] = cpu_to_le32(typdatum->bounds); | ||
| 2744 | } else { | ||
| 2745 | buf[items++] = cpu_to_le32(typdatum->primary); | ||
| 2746 | } | ||
| 2747 | BUG_ON(items > (sizeof(buf) / sizeof(buf[0]))); | ||
| 2748 | rc = put_entry(buf, sizeof(u32), items, fp); | ||
| 2749 | if (rc) | ||
| 2750 | return rc; | ||
| 2751 | |||
| 2752 | rc = put_entry(key, 1, len, fp); | ||
| 2753 | if (rc) | ||
| 2754 | return rc; | ||
| 2755 | |||
| 2756 | return 0; | ||
| 2757 | } | ||
| 2758 | |||
| 2759 | static int user_write(void *vkey, void *datum, void *ptr) | ||
| 2760 | { | ||
| 2761 | char *key = vkey; | ||
| 2762 | struct user_datum *usrdatum = datum; | ||
| 2763 | struct policy_data *pd = ptr; | ||
| 2764 | struct policydb *p = pd->p; | ||
| 2765 | void *fp = pd->fp; | ||
| 2766 | __le32 buf[3]; | ||
| 2767 | size_t items, len; | ||
| 2768 | int rc; | ||
| 2769 | |||
| 2770 | len = strlen(key); | ||
| 2771 | items = 0; | ||
| 2772 | buf[items++] = cpu_to_le32(len); | ||
| 2773 | buf[items++] = cpu_to_le32(usrdatum->value); | ||
| 2774 | if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) | ||
| 2775 | buf[items++] = cpu_to_le32(usrdatum->bounds); | ||
| 2776 | BUG_ON(items > (sizeof(buf) / sizeof(buf[0]))); | ||
| 2777 | rc = put_entry(buf, sizeof(u32), items, fp); | ||
| 2778 | if (rc) | ||
| 2779 | return rc; | ||
| 2780 | |||
| 2781 | rc = put_entry(key, 1, len, fp); | ||
| 2782 | if (rc) | ||
| 2783 | return rc; | ||
| 2784 | |||
| 2785 | rc = ebitmap_write(&usrdatum->roles, fp); | ||
| 2786 | if (rc) | ||
| 2787 | return rc; | ||
| 2788 | |||
| 2789 | rc = mls_write_range_helper(&usrdatum->range, fp); | ||
| 2790 | if (rc) | ||
| 2791 | return rc; | ||
| 2792 | |||
| 2793 | rc = mls_write_level(&usrdatum->dfltlevel, fp); | ||
| 2794 | if (rc) | ||
| 2795 | return rc; | ||
| 2796 | |||
| 2797 | return 0; | ||
| 2798 | } | ||
| 2799 | |||
| 2800 | static int (*write_f[SYM_NUM]) (void *key, void *datum, | ||
| 2801 | void *datap) = | ||
| 2802 | { | ||
| 2803 | common_write, | ||
| 2804 | class_write, | ||
| 2805 | role_write, | ||
| 2806 | type_write, | ||
| 2807 | user_write, | ||
| 2808 | cond_write_bool, | ||
| 2809 | sens_write, | ||
| 2810 | cat_write, | ||
| 2811 | }; | ||
| 2812 | |||
| 2813 | static int ocontext_write(struct policydb *p, struct policydb_compat_info *info, | ||
| 2814 | void *fp) | ||
| 2815 | { | ||
| 2816 | unsigned int i, j, rc; | ||
| 2817 | size_t nel, len; | ||
| 2818 | __le32 buf[3]; | ||
| 2819 | u32 nodebuf[8]; | ||
| 2820 | struct ocontext *c; | ||
| 2821 | for (i = 0; i < info->ocon_num; i++) { | ||
| 2822 | nel = 0; | ||
| 2823 | for (c = p->ocontexts[i]; c; c = c->next) | ||
| 2824 | nel++; | ||
| 2825 | buf[0] = cpu_to_le32(nel); | ||
| 2826 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2827 | if (rc) | ||
| 2828 | return rc; | ||
| 2829 | for (c = p->ocontexts[i]; c; c = c->next) { | ||
| 2830 | switch (i) { | ||
| 2831 | case OCON_ISID: | ||
| 2832 | buf[0] = cpu_to_le32(c->sid[0]); | ||
| 2833 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2834 | if (rc) | ||
| 2835 | return rc; | ||
| 2836 | rc = context_write(p, &c->context[0], fp); | ||
| 2837 | if (rc) | ||
| 2838 | return rc; | ||
| 2839 | break; | ||
| 2840 | case OCON_FS: | ||
| 2841 | case OCON_NETIF: | ||
| 2842 | len = strlen(c->u.name); | ||
| 2843 | buf[0] = cpu_to_le32(len); | ||
| 2844 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2845 | if (rc) | ||
| 2846 | return rc; | ||
| 2847 | rc = put_entry(c->u.name, 1, len, fp); | ||
| 2848 | if (rc) | ||
| 2849 | return rc; | ||
| 2850 | rc = context_write(p, &c->context[0], fp); | ||
| 2851 | if (rc) | ||
| 2852 | return rc; | ||
| 2853 | rc = context_write(p, &c->context[1], fp); | ||
| 2854 | if (rc) | ||
| 2855 | return rc; | ||
| 2856 | break; | ||
| 2857 | case OCON_PORT: | ||
| 2858 | buf[0] = cpu_to_le32(c->u.port.protocol); | ||
| 2859 | buf[1] = cpu_to_le32(c->u.port.low_port); | ||
| 2860 | buf[2] = cpu_to_le32(c->u.port.high_port); | ||
| 2861 | rc = put_entry(buf, sizeof(u32), 3, fp); | ||
| 2862 | if (rc) | ||
| 2863 | return rc; | ||
| 2864 | rc = context_write(p, &c->context[0], fp); | ||
| 2865 | if (rc) | ||
| 2866 | return rc; | ||
| 2867 | break; | ||
| 2868 | case OCON_NODE: | ||
| 2869 | nodebuf[0] = c->u.node.addr; /* network order */ | ||
| 2870 | nodebuf[1] = c->u.node.mask; /* network order */ | ||
| 2871 | rc = put_entry(nodebuf, sizeof(u32), 2, fp); | ||
| 2872 | if (rc) | ||
| 2873 | return rc; | ||
| 2874 | rc = context_write(p, &c->context[0], fp); | ||
| 2875 | if (rc) | ||
| 2876 | return rc; | ||
| 2877 | break; | ||
| 2878 | case OCON_FSUSE: | ||
| 2879 | buf[0] = cpu_to_le32(c->v.behavior); | ||
| 2880 | len = strlen(c->u.name); | ||
| 2881 | buf[1] = cpu_to_le32(len); | ||
| 2882 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2883 | if (rc) | ||
| 2884 | return rc; | ||
| 2885 | rc = put_entry(c->u.name, 1, len, fp); | ||
| 2886 | if (rc) | ||
| 2887 | return rc; | ||
| 2888 | rc = context_write(p, &c->context[0], fp); | ||
| 2889 | if (rc) | ||
| 2890 | return rc; | ||
| 2891 | break; | ||
| 2892 | case OCON_NODE6: | ||
| 2893 | for (j = 0; j < 4; j++) | ||
| 2894 | nodebuf[j] = c->u.node6.addr[j]; /* network order */ | ||
| 2895 | for (j = 0; j < 4; j++) | ||
| 2896 | nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */ | ||
| 2897 | rc = put_entry(nodebuf, sizeof(u32), 8, fp); | ||
| 2898 | if (rc) | ||
| 2899 | return rc; | ||
| 2900 | rc = context_write(p, &c->context[0], fp); | ||
| 2901 | if (rc) | ||
| 2902 | return rc; | ||
| 2903 | break; | ||
| 2904 | } | ||
| 2905 | } | ||
| 2906 | } | ||
| 2907 | return 0; | ||
| 2908 | } | ||
| 2909 | |||
| 2910 | static int genfs_write(struct policydb *p, void *fp) | ||
| 2911 | { | ||
| 2912 | struct genfs *genfs; | ||
| 2913 | struct ocontext *c; | ||
| 2914 | size_t len; | ||
| 2915 | __le32 buf[1]; | ||
| 2916 | int rc; | ||
| 2917 | |||
| 2918 | len = 0; | ||
| 2919 | for (genfs = p->genfs; genfs; genfs = genfs->next) | ||
| 2920 | len++; | ||
| 2921 | buf[0] = cpu_to_le32(len); | ||
| 2922 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2923 | if (rc) | ||
| 2924 | return rc; | ||
| 2925 | for (genfs = p->genfs; genfs; genfs = genfs->next) { | ||
| 2926 | len = strlen(genfs->fstype); | ||
| 2927 | buf[0] = cpu_to_le32(len); | ||
| 2928 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2929 | if (rc) | ||
| 2930 | return rc; | ||
| 2931 | rc = put_entry(genfs->fstype, 1, len, fp); | ||
| 2932 | if (rc) | ||
| 2933 | return rc; | ||
| 2934 | len = 0; | ||
| 2935 | for (c = genfs->head; c; c = c->next) | ||
| 2936 | len++; | ||
| 2937 | buf[0] = cpu_to_le32(len); | ||
| 2938 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2939 | if (rc) | ||
| 2940 | return rc; | ||
| 2941 | for (c = genfs->head; c; c = c->next) { | ||
| 2942 | len = strlen(c->u.name); | ||
| 2943 | buf[0] = cpu_to_le32(len); | ||
| 2944 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2945 | if (rc) | ||
| 2946 | return rc; | ||
| 2947 | rc = put_entry(c->u.name, 1, len, fp); | ||
| 2948 | if (rc) | ||
| 2949 | return rc; | ||
| 2950 | buf[0] = cpu_to_le32(c->v.sclass); | ||
| 2951 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2952 | if (rc) | ||
| 2953 | return rc; | ||
| 2954 | rc = context_write(p, &c->context[0], fp); | ||
| 2955 | if (rc) | ||
| 2956 | return rc; | ||
| 2957 | } | ||
| 2958 | } | ||
| 2959 | return 0; | ||
| 2960 | } | ||
| 2961 | |||
| 2962 | static int range_count(void *key, void *data, void *ptr) | ||
| 2963 | { | ||
| 2964 | int *cnt = ptr; | ||
| 2965 | *cnt = *cnt + 1; | ||
| 2966 | |||
| 2967 | return 0; | ||
| 2968 | } | ||
| 2969 | |||
| 2970 | static int range_write_helper(void *key, void *data, void *ptr) | ||
| 2971 | { | ||
| 2972 | __le32 buf[2]; | ||
| 2973 | struct range_trans *rt = key; | ||
| 2974 | struct mls_range *r = data; | ||
| 2975 | struct policy_data *pd = ptr; | ||
| 2976 | void *fp = pd->fp; | ||
| 2977 | struct policydb *p = pd->p; | ||
| 2978 | int rc; | ||
| 2979 | |||
| 2980 | buf[0] = cpu_to_le32(rt->source_type); | ||
| 2981 | buf[1] = cpu_to_le32(rt->target_type); | ||
| 2982 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 2983 | if (rc) | ||
| 2984 | return rc; | ||
| 2985 | if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) { | ||
| 2986 | buf[0] = cpu_to_le32(rt->target_class); | ||
| 2987 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 2988 | if (rc) | ||
| 2989 | return rc; | ||
| 2990 | } | ||
| 2991 | rc = mls_write_range_helper(r, fp); | ||
| 2992 | if (rc) | ||
| 2993 | return rc; | ||
| 2994 | |||
| 2995 | return 0; | ||
| 2996 | } | ||
| 2997 | |||
| 2998 | static int range_write(struct policydb *p, void *fp) | ||
| 2999 | { | ||
| 3000 | size_t nel; | ||
| 3001 | __le32 buf[1]; | ||
| 3002 | int rc; | ||
| 3003 | struct policy_data pd; | ||
| 3004 | |||
| 3005 | pd.p = p; | ||
| 3006 | pd.fp = fp; | ||
| 3007 | |||
| 3008 | /* count the number of entries in the hashtab */ | ||
| 3009 | nel = 0; | ||
| 3010 | rc = hashtab_map(p->range_tr, range_count, &nel); | ||
| 3011 | if (rc) | ||
| 3012 | return rc; | ||
| 3013 | |||
| 3014 | buf[0] = cpu_to_le32(nel); | ||
| 3015 | rc = put_entry(buf, sizeof(u32), 1, fp); | ||
| 3016 | if (rc) | ||
| 3017 | return rc; | ||
| 3018 | |||
| 3019 | /* actually write all of the entries */ | ||
| 3020 | rc = hashtab_map(p->range_tr, range_write_helper, &pd); | ||
| 3021 | if (rc) | ||
| 3022 | return rc; | ||
| 3023 | |||
| 3024 | return 0; | ||
| 3025 | } | ||
| 3026 | |||
| 3027 | /* | ||
| 3028 | * Write the configuration data in a policy database | ||
| 3029 | * structure to a policy database binary representation | ||
| 3030 | * file. | ||
| 3031 | */ | ||
| 3032 | int policydb_write(struct policydb *p, void *fp) | ||
| 3033 | { | ||
| 3034 | unsigned int i, num_syms; | ||
| 3035 | int rc; | ||
| 3036 | __le32 buf[4]; | ||
| 3037 | u32 config; | ||
| 3038 | size_t len; | ||
| 3039 | struct policydb_compat_info *info; | ||
| 3040 | |||
| 3041 | /* | ||
| 3042 | * refuse to write policy older than compressed avtab | ||
| 3043 | * to simplify the writer. There are other tests dropped | ||
| 3044 | * since we assume this throughout the writer code. Be | ||
| 3045 | * careful if you ever try to remove this restriction | ||
| 3046 | */ | ||
| 3047 | if (p->policyvers < POLICYDB_VERSION_AVTAB) { | ||
| 3048 | printk(KERN_ERR "SELinux: refusing to write policy version %d;" | ||
| 3049 | " it is older than version %d\n", p->policyvers, | ||
| 3050 | POLICYDB_VERSION_AVTAB); | ||
| 3051 | return -EINVAL; | ||
| 3052 | } | ||
| 3053 | |||
| 3054 | config = 0; | ||
| 3055 | if (p->mls_enabled) | ||
| 3056 | config |= POLICYDB_CONFIG_MLS; | ||
| 3057 | |||
| 3058 | if (p->reject_unknown) | ||
| 3059 | config |= REJECT_UNKNOWN; | ||
| 3060 | if (p->allow_unknown) | ||
| 3061 | config |= ALLOW_UNKNOWN; | ||
| 3062 | |||
| 3063 | /* Write the magic number and string identifiers. */ | ||
| 3064 | buf[0] = cpu_to_le32(POLICYDB_MAGIC); | ||
| 3065 | len = strlen(POLICYDB_STRING); | ||
| 3066 | buf[1] = cpu_to_le32(len); | ||
| 3067 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 3068 | if (rc) | ||
| 3069 | return rc; | ||
| 3070 | rc = put_entry(POLICYDB_STRING, 1, len, fp); | ||
| 3071 | if (rc) | ||
| 3072 | return rc; | ||
| 3073 | |||
| 3074 | /* Write the version, config, and table sizes. */ | ||
| 3075 | info = policydb_lookup_compat(p->policyvers); | ||
| 3076 | if (!info) { | ||
| 3077 | printk(KERN_ERR "SELinux: compatibility lookup failed for policy " | ||
| 3078 | "version %d\n", p->policyvers); | ||
| 3079 | return -EINVAL; | ||
| 3080 | } | ||
| 3081 | |||
| 3082 | buf[0] = cpu_to_le32(p->policyvers); | ||
| 3083 | buf[1] = cpu_to_le32(config); | ||
| 3084 | buf[2] = cpu_to_le32(info->sym_num); | ||
| 3085 | buf[3] = cpu_to_le32(info->ocon_num); | ||
| 3086 | |||
| 3087 | rc = put_entry(buf, sizeof(u32), 4, fp); | ||
| 3088 | if (rc) | ||
| 3089 | return rc; | ||
| 3090 | |||
| 3091 | if (p->policyvers >= POLICYDB_VERSION_POLCAP) { | ||
| 3092 | rc = ebitmap_write(&p->policycaps, fp); | ||
| 3093 | if (rc) | ||
| 3094 | return rc; | ||
| 3095 | } | ||
| 3096 | |||
| 3097 | if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) { | ||
| 3098 | rc = ebitmap_write(&p->permissive_map, fp); | ||
| 3099 | if (rc) | ||
| 3100 | return rc; | ||
| 3101 | } | ||
| 3102 | |||
| 3103 | num_syms = info->sym_num; | ||
| 3104 | for (i = 0; i < num_syms; i++) { | ||
| 3105 | struct policy_data pd; | ||
| 3106 | |||
| 3107 | pd.fp = fp; | ||
| 3108 | pd.p = p; | ||
| 3109 | |||
| 3110 | buf[0] = cpu_to_le32(p->symtab[i].nprim); | ||
| 3111 | buf[1] = cpu_to_le32(p->symtab[i].table->nel); | ||
| 3112 | |||
| 3113 | rc = put_entry(buf, sizeof(u32), 2, fp); | ||
| 3114 | if (rc) | ||
| 3115 | return rc; | ||
| 3116 | rc = hashtab_map(p->symtab[i].table, write_f[i], &pd); | ||
| 3117 | if (rc) | ||
| 3118 | return rc; | ||
| 3119 | } | ||
| 3120 | |||
| 3121 | rc = avtab_write(p, &p->te_avtab, fp); | ||
| 3122 | if (rc) | ||
| 3123 | return rc; | ||
| 3124 | |||
| 3125 | rc = cond_write_list(p, p->cond_list, fp); | ||
| 3126 | if (rc) | ||
| 3127 | return rc; | ||
| 3128 | |||
| 3129 | rc = role_trans_write(p->role_tr, fp); | ||
| 3130 | if (rc) | ||
| 3131 | return rc; | ||
| 3132 | |||
| 3133 | rc = role_allow_write(p->role_allow, fp); | ||
| 3134 | if (rc) | ||
| 3135 | return rc; | ||
| 3136 | |||
| 3137 | rc = ocontext_write(p, info, fp); | ||
| 3138 | if (rc) | ||
| 3139 | return rc; | ||
| 3140 | |||
| 3141 | rc = genfs_write(p, fp); | ||
| 3142 | if (rc) | ||
| 3143 | return rc; | ||
| 3144 | |||
| 3145 | rc = range_write(p, fp); | ||
| 3146 | if (rc) | ||
| 3147 | return rc; | ||
| 3148 | |||
| 3149 | for (i = 0; i < p->p_types.nprim; i++) { | ||
| 3150 | struct ebitmap *e = flex_array_get(p->type_attr_map_array, i); | ||
| 3151 | |||
| 3152 | BUG_ON(!e); | ||
| 3153 | rc = ebitmap_write(e, fp); | ||
| 3154 | if (rc) | ||
| 3155 | return rc; | ||
| 3156 | } | ||
| 3157 | |||
| 3158 | return 0; | ||
| 3159 | } | ||
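The stream produced by policydb_write() starts with a small fixed header: the magic word, the identifier string preceded by its length, then the version, config bits, and the symbol/ocontext table counts taken from the compatibility table. The sketch below parses just that header from a buffer; the function and type names are hypothetical, and it assumes a little-endian host rather than byte-swapping each field.

#include <stdint.h>
#include <string.h>

/* Hypothetical mirror of the header fields written by policydb_write(). */
struct policy_header {
	uint32_t version;	/* p->policyvers */
	uint32_t config;	/* MLS / REJECT_UNKNOWN / ALLOW_UNKNOWN bits */
	uint32_t sym_num;	/* symbol table count for this version */
	uint32_t ocon_num;	/* ocontext count for this version */
};

/*
 * Parse the leading bytes of a binary policy blob.  Field order follows
 * policydb_write() above: magic, length of the identifier string, the
 * string itself, then the four words of struct policy_header.  All
 * integers are little-endian in the stream; this sketch assumes a
 * little-endian host.
 */
static int parse_policy_header(const unsigned char *buf, size_t len,
			       struct policy_header *hdr)
{
	uint32_t magic, id_len;

	if (len < 2 * sizeof(uint32_t))
		return -1;
	memcpy(&magic, buf, sizeof(magic));		/* POLICYDB_MAGIC */
	memcpy(&id_len, buf + 4, sizeof(id_len));	/* strlen(POLICYDB_STRING) */
	buf += 8;
	len -= 8;

	if (id_len > len || len - id_len < sizeof(*hdr))
		return -1;
	buf += id_len;					/* skip the identifier string */

	memcpy(hdr, buf, sizeof(*hdr));
	return 0;
}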
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 310e94442cb8..95d3d7de361e 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h | |||
| @@ -254,6 +254,9 @@ struct policydb { | |||
| 254 | 254 | ||
| 255 | struct ebitmap permissive_map; | 255 | struct ebitmap permissive_map; |
| 256 | 256 | ||
| 257 | /* length of this policy when it was loaded */ | ||
| 258 | size_t len; | ||
| 259 | |||
| 257 | unsigned int policyvers; | 260 | unsigned int policyvers; |
| 258 | 261 | ||
| 259 | unsigned int reject_unknown : 1; | 262 | unsigned int reject_unknown : 1; |
| @@ -270,6 +273,7 @@ extern int policydb_class_isvalid(struct policydb *p, unsigned int class); | |||
| 270 | extern int policydb_type_isvalid(struct policydb *p, unsigned int type); | 273 | extern int policydb_type_isvalid(struct policydb *p, unsigned int type); |
| 271 | extern int policydb_role_isvalid(struct policydb *p, unsigned int role); | 274 | extern int policydb_role_isvalid(struct policydb *p, unsigned int role); |
| 272 | extern int policydb_read(struct policydb *p, void *fp); | 275 | extern int policydb_read(struct policydb *p, void *fp); |
| 276 | extern int policydb_write(struct policydb *p, void *fp); | ||
| 273 | 277 | ||
| 274 | #define PERM_SYMTAB_SIZE 32 | 278 | #define PERM_SYMTAB_SIZE 32 |
| 275 | 279 | ||
| @@ -290,6 +294,11 @@ struct policy_file { | |||
| 290 | size_t len; | 294 | size_t len; |
| 291 | }; | 295 | }; |
| 292 | 296 | ||
| 297 | struct policy_data { | ||
| 298 | struct policydb *p; | ||
| 299 | void *fp; | ||
| 300 | }; | ||
| 301 | |||
| 293 | static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) | 302 | static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) |
| 294 | { | 303 | { |
| 295 | if (bytes > fp->len) | 304 | if (bytes > fp->len) |
| @@ -301,6 +310,17 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) | |||
| 301 | return 0; | 310 | return 0; |
| 302 | } | 311 | } |
| 303 | 312 | ||
| 313 | static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp) | ||
| 314 | { | ||
| 315 | size_t len = bytes * num; | ||
| 316 | |||
| 317 | memcpy(fp->data, buf, len); | ||
| 318 | fp->data += len; | ||
| 319 | fp->len -= len; | ||
| 320 | |||
| 321 | return 0; | ||
| 322 | } | ||
| 323 | |||
| 304 | extern u16 string_to_security_class(struct policydb *p, const char *name); | 324 | extern u16 string_to_security_class(struct policydb *p, const char *name); |
| 305 | extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); | 325 | extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); |
| 306 | 326 | ||
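The put_entry() helper added here mirrors next_entry(): both advance a struct policy_file cursor over a flat buffer, and put_entry() performs no bounds check because the writer sizes the buffer up front from security_policydb_len(). As a sketch of the idiom used throughout the writer (a hypothetical helper, not part of this patch), emitting a counted array of 32-bit values looks like this:

/*
 * Hypothetical helper: write a 32-bit element count followed by the
 * elements, converting each to little-endian as the on-disk policy
 * format requires.  This is the same pattern range_write() and the
 * other *_write() helpers follow.
 */
static int write_u32_array(const u32 *vals, u32 nel, struct policy_file *fp)
{
	__le32 buf[1];
	u32 i;
	int rc;

	buf[0] = cpu_to_le32(nel);
	rc = put_entry(buf, sizeof(u32), 1, fp);
	if (rc)
		return rc;

	for (i = 0; i < nel; i++) {
		buf[0] = cpu_to_le32(vals[i]);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}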
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 9ea2feca3cd4..223c1ff6ef23 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <linux/mutex.h> | 51 | #include <linux/mutex.h> |
| 52 | #include <linux/selinux.h> | 52 | #include <linux/selinux.h> |
| 53 | #include <linux/flex_array.h> | 53 | #include <linux/flex_array.h> |
| 54 | #include <linux/vmalloc.h> | ||
| 54 | #include <net/netlabel.h> | 55 | #include <net/netlabel.h> |
| 55 | 56 | ||
| 56 | #include "flask.h" | 57 | #include "flask.h" |
| @@ -991,7 +992,8 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 | |||
| 991 | { | 992 | { |
| 992 | char *scontextp; | 993 | char *scontextp; |
| 993 | 994 | ||
| 994 | *scontext = NULL; | 995 | if (scontext) |
| 996 | *scontext = NULL; | ||
| 995 | *scontext_len = 0; | 997 | *scontext_len = 0; |
| 996 | 998 | ||
| 997 | if (context->len) { | 999 | if (context->len) { |
| @@ -1008,6 +1010,9 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 | |||
| 1008 | *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1; | 1010 | *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1; |
| 1009 | *scontext_len += mls_compute_context_len(context); | 1011 | *scontext_len += mls_compute_context_len(context); |
| 1010 | 1012 | ||
| 1013 | if (!scontext) | ||
| 1014 | return 0; | ||
| 1015 | |||
| 1011 | /* Allocate space for the context; caller must free this space. */ | 1016 | /* Allocate space for the context; caller must free this space. */ |
| 1012 | scontextp = kmalloc(*scontext_len, GFP_ATOMIC); | 1017 | scontextp = kmalloc(*scontext_len, GFP_ATOMIC); |
| 1013 | if (!scontextp) | 1018 | if (!scontextp) |
| @@ -1047,7 +1052,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext, | |||
| 1047 | struct context *context; | 1052 | struct context *context; |
| 1048 | int rc = 0; | 1053 | int rc = 0; |
| 1049 | 1054 | ||
| 1050 | *scontext = NULL; | 1055 | if (scontext) |
| 1056 | *scontext = NULL; | ||
| 1051 | *scontext_len = 0; | 1057 | *scontext_len = 0; |
| 1052 | 1058 | ||
| 1053 | if (!ss_initialized) { | 1059 | if (!ss_initialized) { |
| @@ -1055,6 +1061,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext, | |||
| 1055 | char *scontextp; | 1061 | char *scontextp; |
| 1056 | 1062 | ||
| 1057 | *scontext_len = strlen(initial_sid_to_string[sid]) + 1; | 1063 | *scontext_len = strlen(initial_sid_to_string[sid]) + 1; |
| 1064 | if (!scontext) | ||
| 1065 | goto out; | ||
| 1058 | scontextp = kmalloc(*scontext_len, GFP_ATOMIC); | 1066 | scontextp = kmalloc(*scontext_len, GFP_ATOMIC); |
| 1059 | if (!scontextp) { | 1067 | if (!scontextp) { |
| 1060 | rc = -ENOMEM; | 1068 | rc = -ENOMEM; |
| @@ -1769,6 +1777,7 @@ int security_load_policy(void *data, size_t len) | |||
| 1769 | return rc; | 1777 | return rc; |
| 1770 | } | 1778 | } |
| 1771 | 1779 | ||
| 1780 | policydb.len = len; | ||
| 1772 | rc = selinux_set_mapping(&policydb, secclass_map, | 1781 | rc = selinux_set_mapping(&policydb, secclass_map, |
| 1773 | ¤t_mapping, | 1782 | ¤t_mapping, |
| 1774 | ¤t_mapping_size); | 1783 | ¤t_mapping_size); |
| @@ -1791,6 +1800,7 @@ int security_load_policy(void *data, size_t len) | |||
| 1791 | selinux_complete_init(); | 1800 | selinux_complete_init(); |
| 1792 | avc_ss_reset(seqno); | 1801 | avc_ss_reset(seqno); |
| 1793 | selnl_notify_policyload(seqno); | 1802 | selnl_notify_policyload(seqno); |
| 1803 | selinux_status_update_policyload(seqno); | ||
| 1794 | selinux_netlbl_cache_invalidate(); | 1804 | selinux_netlbl_cache_invalidate(); |
| 1795 | selinux_xfrm_notify_policyload(); | 1805 | selinux_xfrm_notify_policyload(); |
| 1796 | return 0; | 1806 | return 0; |
| @@ -1804,6 +1814,7 @@ int security_load_policy(void *data, size_t len) | |||
| 1804 | if (rc) | 1814 | if (rc) |
| 1805 | return rc; | 1815 | return rc; |
| 1806 | 1816 | ||
| 1817 | newpolicydb.len = len; | ||
| 1807 | /* If switching between different policy types, log MLS status */ | 1818 | /* If switching between different policy types, log MLS status */ |
| 1808 | if (policydb.mls_enabled && !newpolicydb.mls_enabled) | 1819 | if (policydb.mls_enabled && !newpolicydb.mls_enabled) |
| 1809 | printk(KERN_INFO "SELinux: Disabling MLS support...\n"); | 1820 | printk(KERN_INFO "SELinux: Disabling MLS support...\n"); |
| @@ -1870,6 +1881,7 @@ int security_load_policy(void *data, size_t len) | |||
| 1870 | 1881 | ||
| 1871 | avc_ss_reset(seqno); | 1882 | avc_ss_reset(seqno); |
| 1872 | selnl_notify_policyload(seqno); | 1883 | selnl_notify_policyload(seqno); |
| 1884 | selinux_status_update_policyload(seqno); | ||
| 1873 | selinux_netlbl_cache_invalidate(); | 1885 | selinux_netlbl_cache_invalidate(); |
| 1874 | selinux_xfrm_notify_policyload(); | 1886 | selinux_xfrm_notify_policyload(); |
| 1875 | 1887 | ||
| @@ -1883,6 +1895,17 @@ err: | |||
| 1883 | 1895 | ||
| 1884 | } | 1896 | } |
| 1885 | 1897 | ||
| 1898 | size_t security_policydb_len(void) | ||
| 1899 | { | ||
| 1900 | size_t len; | ||
| 1901 | |||
| 1902 | read_lock(&policy_rwlock); | ||
| 1903 | len = policydb.len; | ||
| 1904 | read_unlock(&policy_rwlock); | ||
| 1905 | |||
| 1906 | return len; | ||
| 1907 | } | ||
| 1908 | |||
| 1886 | /** | 1909 | /** |
| 1887 | * security_port_sid - Obtain the SID for a port. | 1910 | * security_port_sid - Obtain the SID for a port. |
| 1888 | * @protocol: protocol number | 1911 | * @protocol: protocol number |
| @@ -2374,6 +2397,7 @@ out: | |||
| 2374 | if (!rc) { | 2397 | if (!rc) { |
| 2375 | avc_ss_reset(seqno); | 2398 | avc_ss_reset(seqno); |
| 2376 | selnl_notify_policyload(seqno); | 2399 | selnl_notify_policyload(seqno); |
| 2400 | selinux_status_update_policyload(seqno); | ||
| 2377 | selinux_xfrm_notify_policyload(); | 2401 | selinux_xfrm_notify_policyload(); |
| 2378 | } | 2402 | } |
| 2379 | return rc; | 2403 | return rc; |
| @@ -3129,3 +3153,38 @@ netlbl_sid_to_secattr_failure: | |||
| 3129 | return rc; | 3153 | return rc; |
| 3130 | } | 3154 | } |
| 3131 | #endif /* CONFIG_NETLABEL */ | 3155 | #endif /* CONFIG_NETLABEL */ |
| 3156 | |||
| 3157 | /** | ||
| 3158 | * security_read_policy - read the policy. | ||
| 3159 | * @data: location to receive a pointer to the binary policy data | ||
| 3160 | * @len: location to receive the length of the data in bytes | ||
| 3161 | * | ||
| 3162 | */ | ||
| 3163 | int security_read_policy(void **data, ssize_t *len) | ||
| 3164 | { | ||
| 3165 | int rc; | ||
| 3166 | struct policy_file fp; | ||
| 3167 | |||
| 3168 | if (!ss_initialized) | ||
| 3169 | return -EINVAL; | ||
| 3170 | |||
| 3171 | *len = security_policydb_len(); | ||
| 3172 | |||
| 3173 | *data = vmalloc_user(*len); | ||
| 3174 | if (!*data) | ||
| 3175 | return -ENOMEM; | ||
| 3176 | |||
| 3177 | fp.data = *data; | ||
| 3178 | fp.len = *len; | ||
| 3179 | |||
| 3180 | read_lock(&policy_rwlock); | ||
| 3181 | rc = policydb_write(&policydb, &fp); | ||
| 3182 | read_unlock(&policy_rwlock); | ||
| 3183 | |||
| 3184 | if (rc) | ||
| 3185 | return rc; | ||
| 3186 | |||
| 3187 | *len = (unsigned long)fp.data - (unsigned long)*data; | ||
| 3188 | return 0; | ||
| 3189 | |||
| 3190 | } | ||
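security_read_policy() sizes the buffer with security_policydb_len(), allocates it with vmalloc_user() so it can later be mapped into userspace, and finally trims *len to the bytes policydb_write() actually emitted. A minimal, hypothetical in-kernel caller showing the ownership rules would be:

/*
 * Hypothetical caller sketch: fetch the loaded binary policy and free it
 * again.  The buffer comes from vmalloc_user(), so it must be released
 * with vfree(); on success *len holds the number of bytes actually
 * written, which may differ from what security_policydb_len() returned.
 */
static int policy_dump_example(void)
{
	void *data;
	ssize_t len;
	int rc;

	rc = security_read_policy(&data, &len);
	if (rc)
		return rc;

	pr_info("SELinux: serialized policy is %zd bytes\n", len);
	/* ... hand data/len to whatever needs the policy image ... */

	vfree(data);
	return 0;
}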
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c new file mode 100644 index 000000000000..d982365f9d1a --- /dev/null +++ b/security/selinux/ss/status.c | |||
| @@ -0,0 +1,126 @@ | |||
| 1 | /* | ||
| 2 | * mmap based event notifications for SELinux | ||
| 3 | * | ||
| 4 | * Author: KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 5 | * | ||
| 6 | * Copyright (C) 2010 NEC corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2, | ||
| 10 | * as published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/gfp.h> | ||
| 14 | #include <linux/mm.h> | ||
| 15 | #include <linux/mutex.h> | ||
| 16 | #include "avc.h" | ||
| 17 | #include "services.h" | ||
| 18 | |||
| 19 | /* | ||
| 20 | * The selinux_status_page is exposed to userspace applications via an | ||
| 21 | * mmap interface on /selinux/status. | ||
| 22 | * It lets the kernel notify applications of events that should reset a | ||
| 23 | * userspace access vector cache, without any context switch. | ||
| 24 | * | ||
| 25 | * The selinux_kernel_status structure at the head of the status page is | ||
| 26 | * protected against concurrent access with seqlock-style logic, so a | ||
| 27 | * userspace application must read the status page following the same | ||
| 28 | * seqlock rules. | ||
| 29 | * | ||
| 30 | * Typically, an application checks status->sequence at the start of its | ||
| 31 | * access-control routine. If the value is odd, the kernel is updating | ||
| 32 | * the status, so the application should retry shortly. If the value | ||
| 33 | * differs from the last sequence number the application saw, something | ||
| 34 | * has changed and it should reset its userspace AVC if needed. | ||
| 35 | * In the common case, the application can confirm that the kernel status | ||
| 36 | * is unchanged without making any system calls. | ||
| 37 | */ | ||
| 38 | static struct page *selinux_status_page; | ||
| 39 | static DEFINE_MUTEX(selinux_status_lock); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * selinux_kernel_status_page | ||
| 43 | * | ||
| 44 | * It returns a reference to selinux_status_page. If the status page is | ||
| 45 | * not allocated yet, it is allocated on first use. | ||
| 46 | */ | ||
| 47 | struct page *selinux_kernel_status_page(void) | ||
| 48 | { | ||
| 49 | struct selinux_kernel_status *status; | ||
| 50 | struct page *result = NULL; | ||
| 51 | |||
| 52 | mutex_lock(&selinux_status_lock); | ||
| 53 | if (!selinux_status_page) { | ||
| 54 | selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO); | ||
| 55 | |||
| 56 | if (selinux_status_page) { | ||
| 57 | status = page_address(selinux_status_page); | ||
| 58 | |||
| 59 | status->version = SELINUX_KERNEL_STATUS_VERSION; | ||
| 60 | status->sequence = 0; | ||
| 61 | status->enforcing = selinux_enforcing; | ||
| 62 | /* | ||
| 63 | * NOTE: the next policy load event will store a non-zero | ||
| 64 | * value in status->policyload (not necessarily 1, but never | ||
| 65 | * zero), so an application can tell that the policy has | ||
| 66 | * been reloaded. | ||
| 67 | */ | ||
| 68 | status->policyload = 0; | ||
| 69 | status->deny_unknown = !security_get_allow_unknown(); | ||
| 70 | } | ||
| 71 | } | ||
| 72 | result = selinux_status_page; | ||
| 73 | mutex_unlock(&selinux_status_lock); | ||
| 74 | |||
| 75 | return result; | ||
| 76 | } | ||
| 77 | |||
| 78 | /* | ||
| 79 | * selinux_status_update_setenforce | ||
| 80 | * | ||
| 81 | * Update the status page to reflect the current enforcing/permissive mode. | ||
| 82 | */ | ||
| 83 | void selinux_status_update_setenforce(int enforcing) | ||
| 84 | { | ||
| 85 | struct selinux_kernel_status *status; | ||
| 86 | |||
| 87 | mutex_lock(&selinux_status_lock); | ||
| 88 | if (selinux_status_page) { | ||
| 89 | status = page_address(selinux_status_page); | ||
| 90 | |||
| 91 | status->sequence++; | ||
| 92 | smp_wmb(); | ||
| 93 | |||
| 94 | status->enforcing = enforcing; | ||
| 95 | |||
| 96 | smp_wmb(); | ||
| 97 | status->sequence++; | ||
| 98 | } | ||
| 99 | mutex_unlock(&selinux_status_lock); | ||
| 100 | } | ||
| 101 | |||
| 102 | /* | ||
| 103 | * selinux_status_update_policyload | ||
| 104 | * | ||
| 105 | * Update the status page with the number of times the policy has been | ||
| 106 | * reloaded (seqno) and the current deny_unknown setting. | ||
| 107 | */ | ||
| 108 | void selinux_status_update_policyload(int seqno) | ||
| 109 | { | ||
| 110 | struct selinux_kernel_status *status; | ||
| 111 | |||
| 112 | mutex_lock(&selinux_status_lock); | ||
| 113 | if (selinux_status_page) { | ||
| 114 | status = page_address(selinux_status_page); | ||
| 115 | |||
| 116 | status->sequence++; | ||
| 117 | smp_wmb(); | ||
| 118 | |||
| 119 | status->policyload = seqno; | ||
| 120 | status->deny_unknown = !security_get_allow_unknown(); | ||
| 121 | |||
| 122 | smp_wmb(); | ||
| 123 | status->sequence++; | ||
| 124 | } | ||
| 125 | mutex_unlock(&selinux_status_lock); | ||
| 126 | } | ||
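The comment at the head of status.c describes the read side of this protocol only in prose. The sketch below shows what a userspace reader might look like; the /selinux/status path and the field layout of struct selinux_kernel_status are assumptions inferred from the code above (the authoritative definitions live in the kernel and libselinux headers), and a production reader would pair proper read memory barriers with the kernel's smp_wmb() rather than relying on volatile.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Assumed field layout, in the order the kernel code above initializes it. */
struct selinux_kernel_status {
	uint32_t version;
	uint32_t sequence;	/* seqlock-style counter; odd = update in progress */
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
};

/* Read one field under the seqlock protocol described in status.c. */
static uint32_t read_policyload(const volatile struct selinux_kernel_status *st)
{
	uint32_t seq, value;

	do {
		while ((seq = st->sequence) & 1)
			;			/* kernel is mid-update; retry */
		value = st->policyload;
	} while (st->sequence != seq);		/* changed underneath us; retry */

	return value;
}

int main(void)
{
	int fd = open("/selinux/status", O_RDONLY);	/* path is an assumption */
	struct selinux_kernel_status *st;

	if (fd < 0)
		return 1;
	st = mmap(NULL, sizeof(*st), PROT_READ, MAP_SHARED, fd, 0);
	close(fd);
	if (st == MAP_FAILED)
		return 1;

	/* compare against a cached value to decide whether to flush a userspace AVC */
	printf("policyload count: %u\n", (unsigned)read_policyload(st));

	munmap(st, sizeof(*st));
	return 0;
}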
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index c448d57ae2b7..bc39f4067af6 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
| @@ -1281,12 +1281,11 @@ static int smack_task_getioprio(struct task_struct *p) | |||
| 1281 | * | 1281 | * |
| 1282 | * Return 0 if read access is permitted | 1282 | * Return 0 if read access is permitted |
| 1283 | */ | 1283 | */ |
| 1284 | static int smack_task_setscheduler(struct task_struct *p, int policy, | 1284 | static int smack_task_setscheduler(struct task_struct *p) |
| 1285 | struct sched_param *lp) | ||
| 1286 | { | 1285 | { |
| 1287 | int rc; | 1286 | int rc; |
| 1288 | 1287 | ||
| 1289 | rc = cap_task_setscheduler(p, policy, lp); | 1288 | rc = cap_task_setscheduler(p); |
| 1290 | if (rc == 0) | 1289 | if (rc == 0) |
| 1291 | rc = smk_curacc_on_task(p, MAY_WRITE); | 1290 | rc = smk_curacc_on_task(p, MAY_WRITE); |
| 1292 | return rc; | 1291 | return rc; |
| @@ -3005,7 +3004,8 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
| 3005 | { | 3004 | { |
| 3006 | char *sp = smack_from_secid(secid); | 3005 | char *sp = smack_from_secid(secid); |
| 3007 | 3006 | ||
| 3008 | *secdata = sp; | 3007 | if (secdata) |
| 3008 | *secdata = sp; | ||
| 3009 | *seclen = strlen(sp); | 3009 | *seclen = strlen(sp); |
| 3010 | return 0; | 3010 | return 0; |
| 3011 | } | 3011 | } |
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index c668b447c725..7556315c1978 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c | |||
| @@ -768,8 +768,10 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data) | |||
| 768 | return true; /* Do nothing if open(O_WRONLY). */ | 768 | return true; /* Do nothing if open(O_WRONLY). */ |
| 769 | memset(&head->r, 0, sizeof(head->r)); | 769 | memset(&head->r, 0, sizeof(head->r)); |
| 770 | head->r.print_this_domain_only = true; | 770 | head->r.print_this_domain_only = true; |
| 771 | head->r.eof = !domain; | 771 | if (domain) |
| 772 | head->r.domain = &domain->list; | 772 | head->r.domain = &domain->list; |
| 773 | else | ||
| 774 | head->r.eof = 1; | ||
| 773 | tomoyo_io_printf(head, "# select %s\n", data); | 775 | tomoyo_io_printf(head, "# select %s\n", data); |
| 774 | if (domain && domain->is_deleted) | 776 | if (domain && domain->is_deleted) |
| 775 | tomoyo_io_printf(head, "# This is a deleted domain.\n"); | 777 | tomoyo_io_printf(head, "# This is a deleted domain.\n"); |
| @@ -2051,13 +2053,22 @@ void tomoyo_check_profile(void) | |||
| 2051 | const u8 profile = domain->profile; | 2053 | const u8 profile = domain->profile; |
| 2052 | if (tomoyo_profile_ptr[profile]) | 2054 | if (tomoyo_profile_ptr[profile]) |
| 2053 | continue; | 2055 | continue; |
| 2056 | printk(KERN_ERR "You need to define profile %u before using it.\n", | ||
| 2057 | profile); | ||
| 2058 | printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " | ||
| 2059 | "for more information.\n"); | ||
| 2054 | panic("Profile %u (used by '%s') not defined.\n", | 2060 | panic("Profile %u (used by '%s') not defined.\n", |
| 2055 | profile, domain->domainname->name); | 2061 | profile, domain->domainname->name); |
| 2056 | } | 2062 | } |
| 2057 | tomoyo_read_unlock(idx); | 2063 | tomoyo_read_unlock(idx); |
| 2058 | if (tomoyo_profile_version != 20090903) | 2064 | if (tomoyo_profile_version != 20090903) { |
| 2065 | printk(KERN_ERR "You need to install userland programs for " | ||
| 2066 | "TOMOYO 2.3 and initialize policy configuration.\n"); | ||
| 2067 | printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " | ||
| 2068 | "for more information.\n"); | ||
| 2059 | panic("Profile version %u is not supported.\n", | 2069 | panic("Profile version %u is not supported.\n", |
| 2060 | tomoyo_profile_version); | 2070 | tomoyo_profile_version); |
| 2071 | } | ||
| 2061 | printk(KERN_INFO "TOMOYO: 2.3.0\n"); | 2072 | printk(KERN_INFO "TOMOYO: 2.3.0\n"); |
| 2062 | printk(KERN_INFO "Mandatory Access Control activated.\n"); | 2073 | printk(KERN_INFO "Mandatory Access Control activated.\n"); |
| 2063 | } | 2074 | } |
diff --git a/sound/core/control.c b/sound/core/control.c index 070aab490191..45a818002d99 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | /* max number of user-defined controls */ | 32 | /* max number of user-defined controls */ |
| 33 | #define MAX_USER_CONTROLS 32 | 33 | #define MAX_USER_CONTROLS 32 |
| 34 | #define MAX_CONTROL_COUNT 1028 | ||
| 34 | 35 | ||
| 35 | struct snd_kctl_ioctl { | 36 | struct snd_kctl_ioctl { |
| 36 | struct list_head list; /* list of all ioctls */ | 37 | struct list_head list; /* list of all ioctls */ |
| @@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, | |||
| 195 | 196 | ||
| 196 | if (snd_BUG_ON(!control || !control->count)) | 197 | if (snd_BUG_ON(!control || !control->count)) |
| 197 | return NULL; | 198 | return NULL; |
| 199 | |||
| 200 | if (control->count > MAX_CONTROL_COUNT) | ||
| 201 | return NULL; | ||
| 202 | |||
| 198 | kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); | 203 | kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); |
| 199 | if (kctl == NULL) { | 204 | if (kctl == NULL) { |
| 200 | snd_printk(KERN_ERR "Cannot allocate control instance\n"); | 205 | snd_printk(KERN_ERR "Cannot allocate control instance\n"); |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index a7868ad4d530..cbbed0db9e56 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
| @@ -535,13 +535,15 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file) | |||
| 535 | { | 535 | { |
| 536 | struct snd_rawmidi_file *rfile; | 536 | struct snd_rawmidi_file *rfile; |
| 537 | struct snd_rawmidi *rmidi; | 537 | struct snd_rawmidi *rmidi; |
| 538 | struct module *module; | ||
| 538 | 539 | ||
| 539 | rfile = file->private_data; | 540 | rfile = file->private_data; |
| 540 | rmidi = rfile->rmidi; | 541 | rmidi = rfile->rmidi; |
| 541 | rawmidi_release_priv(rfile); | 542 | rawmidi_release_priv(rfile); |
| 542 | kfree(rfile); | 543 | kfree(rfile); |
| 544 | module = rmidi->card->module; | ||
| 543 | snd_card_file_remove(rmidi->card, file); | 545 | snd_card_file_remove(rmidi->card, file); |
| 544 | module_put(rmidi->card->module); | 546 | module_put(module); |
| 545 | return 0; | 547 | return 0; |
| 546 | } | 548 | } |
| 547 | 549 | ||
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c index 1adb8a3c2b62..42d7844ecd0b 100644 --- a/sound/i2c/other/ak4xxx-adda.c +++ b/sound/i2c/other/ak4xxx-adda.c | |||
| @@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak) | |||
| 900 | return 0; | 900 | return 0; |
| 901 | } | 901 | } |
| 902 | #else /* !CONFIG_PROC_FS */ | 902 | #else /* !CONFIG_PROC_FS */ |
| 903 | static int proc_init(struct snd_akm4xxx *ak) {} | 903 | static int proc_init(struct snd_akm4xxx *ak) { return 0; } |
| 904 | #endif | 904 | #endif |
| 905 | 905 | ||
| 906 | int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) | 906 | int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) |
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c index 92aa762ffb7e..07f803e6d203 100644 --- a/sound/oss/soundcard.c +++ b/sound/oss/soundcard.c | |||
| @@ -391,11 +391,11 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 391 | case SND_DEV_DSP: | 391 | case SND_DEV_DSP: |
| 392 | case SND_DEV_DSP16: | 392 | case SND_DEV_DSP16: |
| 393 | case SND_DEV_AUDIO: | 393 | case SND_DEV_AUDIO: |
| 394 | return audio_ioctl(dev, file, cmd, p); | 394 | ret = audio_ioctl(dev, file, cmd, p); |
| 395 | break; | 395 | break; |
| 396 | 396 | ||
| 397 | case SND_DEV_MIDIN: | 397 | case SND_DEV_MIDIN: |
| 398 | return MIDIbuf_ioctl(dev, file, cmd, p); | 398 | ret = MIDIbuf_ioctl(dev, file, cmd, p); |
| 399 | break; | 399 | break; |
| 400 | 400 | ||
| 401 | } | 401 | } |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 95148e58026c..c16c5ba0fda0 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -1747,6 +1747,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { | |||
| 1747 | "HP dv6", STAC_HP_DV5), | 1747 | "HP dv6", STAC_HP_DV5), |
| 1748 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061, | 1748 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061, |
| 1749 | "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */ | 1749 | "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */ |
| 1750 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e, | ||
| 1751 | "HP DV6", STAC_HP_DV5), | ||
| 1750 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010, | 1752 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010, |
| 1751 | "HP", STAC_HP_DV5), | 1753 | "HP", STAC_HP_DV5), |
| 1752 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, | 1754 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 4f1fa77c1feb..1950e19af1cf 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
| @@ -1017,7 +1017,7 @@ builtin-revert.o wt-status.o: wt-status.h | |||
| 1017 | # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So | 1017 | # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So |
| 1018 | # we depend the various files onto their directories. | 1018 | # we depend the various files onto their directories. |
| 1019 | DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h | 1019 | DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h |
| 1020 | $(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) | 1020 | $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) |
| 1021 | # In the second step, we make a rule to actually create these directories | 1021 | # In the second step, we make a rule to actually create these directories |
| 1022 | $(sort $(dir $(DIRECTORY_DEPS))): | 1022 | $(sort $(dir $(DIRECTORY_DEPS))): |
| 1023 | $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null | 1023 | $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null |
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index ef7aa0a0c526..95aaf565c704 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
| @@ -73,6 +73,18 @@ void get_term_dimensions(struct winsize *ws); | |||
| 73 | #define cpu_relax() asm volatile("":::"memory") | 73 | #define cpu_relax() asm volatile("":::"memory") |
| 74 | #endif | 74 | #endif |
| 75 | 75 | ||
| 76 | #ifdef __mips__ | ||
| 77 | #include "../../arch/mips/include/asm/unistd.h" | ||
| 78 | #define rmb() asm volatile( \ | ||
| 79 | ".set mips2\n\t" \ | ||
| 80 | "sync\n\t" \ | ||
| 81 | ".set mips0" \ | ||
| 82 | : /* no output */ \ | ||
| 83 | : /* no input */ \ | ||
| 84 | : "memory") | ||
| 85 | #define cpu_relax() asm volatile("" ::: "memory") | ||
| 86 | #endif | ||
| 87 | |||
| 76 | #include <time.h> | 88 | #include <time.h> |
| 77 | #include <unistd.h> | 89 | #include <unistd.h> |
| 78 | #include <sys/types.h> | 90 | #include <sys/types.h> |
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 7ea983acfaea..f7af2fca965d 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
| @@ -97,7 +97,7 @@ void setup_python_scripting(void) | |||
| 97 | register_python_scripting(&python_scripting_unsupported_ops); | 97 | register_python_scripting(&python_scripting_unsupported_ops); |
| 98 | } | 98 | } |
| 99 | #else | 99 | #else |
| 100 | struct scripting_ops python_scripting_ops; | 100 | extern struct scripting_ops python_scripting_ops; |
| 101 | 101 | ||
| 102 | void setup_python_scripting(void) | 102 | void setup_python_scripting(void) |
| 103 | { | 103 | { |
| @@ -158,7 +158,7 @@ void setup_perl_scripting(void) | |||
| 158 | register_perl_scripting(&perl_scripting_unsupported_ops); | 158 | register_perl_scripting(&perl_scripting_unsupported_ops); |
| 159 | } | 159 | } |
| 160 | #else | 160 | #else |
| 161 | struct scripting_ops perl_scripting_ops; | 161 | extern struct scripting_ops perl_scripting_ops; |
| 162 | 162 | ||
| 163 | void setup_perl_scripting(void) | 163 | void setup_perl_scripting(void) |
| 164 | { | 164 | { |
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index dafdf6775d77..6866aa4c41e0 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c | |||
| @@ -773,7 +773,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
| 773 | 773 | ||
| 774 | switch (key) { | 774 | switch (key) { |
| 775 | case 'a': | 775 | case 'a': |
| 776 | if (browser->selection->map == NULL && | 776 | if (browser->selection->map == NULL || |
| 777 | browser->selection->map->dso->annotate_warned) | 777 | browser->selection->map->dso->annotate_warned) |
| 778 | continue; | 778 | continue; |
| 779 | goto do_annotate; | 779 | goto do_annotate; |
